| id (int64) | prompt (string) | docstring (string, nullable) |
|---|---|---|
17,001 | import json
from functools import wraps
from flask import abort, current_app, request
from flask_login import current_user
from controllers.console.workspace.error import AccountNotInitializedError
from services.feature_service import FeatureService
from services.operation_service import OperationService
class FeatureService:
@classmethod
def get_features(cls, tenant_id: str) -> FeatureModel:
features = FeatureModel()
cls._fulfill_params_from_env(features)
if current_app.config['BILLING_ENABLED']:
cls._fulfill_params_from_billing_api(features, tenant_id)
return features
@classmethod
def _fulfill_params_from_env(cls, features: FeatureModel):
features.can_replace_logo = current_app.config['CAN_REPLACE_LOGO']
@classmethod
def _fulfill_params_from_billing_api(cls, features: FeatureModel, tenant_id: str):
billing_info = BillingService.get_info(tenant_id)
features.billing.enabled = billing_info['enabled']
features.billing.subscription.plan = billing_info['subscription']['plan']
features.billing.subscription.interval = billing_info['subscription']['interval']
features.members.size = billing_info['members']['size']
features.members.limit = billing_info['members']['limit']
features.apps.size = billing_info['apps']['size']
features.apps.limit = billing_info['apps']['limit']
features.vector_space.size = billing_info['vector_space']['size']
features.vector_space.limit = billing_info['vector_space']['limit']
features.documents_upload_quota.size = billing_info['documents_upload_quota']['size']
features.documents_upload_quota.limit = billing_info['documents_upload_quota']['limit']
features.annotation_quota_limit.size = billing_info['annotation_quota_limit']['size']
features.annotation_quota_limit.limit = billing_info['annotation_quota_limit']['limit']
features.docs_processing = billing_info['docs_processing']
features.can_replace_logo = billing_info['can_replace_logo']
def cloud_edition_billing_resource_check(resource: str,
error_msg: str = "You have reached the limit of your subscription."):
def interceptor(view):
@wraps(view)
def decorated(*args, **kwargs):
features = FeatureService.get_features(current_user.current_tenant_id)
if features.billing.enabled:
members = features.members
apps = features.apps
vector_space = features.vector_space
documents_upload_quota = features.documents_upload_quota
annotation_quota_limit = features.annotation_quota_limit
if resource == 'members' and 0 < members.limit <= members.size:
abort(403, error_msg)
elif resource == 'apps' and 0 < apps.limit <= apps.size:
abort(403, error_msg)
elif resource == 'vector_space' and 0 < vector_space.limit <= vector_space.size:
abort(403, error_msg)
elif resource == 'documents' and 0 < documents_upload_quota.limit <= documents_upload_quota.size:
# The file upload API is used in multiple places, so we check whether this request comes from the datasets page
source = request.args.get('source')
if source == 'datasets':
abort(403, error_msg)
else:
return view(*args, **kwargs)
elif resource == 'workspace_custom' and not features.can_replace_logo:
abort(403, error_msg)
elif resource == 'annotation' and 0 < annotation_quota_limit.limit <= annotation_quota_limit.size:
abort(403, error_msg)
else:
return view(*args, **kwargs)
return view(*args, **kwargs)
return decorated
return interceptor | null |
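The decorator above gates every resource on the same comparison, `0 < limit <= size`, where a limit of zero means "unlimited". A minimal, self-contained sketch of that check; `FakeQuota` and `is_quota_exhausted` are illustrative stand-ins, not part of the codebase:

```python
from dataclasses import dataclass

@dataclass
class FakeQuota:
    size: int   # current usage
    limit: int  # 0 means "unlimited" on this plan

def is_quota_exhausted(quota: FakeQuota) -> bool:
    # Mirrors the decorator's `0 < quota.limit <= quota.size` checks:
    # a positive limit that usage has reached (or passed) blocks the request.
    return 0 < quota.limit <= quota.size

assert is_quota_exhausted(FakeQuota(size=3, limit=3))       # at the limit -> 403
assert not is_quota_exhausted(FakeQuota(size=3, limit=0))   # 0 = unlimited -> allowed
assert not is_quota_exhausted(FakeQuota(size=2, limit=3))   # under the limit -> allowed
```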
17,002 | import json
import os
from functools import wraps
import requests
from flask import abort, current_app, request
from flask_login import current_user
from controllers.console.workspace.error import AccountNotInitializedError
from services.feature_service import FeatureService
from services.operation_service import OperationService
class FeatureService:
@classmethod
def get_features(cls, tenant_id: str) -> FeatureModel:
features = FeatureModel()
cls._fulfill_params_from_env(features)
if current_app.config['BILLING_ENABLED']:
cls._fulfill_params_from_billing_api(features, tenant_id)
return features
@classmethod
def _fulfill_params_from_env(cls, features: FeatureModel):
features.can_replace_logo = current_app.config['CAN_REPLACE_LOGO']
@classmethod
def _fulfill_params_from_billing_api(cls, features: FeatureModel, tenant_id: str):
billing_info = BillingService.get_info(tenant_id)
features.billing.enabled = billing_info['enabled']
features.billing.subscription.plan = billing_info['subscription']['plan']
features.billing.subscription.interval = billing_info['subscription']['interval']
features.members.size = billing_info['members']['size']
features.members.limit = billing_info['members']['limit']
features.apps.size = billing_info['apps']['size']
features.apps.limit = billing_info['apps']['limit']
features.vector_space.size = billing_info['vector_space']['size']
features.vector_space.limit = billing_info['vector_space']['limit']
features.documents_upload_quota.size = billing_info['documents_upload_quota']['size']
features.documents_upload_quota.limit = billing_info['documents_upload_quota']['limit']
features.annotation_quota_limit.size = billing_info['annotation_quota_limit']['size']
features.annotation_quota_limit.limit = billing_info['annotation_quota_limit']['limit']
features.docs_processing = billing_info['docs_processing']
features.can_replace_logo = billing_info['can_replace_logo']
class OperationService:
base_url = os.environ.get('BILLING_API_URL', 'BILLING_API_URL')
secret_key = os.environ.get('BILLING_API_SECRET_KEY', 'BILLING_API_SECRET_KEY')
@classmethod
def _send_request(cls, method, endpoint, json=None, params=None):
headers = {
"Content-Type": "application/json",
"Billing-Api-Secret-Key": cls.secret_key
}
url = f"{cls.base_url}{endpoint}"
response = requests.request(method, url, json=json, params=params, headers=headers)
return response.json()
@classmethod
def record_utm(cls, tenant_id: str, utm_info: dict):
params = {
'tenant_id': tenant_id,
'utm_source': utm_info.get('utm_source', ''),
'utm_medium': utm_info.get('utm_medium', ''),
'utm_campaign': utm_info.get('utm_campaign', ''),
'utm_content': utm_info.get('utm_content', ''),
'utm_term': utm_info.get('utm_term', '')
}
return cls._send_request('POST', '/tenant_utms', params=params)
def cloud_utm_record(view):
@wraps(view)
def decorated(*args, **kwargs):
try:
features = FeatureService.get_features(current_user.current_tenant_id)
if features.billing.enabled:
utm_info = request.cookies.get('utm_info')
if utm_info:
utm_info = json.loads(utm_info)
OperationService.record_utm(current_user.current_tenant_id, utm_info)
except Exception:
# best-effort: UTM recording must never break the decorated view
pass
return view(*args, **kwargs)
return decorated | null |
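`cloud_utm_record` reads the `utm_info` cookie as JSON and deliberately swallows any failure so telemetry can never break the view. A sketch of that defensive parsing step, using a hypothetical `parse_utm_cookie` helper:

```python
import json
from typing import Optional

def parse_utm_cookie(raw_cookie: Optional[str]) -> Optional[dict]:
    """Hypothetical helper: decode the utm_info cookie, or return None."""
    if not raw_cookie:
        return None
    try:
        info = json.loads(raw_cookie)
    except json.JSONDecodeError:
        return None  # malformed cookie: skip silently, as the decorator does
    return info if isinstance(info, dict) else None

print(parse_utm_cookie('{"utm_source": "twitter", "utm_medium": "social"}'))
print(parse_utm_cookie('not-json'))  # -> None
```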
17,003 | from datetime import datetime
import pytz
from flask_login import current_user
from flask_restful import Resource, marshal_with, reqparse
from flask_restful.inputs import int_range
from sqlalchemy import func, or_
from sqlalchemy.orm import joinedload
from werkzeug.exceptions import NotFound
from controllers.console import api
from controllers.console.app import _get_app
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
from fields.conversation_fields import (
conversation_detail_fields,
conversation_message_detail_fields,
conversation_pagination_fields,
conversation_with_summary_pagination_fields,
)
from libs.helper import datetime_string
from libs.login import login_required
from models.model import Conversation, Message, MessageAnnotation
def _get_app(app_id, mode=None):
app = db.session.query(App).filter(
App.id == app_id,
App.tenant_id == current_user.current_tenant_id,
App.status == 'normal'
).first()
if not app:
raise NotFound("App not found")
if mode and app.mode != mode:
raise NotFound("The {} app not found".format(mode))
return app
db = SQLAlchemy()
class Conversation(db.Model):
__tablename__ = 'conversations'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='conversation_pkey'),
db.Index('conversation_app_from_user_idx', 'app_id', 'from_source', 'from_end_user_id')
)
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
app_id = db.Column(UUID, nullable=False)
app_model_config_id = db.Column(UUID, nullable=False)
model_provider = db.Column(db.String(255), nullable=False)
override_model_configs = db.Column(db.Text)
model_id = db.Column(db.String(255), nullable=False)
mode = db.Column(db.String(255), nullable=False)
name = db.Column(db.String(255), nullable=False)
summary = db.Column(db.Text)
inputs = db.Column(db.JSON)
introduction = db.Column(db.Text)
system_instruction = db.Column(db.Text)
system_instruction_tokens = db.Column(db.Integer, nullable=False, server_default=db.text('0'))
status = db.Column(db.String(255), nullable=False)
from_source = db.Column(db.String(255), nullable=False)
from_end_user_id = db.Column(UUID)
from_account_id = db.Column(UUID)
read_at = db.Column(db.DateTime)
read_account_id = db.Column(UUID)
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
messages = db.relationship("Message", backref="conversation", lazy='select', passive_deletes="all")
message_annotations = db.relationship("MessageAnnotation", backref="conversation", lazy='select',
passive_deletes="all")
is_deleted = db.Column(db.Boolean, nullable=False, server_default=db.text('false'))
@property
def model_config(self):
model_config = {}
if self.override_model_configs:
override_model_configs = json.loads(self.override_model_configs)
if 'model' in override_model_configs:
app_model_config = AppModelConfig()
app_model_config = app_model_config.from_model_config_dict(override_model_configs)
model_config = app_model_config.to_dict()
else:
model_config['configs'] = override_model_configs
else:
app_model_config = db.session.query(AppModelConfig).filter(
AppModelConfig.id == self.app_model_config_id).first()
model_config = app_model_config.to_dict()
model_config['model_id'] = self.model_id
model_config['provider'] = self.model_provider
return model_config
@property
def summary_or_query(self):
if self.summary:
return self.summary
else:
first_message = self.first_message
if first_message:
return first_message.query
else:
return ''
@property
def annotated(self):
return db.session.query(MessageAnnotation).filter(MessageAnnotation.conversation_id == self.id).count() > 0
@property
def annotation(self):
return db.session.query(MessageAnnotation).filter(MessageAnnotation.conversation_id == self.id).first()
@property
def message_count(self):
return db.session.query(Message).filter(Message.conversation_id == self.id).count()
@property
def user_feedback_stats(self):
like = db.session.query(MessageFeedback) \
.filter(MessageFeedback.conversation_id == self.id,
MessageFeedback.from_source == 'user',
MessageFeedback.rating == 'like').count()
dislike = db.session.query(MessageFeedback) \
.filter(MessageFeedback.conversation_id == self.id,
MessageFeedback.from_source == 'user',
MessageFeedback.rating == 'dislike').count()
return {'like': like, 'dislike': dislike}
@property
def admin_feedback_stats(self):
like = db.session.query(MessageFeedback) \
.filter(MessageFeedback.conversation_id == self.id,
MessageFeedback.from_source == 'admin',
MessageFeedback.rating == 'like').count()
dislike = db.session.query(MessageFeedback) \
.filter(MessageFeedback.conversation_id == self.id,
MessageFeedback.from_source == 'admin',
MessageFeedback.rating == 'dislike').count()
return {'like': like, 'dislike': dislike}
@property
def first_message(self):
return db.session.query(Message).filter(Message.conversation_id == self.id).first()
@property
def app(self):
return db.session.query(App).filter(App.id == self.app_id).first()
@property
def from_end_user_session_id(self):
if self.from_end_user_id:
end_user = db.session.query(EndUser).filter(EndUser.id == self.from_end_user_id).first()
if end_user:
return end_user.session_id
return None
@property
def in_debug_mode(self):
return self.override_model_configs is not None
def _get_conversation(app_id, conversation_id, mode):
# get app info
app = _get_app(app_id, mode)
conversation = db.session.query(Conversation) \
.filter(Conversation.id == conversation_id, Conversation.app_id == app.id).first()
if not conversation:
raise NotFound("Conversation Not Exists.")
if not conversation.read_at:
conversation.read_at = datetime.utcnow()
conversation.read_account_id = current_user.id
db.session.commit()
return conversation | null |
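`_get_conversation` implements a read-receipt: the first time an admin fetches a conversation, `read_at` and `read_account_id` are stamped and committed. A runnable sketch of the same pattern against an in-memory SQLite database (SQLAlchemy 1.4+ assumed; `MiniConversation` is a cut-down stand-in, not the real schema):

```python
from datetime import datetime
from sqlalchemy import Column, DateTime, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class MiniConversation(Base):
    __tablename__ = 'conversations'
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    read_at = Column(DateTime)        # NULL until someone first opens it
    read_account_id = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(MiniConversation(id=1, name='demo'))
    session.commit()

    conv = session.get(MiniConversation, 1)
    if not conv.read_at:              # stamp only the first read
        conv.read_at = datetime.utcnow()
        conv.read_account_id = 'account-123'
        session.commit()
    print(conv.read_at, conv.read_account_id)
```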
17,004 | import json
import logging
from datetime import datetime
from flask_login import current_user
from flask_restful import Resource, abort, inputs, marshal_with, reqparse
from werkzeug.exceptions import Forbidden
from constants.languages import demo_model_templates, languages
from constants.model_template import model_templates
from controllers.console import api
from controllers.console.app.error import AppNotFoundError, ProviderNotInitializeError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check
from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
from core.provider_manager import ProviderManager
from events.app_event import app_was_created, app_was_deleted
from extensions.ext_database import db
from fields.app_fields import (
app_detail_fields,
app_detail_fields_with_site,
app_pagination_fields,
template_list_fields,
)
from libs.login import login_required
from models.model import App, AppModelConfig, Site
from services.app_model_config_service import AppModelConfigService
from core.tools.utils.configuration import ToolParameterConfigurationManager
from core.tools.tool_manager import ToolManager
from core.entities.application_entities import AgentToolEntity
class AppNotFoundError(BaseHTTPException):
error_code = 'app_not_found'
description = "App not found."
code = 404
db = SQLAlchemy()
class App(db.Model):
__tablename__ = 'apps'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='app_pkey'),
db.Index('app_tenant_id_idx', 'tenant_id')
)
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
name = db.Column(db.String(255), nullable=False)
mode = db.Column(db.String(255), nullable=False)
icon = db.Column(db.String(255))
icon_background = db.Column(db.String(255))
app_model_config_id = db.Column(UUID, nullable=True)
status = db.Column(db.String(255), nullable=False, server_default=db.text("'normal'::character varying"))
enable_site = db.Column(db.Boolean, nullable=False)
enable_api = db.Column(db.Boolean, nullable=False)
api_rpm = db.Column(db.Integer, nullable=False)
api_rph = db.Column(db.Integer, nullable=False)
is_demo = db.Column(db.Boolean, nullable=False, server_default=db.text('false'))
is_public = db.Column(db.Boolean, nullable=False, server_default=db.text('false'))
is_universal = db.Column(db.Boolean, nullable=False, server_default=db.text('false'))
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
@property
def site(self):
site = db.session.query(Site).filter(Site.app_id == self.id).first()
return site
@property
def app_model_config(self):
app_model_config = db.session.query(AppModelConfig).filter(
AppModelConfig.id == self.app_model_config_id).first()
return app_model_config
@property
def api_base_url(self):
return (current_app.config['SERVICE_API_URL'] if current_app.config['SERVICE_API_URL']
else request.host_url.rstrip('/')) + '/v1'
@property
def tenant(self):
tenant = db.session.query(Tenant).filter(Tenant.id == self.tenant_id).first()
return tenant
@property
def is_agent(self) -> bool:
app_model_config = self.app_model_config
if not app_model_config:
return False
if not app_model_config.agent_mode:
return False
if self.app_model_config.agent_mode_dict.get('enabled', False) \
and self.app_model_config.agent_mode_dict.get('strategy', '') in ['function_call', 'react']:
return True
return False
@property
def deleted_tools(self) -> list:
# get agent mode tools
app_model_config = self.app_model_config
if not app_model_config:
return []
if not app_model_config.agent_mode:
return []
agent_mode = app_model_config.agent_mode_dict
tools = agent_mode.get('tools', [])
provider_ids = []
for tool in tools:
keys = list(tool.keys())
if len(keys) >= 4:
provider_type = tool.get('provider_type', '')
provider_id = tool.get('provider_id', '')
if provider_type == 'api':
# check if provider id is a uuid string, if not, skip
try:
uuid.UUID(provider_id)
except Exception:
continue
provider_ids.append(provider_id)
if not provider_ids:
return []
api_providers = db.session.execute(
text('SELECT id FROM tool_api_providers WHERE id IN :provider_ids'),
{'provider_ids': tuple(provider_ids)}
).fetchall()
deleted_tools = []
current_api_provider_ids = [str(api_provider.id) for api_provider in api_providers]
for tool in tools:
keys = list(tool.keys())
if len(keys) >= 4:
provider_type = tool.get('provider_type', '')
provider_id = tool.get('provider_id', '')
if provider_type == 'api' and provider_id not in current_api_provider_ids:
deleted_tools.append(tool['tool_name'])
return deleted_tools
def _get_app(app_id, tenant_id):
app = db.session.query(App).filter(App.id == app_id, App.tenant_id == tenant_id).first()
if not app:
raise AppNotFoundError
return app | null |
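`deleted_tools` only treats an `api`-type tool as a candidate when its `provider_id` parses as a UUID; anything else is skipped before the database lookup. A sketch of that filtering step with hypothetical sample data:

```python
import uuid

def valid_api_provider_ids(tools: list[dict]) -> list[str]:
    ids = []
    for tool in tools:
        if tool.get('provider_type') != 'api':
            continue                      # only API tools live in tool_api_providers
        provider_id = tool.get('provider_id', '')
        try:
            uuid.UUID(provider_id)        # non-UUID ids are skipped, as above
        except ValueError:
            continue
        ids.append(provider_id)
    return ids

tools = [
    {'provider_type': 'api', 'provider_id': str(uuid.uuid4()), 'tool_name': 'weather'},
    {'provider_type': 'api', 'provider_id': 'not-a-uuid', 'tool_name': 'stale'},
    {'provider_type': 'builtin', 'provider_id': 'google', 'tool_name': 'search'},
]
print(valid_api_provider_ids(tools))      # only the first entry survives
```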
17,005 | from flask_login import current_user
from flask_restful import Resource, marshal_with, reqparse
from werkzeug.exceptions import Forbidden, NotFound
from constants.languages import supported_language
from controllers.console import api
from controllers.console.app import _get_app
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
from fields.app_fields import app_site_fields
from libs.login import login_required
from models.model import Site
def supported_language(lang):
    # validate against the language allow-list from constants.languages
    if lang in languages:
        return lang
    raise ValueError('{} is not a supported language.'.format(lang))
def parse_app_site_args():
parser = reqparse.RequestParser()
parser.add_argument('title', type=str, required=False, location='json')
parser.add_argument('icon', type=str, required=False, location='json')
parser.add_argument('icon_background', type=str, required=False, location='json')
parser.add_argument('description', type=str, required=False, location='json')
parser.add_argument('default_language', type=supported_language, required=False, location='json')
parser.add_argument('customize_domain', type=str, required=False, location='json')
parser.add_argument('copyright', type=str, required=False, location='json')
parser.add_argument('privacy_policy', type=str, required=False, location='json')
parser.add_argument('customize_token_strategy', type=str, choices=['must', 'allow', 'not_allow'],
required=False,
location='json')
parser.add_argument('prompt_public', type=bool, required=False, location='json')
return parser.parse_args() | null |
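`parse_app_site_args` leans on reqparse's `type=` hook: any callable that returns the value or raises `ValueError` acts as a field validator, which is how `supported_language` is wired in above. A runnable sketch with a hypothetical `hex_color` validator (assumes `flask_restful` is installed):

```python
from flask import Flask
from flask_restful import reqparse

def hex_color(value):
    # Any one-argument callable works as a reqparse validator.
    if not (isinstance(value, str) and value.startswith('#') and len(value) == 7):
        raise ValueError(f'{value!r} is not a #RRGGBB color')
    return value

app = Flask(__name__)
with app.test_request_context(json={'icon_background': '#FFFFFF'}):
    parser = reqparse.RequestParser()
    parser.add_argument('icon_background', type=hex_color, location='json')
    print(parser.parse_args())   # {'icon_background': '#FFFFFF'}
```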
17,006 | import json
import logging
from collections.abc import Generator
from typing import Union
from flask import Response, stream_with_context
from flask_login import current_user
from flask_restful import Resource, fields, marshal_with, reqparse
from flask_restful.inputs import int_range
from werkzeug.exceptions import Forbidden, InternalServerError, NotFound
from controllers.console import api
from controllers.console.app import _get_app
from controllers.console.app.error import (
AppMoreLikeThisDisabledError,
CompletionRequestError,
ProviderModelCurrentlyNotSupportError,
ProviderNotInitializeError,
ProviderQuotaExceededError,
)
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check
from core.entities.application_entities import InvokeFrom
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
from extensions.ext_database import db
from fields.conversation_fields import annotation_fields, message_detail_fields
from libs.helper import uuid_value
from libs.infinite_scroll_pagination import InfiniteScrollPagination
from libs.login import login_required
from models.model import Conversation, Message, MessageAnnotation, MessageFeedback
from services.annotation_service import AppAnnotationService
from services.completion_service import CompletionService
from services.errors.app import MoreLikeThisDisabledError
from services.errors.conversation import ConversationNotExistsError
from services.errors.message import MessageNotExistsError
from services.message_service import MessageService
def compact_response(response: Union[dict, Generator]) -> Response:
if isinstance(response, dict):
return Response(response=json.dumps(response), status=200, mimetype='application/json')
else:
def generate() -> Generator:
yield from response
return Response(stream_with_context(generate()), status=200,
mimetype='text/event-stream') | null |
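`compact_response` switches on the response type: a dict becomes a one-shot JSON body, while a generator is wrapped in a `text/event-stream` response so tokens reach the client as they are produced. A runnable sketch of the streaming branch with a hypothetical token generator:

```python
import json
from collections.abc import Generator
from flask import Flask, Response, stream_with_context

app = Flask(__name__)

def fake_llm_tokens() -> Generator[str, None, None]:
    # A stand-in for the real completion generator; each chunk is SSE-framed.
    for token in ['Hel', 'lo', '!']:
        yield f"data: {json.dumps({'answer': token})}\n\n"

@app.route('/stream')
def stream():
    return Response(stream_with_context(fake_llm_tokens()),
                    status=200, mimetype='text/event-stream')

with app.test_client() as client:
    print(client.get('/stream').get_data(as_text=True))
```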
17,007 | import json
import logging
from collections.abc import Generator
from typing import Union
import flask_login
from flask import Response, stream_with_context
from flask_restful import Resource, reqparse
from werkzeug.exceptions import InternalServerError, NotFound
import services
from controllers.console import api
from controllers.console.app import _get_app
from controllers.console.app.error import (
AppUnavailableError,
CompletionRequestError,
ConversationCompletedError,
ProviderModelCurrentlyNotSupportError,
ProviderNotInitializeError,
ProviderQuotaExceededError,
)
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.application_queue_manager import ApplicationQueueManager
from core.entities.application_entities import InvokeFrom
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError
from libs.helper import uuid_value
from libs.login import login_required
from services.completion_service import CompletionService
def compact_response(response: Union[dict, Generator]) -> Response:
if isinstance(response, dict):
return Response(response=json.dumps(response), status=200, mimetype='application/json')
else:
def generate() -> Generator:
yield from response
return Response(stream_with_context(generate()), status=200,
mimetype='text/event-stream') | null |
17,008 | import flask_restful
from flask_login import current_user
from flask_restful import Resource, fields, marshal_with
from werkzeug.exceptions import Forbidden
from extensions.ext_database import db
from libs.helper import TimestampField
from libs.login import login_required
from models.dataset import Dataset
from models.model import ApiToken, App
from . import api
from .setup import setup_required
from .wraps import account_initialization_required
def _get_resource(resource_id, tenant_id, resource_model):
resource = resource_model.query.filter_by(
id=resource_id, tenant_id=tenant_id
).first()
if resource is None:
flask_restful.abort(
404, message=f"{resource_model.__name__} not found.")
return resource | null |
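`_get_resource` is a generic fetch-or-404 helper, and filtering by `tenant_id` as well as `id` is what keeps one workspace from reading another's resources. A database-free sketch of the same pattern, with a dict standing in for `resource_model.query`:

```python
from werkzeug.exceptions import NotFound

# A dict standing in for `resource_model.query`, keyed by (id, tenant_id).
FAKE_TABLE = {('res-1', 'tenant-a'): {'name': 'my dataset'}}

def get_resource(resource_id: str, tenant_id: str) -> dict:
    resource = FAKE_TABLE.get((resource_id, tenant_id))
    if resource is None:
        raise NotFound(f'Resource {resource_id} not found.')  # -> HTTP 404
    return resource

print(get_resource('res-1', 'tenant-a'))
try:
    get_resource('res-1', 'tenant-b')   # wrong tenant: scoping prevents leaks
except NotFound as exc:
    print(exc)
```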
17,009 | import flask_restful
from flask import current_app, request
from flask_login import current_user
from flask_restful import Resource, marshal, marshal_with, reqparse
from werkzeug.exceptions import Forbidden, NotFound
import services
from controllers.console import api
from controllers.console.apikey import api_key_fields, api_key_list
from controllers.console.app.error import ProviderNotInitializeError
from controllers.console.datasets.error import DatasetNameDuplicateError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.indexing_runner import IndexingRunner
from core.model_runtime.entities.model_entities import ModelType
from core.provider_manager import ProviderManager
from core.rag.extractor.entity.extract_setting import ExtractSetting
from extensions.ext_database import db
from fields.app_fields import related_app_list
from fields.dataset_fields import dataset_detail_fields, dataset_query_detail_fields
from fields.document_fields import document_status_fields
from libs.login import login_required
from models.dataset import Dataset, Document, DocumentSegment
from models.model import ApiToken, UploadFile
from services.dataset_service import DatasetService, DocumentService
def _validate_name(name):
if not name or len(name) < 1 or len(name) > 40:
raise ValueError('Name must be between 1 and 40 characters.')
return name | null |
17,010 | import flask_restful
from flask import current_app, request
from flask_login import current_user
from flask_restful import Resource, marshal, marshal_with, reqparse
from werkzeug.exceptions import Forbidden, NotFound
import services
from controllers.console import api
from controllers.console.apikey import api_key_fields, api_key_list
from controllers.console.app.error import ProviderNotInitializeError
from controllers.console.datasets.error import DatasetNameDuplicateError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.indexing_runner import IndexingRunner
from core.model_runtime.entities.model_entities import ModelType
from core.provider_manager import ProviderManager
from core.rag.extractor.entity.extract_setting import ExtractSetting
from extensions.ext_database import db
from fields.app_fields import related_app_list
from fields.dataset_fields import dataset_detail_fields, dataset_query_detail_fields
from fields.document_fields import document_status_fields
from libs.login import login_required
from models.dataset import Dataset, Document, DocumentSegment
from models.model import ApiToken, UploadFile
from services.dataset_service import DatasetService, DocumentService
def _validate_description_length(description):
if len(description) > 400:
raise ValueError('Description cannot exceed 400 characters.')
return description | null |
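The two validators above enforce simple length bounds. A self-contained boundary check, copying the definitions so the block runs on its own:

```python
def _validate_name(name):
    if not name or len(name) < 1 or len(name) > 40:
        raise ValueError('Name must be between 1 and 40 characters.')
    return name

def _validate_description_length(description):
    if len(description) > 400:
        raise ValueError('Description cannot exceed 400 characters.')
    return description

assert _validate_name('a' * 40) == 'a' * 40        # upper bound is inclusive
assert _validate_description_length('') == ''      # empty description is allowed
for bad in ('', 'a' * 41):
    try:
        _validate_name(bad)
    except ValueError as exc:
        print(exc)                                 # both out-of-range inputs rejected
```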
17,011 | import json
import logging
from core.generator.llm_generator import LLMGenerator
from events.message_event import message_was_created
from extensions.ext_database import db
class LLMGenerator:
@classmethod
def generate_conversation_name(cls, tenant_id: str, query):
prompt = CONVERSATION_TITLE_PROMPT
if len(query) > 2000:
query = query[:300] + "...[TRUNCATED]..." + query[-300:]
query = query.replace("\n", " ")
prompt += query + "\n"
model_manager = ModelManager()
model_instance = model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.LLM,
)
prompts = [UserPromptMessage(content=prompt)]
response = model_instance.invoke_llm(
prompt_messages=prompts,
model_parameters={
"max_tokens": 100,
"temperature": 1
},
stream=False
)
answer = response.message.content
result_dict = json.loads(answer)
answer = result_dict['Your Output']
name = answer.strip()
if len(name) > 75:
name = name[:75] + '...'
return name
@classmethod
def generate_suggested_questions_after_answer(cls, tenant_id: str, histories: str):
output_parser = SuggestedQuestionsAfterAnswerOutputParser()
format_instructions = output_parser.get_format_instructions()
prompt_template = PromptTemplateParser(
template="{{histories}}\n{{format_instructions}}\nquestions:\n"
)
prompt = prompt_template.format({
"histories": histories,
"format_instructions": format_instructions
})
try:
model_manager = ModelManager()
model_instance = model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.LLM,
)
except InvokeAuthorizationError:
return []
prompt_messages = [UserPromptMessage(content=prompt)]
try:
response = model_instance.invoke_llm(
prompt_messages=prompt_messages,
model_parameters={
"max_tokens": 256,
"temperature": 0
},
stream=False
)
questions = output_parser.parse(response.message.content)
except InvokeError:
questions = []
except Exception as e:
logging.exception(e)
questions = []
return questions
@classmethod
def generate_rule_config(cls, tenant_id: str, audiences: str, hoping_to_solve: str) -> dict:
output_parser = RuleConfigGeneratorOutputParser()
prompt_template = PromptTemplateParser(
template=output_parser.get_format_instructions()
)
prompt = prompt_template.format(
inputs={
"audiences": audiences,
"hoping_to_solve": hoping_to_solve,
"variable": "{{variable}}",
"lanA": "{{lanA}}",
"lanB": "{{lanB}}",
"topic": "{{topic}}"
},
remove_template_variables=False
)
model_manager = ModelManager()
model_instance = model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.LLM,
)
prompt_messages = [UserPromptMessage(content=prompt)]
try:
response = model_instance.invoke_llm(
prompt_messages=prompt_messages,
model_parameters={
"max_tokens": 512,
"temperature": 0
},
stream=False
)
rule_config = output_parser.parse(response.message.content)
except InvokeError as e:
raise e
except OutputParserException:
raise ValueError('Please provide a valid intended audience and the problem you hope to solve.')
except Exception as e:
logging.exception(e)
rule_config = {
"prompt": "",
"variables": [],
"opening_statement": ""
}
return rule_config
@classmethod
def generate_qa_document(cls, tenant_id: str, query, document_language: str):
prompt = GENERATOR_QA_PROMPT.format(language=document_language)
model_manager = ModelManager()
model_instance = model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.LLM,
)
prompt_messages = [
SystemPromptMessage(content=prompt),
UserPromptMessage(content=query)
]
response = model_instance.invoke_llm(
prompt_messages=prompt_messages,
model_parameters={
"max_tokens": 2000
},
stream=False
)
answer = response.message.content
return answer.strip()
db = SQLAlchemy()
def handle(sender, **kwargs):
message = sender
conversation = kwargs.get('conversation')
is_first_message = kwargs.get('is_first_message')
extras = kwargs.get('extras', {})
auto_generate_conversation_name = True
if extras:
auto_generate_conversation_name = extras.get('auto_generate_conversation_name', True)
if auto_generate_conversation_name and is_first_message:
if conversation.mode == 'chat':
app_model = conversation.app
if not app_model:
return
# generate conversation name
try:
name = LLMGenerator.generate_conversation_name(app_model.tenant_id, message.query)
conversation.name = name
except Exception:
# conversation naming is best-effort; never fail message handling over it
pass
db.session.commit() | null |
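`generate_conversation_name` bounds the prompt by keeping only the head and tail of very long queries and flattening newlines. A runnable sketch of that truncation rule:

```python
def truncate_query(query: str, max_len: int = 2000, keep: int = 300) -> str:
    # Same rule as generate_conversation_name: keep the first and last
    # `keep` characters of overlong queries, then flatten newlines.
    if len(query) > max_len:
        query = query[:keep] + '...[TRUNCATED]...' + query[-keep:]
    return query.replace('\n', ' ')

long_query = 'x' * 5000
print(len(truncate_query(long_query)))   # 300 + 17 + 300 = 617
```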
17,012 | from events.app_event import app_was_deleted
from extensions.ext_database import db
from models.model import InstalledApp
db = SQLAlchemy()
class InstalledApp(db.Model):
__tablename__ = 'installed_apps'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='installed_app_pkey'),
db.Index('installed_app_tenant_id_idx', 'tenant_id'),
db.Index('installed_app_app_id_idx', 'app_id'),
db.UniqueConstraint('tenant_id', 'app_id', name='unique_tenant_app')
)
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
app_id = db.Column(UUID, nullable=False)
app_owner_tenant_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False, default=0)
is_pinned = db.Column(db.Boolean, nullable=False, server_default=db.text('false'))
last_used_at = db.Column(db.DateTime, nullable=True)
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
@property
def app(self):
app = db.session.query(App).filter(App.id == self.app_id).first()
return app
@property
def tenant(self):
tenant = db.session.query(Tenant).filter(Tenant.id == self.tenant_id).first()
return tenant
@property
def is_agent(self) -> bool:
app = self.app
if not app:
return False
return app.is_agent
def handle(sender, **kwargs):
app = sender
installed_apps = db.session.query(InstalledApp).filter(InstalledApp.app_id == app.id).all()
for installed_app in installed_apps:
db.session.delete(installed_app)
db.session.commit() | null |
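The `handle` function above runs when the `app_was_deleted` signal fires and cascades the delete to `installed_apps` rows. A runnable sketch of the same publish/subscribe wiring with blinker (the signal library used by Flask); the signal name and payload here are illustrative, not the real `events.app_event` definitions:

```python
from blinker import signal

app_was_deleted = signal('app-was-deleted')   # illustrative stand-in

@app_was_deleted.connect
def cleanup_installed_apps(sender, **kwargs):
    # In the real handler, this is where the InstalledApp rows get deleted.
    print(f'cleaning up installed copies of app {sender!r}')

app_was_deleted.send('app-123')   # fires every connected receiver synchronously
```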
17,013 | from enum import Enum
from typing import Any, Optional
from pydantic import BaseModel
from core.entities.application_entities import ApplicationGenerateEntity
from core.entities.provider_entities import QuotaUnit
from events.message_event import message_was_created
from extensions.ext_database import db
from models.provider import Provider, ProviderType
class ApplicationGenerateEntity(BaseModel):
"""
Application Generate Entity.
"""
task_id: str
tenant_id: str
app_id: str
app_model_config_id: str
# for save
app_model_config_dict: dict
app_model_config_override: bool
# Converted from app_model_config to an entity object, or overridden directly by external input
app_orchestration_config_entity: AppOrchestrationConfigEntity
conversation_id: Optional[str] = None
inputs: dict[str, str]
query: Optional[str] = None
files: list[FileObj] = []
user_id: str
# extras
stream: bool
invoke_from: InvokeFrom
# extra parameters, like: auto_generate_conversation_name
extras: dict[str, Any] = {}
class QuotaUnit(Enum):
TIMES = 'times'
TOKENS = 'tokens'
CREDITS = 'credits'
db = SQLAlchemy()
class ProviderType(Enum):
CUSTOM = 'custom'
SYSTEM = 'system'
@staticmethod
def value_of(value):
for member in ProviderType:
if member.value == value:
return member
raise ValueError(f"No matching enum found for value '{value}'")
class Provider(db.Model):
"""
Provider model representing the API providers and their configurations.
"""
__tablename__ = 'providers'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='provider_pkey'),
db.Index('provider_tenant_id_provider_idx', 'tenant_id', 'provider_name'),
db.UniqueConstraint('tenant_id', 'provider_name', 'provider_type', 'quota_type', name='unique_provider_name_type_quota')
)
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
provider_name = db.Column(db.String(40), nullable=False)
provider_type = db.Column(db.String(40), nullable=False, server_default=db.text("'custom'::character varying"))
encrypted_config = db.Column(db.Text, nullable=True)
is_valid = db.Column(db.Boolean, nullable=False, server_default=db.text('false'))
last_used = db.Column(db.DateTime, nullable=True)
quota_type = db.Column(db.String(40), nullable=True, server_default=db.text("''::character varying"))
quota_limit = db.Column(db.BigInteger, nullable=True)
quota_used = db.Column(db.BigInteger, default=0)
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
def __repr__(self):
return f"<Provider(id={self.id}, tenant_id={self.tenant_id}, provider_name='{self.provider_name}', provider_type='{self.provider_type}')>"
@property
def token_is_set(self):
"""
Returns True if the encrypted_config is not None, indicating that the token is set.
"""
return self.encrypted_config is not None
@property
def is_enabled(self):
"""
Returns True if the provider is enabled.
"""
if self.provider_type == ProviderType.SYSTEM.value:
return self.is_valid
else:
return self.is_valid and self.token_is_set
def handle(sender, **kwargs):
message = sender
application_generate_entity: ApplicationGenerateEntity = kwargs.get('application_generate_entity')
model_config = application_generate_entity.app_orchestration_config_entity.model_config
provider_model_bundle = model_config.provider_model_bundle
provider_configuration = provider_model_bundle.configuration
if provider_configuration.using_provider_type != ProviderType.SYSTEM:
return
system_configuration = provider_configuration.system_configuration
quota_unit = None
for quota_configuration in system_configuration.quota_configurations:
if quota_configuration.quota_type == system_configuration.current_quota_type:
quota_unit = quota_configuration.quota_unit
if quota_configuration.quota_limit == -1:
return
break
used_quota = None
if quota_unit:
if quota_unit == QuotaUnit.TOKENS:
used_quota = message.message_tokens + message.answer_tokens
elif quota_unit == QuotaUnit.CREDITS:
used_quota = 1
if 'gpt-4' in model_config.model:
used_quota = 20
else:
used_quota = 1
if used_quota is not None:
db.session.query(Provider).filter(
Provider.tenant_id == application_generate_entity.tenant_id,
Provider.provider_name == model_config.provider,
Provider.provider_type == ProviderType.SYSTEM.value,
Provider.quota_type == system_configuration.current_quota_type.value,
Provider.quota_limit > Provider.quota_used
).update({'quota_used': Provider.quota_used + used_quota})
db.session.commit() | null |
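The handler converts usage into quota units before the guarded `UPDATE`: token plans bill actual prompt plus completion tokens, credit plans bill a flat per-call rate with a higher charge for gpt-4 class models, and a `quota_limit` of -1 short-circuits as unlimited. A sketch of that per-unit dispatch:

```python
from enum import Enum

class QuotaUnit(Enum):
    TIMES = 'times'
    TOKENS = 'tokens'
    CREDITS = 'credits'

def used_quota_for(unit: QuotaUnit, model: str,
                   message_tokens: int, answer_tokens: int) -> int:
    if unit == QuotaUnit.TOKENS:
        return message_tokens + answer_tokens   # bill actual token usage
    if unit == QuotaUnit.CREDITS:
        return 20 if 'gpt-4' in model else 1    # flat per-call credit pricing
    return 1                                    # TIMES: one call, one unit

print(used_quota_for(QuotaUnit.TOKENS, 'gpt-3.5-turbo', 120, 80))  # 200
print(used_quota_for(QuotaUnit.CREDITS, 'gpt-4', 120, 80))         # 20
```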
17,014 | import datetime
import json
import logging
import re
import threading
import time
import uuid
from typing import Optional, cast
from flask import Flask, current_app
import click
from werkzeug.exceptions import NotFound
from core.indexing_runner import DocumentIsPausedException, IndexingRunner
from events.event_handlers.document_index_event import document_index_created
from extensions.ext_database import db
from models.dataset import Document
class IndexingRunner:
def __init__(self):
self.storage = storage
self.model_manager = ModelManager()
def run(self, dataset_documents: list[DatasetDocument]):
"""Run the indexing process."""
for dataset_document in dataset_documents:
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
# extract
text_docs = self._extract(index_processor, dataset_document, processing_rule.to_dict())
# transform
documents = self._transform(index_processor, dataset, text_docs, dataset_document.doc_language,
processing_rule.to_dict())
# save segment
self._load_segments(dataset, dataset_document, documents)
# load
self._load(
index_processor=index_processor,
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except ObjectDeletedError:
logging.warning('Document deleted, document id: {}'.format(dataset_document.id))
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def run_in_splitting_status(self, dataset_document: DatasetDocument):
"""Run the indexing process when the index_status is splitting."""
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get exist document_segment list and delete
document_segments = DocumentSegment.query.filter_by(
dataset_id=dataset.id,
document_id=dataset_document.id
).all()
for document_segment in document_segments:
db.session.delete(document_segment)
db.session.commit()
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
# extract
text_docs = self._extract(index_processor, dataset_document, processing_rule.to_dict())
# transform
documents = self._transform(index_processor, dataset, text_docs, dataset_document.doc_language,
processing_rule.to_dict())
# save segment
self._load_segments(dataset, dataset_document, documents)
# load
self._load(
index_processor=index_processor,
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def run_in_indexing_status(self, dataset_document: DatasetDocument):
"""Run the indexing process when the index_status is indexing."""
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get exist document_segment list and delete
document_segments = DocumentSegment.query.filter_by(
dataset_id=dataset.id,
document_id=dataset_document.id
).all()
documents = []
if document_segments:
for document_segment in document_segments:
# transform segment to node
if document_segment.status != "completed":
document = Document(
page_content=document_segment.content,
metadata={
"doc_id": document_segment.index_node_id,
"doc_hash": document_segment.index_node_hash,
"document_id": document_segment.document_id,
"dataset_id": document_segment.dataset_id,
}
)
documents.append(document)
# build index
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
self._load(
index_processor=index_processor,
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def indexing_estimate(self, tenant_id: str, extract_settings: list[ExtractSetting], tmp_processing_rule: dict,
doc_form: str = None, doc_language: str = 'English', dataset_id: str = None,
indexing_technique: str = 'economy') -> dict:
"""
Estimate the indexing cost (segments, tokens, price) for the given documents.
"""
# check document limit
features = FeatureService.get_features(tenant_id)
if features.billing.enabled:
count = len(extract_settings)
batch_upload_limit = int(current_app.config['BATCH_UPLOAD_LIMIT'])
if count > batch_upload_limit:
raise ValueError(f"You have reached the batch upload limit of {batch_upload_limit}.")
embedding_model_instance = None
if dataset_id:
dataset = Dataset.query.filter_by(
id=dataset_id
).first()
if not dataset:
raise ValueError('Dataset not found.')
if dataset.indexing_technique == 'high_quality' or indexing_technique == 'high_quality':
if dataset.embedding_model_provider:
embedding_model_instance = self.model_manager.get_model_instance(
tenant_id=tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
else:
embedding_model_instance = self.model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.TEXT_EMBEDDING,
)
else:
if indexing_technique == 'high_quality':
embedding_model_instance = self.model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.TEXT_EMBEDDING,
)
tokens = 0
preview_texts = []
total_segments = 0
total_price = 0
currency = 'USD'
index_type = doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
all_text_docs = []
for extract_setting in extract_settings:
# extract
text_docs = index_processor.extract(extract_setting, process_rule_mode=tmp_processing_rule["mode"])
all_text_docs.extend(text_docs)
processing_rule = DatasetProcessRule(
mode=tmp_processing_rule["mode"],
rules=json.dumps(tmp_processing_rule["rules"])
)
# get splitter
splitter = self._get_splitter(processing_rule, embedding_model_instance)
# split to documents
documents = self._split_to_documents_for_estimate(
text_docs=text_docs,
splitter=splitter,
processing_rule=processing_rule
)
total_segments += len(documents)
for document in documents:
if len(preview_texts) < 5:
preview_texts.append(document.page_content)
if indexing_technique == 'high_quality' or embedding_model_instance:
embedding_model_type_instance = embedding_model_instance.model_type_instance
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_type_instance)
tokens += embedding_model_type_instance.get_num_tokens(
model=embedding_model_instance.model,
credentials=embedding_model_instance.credentials,
texts=[self.filter_string(document.page_content)]
)
if doc_form and doc_form == 'qa_model':
model_instance = self.model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.LLM
)
model_type_instance = model_instance.model_type_instance
model_type_instance = cast(LargeLanguageModel, model_type_instance)
if len(preview_texts) > 0:
# qa model document
response = LLMGenerator.generate_qa_document(current_user.current_tenant_id, preview_texts[0],
doc_language)
document_qa_list = self.format_split_text(response)
price_info = model_type_instance.get_price(
model=model_instance.model,
credentials=model_instance.credentials,
price_type=PriceType.INPUT,
tokens=total_segments * 2000,
)
return {
"total_segments": total_segments * 20,
"tokens": total_segments * 2000,
"total_price": '{:f}'.format(price_info.total_amount),
"currency": price_info.currency,
"qa_preview": document_qa_list,
"preview": preview_texts
}
if embedding_model_instance:
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_instance.model_type_instance)
embedding_price_info = embedding_model_type_instance.get_price(
model=embedding_model_instance.model,
credentials=embedding_model_instance.credentials,
price_type=PriceType.INPUT,
tokens=tokens
)
total_price = '{:f}'.format(embedding_price_info.total_amount)
currency = embedding_price_info.currency
return {
"total_segments": total_segments,
"tokens": tokens,
"total_price": total_price,
"currency": currency,
"preview": preview_texts
}
def _extract(self, index_processor: BaseIndexProcessor, dataset_document: DatasetDocument, process_rule: dict) \
-> list[Document]:
# load file
if dataset_document.data_source_type not in ["upload_file", "notion_import"]:
return []
data_source_info = dataset_document.data_source_info_dict
text_docs = []
if dataset_document.data_source_type == 'upload_file':
if not data_source_info or 'upload_file_id' not in data_source_info:
raise ValueError("no upload file found")
file_detail = db.session.query(UploadFile). \
filter(UploadFile.id == data_source_info['upload_file_id']). \
one_or_none()
if file_detail:
extract_setting = ExtractSetting(
datasource_type="upload_file",
upload_file=file_detail,
document_model=dataset_document.doc_form
)
text_docs = index_processor.extract(extract_setting, process_rule_mode=process_rule['mode'])
elif dataset_document.data_source_type == 'notion_import':
if (not data_source_info or 'notion_workspace_id' not in data_source_info
or 'notion_page_id' not in data_source_info):
raise ValueError("no notion import info found")
extract_setting = ExtractSetting(
datasource_type="notion_import",
notion_info={
"notion_workspace_id": data_source_info['notion_workspace_id'],
"notion_obj_id": data_source_info['notion_page_id'],
"notion_page_type": data_source_info['type'],
"document": dataset_document,
"tenant_id": dataset_document.tenant_id
},
document_model=dataset_document.doc_form
)
text_docs = index_processor.extract(extract_setting, process_rule_mode=process_rule['mode'])
# update document status to splitting
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="splitting",
extra_update_params={
DatasetDocument.word_count: sum([len(text_doc.page_content) for text_doc in text_docs]),
DatasetDocument.parsing_completed_at: datetime.datetime.utcnow()
}
)
# replace doc id to document model id
text_docs = cast(list[Document], text_docs)
for text_doc in text_docs:
text_doc.metadata['document_id'] = dataset_document.id
text_doc.metadata['dataset_id'] = dataset_document.dataset_id
return text_docs
def filter_string(self, text):
text = re.sub(r'<\|', '<', text)
text = re.sub(r'\|>', '>', text)
text = re.sub(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F\xEF\xBF\xBE]', '', text)
# Unicode U+FFFE
text = re.sub('\uFFFE', '', text)
return text
def _get_splitter(self, processing_rule: DatasetProcessRule,
embedding_model_instance: Optional[ModelInstance]) -> TextSplitter:
"""
Get the text splitter according to the processing rule.
"""
if processing_rule.mode == "custom":
# The user-defined segmentation rule
rules = json.loads(processing_rule.rules)
segmentation = rules["segmentation"]
if segmentation["max_tokens"] < 50 or segmentation["max_tokens"] > 1000:
raise ValueError("Custom segment length should be between 50 and 1000.")
separator = segmentation["separator"]
if separator:
separator = separator.replace('\\n', '\n')
if 'chunk_overlap' in segmentation and segmentation['chunk_overlap']:
chunk_overlap = segmentation['chunk_overlap']
else:
chunk_overlap = 0
character_splitter = FixedRecursiveCharacterTextSplitter.from_encoder(
chunk_size=segmentation["max_tokens"],
chunk_overlap=chunk_overlap,
fixed_separator=separator,
separators=["\n\n", "。", ".", " ", ""],
embedding_model_instance=embedding_model_instance
)
else:
# Automatic segmentation
character_splitter = EnhanceRecursiveCharacterTextSplitter.from_encoder(
chunk_size=DatasetProcessRule.AUTOMATIC_RULES['segmentation']['max_tokens'],
chunk_overlap=DatasetProcessRule.AUTOMATIC_RULES['segmentation']['chunk_overlap'],
separators=["\n\n", "。", ".", " ", ""],
embedding_model_instance=embedding_model_instance
)
return character_splitter
def _step_split(self, text_docs: list[Document], splitter: TextSplitter,
dataset: Dataset, dataset_document: DatasetDocument, processing_rule: DatasetProcessRule) \
-> list[Document]:
"""
Split the text documents and save the chunks to the document segment store.
"""
documents = self._split_to_documents(
text_docs=text_docs,
splitter=splitter,
processing_rule=processing_rule,
tenant_id=dataset.tenant_id,
document_form=dataset_document.doc_form,
document_language=dataset_document.doc_language
)
# save node to document segment
doc_store = DatasetDocumentStore(
dataset=dataset,
user_id=dataset_document.created_by,
document_id=dataset_document.id
)
# add document segments
doc_store.add_documents(documents)
# update document status to indexing
cur_time = datetime.datetime.utcnow()
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="indexing",
extra_update_params={
DatasetDocument.cleaning_completed_at: cur_time,
DatasetDocument.splitting_completed_at: cur_time,
}
)
# update segment status to indexing
self._update_segments_by_document(
dataset_document_id=dataset_document.id,
update_params={
DocumentSegment.status: "indexing",
DocumentSegment.indexing_at: datetime.datetime.utcnow()
}
)
return documents
def _split_to_documents(self, text_docs: list[Document], splitter: TextSplitter,
processing_rule: DatasetProcessRule, tenant_id: str,
document_form: str, document_language: str) -> list[Document]:
"""
Split the text documents into nodes.
"""
all_documents = []
all_qa_documents = []
for text_doc in text_docs:
# document clean
document_text = self._document_clean(text_doc.page_content, processing_rule)
text_doc.page_content = document_text
# parse document to nodes
documents = splitter.split_documents([text_doc])
split_documents = []
for document_node in documents:
if document_node.page_content.strip():
doc_id = str(uuid.uuid4())
hash = helper.generate_text_hash(document_node.page_content)
document_node.metadata['doc_id'] = doc_id
document_node.metadata['doc_hash'] = hash
# strip a leading splitter character ('.' or '。') left over from splitting
page_content = document_node.page_content
if page_content.startswith(".") or page_content.startswith("。"):
page_content = page_content[1:]
document_node.page_content = page_content
if document_node.page_content:
split_documents.append(document_node)
all_documents.extend(split_documents)
# processing qa document
if document_form == 'qa_model':
for i in range(0, len(all_documents), 10):
threads = []
sub_documents = all_documents[i:i + 10]
for doc in sub_documents:
document_format_thread = threading.Thread(target=self.format_qa_document, kwargs={
'flask_app': current_app._get_current_object(),
'tenant_id': tenant_id, 'document_node': doc, 'all_qa_documents': all_qa_documents,
'document_language': document_language})
threads.append(document_format_thread)
document_format_thread.start()
for thread in threads:
thread.join()
return all_qa_documents
return all_documents
def format_qa_document(self, flask_app: Flask, tenant_id: str, document_node, all_qa_documents, document_language):
format_documents = []
if document_node.page_content is None or not document_node.page_content.strip():
return
with flask_app.app_context():
try:
# qa model document
response = LLMGenerator.generate_qa_document(tenant_id, document_node.page_content, document_language)
document_qa_list = self.format_split_text(response)
qa_documents = []
for result in document_qa_list:
qa_document = Document(page_content=result['question'], metadata=document_node.metadata.copy())
doc_id = str(uuid.uuid4())
hash = helper.generate_text_hash(result['question'])
qa_document.metadata['answer'] = result['answer']
qa_document.metadata['doc_id'] = doc_id
qa_document.metadata['doc_hash'] = hash
qa_documents.append(qa_document)
format_documents.extend(qa_documents)
except Exception as e:
logging.exception(e)
all_qa_documents.extend(format_documents)
def _split_to_documents_for_estimate(self, text_docs: list[Document], splitter: TextSplitter,
processing_rule: DatasetProcessRule) -> list[Document]:
"""
Split the text documents into nodes.
"""
all_documents = []
for text_doc in text_docs:
# document clean
document_text = self._document_clean(text_doc.page_content, processing_rule)
text_doc.page_content = document_text
# parse document to nodes
documents = splitter.split_documents([text_doc])
split_documents = []
for document in documents:
if document.page_content is None or not document.page_content.strip():
continue
doc_id = str(uuid.uuid4())
hash = helper.generate_text_hash(document.page_content)
document.metadata['doc_id'] = doc_id
document.metadata['doc_hash'] = hash
split_documents.append(document)
all_documents.extend(split_documents)
return all_documents
def _document_clean(self, text: str, processing_rule: DatasetProcessRule) -> str:
"""
Clean the document text according to the processing rules.
"""
if processing_rule.mode == "automatic":
rules = DatasetProcessRule.AUTOMATIC_RULES
else:
rules = json.loads(processing_rule.rules) if processing_rule.rules else {}
if 'pre_processing_rules' in rules:
pre_processing_rules = rules["pre_processing_rules"]
for pre_processing_rule in pre_processing_rules:
if pre_processing_rule["id"] == "remove_extra_spaces" and pre_processing_rule["enabled"] is True:
# Remove extra spaces
pattern = r'\n{3,}'
text = re.sub(pattern, '\n\n', text)
pattern = r'[\t\f\r\x20\u00a0\u1680\u180e\u2000-\u200a\u202f\u205f\u3000]{2,}'
text = re.sub(pattern, ' ', text)
elif pre_processing_rule["id"] == "remove_urls_emails" and pre_processing_rule["enabled"] is True:
# Remove email
pattern = r'([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)'
text = re.sub(pattern, '', text)
# Remove URL
pattern = r'https?://[^\s]+'
text = re.sub(pattern, '', text)
return text
def format_split_text(self, text):
regex = r"Q\d+:\s*(.*?)\s*A\d+:\s*([\s\S]*?)(?=Q\d+:|$)"
matches = re.findall(regex, text, re.UNICODE)
return [
{
"question": q,
"answer": re.sub(r"\n\s*", "\n", a.strip())
}
for q, a in matches if q and a
]
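`format_split_text` recovers question/answer pairs from the LLM's `Q1: ... A1: ...` output with a single regex. A runnable check of that exact pattern on a small sample:

```python
import re

regex = r"Q\d+:\s*(.*?)\s*A\d+:\s*([\s\S]*?)(?=Q\d+:|$)"
sample = "Q1: What is Dify?\nA1: An LLM app platform.\nQ2: Is it open source?\nA2: Yes."
pairs = re.findall(regex, sample, re.UNICODE)
qa = [{'question': q, 'answer': re.sub(r"\n\s*", "\n", a.strip())}
      for q, a in pairs if q and a]
print(qa)
# [{'question': 'What is Dify?', 'answer': 'An LLM app platform.'},
#  {'question': 'Is it open source?', 'answer': 'Yes.'}]
```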
def _load(self, index_processor: BaseIndexProcessor, dataset: Dataset,
dataset_document: DatasetDocument, documents: list[Document]) -> None:
"""
Insert the index and update the document/segment status to completed.
"""
embedding_model_instance = None
if dataset.indexing_technique == 'high_quality':
embedding_model_instance = self.model_manager.get_model_instance(
tenant_id=dataset.tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
# chunk nodes by chunk size
indexing_start_at = time.perf_counter()
tokens = 0
chunk_size = 100
embedding_model_type_instance = None
if embedding_model_instance:
embedding_model_type_instance = embedding_model_instance.model_type_instance
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_type_instance)
for i in range(0, len(documents), chunk_size):
# check document is paused
self._check_document_paused_status(dataset_document.id)
chunk_documents = documents[i:i + chunk_size]
if dataset.indexing_technique == 'high_quality' or embedding_model_type_instance:
tokens += sum(
embedding_model_type_instance.get_num_tokens(
embedding_model_instance.model,
embedding_model_instance.credentials,
[document.page_content]
)
for document in chunk_documents
)
# load index
index_processor.load(dataset, chunk_documents)
db.session.add(dataset)
document_ids = [document.metadata['doc_id'] for document in chunk_documents]
db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == dataset_document.id,
DocumentSegment.index_node_id.in_(document_ids),
DocumentSegment.status == "indexing"
).update({
DocumentSegment.status: "completed",
DocumentSegment.enabled: True,
DocumentSegment.completed_at: datetime.datetime.utcnow()
})
db.session.commit()
indexing_end_at = time.perf_counter()
# update document status to completed
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="completed",
extra_update_params={
DatasetDocument.tokens: tokens,
DatasetDocument.completed_at: datetime.datetime.utcnow(),
DatasetDocument.indexing_latency: indexing_end_at - indexing_start_at,
}
)
def _check_document_paused_status(self, document_id: str):
indexing_cache_key = 'document_{}_is_paused'.format(document_id)
result = redis_client.get(indexing_cache_key)
if result:
raise DocumentIsPausedException()
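# Hypothetical counterpart (not shown in this snippet): whoever pauses a document
# would set the same cache key that _check_document_paused_status polls, e.g.
#   redis_client.setnx('document_{}_is_paused'.format(document_id), 'True')
# and resuming would delete the key so indexing can continue.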
def _update_document_index_status(self, document_id: str, after_indexing_status: str,
extra_update_params: Optional[dict] = None) -> None:
"""
Update the document indexing status.
"""
count = DatasetDocument.query.filter_by(id=document_id, is_paused=True).count()
if count > 0:
raise DocumentIsPausedException()
document = DatasetDocument.query.filter_by(id=document_id).first()
if not document:
raise DocumentIsDeletedPausedException()
update_params = {
DatasetDocument.indexing_status: after_indexing_status
}
if extra_update_params:
update_params.update(extra_update_params)
DatasetDocument.query.filter_by(id=document_id).update(update_params)
db.session.commit()
def _update_segments_by_document(self, dataset_document_id: str, update_params: dict) -> None:
"""
Update the document segment by document id.
"""
DocumentSegment.query.filter_by(document_id=dataset_document_id).update(update_params)
db.session.commit()
def batch_add_segments(self, segments: list[DocumentSegment], dataset: Dataset):
"""
Batch add segments index processing
"""
documents = []
for segment in segments:
document = Document(
page_content=segment.content,
metadata={
"doc_id": segment.index_node_id,
"doc_hash": segment.index_node_hash,
"document_id": segment.document_id,
"dataset_id": segment.dataset_id,
}
)
documents.append(document)
# save vector index
index_type = dataset.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
index_processor.load(dataset, documents)
def _transform(self, index_processor: BaseIndexProcessor, dataset: Dataset,
text_docs: list[Document], doc_language: str, process_rule: dict) -> list[Document]:
# get embedding model instance
embedding_model_instance = None
if dataset.indexing_technique == 'high_quality':
if dataset.embedding_model_provider:
embedding_model_instance = self.model_manager.get_model_instance(
tenant_id=dataset.tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
else:
embedding_model_instance = self.model_manager.get_default_model_instance(
tenant_id=dataset.tenant_id,
model_type=ModelType.TEXT_EMBEDDING,
)
documents = index_processor.transform(text_docs, embedding_model_instance=embedding_model_instance,
process_rule=process_rule, tenant_id=dataset.tenant_id,
doc_language=doc_language)
return documents
def _load_segments(self, dataset, dataset_document, documents):
# save node to document segment
doc_store = DatasetDocumentStore(
dataset=dataset,
user_id=dataset_document.created_by,
document_id=dataset_document.id
)
# add document segments
doc_store.add_documents(documents)
# update document status to indexing
cur_time = datetime.datetime.utcnow()
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="indexing",
extra_update_params={
DatasetDocument.cleaning_completed_at: cur_time,
DatasetDocument.splitting_completed_at: cur_time,
}
)
# update segment status to indexing
self._update_segments_by_document(
dataset_document_id=dataset_document.id,
update_params={
DocumentSegment.status: "indexing",
DocumentSegment.indexing_at: datetime.datetime.utcnow()
}
)
class DocumentIsPausedException(Exception):
pass
db = SQLAlchemy()
class Document(db.Model):
__tablename__ = 'documents'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_pkey'),
db.Index('document_dataset_id_idx', 'dataset_id'),
db.Index('document_is_paused_idx', 'is_paused'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
data_source_type = db.Column(db.String(255), nullable=False)
data_source_info = db.Column(db.Text, nullable=True)
dataset_process_rule_id = db.Column(UUID, nullable=True)
batch = db.Column(db.String(255), nullable=False)
name = db.Column(db.String(255), nullable=False)
created_from = db.Column(db.String(255), nullable=False)
created_by = db.Column(UUID, nullable=False)
created_api_request_id = db.Column(UUID, nullable=True)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
# start processing
processing_started_at = db.Column(db.DateTime, nullable=True)
# parsing
file_id = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=True)
parsing_completed_at = db.Column(db.DateTime, nullable=True)
# cleaning
cleaning_completed_at = db.Column(db.DateTime, nullable=True)
# split
splitting_completed_at = db.Column(db.DateTime, nullable=True)
# indexing
tokens = db.Column(db.Integer, nullable=True)
indexing_latency = db.Column(db.Float, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
# pause
is_paused = db.Column(db.Boolean, nullable=True, server_default=db.text('false'))
paused_by = db.Column(UUID, nullable=True)
paused_at = db.Column(db.DateTime, nullable=True)
# error
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
# basic fields
indexing_status = db.Column(db.String(
255), nullable=False, server_default=db.text("'waiting'::character varying"))
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
archived = db.Column(db.Boolean, nullable=False,
server_default=db.text('false'))
archived_reason = db.Column(db.String(255), nullable=True)
archived_by = db.Column(UUID, nullable=True)
archived_at = db.Column(db.DateTime, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
doc_type = db.Column(db.String(40), nullable=True)
doc_metadata = db.Column(db.JSON, nullable=True)
doc_form = db.Column(db.String(
255), nullable=False, server_default=db.text("'text_model'::character varying"))
doc_language = db.Column(db.String(255), nullable=True)
DATA_SOURCES = ['upload_file', 'notion_import']
def display_status(self):
status = None
if self.indexing_status == 'waiting':
status = 'queuing'
elif self.indexing_status not in ['completed', 'error', 'waiting'] and self.is_paused:
status = 'paused'
elif self.indexing_status in ['parsing', 'cleaning', 'splitting', 'indexing']:
status = 'indexing'
elif self.indexing_status == 'error':
status = 'error'
elif self.indexing_status == 'completed' and not self.archived and self.enabled:
status = 'available'
elif self.indexing_status == 'completed' and not self.archived and not self.enabled:
status = 'disabled'
elif self.indexing_status == 'completed' and self.archived:
status = 'archived'
return status
def data_source_info_dict(self):
if self.data_source_info:
try:
data_source_info_dict = json.loads(self.data_source_info)
except JSONDecodeError:
data_source_info_dict = {}
return data_source_info_dict
return None
def data_source_detail_dict(self):
if self.data_source_info:
if self.data_source_type == 'upload_file':
data_source_info_dict = json.loads(self.data_source_info)
file_detail = db.session.query(UploadFile). \
filter(UploadFile.id == data_source_info_dict['upload_file_id']). \
one_or_none()
if file_detail:
return {
'upload_file': {
'id': file_detail.id,
'name': file_detail.name,
'size': file_detail.size,
'extension': file_detail.extension,
'mime_type': file_detail.mime_type,
'created_by': file_detail.created_by,
'created_at': file_detail.created_at.timestamp()
}
}
elif self.data_source_type == 'notion_import':
return json.loads(self.data_source_info)
return {}
def average_segment_length(self):
if self.word_count and self.segment_count:
return self.word_count // self.segment_count
return 0
def dataset_process_rule(self):
if self.dataset_process_rule_id:
return DatasetProcessRule.query.get(self.dataset_process_rule_id)
return None
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).one_or_none()
def segment_count(self):
return DocumentSegment.query.filter(DocumentSegment.document_id == self.id).count()
def hit_count(self):
return DocumentSegment.query.with_entities(func.coalesce(func.sum(DocumentSegment.hit_count), 0)) \
.filter(DocumentSegment.document_id == self.id).scalar()
def handle(sender, **kwargs):
dataset_id = sender
document_ids = kwargs.get('document_ids', None)
documents = []
start_at = time.perf_counter()
for document_id in document_ids:
logging.info(click.style('Start process document: {}'.format(document_id), fg='green'))
document = db.session.query(Document).filter(
Document.id == document_id,
Document.dataset_id == dataset_id
).first()
if not document:
raise NotFound('Document not found')
document.indexing_status = 'parsing'
document.processing_started_at = datetime.datetime.utcnow()
documents.append(document)
db.session.add(document)
db.session.commit()
try:
indexing_runner = IndexingRunner()
indexing_runner.run(documents)
end_at = time.perf_counter()
logging.info(click.style('Processed dataset: {} latency: {}'.format(dataset_id, end_at - start_at), fg='green'))
except DocumentIsPausedException as ex:
logging.info(click.style(str(ex), fg='yellow'))
except Exception:
pass | null |
17,015 | from events.app_event import app_was_created
from extensions.ext_database import db
from models.model import InstalledApp
db = SQLAlchemy()
class InstalledApp(db.Model):
__tablename__ = 'installed_apps'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='installed_app_pkey'),
db.Index('installed_app_tenant_id_idx', 'tenant_id'),
db.Index('installed_app_app_id_idx', 'app_id'),
db.UniqueConstraint('tenant_id', 'app_id', name='unique_tenant_app')
)
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
app_id = db.Column(UUID, nullable=False)
app_owner_tenant_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False, default=0)
is_pinned = db.Column(db.Boolean, nullable=False, server_default=db.text('false'))
last_used_at = db.Column(db.DateTime, nullable=True)
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
def app(self):
app = db.session.query(App).filter(App.id == self.app_id).first()
return app
def tenant(self):
tenant = db.session.query(Tenant).filter(Tenant.id == self.tenant_id).first()
return tenant
def is_agent(self) -> bool:
app = self.app
if not app:
return False
return app.is_agent
The provided code snippet includes necessary dependencies for implementing the `handle` function. Write a Python function `def handle(sender, **kwargs)` to solve the following problem:
Create an installed app when an app is created.
Here is the function:
def handle(sender, **kwargs):
"""Create an installed app when an app is created."""
app = sender
installed_app = InstalledApp(
tenant_id=app.tenant_id,
app_id=app.id,
app_owner_tenant_id=app.tenant_id
)
db.session.add(installed_app)
db.session.commit() | Create an installed app when an app is created. |
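# A minimal wiring sketch, assuming app_was_created is a blinker-style signal as the
# events import above suggests; connect/send is the standard blinker interface:
#   app_was_created.connect(handle)   # register the handler at startup
#   app_was_created.send(app)         # fired after the App row is committed -> handle(sender=app)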
17,016 | from events.app_event import app_model_config_was_updated
from extensions.ext_database import db
from models.dataset import AppDatasetJoin
from models.model import AppModelConfig
def get_dataset_ids_from_model_config(app_model_config: AppModelConfig) -> set:
dataset_ids = set()
if not app_model_config:
return dataset_ids
agent_mode = app_model_config.agent_mode_dict
tools = agent_mode.get('tools', []) or []
for tool in tools:
if len(list(tool.keys())) != 1:
continue
tool_type = list(tool.keys())[0]
tool_config = list(tool.values())[0]
if tool_type == "dataset":
dataset_ids.add(tool_config.get("id"))
# get dataset from dataset_configs
dataset_configs = app_model_config.dataset_configs_dict
datasets = dataset_configs.get('datasets', {}) or {}
for dataset in datasets.get('datasets', []) or []:
keys = list(dataset.keys())
if len(keys) == 1 and keys[0] == 'dataset':
if dataset['dataset'].get('id'):
dataset_ids.add(dataset['dataset'].get('id'))
return dataset_ids
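# Worked example (hypothetical config object shaped like the dicts read above):
#   agent_mode_dict      -> {'tools': [{'dataset': {'id': 'ds-1'}}]}
#   dataset_configs_dict -> {'datasets': {'datasets': [{'dataset': {'id': 'ds-2'}}]}}
#   get_dataset_ids_from_model_config(config) -> {'ds-1', 'ds-2'}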
db = SQLAlchemy()
class AppDatasetJoin(db.Model):
__tablename__ = 'app_dataset_joins'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='app_dataset_join_pkey'),
db.Index('app_dataset_join_app_dataset_idx', 'dataset_id', 'app_id'),
)
id = db.Column(UUID, primary_key=True, nullable=False, server_default=db.text('uuid_generate_v4()'))
app_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False, server_default=db.func.current_timestamp())
def app(self):
return App.query.get(self.app_id)
def handle(sender, **kwargs):
app = sender
app_model_config = kwargs.get('app_model_config')
dataset_ids = get_dataset_ids_from_model_config(app_model_config)
app_dataset_joins = db.session.query(AppDatasetJoin).filter(
AppDatasetJoin.app_id == app.id
).all()
removed_dataset_ids = []
if not app_dataset_joins:
added_dataset_ids = dataset_ids
else:
old_dataset_ids = set()
for app_dataset_join in app_dataset_joins:
old_dataset_ids.add(app_dataset_join.dataset_id)
added_dataset_ids = dataset_ids - old_dataset_ids
removed_dataset_ids = old_dataset_ids - dataset_ids
if removed_dataset_ids:
for dataset_id in removed_dataset_ids:
db.session.query(AppDatasetJoin).filter(
AppDatasetJoin.app_id == app.id,
AppDatasetJoin.dataset_id == dataset_id
).delete()
if added_dataset_ids:
for dataset_id in added_dataset_ids:
app_dataset_join = AppDatasetJoin(
app_id=app.id,
dataset_id=dataset_id
)
db.session.add(app_dataset_join)
db.session.commit() | null |
17,017 | from events.document_event import document_was_deleted
from tasks.clean_document_task import clean_document_task
def clean_document_task(document_id: str, dataset_id: str, doc_form: str):
"""
Clean document when document deleted.
:param document_id: document id
:param dataset_id: dataset id
:param doc_form: doc_form
Usage: clean_document_task.delay(document_id, dataset_id, doc_form)
"""
logging.info(click.style('Start cleaning document after deletion: {}'.format(document_id), fg='green'))
start_at = time.perf_counter()
try:
dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
if not dataset:
raise Exception('Document has no dataset')
segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document_id).all()
# check whether segments exist
if segments:
index_node_ids = [segment.index_node_id for segment in segments]
index_processor = IndexProcessorFactory(doc_form).init_index_processor()
index_processor.clean(dataset, index_node_ids)
for segment in segments:
db.session.delete(segment)
db.session.commit()
end_at = time.perf_counter()
logging.info(
click.style('Cleaned document after deletion: {} latency: {}'.format(document_id, end_at - start_at), fg='green'))
except Exception:
logging.exception("Cleaned document when document deleted failed")
def handle(sender, **kwargs):
document_id = sender
dataset_id = kwargs.get('dataset_id')
doc_form = kwargs.get('doc_form')
clean_document_task.delay(document_id, dataset_id, doc_form) | null |
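# Sketch of the producing side, assuming document_was_deleted is a blinker-style
# signal: the document id is the sender and the rest travel as kwargs, matching
# what handle() unpacks above:
#   document_was_deleted.send(document.id, dataset_id=document.dataset_id,
#                             doc_form=document.doc_form)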
17,018 | from datetime import datetime
from core.entities.application_entities import ApplicationGenerateEntity
from events.message_event import message_was_created
from extensions.ext_database import db
from models.provider import Provider
class ApplicationGenerateEntity(BaseModel):
"""
Application Generate Entity.
"""
task_id: str
tenant_id: str
app_id: str
app_model_config_id: str
# for save
app_model_config_dict: dict
app_model_config_override: bool
# Converted from app_model_config to an Entity object, or overridden directly by external input
app_orchestration_config_entity: AppOrchestrationConfigEntity
conversation_id: Optional[str] = None
inputs: dict[str, str]
query: Optional[str] = None
files: list[FileObj] = []
user_id: str
# extras
stream: bool
invoke_from: InvokeFrom
# extra parameters, like: auto_generate_conversation_name
extras: dict[str, Any] = {}
db = SQLAlchemy()
class Provider(db.Model):
"""
Provider model representing the API providers and their configurations.
"""
__tablename__ = 'providers'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='provider_pkey'),
db.Index('provider_tenant_id_provider_idx', 'tenant_id', 'provider_name'),
db.UniqueConstraint('tenant_id', 'provider_name', 'provider_type', 'quota_type', name='unique_provider_name_type_quota')
)
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
provider_name = db.Column(db.String(40), nullable=False)
provider_type = db.Column(db.String(40), nullable=False, server_default=db.text("'custom'::character varying"))
encrypted_config = db.Column(db.Text, nullable=True)
is_valid = db.Column(db.Boolean, nullable=False, server_default=db.text('false'))
last_used = db.Column(db.DateTime, nullable=True)
quota_type = db.Column(db.String(40), nullable=True, server_default=db.text("''::character varying"))
quota_limit = db.Column(db.BigInteger, nullable=True)
quota_used = db.Column(db.BigInteger, default=0)
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
def __repr__(self):
return f"<Provider(id={self.id}, tenant_id={self.tenant_id}, provider_name='{self.provider_name}', provider_type='{self.provider_type}')>"
def token_is_set(self):
"""
Returns True if the encrypted_config is not None, indicating that the token is set.
"""
return self.encrypted_config is not None
def is_enabled(self):
"""
Returns True if the provider is enabled.
"""
if self.provider_type == ProviderType.SYSTEM.value:
return self.is_valid
else:
return self.is_valid and self.token_is_set
def handle(sender, **kwargs):
message = sender
application_generate_entity: ApplicationGenerateEntity = kwargs.get('application_generate_entity')
db.session.query(Provider).filter(
Provider.tenant_id == application_generate_entity.tenant_id,
Provider.provider_name == application_generate_entity.app_orchestration_config_entity.model_config.provider
).update({'last_used': datetime.utcnow()})
db.session.commit() | null |
17,019 | from events.dataset_event import dataset_was_deleted
from tasks.clean_dataset_task import clean_dataset_task
def clean_dataset_task(dataset_id: str, tenant_id: str, indexing_technique: str,
index_struct: str, collection_binding_id: str, doc_form: str):
def handle(sender, **kwargs):
dataset = sender
clean_dataset_task.delay(dataset.id, dataset.tenant_id, dataset.indexing_technique,
dataset.index_struct, dataset.collection_binding_id, dataset.doc_form) | null |
17,020 | import os
import dotenv
def get_env(key):
return os.environ.get(key, DEFAULTS.get(key))
def get_bool_env(key):
value = get_env(key)
return value.lower() == 'true' if value is not None else False | null |
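# Usage sketch (DEFAULTS lives elsewhere in this config module; the values below are hypothetical):
#   DEFAULTS = {'DEBUG': 'false'}
#   get_bool_env('DEBUG')   # -> False unless the DEBUG env var is set
# The comparison is case-insensitive, but only the literal string 'true' yields True.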
17,021 | import os
import dotenv
def get_env(key):
return os.environ.get(key, DEFAULTS.get(key))
def get_cors_allow_origins(env, default):
cors_allow_origins = []
if get_env(env):
for origin in get_env(env).split(','):
cors_allow_origins.append(origin)
else:
cors_allow_origins = [default]
return cors_allow_origins | null |
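# Usage sketch with hypothetical values:
#   with CONSOLE_CORS_ALLOW_ORIGINS='https://a.example,https://b.example' in the env,
#   get_cors_allow_origins('CONSOLE_CORS_ALLOW_ORIGINS', '*')
#   -> ['https://a.example', 'https://b.example']
#   with the variable unset it falls back to ['*'].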
17,022 | import datetime
import logging
import time
import click
from celery import shared_task
from flask import current_app
from core.indexing_runner import DocumentIsPausedException, IndexingRunner
from extensions.ext_database import db
from models.dataset import Dataset, Document
from services.feature_service import FeatureService
class IndexingRunner:
def __init__(self):
self.storage = storage
self.model_manager = ModelManager()
def run(self, dataset_documents: list[DatasetDocument]):
"""Run the indexing process."""
for dataset_document in dataset_documents:
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
# extract
text_docs = self._extract(index_processor, dataset_document, processing_rule.to_dict())
# transform
documents = self._transform(index_processor, dataset, text_docs, dataset_document.doc_language,
processing_rule.to_dict())
# save segment
self._load_segments(dataset, dataset_document, documents)
# load
self._load(
index_processor=index_processor,
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except ObjectDeletedError:
logging.warning('Document deleted, document id: {}'.format(dataset_document.id))
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def run_in_splitting_status(self, dataset_document: DatasetDocument):
"""Run the indexing process when the index_status is splitting."""
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get exist document_segment list and delete
document_segments = DocumentSegment.query.filter_by(
dataset_id=dataset.id,
document_id=dataset_document.id
).all()
for document_segment in document_segments:
db.session.delete(document_segment)
db.session.commit()
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
# extract
text_docs = self._extract(index_processor, dataset_document, processing_rule.to_dict())
# transform
documents = self._transform(index_processor, dataset, text_docs, dataset_document.doc_language,
processing_rule.to_dict())
# save segment
self._load_segments(dataset, dataset_document, documents)
# load
self._load(
index_processor=index_processor,
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def run_in_indexing_status(self, dataset_document: DatasetDocument):
"""Run the indexing process when the index_status is indexing."""
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get exist document_segment list and delete
document_segments = DocumentSegment.query.filter_by(
dataset_id=dataset.id,
document_id=dataset_document.id
).all()
documents = []
if document_segments:
for document_segment in document_segments:
# transform segment to node
if document_segment.status != "completed":
document = Document(
page_content=document_segment.content,
metadata={
"doc_id": document_segment.index_node_id,
"doc_hash": document_segment.index_node_hash,
"document_id": document_segment.document_id,
"dataset_id": document_segment.dataset_id,
}
)
documents.append(document)
# build index
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
self._load(
index_processor=index_processor,
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def indexing_estimate(self, tenant_id: str, extract_settings: list[ExtractSetting], tmp_processing_rule: dict,
doc_form: Optional[str] = None, doc_language: str = 'English', dataset_id: Optional[str] = None,
indexing_technique: str = 'economy') -> dict:
"""
Estimate the indexing for the document.
"""
# check document limit
features = FeatureService.get_features(tenant_id)
if features.billing.enabled:
count = len(extract_settings)
batch_upload_limit = int(current_app.config['BATCH_UPLOAD_LIMIT'])
if count > batch_upload_limit:
raise ValueError(f"You have reached the batch upload limit of {batch_upload_limit}.")
embedding_model_instance = None
if dataset_id:
dataset = Dataset.query.filter_by(
id=dataset_id
).first()
if not dataset:
raise ValueError('Dataset not found.')
if dataset.indexing_technique == 'high_quality' or indexing_technique == 'high_quality':
if dataset.embedding_model_provider:
embedding_model_instance = self.model_manager.get_model_instance(
tenant_id=tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
else:
embedding_model_instance = self.model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.TEXT_EMBEDDING,
)
else:
if indexing_technique == 'high_quality':
embedding_model_instance = self.model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.TEXT_EMBEDDING,
)
tokens = 0
preview_texts = []
total_segments = 0
total_price = 0
currency = 'USD'
index_type = doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
all_text_docs = []
for extract_setting in extract_settings:
# extract
text_docs = index_processor.extract(extract_setting, process_rule_mode=tmp_processing_rule["mode"])
all_text_docs.extend(text_docs)
processing_rule = DatasetProcessRule(
mode=tmp_processing_rule["mode"],
rules=json.dumps(tmp_processing_rule["rules"])
)
# get splitter
splitter = self._get_splitter(processing_rule, embedding_model_instance)
# split to documents
documents = self._split_to_documents_for_estimate(
text_docs=text_docs,
splitter=splitter,
processing_rule=processing_rule
)
total_segments += len(documents)
for document in documents:
if len(preview_texts) < 5:
preview_texts.append(document.page_content)
if indexing_technique == 'high_quality' or embedding_model_instance:
embedding_model_type_instance = embedding_model_instance.model_type_instance
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_type_instance)
tokens += embedding_model_type_instance.get_num_tokens(
model=embedding_model_instance.model,
credentials=embedding_model_instance.credentials,
texts=[self.filter_string(document.page_content)]
)
if doc_form and doc_form == 'qa_model':
model_instance = self.model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.LLM
)
model_type_instance = model_instance.model_type_instance
model_type_instance = cast(LargeLanguageModel, model_type_instance)
if len(preview_texts) > 0:
# qa model document
response = LLMGenerator.generate_qa_document(current_user.current_tenant_id, preview_texts[0],
doc_language)
document_qa_list = self.format_split_text(response)
price_info = model_type_instance.get_price(
model=model_instance.model,
credentials=model_instance.credentials,
price_type=PriceType.INPUT,
tokens=total_segments * 2000,
)
return {
"total_segments": total_segments * 20,
"tokens": total_segments * 2000,
"total_price": '{:f}'.format(price_info.total_amount),
"currency": price_info.currency,
"qa_preview": document_qa_list,
"preview": preview_texts
}
if embedding_model_instance:
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_instance.model_type_instance)
embedding_price_info = embedding_model_type_instance.get_price(
model=embedding_model_instance.model,
credentials=embedding_model_instance.credentials,
price_type=PriceType.INPUT,
tokens=tokens
)
total_price = '{:f}'.format(embedding_price_info.total_amount)
currency = embedding_price_info.currency
return {
"total_segments": total_segments,
"tokens": tokens,
"total_price": total_price,
"currency": currency,
"preview": preview_texts
}
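# Worked numbers for the qa_model branch above (a heuristic estimate, not a real quote):
# only the first preview text is actually sent to the LLM; the totals are then
# extrapolated as total_segments * 20 QA pairs and total_segments * 2000 tokens,
# so 10 source segments are reported as 200 segments and 20,000 tokens priced
# at the LLM input rate.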
def _extract(self, index_processor: BaseIndexProcessor, dataset_document: DatasetDocument, process_rule: dict) \
-> list[Document]:
# load file
if dataset_document.data_source_type not in ["upload_file", "notion_import"]:
return []
data_source_info = dataset_document.data_source_info_dict
text_docs = []
if dataset_document.data_source_type == 'upload_file':
if not data_source_info or 'upload_file_id' not in data_source_info:
raise ValueError("no upload file found")
file_detail = db.session.query(UploadFile). \
filter(UploadFile.id == data_source_info['upload_file_id']). \
one_or_none()
if file_detail:
extract_setting = ExtractSetting(
datasource_type="upload_file",
upload_file=file_detail,
document_model=dataset_document.doc_form
)
text_docs = index_processor.extract(extract_setting, process_rule_mode=process_rule['mode'])
elif dataset_document.data_source_type == 'notion_import':
if (not data_source_info or 'notion_workspace_id' not in data_source_info
or 'notion_page_id' not in data_source_info):
raise ValueError("no notion import info found")
extract_setting = ExtractSetting(
datasource_type="notion_import",
notion_info={
"notion_workspace_id": data_source_info['notion_workspace_id'],
"notion_obj_id": data_source_info['notion_page_id'],
"notion_page_type": data_source_info['type'],
"document": dataset_document,
"tenant_id": dataset_document.tenant_id
},
document_model=dataset_document.doc_form
)
text_docs = index_processor.extract(extract_setting, process_rule_mode=process_rule['mode'])
# update document status to splitting
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="splitting",
extra_update_params={
DatasetDocument.word_count: sum([len(text_doc.page_content) for text_doc in text_docs]),
DatasetDocument.parsing_completed_at: datetime.datetime.utcnow()
}
)
# tag each extracted doc with the owning document and dataset ids
text_docs = cast(list[Document], text_docs)
for text_doc in text_docs:
text_doc.metadata['document_id'] = dataset_document.id
text_doc.metadata['dataset_id'] = dataset_document.dataset_id
return text_docs
def filter_string(self, text):
text = re.sub(r'<\|', '<', text)
text = re.sub(r'\|>', '>', text)
text = re.sub(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F\xEF\xBF\xBE]', '', text)
# Unicode U+FFFE
text = re.sub('\uFFFE', '', text)
return text
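# Illustrative effect of filter_string on a hypothetical input:
#   filter_string("<|assistant|> hi\x00 there")  ->  "<assistant> hi there"
# ("<|" and "|>" are unwrapped to plain brackets; control characters are dropped).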
def _get_splitter(self, processing_rule: DatasetProcessRule,
embedding_model_instance: Optional[ModelInstance]) -> TextSplitter:
"""
Get the NodeParser object according to the processing rule.
"""
if processing_rule.mode == "custom":
# The user-defined segmentation rule
rules = json.loads(processing_rule.rules)
segmentation = rules["segmentation"]
if segmentation["max_tokens"] < 50 or segmentation["max_tokens"] > 1000:
raise ValueError("Custom segment length should be between 50 and 1000.")
separator = segmentation["separator"]
if separator:
separator = separator.replace('\\n', '\n')
if 'chunk_overlap' in segmentation and segmentation['chunk_overlap']:
chunk_overlap = segmentation['chunk_overlap']
else:
chunk_overlap = 0
character_splitter = FixedRecursiveCharacterTextSplitter.from_encoder(
chunk_size=segmentation["max_tokens"],
chunk_overlap=chunk_overlap,
fixed_separator=separator,
separators=["\n\n", "。", ".", " ", ""],
embedding_model_instance=embedding_model_instance
)
else:
# Automatic segmentation
character_splitter = EnhanceRecursiveCharacterTextSplitter.from_encoder(
chunk_size=DatasetProcessRule.AUTOMATIC_RULES['segmentation']['max_tokens'],
chunk_overlap=DatasetProcessRule.AUTOMATIC_RULES['segmentation']['chunk_overlap'],
separators=["\n\n", "。", ".", " ", ""],
embedding_model_instance=embedding_model_instance
)
return character_splitter
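# Shape of the custom rules JSON consumed above (field names from the code, values
# hypothetical):
#   {"segmentation": {"max_tokens": 500, "separator": "\\n", "chunk_overlap": 50}}
# max_tokens outside 50..1000 raises ValueError, and an escaped "\\n" in separator
# is unescaped to a real newline before splitting.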
def _step_split(self, text_docs: list[Document], splitter: TextSplitter,
dataset: Dataset, dataset_document: DatasetDocument, processing_rule: DatasetProcessRule) \
-> list[Document]:
"""
Split the text documents into documents and save them to the document segment.
"""
documents = self._split_to_documents(
text_docs=text_docs,
splitter=splitter,
processing_rule=processing_rule,
tenant_id=dataset.tenant_id,
document_form=dataset_document.doc_form,
document_language=dataset_document.doc_language
)
# save node to document segment
doc_store = DatasetDocumentStore(
dataset=dataset,
user_id=dataset_document.created_by,
document_id=dataset_document.id
)
# add document segments
doc_store.add_documents(documents)
# update document status to indexing
cur_time = datetime.datetime.utcnow()
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="indexing",
extra_update_params={
DatasetDocument.cleaning_completed_at: cur_time,
DatasetDocument.splitting_completed_at: cur_time,
}
)
# update segment status to indexing
self._update_segments_by_document(
dataset_document_id=dataset_document.id,
update_params={
DocumentSegment.status: "indexing",
DocumentSegment.indexing_at: datetime.datetime.utcnow()
}
)
return documents
def _split_to_documents(self, text_docs: list[Document], splitter: TextSplitter,
processing_rule: DatasetProcessRule, tenant_id: str,
document_form: str, document_language: str) -> list[Document]:
"""
Split the text documents into nodes.
"""
all_documents = []
all_qa_documents = []
for text_doc in text_docs:
# document clean
document_text = self._document_clean(text_doc.page_content, processing_rule)
text_doc.page_content = document_text
# parse document to nodes
documents = splitter.split_documents([text_doc])
split_documents = []
for document_node in documents:
if document_node.page_content.strip():
doc_id = str(uuid.uuid4())
hash = helper.generate_text_hash(document_node.page_content)
document_node.metadata['doc_id'] = doc_id
document_node.metadata['doc_hash'] = hash
# strip a leading splitter character left over from splitting
page_content = document_node.page_content
if page_content.startswith(".") or page_content.startswith("。"):
page_content = page_content[1:]
document_node.page_content = page_content
if document_node.page_content:
split_documents.append(document_node)
all_documents.extend(split_documents)
# processing qa document
if document_form == 'qa_model':
for i in range(0, len(all_documents), 10):
threads = []
sub_documents = all_documents[i:i + 10]
for doc in sub_documents:
document_format_thread = threading.Thread(target=self.format_qa_document, kwargs={
'flask_app': current_app._get_current_object(),
'tenant_id': tenant_id, 'document_node': doc, 'all_qa_documents': all_qa_documents,
'document_language': document_language})
threads.append(document_format_thread)
document_format_thread.start()
for thread in threads:
thread.join()
return all_qa_documents
return all_documents
def format_qa_document(self, flask_app: Flask, tenant_id: str, document_node, all_qa_documents, document_language):
format_documents = []
if document_node.page_content is None or not document_node.page_content.strip():
return
with flask_app.app_context():
try:
# qa model document
response = LLMGenerator.generate_qa_document(tenant_id, document_node.page_content, document_language)
document_qa_list = self.format_split_text(response)
qa_documents = []
for result in document_qa_list:
qa_document = Document(page_content=result['question'], metadata=document_node.metadata.copy())
doc_id = str(uuid.uuid4())
hash = helper.generate_text_hash(result['question'])
qa_document.metadata['answer'] = result['answer']
qa_document.metadata['doc_id'] = doc_id
qa_document.metadata['doc_hash'] = hash
qa_documents.append(qa_document)
format_documents.extend(qa_documents)
except Exception as e:
logging.exception(e)
all_qa_documents.extend(format_documents)
def _split_to_documents_for_estimate(self, text_docs: list[Document], splitter: TextSplitter,
processing_rule: DatasetProcessRule) -> list[Document]:
"""
Split the text documents into nodes.
"""
all_documents = []
for text_doc in text_docs:
# document clean
document_text = self._document_clean(text_doc.page_content, processing_rule)
text_doc.page_content = document_text
# parse document to nodes
documents = splitter.split_documents([text_doc])
split_documents = []
for document in documents:
if document.page_content is None or not document.page_content.strip():
continue
doc_id = str(uuid.uuid4())
hash = helper.generate_text_hash(document.page_content)
document.metadata['doc_id'] = doc_id
document.metadata['doc_hash'] = hash
split_documents.append(document)
all_documents.extend(split_documents)
return all_documents
def _document_clean(self, text: str, processing_rule: DatasetProcessRule) -> str:
"""
Clean the document text according to the processing rules.
"""
if processing_rule.mode == "automatic":
rules = DatasetProcessRule.AUTOMATIC_RULES
else:
rules = json.loads(processing_rule.rules) if processing_rule.rules else {}
if 'pre_processing_rules' in rules:
pre_processing_rules = rules["pre_processing_rules"]
for pre_processing_rule in pre_processing_rules:
if pre_processing_rule["id"] == "remove_extra_spaces" and pre_processing_rule["enabled"] is True:
# Remove extra spaces
pattern = r'\n{3,}'
text = re.sub(pattern, '\n\n', text)
pattern = r'[\t\f\r\x20\u00a0\u1680\u180e\u2000-\u200a\u202f\u205f\u3000]{2,}'
text = re.sub(pattern, ' ', text)
elif pre_processing_rule["id"] == "remove_urls_emails" and pre_processing_rule["enabled"] is True:
# Remove email
pattern = r'([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)'
text = re.sub(pattern, '', text)
# Remove URL
pattern = r'https?://[^\s]+'
text = re.sub(pattern, '', text)
return text
def format_split_text(self, text):
regex = r"Q\d+:\s*(.*?)\s*A\d+:\s*([\s\S]*?)(?=Q\d+:|$)"
matches = re.findall(regex, text, re.UNICODE)
return [
{
"question": q,
"answer": re.sub(r"\n\s*", "\n", a.strip())
}
for q, a in matches if q and a
]
def _load(self, index_processor: BaseIndexProcessor, dataset: Dataset,
dataset_document: DatasetDocument, documents: list[Document]) -> None:
"""
insert index and update document/segment status to completed
"""
embedding_model_instance = None
if dataset.indexing_technique == 'high_quality':
embedding_model_instance = self.model_manager.get_model_instance(
tenant_id=dataset.tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
# chunk nodes by chunk size
indexing_start_at = time.perf_counter()
tokens = 0
chunk_size = 100
embedding_model_type_instance = None
if embedding_model_instance:
embedding_model_type_instance = embedding_model_instance.model_type_instance
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_type_instance)
for i in range(0, len(documents), chunk_size):
# check whether the document is paused
self._check_document_paused_status(dataset_document.id)
chunk_documents = documents[i:i + chunk_size]
if dataset.indexing_technique == 'high_quality' or embedding_model_type_instance:
tokens += sum(
embedding_model_type_instance.get_num_tokens(
embedding_model_instance.model,
embedding_model_instance.credentials,
[document.page_content]
)
for document in chunk_documents
)
# load index
index_processor.load(dataset, chunk_documents)
db.session.add(dataset)
document_ids = [document.metadata['doc_id'] for document in chunk_documents]
db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == dataset_document.id,
DocumentSegment.index_node_id.in_(document_ids),
DocumentSegment.status == "indexing"
).update({
DocumentSegment.status: "completed",
DocumentSegment.enabled: True,
DocumentSegment.completed_at: datetime.datetime.utcnow()
})
db.session.commit()
indexing_end_at = time.perf_counter()
# update document status to completed
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="completed",
extra_update_params={
DatasetDocument.tokens: tokens,
DatasetDocument.completed_at: datetime.datetime.utcnow(),
DatasetDocument.indexing_latency: indexing_end_at - indexing_start_at,
}
)
def _check_document_paused_status(self, document_id: str):
indexing_cache_key = 'document_{}_is_paused'.format(document_id)
result = redis_client.get(indexing_cache_key)
if result:
raise DocumentIsPausedException()
def _update_document_index_status(self, document_id: str, after_indexing_status: str,
extra_update_params: Optional[dict] = None) -> None:
"""
Update the document indexing status.
"""
count = DatasetDocument.query.filter_by(id=document_id, is_paused=True).count()
if count > 0:
raise DocumentIsPausedException()
document = DatasetDocument.query.filter_by(id=document_id).first()
if not document:
raise DocumentIsDeletedPausedException()
update_params = {
DatasetDocument.indexing_status: after_indexing_status
}
if extra_update_params:
update_params.update(extra_update_params)
DatasetDocument.query.filter_by(id=document_id).update(update_params)
db.session.commit()
def _update_segments_by_document(self, dataset_document_id: str, update_params: dict) -> None:
"""
Update the document segment by document id.
"""
DocumentSegment.query.filter_by(document_id=dataset_document_id).update(update_params)
db.session.commit()
def batch_add_segments(self, segments: list[DocumentSegment], dataset: Dataset):
"""
Batch add segments index processing
"""
documents = []
for segment in segments:
document = Document(
page_content=segment.content,
metadata={
"doc_id": segment.index_node_id,
"doc_hash": segment.index_node_hash,
"document_id": segment.document_id,
"dataset_id": segment.dataset_id,
}
)
documents.append(document)
# save vector index
index_type = dataset.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
index_processor.load(dataset, documents)
def _transform(self, index_processor: BaseIndexProcessor, dataset: Dataset,
text_docs: list[Document], doc_language: str, process_rule: dict) -> list[Document]:
# get embedding model instance
embedding_model_instance = None
if dataset.indexing_technique == 'high_quality':
if dataset.embedding_model_provider:
embedding_model_instance = self.model_manager.get_model_instance(
tenant_id=dataset.tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
else:
embedding_model_instance = self.model_manager.get_default_model_instance(
tenant_id=dataset.tenant_id,
model_type=ModelType.TEXT_EMBEDDING,
)
documents = index_processor.transform(text_docs, embedding_model_instance=embedding_model_instance,
process_rule=process_rule, tenant_id=dataset.tenant_id,
doc_language=doc_language)
return documents
def _load_segments(self, dataset, dataset_document, documents):
# save node to document segment
doc_store = DatasetDocumentStore(
dataset=dataset,
user_id=dataset_document.created_by,
document_id=dataset_document.id
)
# add document segments
doc_store.add_documents(documents)
# update document status to indexing
cur_time = datetime.datetime.utcnow()
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="indexing",
extra_update_params={
DatasetDocument.cleaning_completed_at: cur_time,
DatasetDocument.splitting_completed_at: cur_time,
}
)
# update segment status to indexing
self._update_segments_by_document(
dataset_document_id=dataset_document.id,
update_params={
DocumentSegment.status: "indexing",
DocumentSegment.indexing_at: datetime.datetime.utcnow()
}
)
class DocumentIsPausedException(Exception):
pass
db = SQLAlchemy()
class Dataset(db.Model):
__tablename__ = 'datasets'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='dataset_pkey'),
db.Index('dataset_tenant_idx', 'tenant_id'),
db.Index('retrieval_model_idx', "retrieval_model", postgresql_using='gin')
)
INDEXING_TECHNIQUE_LIST = ['high_quality', 'economy', None]
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text, nullable=True)
provider = db.Column(db.String(255), nullable=False,
server_default=db.text("'vendor'::character varying"))
permission = db.Column(db.String(255), nullable=False,
server_default=db.text("'only_me'::character varying"))
data_source_type = db.Column(db.String(255))
indexing_technique = db.Column(db.String(255), nullable=True)
index_struct = db.Column(db.Text, nullable=True)
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
embedding_model = db.Column(db.String(255), nullable=True)
embedding_model_provider = db.Column(db.String(255), nullable=True)
collection_binding_id = db.Column(UUID, nullable=True)
retrieval_model = db.Column(JSONB, nullable=True)
def dataset_keyword_table(self):
dataset_keyword_table = db.session.query(DatasetKeywordTable).filter(
DatasetKeywordTable.dataset_id == self.id).first()
if dataset_keyword_table:
return dataset_keyword_table
return None
def index_struct_dict(self):
return json.loads(self.index_struct) if self.index_struct else None
def created_by_account(self):
return Account.query.get(self.created_by)
def latest_process_rule(self):
return DatasetProcessRule.query.filter(DatasetProcessRule.dataset_id == self.id) \
.order_by(DatasetProcessRule.created_at.desc()).first()
def app_count(self):
return db.session.query(func.count(AppDatasetJoin.id)).filter(AppDatasetJoin.dataset_id == self.id).scalar()
def document_count(self):
return db.session.query(func.count(Document.id)).filter(Document.dataset_id == self.id).scalar()
def available_document_count(self):
return db.session.query(func.count(Document.id)).filter(
Document.dataset_id == self.id,
Document.indexing_status == 'completed',
Document.enabled == True,
Document.archived == False
).scalar()
def available_segment_count(self):
return db.session.query(func.count(DocumentSegment.id)).filter(
DocumentSegment.dataset_id == self.id,
DocumentSegment.status == 'completed',
DocumentSegment.enabled == True
).scalar()
def word_count(self):
return Document.query.with_entities(func.coalesce(func.sum(Document.word_count), 0)) \
.filter(Document.dataset_id == self.id).scalar()
def doc_form(self):
document = db.session.query(Document).filter(
Document.dataset_id == self.id).first()
if document:
return document.doc_form
return None
def retrieval_model_dict(self):
default_retrieval_model = {
'search_method': 'semantic_search',
'reranking_enable': False,
'reranking_model': {
'reranking_provider_name': '',
'reranking_model_name': ''
},
'top_k': 2,
'score_threshold_enabled': False
}
return self.retrieval_model if self.retrieval_model else default_retrieval_model
def gen_collection_name_by_id(dataset_id: str) -> str:
normalized_dataset_id = dataset_id.replace("-", "_")
return f'Vector_index_{normalized_dataset_id}_Node'
class Document(db.Model):
__tablename__ = 'documents'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_pkey'),
db.Index('document_dataset_id_idx', 'dataset_id'),
db.Index('document_is_paused_idx', 'is_paused'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
data_source_type = db.Column(db.String(255), nullable=False)
data_source_info = db.Column(db.Text, nullable=True)
dataset_process_rule_id = db.Column(UUID, nullable=True)
batch = db.Column(db.String(255), nullable=False)
name = db.Column(db.String(255), nullable=False)
created_from = db.Column(db.String(255), nullable=False)
created_by = db.Column(UUID, nullable=False)
created_api_request_id = db.Column(UUID, nullable=True)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
# start processing
processing_started_at = db.Column(db.DateTime, nullable=True)
# parsing
file_id = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=True)
parsing_completed_at = db.Column(db.DateTime, nullable=True)
# cleaning
cleaning_completed_at = db.Column(db.DateTime, nullable=True)
# split
splitting_completed_at = db.Column(db.DateTime, nullable=True)
# indexing
tokens = db.Column(db.Integer, nullable=True)
indexing_latency = db.Column(db.Float, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
# pause
is_paused = db.Column(db.Boolean, nullable=True, server_default=db.text('false'))
paused_by = db.Column(UUID, nullable=True)
paused_at = db.Column(db.DateTime, nullable=True)
# error
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
# basic fields
indexing_status = db.Column(db.String(
255), nullable=False, server_default=db.text("'waiting'::character varying"))
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
archived = db.Column(db.Boolean, nullable=False,
server_default=db.text('false'))
archived_reason = db.Column(db.String(255), nullable=True)
archived_by = db.Column(UUID, nullable=True)
archived_at = db.Column(db.DateTime, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
doc_type = db.Column(db.String(40), nullable=True)
doc_metadata = db.Column(db.JSON, nullable=True)
doc_form = db.Column(db.String(
255), nullable=False, server_default=db.text("'text_model'::character varying"))
doc_language = db.Column(db.String(255), nullable=True)
DATA_SOURCES = ['upload_file', 'notion_import']
def display_status(self):
status = None
if self.indexing_status == 'waiting':
status = 'queuing'
elif self.indexing_status not in ['completed', 'error', 'waiting'] and self.is_paused:
status = 'paused'
elif self.indexing_status in ['parsing', 'cleaning', 'splitting', 'indexing']:
status = 'indexing'
elif self.indexing_status == 'error':
status = 'error'
elif self.indexing_status == 'completed' and not self.archived and self.enabled:
status = 'available'
elif self.indexing_status == 'completed' and not self.archived and not self.enabled:
status = 'disabled'
elif self.indexing_status == 'completed' and self.archived:
status = 'archived'
return status
def data_source_info_dict(self):
if self.data_source_info:
try:
data_source_info_dict = json.loads(self.data_source_info)
except JSONDecodeError:
data_source_info_dict = {}
return data_source_info_dict
return None
def data_source_detail_dict(self):
if self.data_source_info:
if self.data_source_type == 'upload_file':
data_source_info_dict = json.loads(self.data_source_info)
file_detail = db.session.query(UploadFile). \
filter(UploadFile.id == data_source_info_dict['upload_file_id']). \
one_or_none()
if file_detail:
return {
'upload_file': {
'id': file_detail.id,
'name': file_detail.name,
'size': file_detail.size,
'extension': file_detail.extension,
'mime_type': file_detail.mime_type,
'created_by': file_detail.created_by,
'created_at': file_detail.created_at.timestamp()
}
}
elif self.data_source_type == 'notion_import':
return json.loads(self.data_source_info)
return {}
def average_segment_length(self):
if self.word_count and self.segment_count:
return self.word_count // self.segment_count
return 0
def dataset_process_rule(self):
if self.dataset_process_rule_id:
return DatasetProcessRule.query.get(self.dataset_process_rule_id)
return None
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).one_or_none()
def segment_count(self):
return DocumentSegment.query.filter(DocumentSegment.document_id == self.id).count()
def hit_count(self):
return DocumentSegment.query.with_entities(func.coalesce(func.sum(DocumentSegment.hit_count), 0)) \
.filter(DocumentSegment.document_id == self.id).scalar()
class FeatureService:
def get_features(cls, tenant_id: str) -> FeatureModel:
features = FeatureModel()
cls._fulfill_params_from_env(features)
if current_app.config['BILLING_ENABLED']:
cls._fulfill_params_from_billing_api(features, tenant_id)
return features
def _fulfill_params_from_env(cls, features: FeatureModel):
features.can_replace_logo = current_app.config['CAN_REPLACE_LOGO']
def _fulfill_params_from_billing_api(cls, features: FeatureModel, tenant_id: str):
billing_info = BillingService.get_info(tenant_id)
features.billing.enabled = billing_info['enabled']
features.billing.subscription.plan = billing_info['subscription']['plan']
features.billing.subscription.interval = billing_info['subscription']['interval']
features.members.size = billing_info['members']['size']
features.members.limit = billing_info['members']['limit']
features.apps.size = billing_info['apps']['size']
features.apps.limit = billing_info['apps']['limit']
features.vector_space.size = billing_info['vector_space']['size']
features.vector_space.limit = billing_info['vector_space']['limit']
features.documents_upload_quota.size = billing_info['documents_upload_quota']['size']
features.documents_upload_quota.limit = billing_info['documents_upload_quota']['limit']
features.annotation_quota_limit.size = billing_info['annotation_quota_limit']['size']
features.annotation_quota_limit.limit = billing_info['annotation_quota_limit']['limit']
features.docs_processing = billing_info['docs_processing']
features.can_replace_logo = billing_info['can_replace_logo']
The provided code snippet includes necessary dependencies for implementing the `document_indexing_task` function. Write a Python function `def document_indexing_task(dataset_id: str, document_ids: list)` to solve the following problem:
Async process document :param dataset_id: :param document_ids: Usage: document_indexing_task.delay(dataset_id, document_id)
Here is the function:
def document_indexing_task(dataset_id: str, document_ids: list):
"""
Async process document
:param dataset_id:
:param document_ids:
Usage: document_indexing_task.delay(dataset_id, document_ids)
"""
documents = []
start_at = time.perf_counter()
dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
# check document limit
features = FeatureService.get_features(dataset.tenant_id)
try:
if features.billing.enabled:
vector_space = features.vector_space
count = len(document_ids)
batch_upload_limit = int(current_app.config['BATCH_UPLOAD_LIMIT'])
if count > batch_upload_limit:
raise ValueError(f"You have reached the batch upload limit of {batch_upload_limit}.")
if 0 < vector_space.limit <= vector_space.size:
raise ValueError("Your total number of documents plus the number of uploads have over the limit of "
"your subscription.")
except Exception as e:
for document_id in document_ids:
document = db.session.query(Document).filter(
Document.id == document_id,
Document.dataset_id == dataset_id
).first()
if document:
document.indexing_status = 'error'
document.error = str(e)
document.stopped_at = datetime.datetime.utcnow()
db.session.add(document)
db.session.commit()
return
for document_id in document_ids:
logging.info(click.style('Start process document: {}'.format(document_id), fg='green'))
document = db.session.query(Document).filter(
Document.id == document_id,
Document.dataset_id == dataset_id
).first()
if document:
document.indexing_status = 'parsing'
document.processing_started_at = datetime.datetime.utcnow()
documents.append(document)
db.session.add(document)
db.session.commit()
try:
indexing_runner = IndexingRunner()
indexing_runner.run(documents)
end_at = time.perf_counter()
logging.info(click.style('Processed dataset: {} latency: {}'.format(dataset_id, end_at - start_at), fg='green'))
except DocumentIsPausedException as ex:
logging.info(click.style(str(ex), fg='yellow'))
except Exception:
logging.exception("Document indexing task failed") | Async process document :param dataset_id: :param document_ids: Usage: document_indexing_task.delay(dataset_id, document_id) |
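Note that `shared_task` appears in the import lists of these task rows, but the decorator itself was stripped during extraction. A minimal sketch of how such a task is typically registered and dispatched with Celery; the queue name is an assumed example, not something the snippet confirms:

from celery import shared_task

@shared_task(queue='dataset')  # queue name is an assumption
def document_indexing_task(dataset_id: str, document_ids: list):
    ...  # body as shown above

# Caller side: enqueue for a worker instead of running inline, as the
# docstring's Usage line suggests.
document_indexing_task.delay('dataset-uuid', ['document-uuid'])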
17,023 | import logging
import time
import click
from celery import shared_task
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from core.rag.models.document import Document
from extensions.ext_database import db
from models.dataset import Dataset, DocumentSegment
from models.dataset import Document as DatasetDocument
class IndexProcessorFactory:
"""IndexProcessorInit.
"""
def __init__(self, index_type: str):
self._index_type = index_type
def init_index_processor(self) -> BaseIndexProcessor:
"""Init index processor."""
if not self._index_type:
raise ValueError("Index type must be specified.")
if self._index_type == IndexType.PARAGRAPH_INDEX.value:
return ParagraphIndexProcessor()
elif self._index_type == IndexType.QA_INDEX.value:
return QAIndexProcessor()
else:
raise ValueError(f"Index type {self._index_type} is not supported.")
class Document(BaseModel):
"""Class for storing a piece of text and associated metadata."""
page_content: str
"""Arbitrary metadata about the page content (e.g., source, relationships to other
documents, etc.).
"""
metadata: Optional[dict] = Field(default_factory=dict)
db = SQLAlchemy()
class Dataset(db.Model):
__tablename__ = 'datasets'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='dataset_pkey'),
db.Index('dataset_tenant_idx', 'tenant_id'),
db.Index('retrieval_model_idx', "retrieval_model", postgresql_using='gin')
)
INDEXING_TECHNIQUE_LIST = ['high_quality', 'economy', None]
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text, nullable=True)
provider = db.Column(db.String(255), nullable=False,
server_default=db.text("'vendor'::character varying"))
permission = db.Column(db.String(255), nullable=False,
server_default=db.text("'only_me'::character varying"))
data_source_type = db.Column(db.String(255))
indexing_technique = db.Column(db.String(255), nullable=True)
index_struct = db.Column(db.Text, nullable=True)
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
embedding_model = db.Column(db.String(255), nullable=True)
embedding_model_provider = db.Column(db.String(255), nullable=True)
collection_binding_id = db.Column(UUID, nullable=True)
retrieval_model = db.Column(JSONB, nullable=True)
def dataset_keyword_table(self):
dataset_keyword_table = db.session.query(DatasetKeywordTable).filter(
DatasetKeywordTable.dataset_id == self.id).first()
if dataset_keyword_table:
return dataset_keyword_table
return None
def index_struct_dict(self):
return json.loads(self.index_struct) if self.index_struct else None
def created_by_account(self):
return Account.query.get(self.created_by)
def latest_process_rule(self):
return DatasetProcessRule.query.filter(DatasetProcessRule.dataset_id == self.id) \
.order_by(DatasetProcessRule.created_at.desc()).first()
def app_count(self):
return db.session.query(func.count(AppDatasetJoin.id)).filter(AppDatasetJoin.dataset_id == self.id).scalar()
def document_count(self):
return db.session.query(func.count(Document.id)).filter(Document.dataset_id == self.id).scalar()
def available_document_count(self):
return db.session.query(func.count(Document.id)).filter(
Document.dataset_id == self.id,
Document.indexing_status == 'completed',
Document.enabled == True,
Document.archived == False
).scalar()
def available_segment_count(self):
return db.session.query(func.count(DocumentSegment.id)).filter(
DocumentSegment.dataset_id == self.id,
DocumentSegment.status == 'completed',
DocumentSegment.enabled == True
).scalar()
def word_count(self):
return Document.query.with_entities(func.coalesce(func.sum(Document.word_count), 0)) \
.filter(Document.dataset_id == self.id).scalar()
def doc_form(self):
document = db.session.query(Document).filter(
Document.dataset_id == self.id).first()
if document:
return document.doc_form
return None
def retrieval_model_dict(self):
default_retrieval_model = {
'search_method': 'semantic_search',
'reranking_enable': False,
'reranking_model': {
'reranking_provider_name': '',
'reranking_model_name': ''
},
'top_k': 2,
'score_threshold_enabled': False
}
return self.retrieval_model if self.retrieval_model else default_retrieval_model
def gen_collection_name_by_id(dataset_id: str) -> str:
normalized_dataset_id = dataset_id.replace("-", "_")
return f'Vector_index_{normalized_dataset_id}_Node'
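As a quick sanity check of the naming scheme (hyphens in the dataset id become underscores):

assert gen_collection_name_by_id('2d6e-9c0a') == 'Vector_index_2d6e_9c0a_Node'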
class Document(db.Model):
__tablename__ = 'documents'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_pkey'),
db.Index('document_dataset_id_idx', 'dataset_id'),
db.Index('document_is_paused_idx', 'is_paused'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
data_source_type = db.Column(db.String(255), nullable=False)
data_source_info = db.Column(db.Text, nullable=True)
dataset_process_rule_id = db.Column(UUID, nullable=True)
batch = db.Column(db.String(255), nullable=False)
name = db.Column(db.String(255), nullable=False)
created_from = db.Column(db.String(255), nullable=False)
created_by = db.Column(UUID, nullable=False)
created_api_request_id = db.Column(UUID, nullable=True)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
# start processing
processing_started_at = db.Column(db.DateTime, nullable=True)
# parsing
file_id = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=True)
parsing_completed_at = db.Column(db.DateTime, nullable=True)
# cleaning
cleaning_completed_at = db.Column(db.DateTime, nullable=True)
# split
splitting_completed_at = db.Column(db.DateTime, nullable=True)
# indexing
tokens = db.Column(db.Integer, nullable=True)
indexing_latency = db.Column(db.Float, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
# pause
is_paused = db.Column(db.Boolean, nullable=True, server_default=db.text('false'))
paused_by = db.Column(UUID, nullable=True)
paused_at = db.Column(db.DateTime, nullable=True)
# error
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
# basic fields
indexing_status = db.Column(db.String(
255), nullable=False, server_default=db.text("'waiting'::character varying"))
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
archived = db.Column(db.Boolean, nullable=False,
server_default=db.text('false'))
archived_reason = db.Column(db.String(255), nullable=True)
archived_by = db.Column(UUID, nullable=True)
archived_at = db.Column(db.DateTime, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
doc_type = db.Column(db.String(40), nullable=True)
doc_metadata = db.Column(db.JSON, nullable=True)
doc_form = db.Column(db.String(
255), nullable=False, server_default=db.text("'text_model'::character varying"))
doc_language = db.Column(db.String(255), nullable=True)
DATA_SOURCES = ['upload_file', 'notion_import']
def display_status(self):
status = None
if self.indexing_status == 'waiting':
status = 'queuing'
elif self.indexing_status not in ['completed', 'error', 'waiting'] and self.is_paused:
status = 'paused'
elif self.indexing_status in ['parsing', 'cleaning', 'splitting', 'indexing']:
status = 'indexing'
elif self.indexing_status == 'error':
status = 'error'
elif self.indexing_status == 'completed' and not self.archived and self.enabled:
status = 'available'
elif self.indexing_status == 'completed' and not self.archived and not self.enabled:
status = 'disabled'
elif self.indexing_status == 'completed' and self.archived:
status = 'archived'
return status
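For reference, the branch order above resolves to this mapping (first match wins):

# indexing_status                       is_paused  archived  enabled  -> display_status
# 'waiting'                             *          *         *        -> 'queuing'
# parsing/cleaning/splitting/indexing   True       *         *        -> 'paused'
# parsing/cleaning/splitting/indexing   False      *         *        -> 'indexing'
# 'error'                               *          *         *        -> 'error'
# 'completed'                           *          False     True     -> 'available'
# 'completed'                           *          False     False    -> 'disabled'
# 'completed'                           *          True      *        -> 'archived'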
def data_source_info_dict(self):
if self.data_source_info:
try:
data_source_info_dict = json.loads(self.data_source_info)
except JSONDecodeError:
data_source_info_dict = {}
return data_source_info_dict
return None
def data_source_detail_dict(self):
if self.data_source_info:
if self.data_source_type == 'upload_file':
data_source_info_dict = json.loads(self.data_source_info)
file_detail = db.session.query(UploadFile). \
filter(UploadFile.id == data_source_info_dict['upload_file_id']). \
one_or_none()
if file_detail:
return {
'upload_file': {
'id': file_detail.id,
'name': file_detail.name,
'size': file_detail.size,
'extension': file_detail.extension,
'mime_type': file_detail.mime_type,
'created_by': file_detail.created_by,
'created_at': file_detail.created_at.timestamp()
}
}
elif self.data_source_type == 'notion_import':
return json.loads(self.data_source_info)
return {}
def average_segment_length(self):
if self.word_count and self.segment_count:
return self.word_count // self.segment_count
return 0
def dataset_process_rule(self):
if self.dataset_process_rule_id:
return DatasetProcessRule.query.get(self.dataset_process_rule_id)
return None
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).one_or_none()
def segment_count(self):
return DocumentSegment.query.filter(DocumentSegment.document_id == self.id).count()
def hit_count(self):
return DocumentSegment.query.with_entities(func.coalesce(func.sum(DocumentSegment.hit_count), 0)) \
.filter(DocumentSegment.document_id == self.id).scalar()
class DocumentSegment(db.Model):
__tablename__ = 'document_segments'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_segment_pkey'),
db.Index('document_segment_dataset_id_idx', 'dataset_id'),
db.Index('document_segment_document_id_idx', 'document_id'),
db.Index('document_segment_tenant_dataset_idx', 'dataset_id', 'tenant_id'),
db.Index('document_segment_tenant_document_idx', 'document_id', 'tenant_id'),
db.Index('document_segment_dataset_node_idx', 'dataset_id', 'index_node_id'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
document_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
content = db.Column(db.Text, nullable=False)
answer = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=False)
tokens = db.Column(db.Integer, nullable=False)
# indexing fields
keywords = db.Column(db.JSON, nullable=True)
index_node_id = db.Column(db.String(255), nullable=True)
index_node_hash = db.Column(db.String(255), nullable=True)
# basic fields
hit_count = db.Column(db.Integer, nullable=False, default=0)
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
status = db.Column(db.String(255), nullable=False,
server_default=db.text("'waiting'::character varying"))
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
indexing_at = db.Column(db.DateTime, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).first()
def document(self):
return db.session.query(Document).filter(Document.id == self.document_id).first()
def previous_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position - 1
).first()
def next_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position + 1
).first()
The provided code snippet includes the necessary dependencies for implementing the `deal_dataset_vector_index_task` function. Write a Python function `def deal_dataset_vector_index_task(dataset_id: str, action: str)` to solve the following problem:
Async deal dataset from index :param dataset_id: dataset_id :param action: action Usage: deal_dataset_vector_index_task.delay(dataset_id, action)
Here is the function:
def deal_dataset_vector_index_task(dataset_id: str, action: str):
"""
Async deal dataset from index
:param dataset_id: dataset_id
:param action: action
Usage: deal_dataset_vector_index_task.delay(dataset_id, action)
"""
logging.info(click.style('Start deal dataset vector index: {}'.format(dataset_id), fg='green'))
start_at = time.perf_counter()
try:
dataset = Dataset.query.filter_by(
id=dataset_id
).first()
if not dataset:
raise Exception('Dataset not found')
index_type = dataset.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
if action == "remove":
index_processor.clean(dataset, None, with_keywords=False)
elif action == "add":
dataset_documents = db.session.query(DatasetDocument).filter(
DatasetDocument.dataset_id == dataset_id,
DatasetDocument.indexing_status == 'completed',
DatasetDocument.enabled == True,
DatasetDocument.archived == False,
).all()
if dataset_documents:
documents = []
for dataset_document in dataset_documents:
# rebuild Document objects from the dataset document's enabled segments
segments = db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == dataset_document.id,
DocumentSegment.enabled == True
).order_by(DocumentSegment.position.asc()).all()
for segment in segments:
document = Document(
page_content=segment.content,
metadata={
"doc_id": segment.index_node_id,
"doc_hash": segment.index_node_hash,
"document_id": segment.document_id,
"dataset_id": segment.dataset_id,
}
)
documents.append(document)
# save vector index
index_processor.load(dataset, documents, with_keywords=False)
end_at = time.perf_counter()
logging.info(
click.style('Deal dataset vector index: {} latency: {}'.format(dataset_id, end_at - start_at), fg='green'))
except Exception:
logging.exception("Deal dataset vector index failed") | Async deal dataset from index :param dataset_id: dataset_id :param action: action Usage: deal_dataset_vector_index_task.delay(dataset_id, action) |
17,024 | import logging
import time
import click
from celery import shared_task
from werkzeug.exceptions import NotFound
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import Document, DocumentSegment
class IndexProcessorFactory:
"""IndexProcessorInit.
"""
def __init__(self, index_type: str):
self._index_type = index_type
def init_index_processor(self) -> BaseIndexProcessor:
"""Init index processor."""
if not self._index_type:
raise ValueError("Index type must be specified.")
if self._index_type == IndexType.PARAGRAPH_INDEX.value:
return ParagraphIndexProcessor()
elif self._index_type == IndexType.QA_INDEX.value:
return QAIndexProcessor()
else:
raise ValueError(f"Index type {self._index_type} is not supported.")
db = SQLAlchemy()
redis_client = redis.Redis()
class Document(db.Model):
__tablename__ = 'documents'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_pkey'),
db.Index('document_dataset_id_idx', 'dataset_id'),
db.Index('document_is_paused_idx', 'is_paused'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
data_source_type = db.Column(db.String(255), nullable=False)
data_source_info = db.Column(db.Text, nullable=True)
dataset_process_rule_id = db.Column(UUID, nullable=True)
batch = db.Column(db.String(255), nullable=False)
name = db.Column(db.String(255), nullable=False)
created_from = db.Column(db.String(255), nullable=False)
created_by = db.Column(UUID, nullable=False)
created_api_request_id = db.Column(UUID, nullable=True)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
# start processing
processing_started_at = db.Column(db.DateTime, nullable=True)
# parsing
file_id = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=True)
parsing_completed_at = db.Column(db.DateTime, nullable=True)
# cleaning
cleaning_completed_at = db.Column(db.DateTime, nullable=True)
# split
splitting_completed_at = db.Column(db.DateTime, nullable=True)
# indexing
tokens = db.Column(db.Integer, nullable=True)
indexing_latency = db.Column(db.Float, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
# pause
is_paused = db.Column(db.Boolean, nullable=True, server_default=db.text('false'))
paused_by = db.Column(UUID, nullable=True)
paused_at = db.Column(db.DateTime, nullable=True)
# error
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
# basic fields
indexing_status = db.Column(db.String(
255), nullable=False, server_default=db.text("'waiting'::character varying"))
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
archived = db.Column(db.Boolean, nullable=False,
server_default=db.text('false'))
archived_reason = db.Column(db.String(255), nullable=True)
archived_by = db.Column(UUID, nullable=True)
archived_at = db.Column(db.DateTime, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
doc_type = db.Column(db.String(40), nullable=True)
doc_metadata = db.Column(db.JSON, nullable=True)
doc_form = db.Column(db.String(
255), nullable=False, server_default=db.text("'text_model'::character varying"))
doc_language = db.Column(db.String(255), nullable=True)
DATA_SOURCES = ['upload_file', 'notion_import']
def display_status(self):
status = None
if self.indexing_status == 'waiting':
status = 'queuing'
elif self.indexing_status not in ['completed', 'error', 'waiting'] and self.is_paused:
status = 'paused'
elif self.indexing_status in ['parsing', 'cleaning', 'splitting', 'indexing']:
status = 'indexing'
elif self.indexing_status == 'error':
status = 'error'
elif self.indexing_status == 'completed' and not self.archived and self.enabled:
status = 'available'
elif self.indexing_status == 'completed' and not self.archived and not self.enabled:
status = 'disabled'
elif self.indexing_status == 'completed' and self.archived:
status = 'archived'
return status
def data_source_info_dict(self):
if self.data_source_info:
try:
data_source_info_dict = json.loads(self.data_source_info)
except JSONDecodeError:
data_source_info_dict = {}
return data_source_info_dict
return None
def data_source_detail_dict(self):
if self.data_source_info:
if self.data_source_type == 'upload_file':
data_source_info_dict = json.loads(self.data_source_info)
file_detail = db.session.query(UploadFile). \
filter(UploadFile.id == data_source_info_dict['upload_file_id']). \
one_or_none()
if file_detail:
return {
'upload_file': {
'id': file_detail.id,
'name': file_detail.name,
'size': file_detail.size,
'extension': file_detail.extension,
'mime_type': file_detail.mime_type,
'created_by': file_detail.created_by,
'created_at': file_detail.created_at.timestamp()
}
}
elif self.data_source_type == 'notion_import':
return json.loads(self.data_source_info)
return {}
def average_segment_length(self):
if self.word_count and self.segment_count:
return self.word_count // self.segment_count
return 0
def dataset_process_rule(self):
if self.dataset_process_rule_id:
return DatasetProcessRule.query.get(self.dataset_process_rule_id)
return None
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).one_or_none()
def segment_count(self):
return DocumentSegment.query.filter(DocumentSegment.document_id == self.id).count()
def hit_count(self):
return DocumentSegment.query.with_entities(func.coalesce(func.sum(DocumentSegment.hit_count), 0)) \
.filter(DocumentSegment.document_id == self.id).scalar()
class DocumentSegment(db.Model):
__tablename__ = 'document_segments'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_segment_pkey'),
db.Index('document_segment_dataset_id_idx', 'dataset_id'),
db.Index('document_segment_document_id_idx', 'document_id'),
db.Index('document_segment_tenant_dataset_idx', 'dataset_id', 'tenant_id'),
db.Index('document_segment_tenant_document_idx', 'document_id', 'tenant_id'),
db.Index('document_segment_dataset_node_idx', 'dataset_id', 'index_node_id'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
document_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
content = db.Column(db.Text, nullable=False)
answer = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=False)
tokens = db.Column(db.Integer, nullable=False)
# indexing fields
keywords = db.Column(db.JSON, nullable=True)
index_node_id = db.Column(db.String(255), nullable=True)
index_node_hash = db.Column(db.String(255), nullable=True)
# basic fields
hit_count = db.Column(db.Integer, nullable=False, default=0)
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
status = db.Column(db.String(255), nullable=False,
server_default=db.text("'waiting'::character varying"))
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
indexing_at = db.Column(db.DateTime, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).first()
def document(self):
return db.session.query(Document).filter(Document.id == self.document_id).first()
def previous_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position - 1
).first()
def next_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position + 1
).first()
The provided code snippet includes the necessary dependencies for implementing the `remove_document_from_index_task` function. Write a Python function `def remove_document_from_index_task(document_id: str)` to solve the following problem:
Async Remove document from index :param document_id: document id Usage: remove_document_from_index.delay(document_id)
Here is the function:
def remove_document_from_index_task(document_id: str):
"""
Async Remove document from index
:param document_id: document id
Usage: remove_document_from_index.delay(document_id)
"""
logging.info(click.style('Start remove document segments from index: {}'.format(document_id), fg='green'))
start_at = time.perf_counter()
document = db.session.query(Document).filter(Document.id == document_id).first()
if not document:
raise NotFound('Document not found')
if document.indexing_status != 'completed':
return
indexing_cache_key = 'document_{}_indexing'.format(document.id)
try:
dataset = document.dataset
if not dataset:
raise Exception('Document has no dataset')
index_processor = IndexProcessorFactory(document.doc_form).init_index_processor()
segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document.id).all()
index_node_ids = [segment.index_node_id for segment in segments]
if index_node_ids:
try:
index_processor.clean(dataset, index_node_ids)
except Exception:
logging.exception(f"clean dataset {dataset.id} from index failed")
end_at = time.perf_counter()
logging.info(
click.style('Document removed from index: {} latency: {}'.format(document.id, end_at - start_at), fg='green'))
except Exception:
logging.exception("remove document from index failed")
if not document.archived:
document.enabled = True
db.session.commit()
finally:
redis_client.delete(indexing_cache_key) | Async Remove document from index :param document_id: document id Usage: remove_document_from_index.delay(document_id) |
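The `finally` block above deletes a Redis key it never set, which implies the enqueueing side sets it first as a "busy" marker. A hedged sketch of that pairing; the setex call and TTL describe an assumed caller, not code shown in this row:

indexing_cache_key = 'document_{}_indexing'.format(document_id)
# Producer (hypothetical): flag the document as busy, then enqueue.
redis_client.setex(indexing_cache_key, 600, 1)  # 600s TTL is illustrative
remove_document_from_index_task.delay(document_id)
# Worker: the task's finally-block deletes the key on success or failure,
# releasing the busy marker.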
17,025 | import logging
import time
import click
from celery import shared_task
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import Dataset, Document
class IndexProcessorFactory:
"""IndexProcessorInit.
"""
def __init__(self, index_type: str):
self._index_type = index_type
def init_index_processor(self) -> BaseIndexProcessor:
"""Init index processor."""
if not self._index_type:
raise ValueError("Index type must be specified.")
if self._index_type == IndexType.PARAGRAPH_INDEX.value:
return ParagraphIndexProcessor()
elif self._index_type == IndexType.QA_INDEX.value:
return QAIndexProcessor()
else:
raise ValueError(f"Index type {self._index_type} is not supported.")
db = SQLAlchemy()
redis_client = redis.Redis()
class Dataset(db.Model):
__tablename__ = 'datasets'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='dataset_pkey'),
db.Index('dataset_tenant_idx', 'tenant_id'),
db.Index('retrieval_model_idx', "retrieval_model", postgresql_using='gin')
)
INDEXING_TECHNIQUE_LIST = ['high_quality', 'economy', None]
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text, nullable=True)
provider = db.Column(db.String(255), nullable=False,
server_default=db.text("'vendor'::character varying"))
permission = db.Column(db.String(255), nullable=False,
server_default=db.text("'only_me'::character varying"))
data_source_type = db.Column(db.String(255))
indexing_technique = db.Column(db.String(255), nullable=True)
index_struct = db.Column(db.Text, nullable=True)
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
embedding_model = db.Column(db.String(255), nullable=True)
embedding_model_provider = db.Column(db.String(255), nullable=True)
collection_binding_id = db.Column(UUID, nullable=True)
retrieval_model = db.Column(JSONB, nullable=True)
def dataset_keyword_table(self):
dataset_keyword_table = db.session.query(DatasetKeywordTable).filter(
DatasetKeywordTable.dataset_id == self.id).first()
if dataset_keyword_table:
return dataset_keyword_table
return None
def index_struct_dict(self):
return json.loads(self.index_struct) if self.index_struct else None
def created_by_account(self):
return Account.query.get(self.created_by)
def latest_process_rule(self):
return DatasetProcessRule.query.filter(DatasetProcessRule.dataset_id == self.id) \
.order_by(DatasetProcessRule.created_at.desc()).first()
def app_count(self):
return db.session.query(func.count(AppDatasetJoin.id)).filter(AppDatasetJoin.dataset_id == self.id).scalar()
def document_count(self):
return db.session.query(func.count(Document.id)).filter(Document.dataset_id == self.id).scalar()
def available_document_count(self):
return db.session.query(func.count(Document.id)).filter(
Document.dataset_id == self.id,
Document.indexing_status == 'completed',
Document.enabled == True,
Document.archived == False
).scalar()
def available_segment_count(self):
return db.session.query(func.count(DocumentSegment.id)).filter(
DocumentSegment.dataset_id == self.id,
DocumentSegment.status == 'completed',
DocumentSegment.enabled == True
).scalar()
def word_count(self):
return Document.query.with_entities(func.coalesce(func.sum(Document.word_count), 0)) \
.filter(Document.dataset_id == self.id).scalar()
def doc_form(self):
document = db.session.query(Document).filter(
Document.dataset_id == self.id).first()
if document:
return document.doc_form
return None
def retrieval_model_dict(self):
default_retrieval_model = {
'search_method': 'semantic_search',
'reranking_enable': False,
'reranking_model': {
'reranking_provider_name': '',
'reranking_model_name': ''
},
'top_k': 2,
'score_threshold_enabled': False
}
return self.retrieval_model if self.retrieval_model else default_retrieval_model
def gen_collection_name_by_id(dataset_id: str) -> str:
normalized_dataset_id = dataset_id.replace("-", "_")
return f'Vector_index_{normalized_dataset_id}_Node'
class Document(db.Model):
__tablename__ = 'documents'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_pkey'),
db.Index('document_dataset_id_idx', 'dataset_id'),
db.Index('document_is_paused_idx', 'is_paused'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
data_source_type = db.Column(db.String(255), nullable=False)
data_source_info = db.Column(db.Text, nullable=True)
dataset_process_rule_id = db.Column(UUID, nullable=True)
batch = db.Column(db.String(255), nullable=False)
name = db.Column(db.String(255), nullable=False)
created_from = db.Column(db.String(255), nullable=False)
created_by = db.Column(UUID, nullable=False)
created_api_request_id = db.Column(UUID, nullable=True)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
# start processing
processing_started_at = db.Column(db.DateTime, nullable=True)
# parsing
file_id = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=True)
parsing_completed_at = db.Column(db.DateTime, nullable=True)
# cleaning
cleaning_completed_at = db.Column(db.DateTime, nullable=True)
# split
splitting_completed_at = db.Column(db.DateTime, nullable=True)
# indexing
tokens = db.Column(db.Integer, nullable=True)
indexing_latency = db.Column(db.Float, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
# pause
is_paused = db.Column(db.Boolean, nullable=True, server_default=db.text('false'))
paused_by = db.Column(UUID, nullable=True)
paused_at = db.Column(db.DateTime, nullable=True)
# error
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
# basic fields
indexing_status = db.Column(db.String(
255), nullable=False, server_default=db.text("'waiting'::character varying"))
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
archived = db.Column(db.Boolean, nullable=False,
server_default=db.text('false'))
archived_reason = db.Column(db.String(255), nullable=True)
archived_by = db.Column(UUID, nullable=True)
archived_at = db.Column(db.DateTime, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
doc_type = db.Column(db.String(40), nullable=True)
doc_metadata = db.Column(db.JSON, nullable=True)
doc_form = db.Column(db.String(
255), nullable=False, server_default=db.text("'text_model'::character varying"))
doc_language = db.Column(db.String(255), nullable=True)
DATA_SOURCES = ['upload_file', 'notion_import']
def display_status(self):
status = None
if self.indexing_status == 'waiting':
status = 'queuing'
elif self.indexing_status not in ['completed', 'error', 'waiting'] and self.is_paused:
status = 'paused'
elif self.indexing_status in ['parsing', 'cleaning', 'splitting', 'indexing']:
status = 'indexing'
elif self.indexing_status == 'error':
status = 'error'
elif self.indexing_status == 'completed' and not self.archived and self.enabled:
status = 'available'
elif self.indexing_status == 'completed' and not self.archived and not self.enabled:
status = 'disabled'
elif self.indexing_status == 'completed' and self.archived:
status = 'archived'
return status
def data_source_info_dict(self):
if self.data_source_info:
try:
data_source_info_dict = json.loads(self.data_source_info)
except JSONDecodeError:
data_source_info_dict = {}
return data_source_info_dict
return None
def data_source_detail_dict(self):
if self.data_source_info:
if self.data_source_type == 'upload_file':
data_source_info_dict = json.loads(self.data_source_info)
file_detail = db.session.query(UploadFile). \
filter(UploadFile.id == data_source_info_dict['upload_file_id']). \
one_or_none()
if file_detail:
return {
'upload_file': {
'id': file_detail.id,
'name': file_detail.name,
'size': file_detail.size,
'extension': file_detail.extension,
'mime_type': file_detail.mime_type,
'created_by': file_detail.created_by,
'created_at': file_detail.created_at.timestamp()
}
}
elif self.data_source_type == 'notion_import':
return json.loads(self.data_source_info)
return {}
def average_segment_length(self):
if self.word_count and self.segment_count:
return self.word_count // self.segment_count
return 0
def dataset_process_rule(self):
if self.dataset_process_rule_id:
return DatasetProcessRule.query.get(self.dataset_process_rule_id)
return None
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).one_or_none()
def segment_count(self):
return DocumentSegment.query.filter(DocumentSegment.document_id == self.id).count()
def hit_count(self):
return DocumentSegment.query.with_entities(func.coalesce(func.sum(DocumentSegment.hit_count), 0)) \
.filter(DocumentSegment.document_id == self.id).scalar()
The provided code snippet includes the necessary dependencies for implementing the `delete_segment_from_index_task` function. Write a Python function `def delete_segment_from_index_task(segment_id: str, index_node_id: str, dataset_id: str, document_id: str)` to solve the following problem:
Async Remove segment from index :param segment_id: :param index_node_id: :param dataset_id: :param document_id: Usage: delete_segment_from_index_task.delay(segment_id)
Here is the function:
def delete_segment_from_index_task(segment_id: str, index_node_id: str, dataset_id: str, document_id: str):
"""
Async Remove segment from index
:param segment_id:
:param index_node_id:
:param dataset_id:
:param document_id:
Usage: delete_segment_from_index_task.delay(segment_id)
"""
logging.info(click.style('Start delete segment from index: {}'.format(segment_id), fg='green'))
start_at = time.perf_counter()
indexing_cache_key = 'segment_{}_delete_indexing'.format(segment_id)
try:
dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
if not dataset:
logging.info(click.style('Segment {} has no dataset, pass.'.format(segment_id), fg='cyan'))
return
dataset_document = db.session.query(Document).filter(Document.id == document_id).first()
if not dataset_document:
logging.info(click.style('Segment {} has no document, pass.'.format(segment_id), fg='cyan'))
return
if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != 'completed':
logging.info(click.style('Segment {} document status is invalid, pass.'.format(segment_id), fg='cyan'))
return
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
index_processor.clean(dataset, [index_node_id])
end_at = time.perf_counter()
logging.info(click.style('Segment deleted from index: {} latency: {}'.format(segment_id, end_at - start_at), fg='green'))
except Exception:
logging.exception("delete segment from index failed")
finally:
redis_client.delete(indexing_cache_key) | Async Remove segment from index :param segment_id: :param index_node_id: :param dataset_id: :param document_id: Usage: delete_segment_from_index_task.delay(segment_id) |
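The guards above mean a segment is only removed from the index while its parent document is live; a compact restatement:

# The clean() call runs only when all of these hold:
#   - the dataset exists
#   - the parent document exists
#   - document.enabled and not document.archived
#   - document.indexing_status == 'completed'
# Otherwise the task logs, skips, and still clears the Redis delete flag.
delete_segment_from_index_task.delay(segment.id, segment.index_node_id,
                                     segment.dataset_id, segment.document_id)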
17,026 | import datetime
import logging
import time
from typing import Optional
import click
from celery import shared_task
from werkzeug.exceptions import NotFound
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from core.rag.models.document import Document
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import DocumentSegment
class IndexProcessorFactory:
"""IndexProcessorInit.
"""
def __init__(self, index_type: str):
self._index_type = index_type
def init_index_processor(self) -> BaseIndexProcessor:
"""Init index processor."""
if not self._index_type:
raise ValueError("Index type must be specified.")
if self._index_type == IndexType.PARAGRAPH_INDEX.value:
return ParagraphIndexProcessor()
elif self._index_type == IndexType.QA_INDEX.value:
return QAIndexProcessor()
else:
raise ValueError(f"Index type {self._index_type} is not supported.")
class Document(BaseModel):
"""Class for storing a piece of text and associated metadata."""
page_content: str
"""Arbitrary metadata about the page content (e.g., source, relationships to other
documents, etc.).
"""
metadata: Optional[dict] = Field(default_factory=dict)
db = SQLAlchemy()
redis_client = redis.Redis()
class DocumentSegment(db.Model):
__tablename__ = 'document_segments'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_segment_pkey'),
db.Index('document_segment_dataset_id_idx', 'dataset_id'),
db.Index('document_segment_document_id_idx', 'document_id'),
db.Index('document_segment_tenant_dataset_idx', 'dataset_id', 'tenant_id'),
db.Index('document_segment_tenant_document_idx', 'document_id', 'tenant_id'),
db.Index('document_segment_dataset_node_idx', 'dataset_id', 'index_node_id'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
document_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
content = db.Column(db.Text, nullable=False)
answer = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=False)
tokens = db.Column(db.Integer, nullable=False)
# indexing fields
keywords = db.Column(db.JSON, nullable=True)
index_node_id = db.Column(db.String(255), nullable=True)
index_node_hash = db.Column(db.String(255), nullable=True)
# basic fields
hit_count = db.Column(db.Integer, nullable=False, default=0)
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
status = db.Column(db.String(255), nullable=False,
server_default=db.text("'waiting'::character varying"))
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
indexing_at = db.Column(db.DateTime, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).first()
def document(self):
return db.session.query(Document).filter(Document.id == self.document_id).first()
def previous_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position - 1
).first()
def next_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position + 1
).first()
The provided code snippet includes the necessary dependencies for implementing the `create_segment_to_index_task` function. Write a Python function `def create_segment_to_index_task(segment_id: str, keywords: Optional[list[str]] = None)` to solve the following problem:
Async create segment to index :param segment_id: :param keywords: Usage: create_segment_to_index_task.delay(segment_id)
Here is the function:
def create_segment_to_index_task(segment_id: str, keywords: Optional[list[str]] = None):
"""
Async create segment to index
:param segment_id:
:param keywords:
Usage: create_segment_to_index_task.delay(segment_id)
"""
logging.info(click.style('Start create segment to index: {}'.format(segment_id), fg='green'))
start_at = time.perf_counter()
segment = db.session.query(DocumentSegment).filter(DocumentSegment.id == segment_id).first()
if not segment:
raise NotFound('Segment not found')
if segment.status != 'waiting':
return
indexing_cache_key = 'segment_{}_indexing'.format(segment.id)
try:
# update segment status to indexing
update_params = {
DocumentSegment.status: "indexing",
DocumentSegment.indexing_at: datetime.datetime.utcnow()
}
DocumentSegment.query.filter_by(id=segment.id).update(update_params)
db.session.commit()
document = Document(
page_content=segment.content,
metadata={
"doc_id": segment.index_node_id,
"doc_hash": segment.index_node_hash,
"document_id": segment.document_id,
"dataset_id": segment.dataset_id,
}
)
dataset = segment.dataset
if not dataset:
logging.info(click.style('Segment {} has no dataset, pass.'.format(segment.id), fg='cyan'))
return
dataset_document = segment.document
if not dataset_document:
logging.info(click.style('Segment {} has no document, pass.'.format(segment.id), fg='cyan'))
return
if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != 'completed':
logging.info(click.style('Segment {} document status is invalid, pass.'.format(segment.id), fg='cyan'))
return
index_type = dataset.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
index_processor.load(dataset, [document])
# update segment to completed
update_params = {
DocumentSegment.status: "completed",
DocumentSegment.completed_at: datetime.datetime.utcnow()
}
DocumentSegment.query.filter_by(id=segment.id).update(update_params)
db.session.commit()
end_at = time.perf_counter()
logging.info(click.style('Segment created to index: {} latency: {}'.format(segment.id, end_at - start_at), fg='green'))
except Exception as e:
logging.exception("create segment to index failed")
segment.enabled = False
segment.disabled_at = datetime.datetime.utcnow()
segment.status = 'error'
segment.error = str(e)
db.session.commit()
finally:
redis_client.delete(indexing_cache_key) | Async create segment to index :param segment_id: :param keywords: Usage: create_segment_to_index_task.delay(segment_id) |
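Viewed as a state machine on DocumentSegment.status, the task above is simply:

# 'waiting'   -> the only state the task accepts; anything else returns early
# 'indexing'  -> set (with indexing_at) before building the Document
# 'completed' -> set (with completed_at) after index_processor.load() succeeds
# 'error'     -> set on exception; the segment is also disabled and the
#                error message recorded
# Note: the keywords argument is accepted but unused in the body shown above.
create_segment_to_index_task.delay(segment.id, keywords=['optional', 'term'])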
17,027 | import datetime
import logging
import time
import click
from celery import shared_task
from werkzeug.exceptions import NotFound
from core.indexing_runner import DocumentIsPausedException, IndexingRunner
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from extensions.ext_database import db
from models.dataset import Dataset, Document, DocumentSegment
class IndexingRunner:
def __init__(self):
self.storage = storage
self.model_manager = ModelManager()
def run(self, dataset_documents: list[DatasetDocument]):
"""Run the indexing process."""
for dataset_document in dataset_documents:
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
# extract
text_docs = self._extract(index_processor, dataset_document, processing_rule.to_dict())
# transform
documents = self._transform(index_processor, dataset, text_docs, dataset_document.doc_language,
processing_rule.to_dict())
# save segment
self._load_segments(dataset, dataset_document, documents)
# load
self._load(
index_processor=index_processor,
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except ObjectDeletedError:
logging.warning('Document deleted, document id: {}'.format(dataset_document.id))
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def run_in_splitting_status(self, dataset_document: DatasetDocument):
"""Run the indexing process when the index_status is splitting."""
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get exist document_segment list and delete
document_segments = DocumentSegment.query.filter_by(
dataset_id=dataset.id,
document_id=dataset_document.id
).all()
for document_segment in document_segments:
db.session.delete(document_segment)
db.session.commit()
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
# extract
text_docs = self._extract(index_processor, dataset_document, processing_rule.to_dict())
# transform
documents = self._transform(index_processor, dataset, text_docs, dataset_document.doc_language,
processing_rule.to_dict())
# save segment
self._load_segments(dataset, dataset_document, documents)
# load
self._load(
index_processor=index_processor,
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def run_in_indexing_status(self, dataset_document: DatasetDocument):
"""Run the indexing process when the index_status is indexing."""
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get exist document_segment list and delete
document_segments = DocumentSegment.query.filter_by(
dataset_id=dataset.id,
document_id=dataset_document.id
).all()
documents = []
if document_segments:
for document_segment in document_segments:
# transform segment to node
if document_segment.status != "completed":
document = Document(
page_content=document_segment.content,
metadata={
"doc_id": document_segment.index_node_id,
"doc_hash": document_segment.index_node_hash,
"document_id": document_segment.document_id,
"dataset_id": document_segment.dataset_id,
}
)
documents.append(document)
# build index
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
self._load(
index_processor=index_processor,
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def indexing_estimate(self, tenant_id: str, extract_settings: list[ExtractSetting], tmp_processing_rule: dict,
doc_form: str = None, doc_language: str = 'English', dataset_id: str = None,
indexing_technique: str = 'economy') -> dict:
"""
Estimate the indexing for the document.
"""
# check document limit
features = FeatureService.get_features(tenant_id)
if features.billing.enabled:
count = len(extract_settings)
batch_upload_limit = int(current_app.config['BATCH_UPLOAD_LIMIT'])
if count > batch_upload_limit:
raise ValueError(f"You have reached the batch upload limit of {batch_upload_limit}.")
embedding_model_instance = None
if dataset_id:
dataset = Dataset.query.filter_by(
id=dataset_id
).first()
if not dataset:
raise ValueError('Dataset not found.')
if dataset.indexing_technique == 'high_quality' or indexing_technique == 'high_quality':
if dataset.embedding_model_provider:
embedding_model_instance = self.model_manager.get_model_instance(
tenant_id=tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
else:
embedding_model_instance = self.model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.TEXT_EMBEDDING,
)
else:
if indexing_technique == 'high_quality':
embedding_model_instance = self.model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.TEXT_EMBEDDING,
)
tokens = 0
preview_texts = []
total_segments = 0
total_price = 0
currency = 'USD'
index_type = doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
all_text_docs = []
for extract_setting in extract_settings:
# extract
text_docs = index_processor.extract(extract_setting, process_rule_mode=tmp_processing_rule["mode"])
all_text_docs.extend(text_docs)
processing_rule = DatasetProcessRule(
mode=tmp_processing_rule["mode"],
rules=json.dumps(tmp_processing_rule["rules"])
)
# get splitter
splitter = self._get_splitter(processing_rule, embedding_model_instance)
# split to documents
documents = self._split_to_documents_for_estimate(
text_docs=text_docs,
splitter=splitter,
processing_rule=processing_rule
)
total_segments += len(documents)
for document in documents:
if len(preview_texts) < 5:
preview_texts.append(document.page_content)
if indexing_technique == 'high_quality' or embedding_model_instance:
embedding_model_type_instance = embedding_model_instance.model_type_instance
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_type_instance)
tokens += embedding_model_type_instance.get_num_tokens(
model=embedding_model_instance.model,
credentials=embedding_model_instance.credentials,
texts=[self.filter_string(document.page_content)]
)
if doc_form and doc_form == 'qa_model':
model_instance = self.model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.LLM
)
model_type_instance = model_instance.model_type_instance
model_type_instance = cast(LargeLanguageModel, model_type_instance)
if len(preview_texts) > 0:
# qa model document
response = LLMGenerator.generate_qa_document(current_user.current_tenant_id, preview_texts[0],
doc_language)
document_qa_list = self.format_split_text(response)
price_info = model_type_instance.get_price(
model=model_instance.model,
credentials=model_instance.credentials,
price_type=PriceType.INPUT,
tokens=total_segments * 2000,
)
return {
"total_segments": total_segments * 20,
"tokens": total_segments * 2000,
"total_price": '{:f}'.format(price_info.total_amount),
"currency": price_info.currency,
"qa_preview": document_qa_list,
"preview": preview_texts
}
if embedding_model_instance:
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_instance.model_type_instance)
embedding_price_info = embedding_model_type_instance.get_price(
model=embedding_model_instance.model,
credentials=embedding_model_instance.credentials,
price_type=PriceType.INPUT,
tokens=tokens
)
total_price = '{:f}'.format(embedding_price_info.total_amount)
currency = embedding_price_info.currency
return {
"total_segments": total_segments,
"tokens": tokens,
"total_price": total_price,
"currency": currency,
"preview": preview_texts
}
def _extract(self, index_processor: BaseIndexProcessor, dataset_document: DatasetDocument, process_rule: dict) \
-> list[Document]:
# load file
if dataset_document.data_source_type not in ["upload_file", "notion_import"]:
return []
data_source_info = dataset_document.data_source_info_dict
text_docs = []
if dataset_document.data_source_type == 'upload_file':
if not data_source_info or 'upload_file_id' not in data_source_info:
raise ValueError("no upload file found")
file_detail = db.session.query(UploadFile). \
filter(UploadFile.id == data_source_info['upload_file_id']). \
one_or_none()
if file_detail:
extract_setting = ExtractSetting(
datasource_type="upload_file",
upload_file=file_detail,
document_model=dataset_document.doc_form
)
text_docs = index_processor.extract(extract_setting, process_rule_mode=process_rule['mode'])
elif dataset_document.data_source_type == 'notion_import':
if (not data_source_info or 'notion_workspace_id' not in data_source_info
or 'notion_page_id' not in data_source_info):
raise ValueError("no notion import info found")
extract_setting = ExtractSetting(
datasource_type="notion_import",
notion_info={
"notion_workspace_id": data_source_info['notion_workspace_id'],
"notion_obj_id": data_source_info['notion_page_id'],
"notion_page_type": data_source_info['type'],
"document": dataset_document,
"tenant_id": dataset_document.tenant_id
},
document_model=dataset_document.doc_form
)
text_docs = index_processor.extract(extract_setting, process_rule_mode=process_rule['mode'])
# update document status to splitting
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="splitting",
extra_update_params={
DatasetDocument.word_count: sum([len(text_doc.page_content) for text_doc in text_docs]),
DatasetDocument.parsing_completed_at: datetime.datetime.utcnow()
}
)
# replace doc id to document model id
text_docs = cast(list[Document], text_docs)
for text_doc in text_docs:
text_doc.metadata['document_id'] = dataset_document.id
text_doc.metadata['dataset_id'] = dataset_document.dataset_id
return text_docs
def filter_string(self, text):
text = re.sub(r'<\|', '<', text)
text = re.sub(r'\|>', '>', text)
text = re.sub(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F\xEF\xBF\xBE]', '', text)
# Unicode U+FFFE
text = re.sub('\uFFFE', '', text)
return text
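For instance, the normalization above rewrites special token markers and strips control characters:

# '<|' and '|>' collapse to plain angle brackets; control characters and
# U+FFFE are dropped:
# filter_string('<|assistant|>\x00 hello\uFFFE')  ->  '<assistant> hello'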
def _get_splitter(self, processing_rule: DatasetProcessRule,
embedding_model_instance: Optional[ModelInstance]) -> TextSplitter:
"""
Get the NodeParser object according to the processing rule.
"""
if processing_rule.mode == "custom":
# The user-defined segmentation rule
rules = json.loads(processing_rule.rules)
segmentation = rules["segmentation"]
if segmentation["max_tokens"] < 50 or segmentation["max_tokens"] > 1000:
raise ValueError("Custom segment length should be between 50 and 1000.")
separator = segmentation["separator"]
if separator:
separator = separator.replace('\\n', '\n')
if 'chunk_overlap' in segmentation and segmentation['chunk_overlap']:
chunk_overlap = segmentation['chunk_overlap']
else:
chunk_overlap = 0
character_splitter = FixedRecursiveCharacterTextSplitter.from_encoder(
chunk_size=segmentation["max_tokens"],
chunk_overlap=chunk_overlap,
fixed_separator=separator,
separators=["\n\n", "。", ".", " ", ""],
embedding_model_instance=embedding_model_instance
)
else:
# Automatic segmentation
character_splitter = EnhanceRecursiveCharacterTextSplitter.from_encoder(
chunk_size=DatasetProcessRule.AUTOMATIC_RULES['segmentation']['max_tokens'],
chunk_overlap=DatasetProcessRule.AUTOMATIC_RULES['segmentation']['chunk_overlap'],
separators=["\n\n", "。", ".", " ", ""],
embedding_model_instance=embedding_model_instance
)
return character_splitter
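For reference, a minimal sketch of the `rules` JSON that `_get_splitter` expects in `custom` mode; the field names mirror the accesses above, the concrete values are illustrative only.
import json
custom_rules = json.dumps({
    "segmentation": {
        "max_tokens": 500,    # must fall within [50, 1000], else ValueError
        "chunk_overlap": 50,  # optional; treated as 0 when absent or falsy
        "separator": "\\n"    # escaped newlines are unescaped before splitting
    }
})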
def _step_split(self, text_docs: list[Document], splitter: TextSplitter,
dataset: Dataset, dataset_document: DatasetDocument, processing_rule: DatasetProcessRule) \
-> list[Document]:
"""
Split the text documents into documents and save them to the document segment.
"""
documents = self._split_to_documents(
text_docs=text_docs,
splitter=splitter,
processing_rule=processing_rule,
tenant_id=dataset.tenant_id,
document_form=dataset_document.doc_form,
document_language=dataset_document.doc_language
)
# save node to document segment
doc_store = DatasetDocumentStore(
dataset=dataset,
user_id=dataset_document.created_by,
document_id=dataset_document.id
)
# add document segments
doc_store.add_documents(documents)
# update document status to indexing
cur_time = datetime.datetime.utcnow()
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="indexing",
extra_update_params={
DatasetDocument.cleaning_completed_at: cur_time,
DatasetDocument.splitting_completed_at: cur_time,
}
)
# update segment status to indexing
self._update_segments_by_document(
dataset_document_id=dataset_document.id,
update_params={
DocumentSegment.status: "indexing",
DocumentSegment.indexing_at: datetime.datetime.utcnow()
}
)
return documents
def _split_to_documents(self, text_docs: list[Document], splitter: TextSplitter,
processing_rule: DatasetProcessRule, tenant_id: str,
document_form: str, document_language: str) -> list[Document]:
"""
Split the text documents into nodes.
"""
all_documents = []
all_qa_documents = []
for text_doc in text_docs:
# document clean
document_text = self._document_clean(text_doc.page_content, processing_rule)
text_doc.page_content = document_text
# parse document to nodes
documents = splitter.split_documents([text_doc])
split_documents = []
for document_node in documents:
if document_node.page_content.strip():
doc_id = str(uuid.uuid4())
hash = helper.generate_text_hash(document_node.page_content)
document_node.metadata['doc_id'] = doc_id
document_node.metadata['doc_hash'] = hash
# strip a leading splitter character ('.' or '。') left over from splitting
page_content = document_node.page_content
if page_content.startswith((".", "。")):
    page_content = page_content[1:]
document_node.page_content = page_content
if document_node.page_content:
split_documents.append(document_node)
all_documents.extend(split_documents)
# processing qa document
if document_form == 'qa_model':
for i in range(0, len(all_documents), 10):
threads = []
sub_documents = all_documents[i:i + 10]
for doc in sub_documents:
document_format_thread = threading.Thread(target=self.format_qa_document, kwargs={
'flask_app': current_app._get_current_object(),
'tenant_id': tenant_id, 'document_node': doc, 'all_qa_documents': all_qa_documents,
'document_language': document_language})
threads.append(document_format_thread)
document_format_thread.start()
for thread in threads:
thread.join()
return all_qa_documents
return all_documents
def format_qa_document(self, flask_app: Flask, tenant_id: str, document_node, all_qa_documents, document_language):
format_documents = []
if document_node.page_content is None or not document_node.page_content.strip():
return
with flask_app.app_context():
try:
# qa model document
response = LLMGenerator.generate_qa_document(tenant_id, document_node.page_content, document_language)
document_qa_list = self.format_split_text(response)
qa_documents = []
for result in document_qa_list:
qa_document = Document(page_content=result['question'], metadata=document_node.metadata.copy())
doc_id = str(uuid.uuid4())
hash = helper.generate_text_hash(result['question'])
qa_document.metadata['answer'] = result['answer']
qa_document.metadata['doc_id'] = doc_id
qa_document.metadata['doc_hash'] = hash
qa_documents.append(qa_document)
format_documents.extend(qa_documents)
except Exception as e:
logging.exception(e)
all_qa_documents.extend(format_documents)
def _split_to_documents_for_estimate(self, text_docs: list[Document], splitter: TextSplitter,
processing_rule: DatasetProcessRule) -> list[Document]:
"""
Split the text documents into nodes.
"""
all_documents = []
for text_doc in text_docs:
# document clean
document_text = self._document_clean(text_doc.page_content, processing_rule)
text_doc.page_content = document_text
# parse document to nodes
documents = splitter.split_documents([text_doc])
split_documents = []
for document in documents:
if document.page_content is None or not document.page_content.strip():
continue
doc_id = str(uuid.uuid4())
hash = helper.generate_text_hash(document.page_content)
document.metadata['doc_id'] = doc_id
document.metadata['doc_hash'] = hash
split_documents.append(document)
all_documents.extend(split_documents)
return all_documents
def _document_clean(self, text: str, processing_rule: DatasetProcessRule) -> str:
"""
Clean the document text according to the processing rules.
"""
if processing_rule.mode == "automatic":
rules = DatasetProcessRule.AUTOMATIC_RULES
else:
rules = json.loads(processing_rule.rules) if processing_rule.rules else {}
if 'pre_processing_rules' in rules:
pre_processing_rules = rules["pre_processing_rules"]
for pre_processing_rule in pre_processing_rules:
if pre_processing_rule["id"] == "remove_extra_spaces" and pre_processing_rule["enabled"] is True:
# Remove extra spaces
pattern = r'\n{3,}'
text = re.sub(pattern, '\n\n', text)
pattern = r'[\t\f\r\x20\u00a0\u1680\u180e\u2000-\u200a\u202f\u205f\u3000]{2,}'
text = re.sub(pattern, ' ', text)
elif pre_processing_rule["id"] == "remove_urls_emails" and pre_processing_rule["enabled"] is True:
# Remove email
pattern = r'([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)'
text = re.sub(pattern, '', text)
# Remove URL
pattern = r'https?://[^\s]+'
text = re.sub(pattern, '', text)
return text
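Likewise, a sketch of the `pre_processing_rules` payload that `_document_clean` consumes in custom mode (rule ids taken from the branches above, values illustrative):
rules = {
    "pre_processing_rules": [
        {"id": "remove_extra_spaces", "enabled": True},   # collapse blank lines and whitespace runs
        {"id": "remove_urls_emails", "enabled": False},   # leave URLs and emails in place
    ]
}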
def format_split_text(self, text):
regex = r"Q\d+:\s*(.*?)\s*A\d+:\s*([\s\S]*?)(?=Q\d+:|$)"
matches = re.findall(regex, text, re.UNICODE)
return [
{
"question": q,
"answer": re.sub(r"\n\s*", "\n", a.strip())
}
for q, a in matches if q and a
]
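A quick trace (sample text invented): `format_split_text` parses the LLM's Q/A transcript into question/answer pairs.
# format_split_text("Q1: What is a dataset?\nA1: A collection of documents.\nQ2: What is a segment?\nA2: One chunk.")
# -> [{"question": "What is a dataset?", "answer": "A collection of documents."},
#     {"question": "What is a segment?", "answer": "One chunk."}]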
def _load(self, index_processor: BaseIndexProcessor, dataset: Dataset,
dataset_document: DatasetDocument, documents: list[Document]) -> None:
"""
insert index and update document/segment status to completed
"""
embedding_model_instance = None
if dataset.indexing_technique == 'high_quality':
embedding_model_instance = self.model_manager.get_model_instance(
tenant_id=dataset.tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
# chunk nodes by chunk size
indexing_start_at = time.perf_counter()
tokens = 0
chunk_size = 100
embedding_model_type_instance = None
if embedding_model_instance:
embedding_model_type_instance = embedding_model_instance.model_type_instance
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_type_instance)
for i in range(0, len(documents), chunk_size):
# check document is paused
self._check_document_paused_status(dataset_document.id)
chunk_documents = documents[i:i + chunk_size]
if dataset.indexing_technique == 'high_quality' or embedding_model_type_instance:
tokens += sum(
embedding_model_type_instance.get_num_tokens(
embedding_model_instance.model,
embedding_model_instance.credentials,
[document.page_content]
)
for document in chunk_documents
)
# load index
index_processor.load(dataset, chunk_documents)
db.session.add(dataset)
document_ids = [document.metadata['doc_id'] for document in chunk_documents]
db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == dataset_document.id,
DocumentSegment.index_node_id.in_(document_ids),
DocumentSegment.status == "indexing"
).update({
DocumentSegment.status: "completed",
DocumentSegment.enabled: True,
DocumentSegment.completed_at: datetime.datetime.utcnow()
})
db.session.commit()
indexing_end_at = time.perf_counter()
# update document status to completed
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="completed",
extra_update_params={
DatasetDocument.tokens: tokens,
DatasetDocument.completed_at: datetime.datetime.utcnow(),
DatasetDocument.indexing_latency: indexing_end_at - indexing_start_at,
}
)
def _check_document_paused_status(self, document_id: str):
indexing_cache_key = 'document_{}_is_paused'.format(document_id)
result = redis_client.get(indexing_cache_key)
if result:
raise DocumentIsPausedException()
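Pausing is signalled out-of-band through Redis; a sketch of the producer side (the key format comes from `_check_document_paused_status`, the TTL is an assumption):
# redis_client.set('document_{}_is_paused'.format(document_id), "1", ex=600)  # TTL assumed
# the next _check_document_paused_status call then raises DocumentIsPausedException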
def _update_document_index_status(self, document_id: str, after_indexing_status: str,
extra_update_params: Optional[dict] = None) -> None:
"""
Update the document indexing status.
"""
count = DatasetDocument.query.filter_by(id=document_id, is_paused=True).count()
if count > 0:
raise DocumentIsPausedException()
document = DatasetDocument.query.filter_by(id=document_id).first()
if not document:
raise DocumentIsDeletedPausedException()
update_params = {
DatasetDocument.indexing_status: after_indexing_status
}
if extra_update_params:
update_params.update(extra_update_params)
DatasetDocument.query.filter_by(id=document_id).update(update_params)
db.session.commit()
def _update_segments_by_document(self, dataset_document_id: str, update_params: dict) -> None:
"""
Update the document segment by document id.
"""
DocumentSegment.query.filter_by(document_id=dataset_document_id).update(update_params)
db.session.commit()
def batch_add_segments(self, segments: list[DocumentSegment], dataset: Dataset):
"""
Batch add segments index processing
"""
documents = []
for segment in segments:
document = Document(
page_content=segment.content,
metadata={
"doc_id": segment.index_node_id,
"doc_hash": segment.index_node_hash,
"document_id": segment.document_id,
"dataset_id": segment.dataset_id,
}
)
documents.append(document)
# save vector index
index_type = dataset.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
index_processor.load(dataset, documents)
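A hedged usage sketch for `batch_add_segments`; in practice the segments come from a DB query, and the IDs below are placeholders:
dataset = db.session.query(Dataset).filter(Dataset.id == "<dataset-uuid>").first()
segments = DocumentSegment.query.filter_by(document_id="<document-uuid>", status="completed").all()
IndexingRunner().batch_add_segments(segments, dataset)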
def _transform(self, index_processor: BaseIndexProcessor, dataset: Dataset,
text_docs: list[Document], doc_language: str, process_rule: dict) -> list[Document]:
# get embedding model instance
embedding_model_instance = None
if dataset.indexing_technique == 'high_quality':
if dataset.embedding_model_provider:
embedding_model_instance = self.model_manager.get_model_instance(
tenant_id=dataset.tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
else:
embedding_model_instance = self.model_manager.get_default_model_instance(
tenant_id=dataset.tenant_id,
model_type=ModelType.TEXT_EMBEDDING,
)
documents = index_processor.transform(text_docs, embedding_model_instance=embedding_model_instance,
process_rule=process_rule, tenant_id=dataset.tenant_id,
doc_language=doc_language)
return documents
def _load_segments(self, dataset, dataset_document, documents):
# save node to document segment
doc_store = DatasetDocumentStore(
dataset=dataset,
user_id=dataset_document.created_by,
document_id=dataset_document.id
)
# add document segments
doc_store.add_documents(documents)
# update document status to indexing
cur_time = datetime.datetime.utcnow()
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="indexing",
extra_update_params={
DatasetDocument.cleaning_completed_at: cur_time,
DatasetDocument.splitting_completed_at: cur_time,
}
)
# update segment status to indexing
self._update_segments_by_document(
dataset_document_id=dataset_document.id,
update_params={
DocumentSegment.status: "indexing",
DocumentSegment.indexing_at: datetime.datetime.utcnow()
}
)
class DocumentIsPausedException(Exception):
pass
class IndexProcessorFactory:
"""IndexProcessorInit.
"""
def __init__(self, index_type: str):
self._index_type = index_type
def init_index_processor(self) -> BaseIndexProcessor:
"""Init index processor."""
if not self._index_type:
raise ValueError("Index type must be specified.")
if self._index_type == IndexType.PARAGRAPH_INDEX.value:
return ParagraphIndexProcessor()
elif self._index_type == IndexType.QA_INDEX.value:
return QAIndexProcessor()
else:
raise ValueError(f"Index type {self._index_type} is not supported.")
db = SQLAlchemy()
class Dataset(db.Model):
__tablename__ = 'datasets'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='dataset_pkey'),
db.Index('dataset_tenant_idx', 'tenant_id'),
db.Index('retrieval_model_idx', "retrieval_model", postgresql_using='gin')
)
INDEXING_TECHNIQUE_LIST = ['high_quality', 'economy', None]
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text, nullable=True)
provider = db.Column(db.String(255), nullable=False,
server_default=db.text("'vendor'::character varying"))
permission = db.Column(db.String(255), nullable=False,
server_default=db.text("'only_me'::character varying"))
data_source_type = db.Column(db.String(255))
indexing_technique = db.Column(db.String(255), nullable=True)
index_struct = db.Column(db.Text, nullable=True)
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
embedding_model = db.Column(db.String(255), nullable=True)
embedding_model_provider = db.Column(db.String(255), nullable=True)
collection_binding_id = db.Column(UUID, nullable=True)
retrieval_model = db.Column(JSONB, nullable=True)
def dataset_keyword_table(self):
dataset_keyword_table = db.session.query(DatasetKeywordTable).filter(
DatasetKeywordTable.dataset_id == self.id).first()
if dataset_keyword_table:
return dataset_keyword_table
return None
def index_struct_dict(self):
return json.loads(self.index_struct) if self.index_struct else None
def created_by_account(self):
return Account.query.get(self.created_by)
def latest_process_rule(self):
return DatasetProcessRule.query.filter(DatasetProcessRule.dataset_id == self.id) \
.order_by(DatasetProcessRule.created_at.desc()).first()
def app_count(self):
return db.session.query(func.count(AppDatasetJoin.id)).filter(AppDatasetJoin.dataset_id == self.id).scalar()
def document_count(self):
return db.session.query(func.count(Document.id)).filter(Document.dataset_id == self.id).scalar()
def available_document_count(self):
return db.session.query(func.count(Document.id)).filter(
Document.dataset_id == self.id,
Document.indexing_status == 'completed',
Document.enabled == True,
Document.archived == False
).scalar()
def available_segment_count(self):
return db.session.query(func.count(DocumentSegment.id)).filter(
DocumentSegment.dataset_id == self.id,
DocumentSegment.status == 'completed',
DocumentSegment.enabled == True
).scalar()
def word_count(self):
return Document.query.with_entities(func.coalesce(func.sum(Document.word_count))) \
.filter(Document.dataset_id == self.id).scalar()
def doc_form(self):
document = db.session.query(Document).filter(
Document.dataset_id == self.id).first()
if document:
return document.doc_form
return None
def retrieval_model_dict(self):
default_retrieval_model = {
'search_method': 'semantic_search',
'reranking_enable': False,
'reranking_model': {
'reranking_provider_name': '',
'reranking_model_name': ''
},
'top_k': 2,
'score_threshold_enabled': False
}
return self.retrieval_model if self.retrieval_model else default_retrieval_model
def gen_collection_name_by_id(dataset_id: str) -> str:
normalized_dataset_id = dataset_id.replace("-", "_")
return f'Vector_index_{normalized_dataset_id}_Node'
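For example (UUID invented):
# gen_collection_name_by_id("1b7a4c1e-9f2d-4c3a-8e5b-0011223344cc")
# -> 'Vector_index_1b7a4c1e_9f2d_4c3a_8e5b_0011223344cc_Node'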
class Document(db.Model):
__tablename__ = 'documents'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_pkey'),
db.Index('document_dataset_id_idx', 'dataset_id'),
db.Index('document_is_paused_idx', 'is_paused'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
data_source_type = db.Column(db.String(255), nullable=False)
data_source_info = db.Column(db.Text, nullable=True)
dataset_process_rule_id = db.Column(UUID, nullable=True)
batch = db.Column(db.String(255), nullable=False)
name = db.Column(db.String(255), nullable=False)
created_from = db.Column(db.String(255), nullable=False)
created_by = db.Column(UUID, nullable=False)
created_api_request_id = db.Column(UUID, nullable=True)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
# start processing
processing_started_at = db.Column(db.DateTime, nullable=True)
# parsing
file_id = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=True)
parsing_completed_at = db.Column(db.DateTime, nullable=True)
# cleaning
cleaning_completed_at = db.Column(db.DateTime, nullable=True)
# split
splitting_completed_at = db.Column(db.DateTime, nullable=True)
# indexing
tokens = db.Column(db.Integer, nullable=True)
indexing_latency = db.Column(db.Float, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
# pause
is_paused = db.Column(db.Boolean, nullable=True, server_default=db.text('false'))
paused_by = db.Column(UUID, nullable=True)
paused_at = db.Column(db.DateTime, nullable=True)
# error
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
# basic fields
indexing_status = db.Column(db.String(
255), nullable=False, server_default=db.text("'waiting'::character varying"))
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
archived = db.Column(db.Boolean, nullable=False,
server_default=db.text('false'))
archived_reason = db.Column(db.String(255), nullable=True)
archived_by = db.Column(UUID, nullable=True)
archived_at = db.Column(db.DateTime, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
doc_type = db.Column(db.String(40), nullable=True)
doc_metadata = db.Column(db.JSON, nullable=True)
doc_form = db.Column(db.String(
255), nullable=False, server_default=db.text("'text_model'::character varying"))
doc_language = db.Column(db.String(255), nullable=True)
DATA_SOURCES = ['upload_file', 'notion_import']
def display_status(self):
status = None
if self.indexing_status == 'waiting':
status = 'queuing'
elif self.indexing_status not in ['completed', 'error', 'waiting'] and self.is_paused:
status = 'paused'
elif self.indexing_status in ['parsing', 'cleaning', 'splitting', 'indexing']:
status = 'indexing'
elif self.indexing_status == 'error':
status = 'error'
elif self.indexing_status == 'completed' and not self.archived and self.enabled:
status = 'available'
elif self.indexing_status == 'completed' and not self.archived and not self.enabled:
status = 'disabled'
elif self.indexing_status == 'completed' and self.archived:
status = 'archived'
return status
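Summarizing the branches above as a lookup (derived directly from the code):
# indexing_status                             -> display_status
# 'waiting'                                   -> 'queuing'
# in-flight state while is_paused             -> 'paused'
# 'parsing'/'cleaning'/'splitting'/'indexing' -> 'indexing'
# 'error'                                     -> 'error'
# 'completed', enabled, not archived          -> 'available'
# 'completed', disabled, not archived         -> 'disabled'
# 'completed', archived                       -> 'archived'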
def data_source_info_dict(self):
if self.data_source_info:
try:
data_source_info_dict = json.loads(self.data_source_info)
except JSONDecodeError:
data_source_info_dict = {}
return data_source_info_dict
return None
def data_source_detail_dict(self):
if self.data_source_info:
if self.data_source_type == 'upload_file':
data_source_info_dict = json.loads(self.data_source_info)
file_detail = db.session.query(UploadFile). \
filter(UploadFile.id == data_source_info_dict['upload_file_id']). \
one_or_none()
if file_detail:
return {
'upload_file': {
'id': file_detail.id,
'name': file_detail.name,
'size': file_detail.size,
'extension': file_detail.extension,
'mime_type': file_detail.mime_type,
'created_by': file_detail.created_by,
'created_at': file_detail.created_at.timestamp()
}
}
elif self.data_source_type == 'notion_import':
return json.loads(self.data_source_info)
return {}
def average_segment_length(self):
if self.word_count and self.word_count != 0 and self.segment_count and self.segment_count != 0:
return self.word_count // self.segment_count
return 0
def dataset_process_rule(self):
if self.dataset_process_rule_id:
return DatasetProcessRule.query.get(self.dataset_process_rule_id)
return None
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).one_or_none()
def segment_count(self):
return DocumentSegment.query.filter(DocumentSegment.document_id == self.id).count()
def hit_count(self):
return DocumentSegment.query.with_entities(func.coalesce(func.sum(DocumentSegment.hit_count))) \
.filter(DocumentSegment.document_id == self.id).scalar()
class DocumentSegment(db.Model):
__tablename__ = 'document_segments'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_segment_pkey'),
db.Index('document_segment_dataset_id_idx', 'dataset_id'),
db.Index('document_segment_document_id_idx', 'document_id'),
db.Index('document_segment_tenant_dataset_idx', 'dataset_id', 'tenant_id'),
db.Index('document_segment_tenant_document_idx', 'document_id', 'tenant_id'),
db.Index('document_segment_dataset_node_idx', 'dataset_id', 'index_node_id'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
document_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
content = db.Column(db.Text, nullable=False)
answer = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=False)
tokens = db.Column(db.Integer, nullable=False)
# indexing fields
keywords = db.Column(db.JSON, nullable=True)
index_node_id = db.Column(db.String(255), nullable=True)
index_node_hash = db.Column(db.String(255), nullable=True)
# basic fields
hit_count = db.Column(db.Integer, nullable=False, default=0)
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
status = db.Column(db.String(255), nullable=False,
server_default=db.text("'waiting'::character varying"))
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
indexing_at = db.Column(db.DateTime, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).first()
def document(self):
return db.session.query(Document).filter(Document.id == self.document_id).first()
def previous_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position - 1
).first()
def next_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position + 1
).first()
The provided code snippet includes the necessary dependencies for implementing the `document_indexing_update_task` function. Write a Python function `def document_indexing_update_task(dataset_id: str, document_id: str)` to solve the following problem:
Async update document :param dataset_id: :param document_id: Usage: document_indexing_update_task.delay(dataset_id, document_id)
Here is the function:
def document_indexing_update_task(dataset_id: str, document_id: str):
"""
Async update document
:param dataset_id:
:param document_id:
Usage: document_indexing_update_task.delay(dataset_id, document_id)
"""
logging.info(click.style('Start update document: {}'.format(document_id), fg='green'))
start_at = time.perf_counter()
document = db.session.query(Document).filter(
Document.id == document_id,
Document.dataset_id == dataset_id
).first()
if not document:
raise NotFound('Document not found')
document.indexing_status = 'parsing'
document.processing_started_at = datetime.datetime.utcnow()
db.session.commit()
# delete all document segment and index
try:
dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
if not dataset:
raise Exception('Dataset not found')
index_type = document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document_id).all()
index_node_ids = [segment.index_node_id for segment in segments]
# delete from vector index
index_processor.clean(dataset, index_node_ids)
for segment in segments:
db.session.delete(segment)
db.session.commit()
end_at = time.perf_counter()
logging.info(
click.style('Cleaned document when document update data source or process rule: {} latency: {}'.format(document_id, end_at - start_at), fg='green'))
except Exception:
logging.exception("Cleaned document when document update data source or process rule failed")
try:
indexing_runner = IndexingRunner()
indexing_runner.run([document])
end_at = time.perf_counter()
logging.info(click.style('update document: {} latency: {}'.format(document.id, end_at - start_at), fg='green'))
except DocumentIsPausedException as ex:
logging.info(click.style(str(ex), fg='yellow'))
except Exception:
    # do not silently swallow unexpected failures; keep a record for debugging
    logging.exception("update document failed")
| Async update document :param dataset_id: :param document_id: Usage: document_indexing_update_task.delay(dataset_id, document_id) |
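A minimal invocation sketch; the `@shared_task` decorator is stripped in this dump, but the celery import and the `.delay` usage in the docstring imply it (IDs are placeholders):
# enqueue the update asynchronously on a celery worker
document_indexing_update_task.delay("<dataset-uuid>", "<document-uuid>")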
17,028 | import datetime
import logging
import time
import click
from celery import shared_task
from werkzeug.exceptions import NotFound
from core.indexing_runner import DocumentIsPausedException, IndexingRunner
from core.rag.extractor.notion_extractor import NotionExtractor
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from extensions.ext_database import db
from models.dataset import Dataset, Document, DocumentSegment
from models.source import DataSourceBinding
class IndexingRunner:
def __init__(self):
self.storage = storage
self.model_manager = ModelManager()
def run(self, dataset_documents: list[DatasetDocument]):
"""Run the indexing process."""
for dataset_document in dataset_documents:
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
# extract
text_docs = self._extract(index_processor, dataset_document, processing_rule.to_dict())
# transform
documents = self._transform(index_processor, dataset, text_docs, dataset_document.doc_language,
processing_rule.to_dict())
# save segment
self._load_segments(dataset, dataset_document, documents)
# load
self._load(
index_processor=index_processor,
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except ObjectDeletedError:
logging.warning('Document deleted, document id: {}'.format(dataset_document.id))
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def run_in_splitting_status(self, dataset_document: DatasetDocument):
"""Run the indexing process when the index_status is splitting."""
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get exist document_segment list and delete
document_segments = DocumentSegment.query.filter_by(
dataset_id=dataset.id,
document_id=dataset_document.id
).all()
for document_segment in document_segments:
db.session.delete(document_segment)
db.session.commit()
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
# extract
text_docs = self._extract(index_processor, dataset_document, processing_rule.to_dict())
# transform
documents = self._transform(index_processor, dataset, text_docs, dataset_document.doc_language,
processing_rule.to_dict())
# save segment
self._load_segments(dataset, dataset_document, documents)
# load
self._load(
index_processor=index_processor,
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def run_in_indexing_status(self, dataset_document: DatasetDocument):
"""Run the indexing process when the index_status is indexing."""
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get exist document_segment list and delete
document_segments = DocumentSegment.query.filter_by(
dataset_id=dataset.id,
document_id=dataset_document.id
).all()
documents = []
if document_segments:
for document_segment in document_segments:
# transform segment to node
if document_segment.status != "completed":
document = Document(
page_content=document_segment.content,
metadata={
"doc_id": document_segment.index_node_id,
"doc_hash": document_segment.index_node_hash,
"document_id": document_segment.document_id,
"dataset_id": document_segment.dataset_id,
}
)
documents.append(document)
# build index
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
self._load(
index_processor=index_processor,
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def indexing_estimate(self, tenant_id: str, extract_settings: list[ExtractSetting], tmp_processing_rule: dict,
doc_form: str = None, doc_language: str = 'English', dataset_id: str = None,
indexing_technique: str = 'economy') -> dict:
"""
Estimate the indexing for the document.
"""
# check document limit
features = FeatureService.get_features(tenant_id)
if features.billing.enabled:
count = len(extract_settings)
batch_upload_limit = int(current_app.config['BATCH_UPLOAD_LIMIT'])
if count > batch_upload_limit:
raise ValueError(f"You have reached the batch upload limit of {batch_upload_limit}.")
embedding_model_instance = None
if dataset_id:
dataset = Dataset.query.filter_by(
id=dataset_id
).first()
if not dataset:
raise ValueError('Dataset not found.')
if dataset.indexing_technique == 'high_quality' or indexing_technique == 'high_quality':
if dataset.embedding_model_provider:
embedding_model_instance = self.model_manager.get_model_instance(
tenant_id=tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
else:
embedding_model_instance = self.model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.TEXT_EMBEDDING,
)
else:
if indexing_technique == 'high_quality':
embedding_model_instance = self.model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.TEXT_EMBEDDING,
)
tokens = 0
preview_texts = []
total_segments = 0
total_price = 0
currency = 'USD'
index_type = doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
all_text_docs = []
for extract_setting in extract_settings:
# extract
text_docs = index_processor.extract(extract_setting, process_rule_mode=tmp_processing_rule["mode"])
all_text_docs.extend(text_docs)
processing_rule = DatasetProcessRule(
mode=tmp_processing_rule["mode"],
rules=json.dumps(tmp_processing_rule["rules"])
)
# get splitter
splitter = self._get_splitter(processing_rule, embedding_model_instance)
# split to documents
documents = self._split_to_documents_for_estimate(
text_docs=text_docs,
splitter=splitter,
processing_rule=processing_rule
)
total_segments += len(documents)
for document in documents:
if len(preview_texts) < 5:
preview_texts.append(document.page_content)
if indexing_technique == 'high_quality' or embedding_model_instance:
embedding_model_type_instance = embedding_model_instance.model_type_instance
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_type_instance)
tokens += embedding_model_type_instance.get_num_tokens(
model=embedding_model_instance.model,
credentials=embedding_model_instance.credentials,
texts=[self.filter_string(document.page_content)]
)
if doc_form and doc_form == 'qa_model':
model_instance = self.model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.LLM
)
model_type_instance = model_instance.model_type_instance
model_type_instance = cast(LargeLanguageModel, model_type_instance)
if len(preview_texts) > 0:
# qa model document
response = LLMGenerator.generate_qa_document(current_user.current_tenant_id, preview_texts[0],
doc_language)
document_qa_list = self.format_split_text(response)
price_info = model_type_instance.get_price(
model=model_instance.model,
credentials=model_instance.credentials,
price_type=PriceType.INPUT,
tokens=total_segments * 2000,
)
return {
"total_segments": total_segments * 20,
"tokens": total_segments * 2000,
"total_price": '{:f}'.format(price_info.total_amount),
"currency": price_info.currency,
"qa_preview": document_qa_list,
"preview": preview_texts
}
if embedding_model_instance:
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_instance.model_type_instance)
embedding_price_info = embedding_model_type_instance.get_price(
model=embedding_model_instance.model,
credentials=embedding_model_instance.credentials,
price_type=PriceType.INPUT,
tokens=tokens
)
total_price = '{:f}'.format(embedding_price_info.total_amount)
currency = embedding_price_info.currency
return {
"total_segments": total_segments,
"tokens": tokens,
"total_price": total_price,
"currency": currency,
"preview": preview_texts
}
def _extract(self, index_processor: BaseIndexProcessor, dataset_document: DatasetDocument, process_rule: dict) \
-> list[Document]:
# load file
if dataset_document.data_source_type not in ["upload_file", "notion_import"]:
return []
data_source_info = dataset_document.data_source_info_dict
text_docs = []
if dataset_document.data_source_type == 'upload_file':
if not data_source_info or 'upload_file_id' not in data_source_info:
raise ValueError("no upload file found")
file_detail = db.session.query(UploadFile). \
filter(UploadFile.id == data_source_info['upload_file_id']). \
one_or_none()
if file_detail:
extract_setting = ExtractSetting(
datasource_type="upload_file",
upload_file=file_detail,
document_model=dataset_document.doc_form
)
text_docs = index_processor.extract(extract_setting, process_rule_mode=process_rule['mode'])
elif dataset_document.data_source_type == 'notion_import':
if (not data_source_info or 'notion_workspace_id' not in data_source_info
or 'notion_page_id' not in data_source_info):
raise ValueError("no notion import info found")
extract_setting = ExtractSetting(
datasource_type="notion_import",
notion_info={
"notion_workspace_id": data_source_info['notion_workspace_id'],
"notion_obj_id": data_source_info['notion_page_id'],
"notion_page_type": data_source_info['type'],
"document": dataset_document,
"tenant_id": dataset_document.tenant_id
},
document_model=dataset_document.doc_form
)
text_docs = index_processor.extract(extract_setting, process_rule_mode=process_rule['mode'])
# update document status to splitting
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="splitting",
extra_update_params={
DatasetDocument.word_count: sum([len(text_doc.page_content) for text_doc in text_docs]),
DatasetDocument.parsing_completed_at: datetime.datetime.utcnow()
}
)
# replace doc id to document model id
text_docs = cast(list[Document], text_docs)
for text_doc in text_docs:
text_doc.metadata['document_id'] = dataset_document.id
text_doc.metadata['dataset_id'] = dataset_document.dataset_id
return text_docs
def filter_string(self, text):
text = re.sub(r'<\|', '<', text)
text = re.sub(r'\|>', '>', text)
text = re.sub(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F\xEF\xBF\xBE]', '', text)
# Unicode U+FFFE
text = re.sub('\uFFFE', '', text)
return text
def _get_splitter(self, processing_rule: DatasetProcessRule,
embedding_model_instance: Optional[ModelInstance]) -> TextSplitter:
"""
Get the NodeParser object according to the processing rule.
"""
if processing_rule.mode == "custom":
# The user-defined segmentation rule
rules = json.loads(processing_rule.rules)
segmentation = rules["segmentation"]
if segmentation["max_tokens"] < 50 or segmentation["max_tokens"] > 1000:
raise ValueError("Custom segment length should be between 50 and 1000.")
separator = segmentation["separator"]
if separator:
separator = separator.replace('\\n', '\n')
if 'chunk_overlap' in segmentation and segmentation['chunk_overlap']:
chunk_overlap = segmentation['chunk_overlap']
else:
chunk_overlap = 0
character_splitter = FixedRecursiveCharacterTextSplitter.from_encoder(
chunk_size=segmentation["max_tokens"],
chunk_overlap=chunk_overlap,
fixed_separator=separator,
separators=["\n\n", "。", ".", " ", ""],
embedding_model_instance=embedding_model_instance
)
else:
# Automatic segmentation
character_splitter = EnhanceRecursiveCharacterTextSplitter.from_encoder(
chunk_size=DatasetProcessRule.AUTOMATIC_RULES['segmentation']['max_tokens'],
chunk_overlap=DatasetProcessRule.AUTOMATIC_RULES['segmentation']['chunk_overlap'],
separators=["\n\n", "。", ".", " ", ""],
embedding_model_instance=embedding_model_instance
)
return character_splitter
def _step_split(self, text_docs: list[Document], splitter: TextSplitter,
dataset: Dataset, dataset_document: DatasetDocument, processing_rule: DatasetProcessRule) \
-> list[Document]:
"""
Split the text documents into documents and save them to the document segment.
"""
documents = self._split_to_documents(
text_docs=text_docs,
splitter=splitter,
processing_rule=processing_rule,
tenant_id=dataset.tenant_id,
document_form=dataset_document.doc_form,
document_language=dataset_document.doc_language
)
# save node to document segment
doc_store = DatasetDocumentStore(
dataset=dataset,
user_id=dataset_document.created_by,
document_id=dataset_document.id
)
# add document segments
doc_store.add_documents(documents)
# update document status to indexing
cur_time = datetime.datetime.utcnow()
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="indexing",
extra_update_params={
DatasetDocument.cleaning_completed_at: cur_time,
DatasetDocument.splitting_completed_at: cur_time,
}
)
# update segment status to indexing
self._update_segments_by_document(
dataset_document_id=dataset_document.id,
update_params={
DocumentSegment.status: "indexing",
DocumentSegment.indexing_at: datetime.datetime.utcnow()
}
)
return documents
def _split_to_documents(self, text_docs: list[Document], splitter: TextSplitter,
processing_rule: DatasetProcessRule, tenant_id: str,
document_form: str, document_language: str) -> list[Document]:
"""
Split the text documents into nodes.
"""
all_documents = []
all_qa_documents = []
for text_doc in text_docs:
# document clean
document_text = self._document_clean(text_doc.page_content, processing_rule)
text_doc.page_content = document_text
# parse document to nodes
documents = splitter.split_documents([text_doc])
split_documents = []
for document_node in documents:
if document_node.page_content.strip():
doc_id = str(uuid.uuid4())
hash = helper.generate_text_hash(document_node.page_content)
document_node.metadata['doc_id'] = doc_id
document_node.metadata['doc_hash'] = hash
# strip a leading splitter character ('.' or '。') left over from splitting
page_content = document_node.page_content
if page_content.startswith((".", "。")):
    page_content = page_content[1:]
document_node.page_content = page_content
if document_node.page_content:
split_documents.append(document_node)
all_documents.extend(split_documents)
# processing qa document
if document_form == 'qa_model':
for i in range(0, len(all_documents), 10):
threads = []
sub_documents = all_documents[i:i + 10]
for doc in sub_documents:
document_format_thread = threading.Thread(target=self.format_qa_document, kwargs={
'flask_app': current_app._get_current_object(),
'tenant_id': tenant_id, 'document_node': doc, 'all_qa_documents': all_qa_documents,
'document_language': document_language})
threads.append(document_format_thread)
document_format_thread.start()
for thread in threads:
thread.join()
return all_qa_documents
return all_documents
def format_qa_document(self, flask_app: Flask, tenant_id: str, document_node, all_qa_documents, document_language):
format_documents = []
if document_node.page_content is None or not document_node.page_content.strip():
return
with flask_app.app_context():
try:
# qa model document
response = LLMGenerator.generate_qa_document(tenant_id, document_node.page_content, document_language)
document_qa_list = self.format_split_text(response)
qa_documents = []
for result in document_qa_list:
qa_document = Document(page_content=result['question'], metadata=document_node.metadata.copy())
doc_id = str(uuid.uuid4())
hash = helper.generate_text_hash(result['question'])
qa_document.metadata['answer'] = result['answer']
qa_document.metadata['doc_id'] = doc_id
qa_document.metadata['doc_hash'] = hash
qa_documents.append(qa_document)
format_documents.extend(qa_documents)
except Exception as e:
logging.exception(e)
all_qa_documents.extend(format_documents)
def _split_to_documents_for_estimate(self, text_docs: list[Document], splitter: TextSplitter,
processing_rule: DatasetProcessRule) -> list[Document]:
"""
Split the text documents into nodes.
"""
all_documents = []
for text_doc in text_docs:
# document clean
document_text = self._document_clean(text_doc.page_content, processing_rule)
text_doc.page_content = document_text
# parse document to nodes
documents = splitter.split_documents([text_doc])
split_documents = []
for document in documents:
if document.page_content is None or not document.page_content.strip():
continue
doc_id = str(uuid.uuid4())
hash = helper.generate_text_hash(document.page_content)
document.metadata['doc_id'] = doc_id
document.metadata['doc_hash'] = hash
split_documents.append(document)
all_documents.extend(split_documents)
return all_documents
def _document_clean(self, text: str, processing_rule: DatasetProcessRule) -> str:
"""
Clean the document text according to the processing rules.
"""
if processing_rule.mode == "automatic":
rules = DatasetProcessRule.AUTOMATIC_RULES
else:
rules = json.loads(processing_rule.rules) if processing_rule.rules else {}
if 'pre_processing_rules' in rules:
pre_processing_rules = rules["pre_processing_rules"]
for pre_processing_rule in pre_processing_rules:
if pre_processing_rule["id"] == "remove_extra_spaces" and pre_processing_rule["enabled"] is True:
# Remove extra spaces
pattern = r'\n{3,}'
text = re.sub(pattern, '\n\n', text)
pattern = r'[\t\f\r\x20\u00a0\u1680\u180e\u2000-\u200a\u202f\u205f\u3000]{2,}'
text = re.sub(pattern, ' ', text)
elif pre_processing_rule["id"] == "remove_urls_emails" and pre_processing_rule["enabled"] is True:
# Remove email
pattern = r'([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)'
text = re.sub(pattern, '', text)
# Remove URL
pattern = r'https?://[^\s]+'
text = re.sub(pattern, '', text)
return text
def format_split_text(self, text):
regex = r"Q\d+:\s*(.*?)\s*A\d+:\s*([\s\S]*?)(?=Q\d+:|$)"
matches = re.findall(regex, text, re.UNICODE)
return [
{
"question": q,
"answer": re.sub(r"\n\s*", "\n", a.strip())
}
for q, a in matches if q and a
]
def _load(self, index_processor: BaseIndexProcessor, dataset: Dataset,
dataset_document: DatasetDocument, documents: list[Document]) -> None:
"""
insert index and update document/segment status to completed
"""
embedding_model_instance = None
if dataset.indexing_technique == 'high_quality':
embedding_model_instance = self.model_manager.get_model_instance(
tenant_id=dataset.tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
# chunk nodes by chunk size
indexing_start_at = time.perf_counter()
tokens = 0
chunk_size = 100
embedding_model_type_instance = None
if embedding_model_instance:
embedding_model_type_instance = embedding_model_instance.model_type_instance
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_type_instance)
for i in range(0, len(documents), chunk_size):
# check document is paused
self._check_document_paused_status(dataset_document.id)
chunk_documents = documents[i:i + chunk_size]
if dataset.indexing_technique == 'high_quality' or embedding_model_type_instance:
tokens += sum(
embedding_model_type_instance.get_num_tokens(
embedding_model_instance.model,
embedding_model_instance.credentials,
[document.page_content]
)
for document in chunk_documents
)
# load index
index_processor.load(dataset, chunk_documents)
db.session.add(dataset)
document_ids = [document.metadata['doc_id'] for document in chunk_documents]
db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == dataset_document.id,
DocumentSegment.index_node_id.in_(document_ids),
DocumentSegment.status == "indexing"
).update({
DocumentSegment.status: "completed",
DocumentSegment.enabled: True,
DocumentSegment.completed_at: datetime.datetime.utcnow()
})
db.session.commit()
indexing_end_at = time.perf_counter()
# update document status to completed
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="completed",
extra_update_params={
DatasetDocument.tokens: tokens,
DatasetDocument.completed_at: datetime.datetime.utcnow(),
DatasetDocument.indexing_latency: indexing_end_at - indexing_start_at,
}
)
def _check_document_paused_status(self, document_id: str):
indexing_cache_key = 'document_{}_is_paused'.format(document_id)
result = redis_client.get(indexing_cache_key)
if result:
raise DocumentIsPausedException()
def _update_document_index_status(self, document_id: str, after_indexing_status: str,
extra_update_params: Optional[dict] = None) -> None:
"""
Update the document indexing status.
"""
count = DatasetDocument.query.filter_by(id=document_id, is_paused=True).count()
if count > 0:
raise DocumentIsPausedException()
document = DatasetDocument.query.filter_by(id=document_id).first()
if not document:
raise DocumentIsDeletedPausedException()
update_params = {
DatasetDocument.indexing_status: after_indexing_status
}
if extra_update_params:
update_params.update(extra_update_params)
DatasetDocument.query.filter_by(id=document_id).update(update_params)
db.session.commit()
def _update_segments_by_document(self, dataset_document_id: str, update_params: dict) -> None:
"""
Update the document segment by document id.
"""
DocumentSegment.query.filter_by(document_id=dataset_document_id).update(update_params)
db.session.commit()
def batch_add_segments(self, segments: list[DocumentSegment], dataset: Dataset):
"""
Batch add segments index processing
"""
documents = []
for segment in segments:
document = Document(
page_content=segment.content,
metadata={
"doc_id": segment.index_node_id,
"doc_hash": segment.index_node_hash,
"document_id": segment.document_id,
"dataset_id": segment.dataset_id,
}
)
documents.append(document)
# save vector index
index_type = dataset.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
index_processor.load(dataset, documents)
def _transform(self, index_processor: BaseIndexProcessor, dataset: Dataset,
text_docs: list[Document], doc_language: str, process_rule: dict) -> list[Document]:
# get embedding model instance
embedding_model_instance = None
if dataset.indexing_technique == 'high_quality':
if dataset.embedding_model_provider:
embedding_model_instance = self.model_manager.get_model_instance(
tenant_id=dataset.tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
else:
embedding_model_instance = self.model_manager.get_default_model_instance(
tenant_id=dataset.tenant_id,
model_type=ModelType.TEXT_EMBEDDING,
)
documents = index_processor.transform(text_docs, embedding_model_instance=embedding_model_instance,
process_rule=process_rule, tenant_id=dataset.tenant_id,
doc_language=doc_language)
return documents
def _load_segments(self, dataset, dataset_document, documents):
# save node to document segment
doc_store = DatasetDocumentStore(
dataset=dataset,
user_id=dataset_document.created_by,
document_id=dataset_document.id
)
# add document segments
doc_store.add_documents(documents)
# update document status to indexing
cur_time = datetime.datetime.utcnow()
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="indexing",
extra_update_params={
DatasetDocument.cleaning_completed_at: cur_time,
DatasetDocument.splitting_completed_at: cur_time,
}
)
# update segment status to indexing
self._update_segments_by_document(
dataset_document_id=dataset_document.id,
update_params={
DocumentSegment.status: "indexing",
DocumentSegment.indexing_at: datetime.datetime.utcnow()
}
)
class DocumentIsPausedException(Exception):
pass
class NotionExtractor(BaseExtractor):
def __init__(
self,
notion_workspace_id: str,
notion_obj_id: str,
notion_page_type: str,
tenant_id: str,
document_model: Optional[DocumentModel] = None,
notion_access_token: Optional[str] = None,
):
self._notion_access_token = None
self._document_model = document_model
self._notion_workspace_id = notion_workspace_id
self._notion_obj_id = notion_obj_id
self._notion_page_type = notion_page_type
if notion_access_token:
self._notion_access_token = notion_access_token
else:
self._notion_access_token = self._get_access_token(tenant_id,
self._notion_workspace_id)
if not self._notion_access_token:
integration_token = current_app.config.get('NOTION_INTEGRATION_TOKEN')
if integration_token is None:
raise ValueError(
"Must specify `integration_token` or set environment "
"variable `NOTION_INTEGRATION_TOKEN`."
)
self._notion_access_token = integration_token
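A hedged construction sketch for the extractor (IDs and token are placeholders):
extractor = NotionExtractor(
    notion_workspace_id="<workspace-id>",
    notion_obj_id="<page-or-database-id>",
    notion_page_type="page",         # or "database"
    tenant_id="<tenant-uuid>",
    notion_access_token="<secret>",  # optional; falls back to the workspace binding or NOTION_INTEGRATION_TOKEN
)
docs = extractor.extract()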
def extract(self) -> list[Document]:
self.update_last_edited_time(
self._document_model
)
text_docs = self._load_data_as_documents(self._notion_obj_id, self._notion_page_type)
return text_docs
def _load_data_as_documents(
self, notion_obj_id: str, notion_page_type: str
) -> list[Document]:
docs = []
if notion_page_type == 'database':
# get all the pages in the database
page_text_documents = self._get_notion_database_data(notion_obj_id)
docs.extend(page_text_documents)
elif notion_page_type == 'page':
page_text_list = self._get_notion_block_data(notion_obj_id)
for page_text in page_text_list:
docs.append(Document(page_content=page_text))
else:
raise ValueError("notion page type not supported")
return docs
def _get_notion_database_data(
self, database_id: str, query_dict: dict[str, Any] = {}
) -> list[Document]:
"""Get all the pages from a Notion database."""
res = requests.post(
DATABASE_URL_TMPL.format(database_id=database_id),
headers={
"Authorization": "Bearer " + self._notion_access_token,
"Content-Type": "application/json",
"Notion-Version": "2022-06-28",
},
json=query_dict,
)
data = res.json()
database_content_list = []
if 'results' not in data or data["results"] is None:
return []
for result in data["results"]:
properties = result['properties']
row_data = {}  # avoid shadowing the response payload ("data") while iterating
for property_name, property_value in properties.items():
type = property_value['type']
if type == 'multi_select':
value = []
multi_select_list = property_value[type]
for multi_select in multi_select_list:
value.append(multi_select['name'])
elif type == 'rich_text' or type == 'title':
if len(property_value[type]) > 0:
value = property_value[type][0]['plain_text']
else:
value = ''
elif type == 'select' or type == 'status':
if property_value[type]:
value = property_value[type]['name']
else:
value = ''
else:
value = property_value[type]
row_data[property_name] = value
row_dict = {k: v for k, v in row_data.items() if v}
row_content = ''
for key, value in row_dict.items():
if isinstance(value, dict):
value_dict = {k: v for k, v in value.items() if v}
value_content = ''.join(f'{k}:{v} ' for k, v in value_dict.items())
row_content = row_content + f'{key}:{value_content}\n'
else:
row_content = row_content + f'{key}:{value}\n'
document = Document(page_content=row_content)
database_content_list.append(document)
return database_content_list
def _get_notion_block_data(self, page_id: str) -> list[str]:
result_lines_arr = []
cur_block_id = page_id
while True:
block_url = BLOCK_CHILD_URL_TMPL.format(block_id=cur_block_id)
query_dict: dict[str, Any] = {}
res = requests.request(
"GET",
block_url,
headers={
"Authorization": "Bearer " + self._notion_access_token,
"Content-Type": "application/json",
"Notion-Version": "2022-06-28",
},
json=query_dict
)
data = res.json()
# current block's heading
heading = ''
for result in data["results"]:
result_type = result["type"]
result_obj = result[result_type]
cur_result_text_arr = []
if result_type == 'table':
result_block_id = result["id"]
text = self._read_table_rows(result_block_id)
text += "\n\n"
result_lines_arr.append(text)
else:
if "rich_text" in result_obj:
for rich_text in result_obj["rich_text"]:
# skip if doesn't have text object
if "text" in rich_text:
text = rich_text["text"]["content"]
cur_result_text_arr.append(text)
if result_type in HEADING_TYPE:
heading = text
result_block_id = result["id"]
has_children = result["has_children"]
block_type = result["type"]
if has_children and block_type != 'child_page':
children_text = self._read_block(
result_block_id, num_tabs=1
)
cur_result_text_arr.append(children_text)
cur_result_text = "\n".join(cur_result_text_arr)
cur_result_text += "\n\n"
if result_type in HEADING_TYPE:
result_lines_arr.append(cur_result_text)
else:
result_lines_arr.append(f'{heading}\n{cur_result_text}')
if data["next_cursor"] is None:
break
else:
cur_block_id = data["next_cursor"]
return result_lines_arr
def _read_block(self, block_id: str, num_tabs: int = 0) -> str:
"""Read a block."""
result_lines_arr = []
cur_block_id = block_id
while True:
block_url = BLOCK_CHILD_URL_TMPL.format(block_id=cur_block_id)
query_dict: dict[str, Any] = {}
res = requests.request(
"GET",
block_url,
headers={
"Authorization": "Bearer " + self._notion_access_token,
"Content-Type": "application/json",
"Notion-Version": "2022-06-28",
},
json=query_dict
)
data = res.json()
if 'results' not in data or data["results"] is None:
break
heading = ''
for result in data["results"]:
result_type = result["type"]
result_obj = result[result_type]
cur_result_text_arr = []
if result_type == 'table':
result_block_id = result["id"]
text = self._read_table_rows(result_block_id)
result_lines_arr.append(text)
else:
if "rich_text" in result_obj:
for rich_text in result_obj["rich_text"]:
# skip blocks that don't carry a text object
if "text" in rich_text:
text = rich_text["text"]["content"]
prefix = "\t" * num_tabs
cur_result_text_arr.append(prefix + text)
if result_type in HEADING_TYPE:
heading = text
result_block_id = result["id"]
has_children = result["has_children"]
block_type = result["type"]
if has_children and block_type != 'child_page':
children_text = self._read_block(
result_block_id, num_tabs=num_tabs + 1
)
cur_result_text_arr.append(children_text)
cur_result_text = "\n".join(cur_result_text_arr)
if result_type in HEADING_TYPE:
result_lines_arr.append(cur_result_text)
else:
result_lines_arr.append(f'{heading}\n{cur_result_text}')
if data["next_cursor"] is None:
break
else:
cur_block_id = data["next_cursor"]
result_lines = "\n".join(result_lines_arr)
return result_lines
def _read_table_rows(self, block_id: str) -> str:
"""Read table rows."""
done = False
result_lines_arr = []
cur_block_id = block_id
while not done:
block_url = BLOCK_CHILD_URL_TMPL.format(block_id=cur_block_id)
query_dict: dict[str, Any] = {}
res = requests.request(
"GET",
block_url,
headers={
"Authorization": "Bearer " + self._notion_access_token,
"Content-Type": "application/json",
"Notion-Version": "2022-06-28",
},
json=query_dict
)
data = res.json()
# get table headers text
table_header_cell_texts = []
table_header_cells = data["results"][0]['table_row']['cells']
for table_header_cell in table_header_cells:
if table_header_cell:
for table_header_cell_text in table_header_cell:
text = table_header_cell_text["text"]["content"]
table_header_cell_texts.append(text)
# get table columns text and format
results = data["results"]
for i in range(len(results) - 1):
column_texts = []
table_column_cells = data["results"][i + 1]['table_row']['cells']
for j in range(len(table_column_cells)):
if table_column_cells[j]:
for table_column_cell_text in table_column_cells[j]:
column_text = table_column_cell_text["text"]["content"]
column_texts.append(f'{table_header_cell_texts[j]}:{column_text}')
cur_result_text = "\n".join(column_texts)
result_lines_arr.append(cur_result_text)
if data["next_cursor"] is None:
done = True
break
else:
cur_block_id = data["next_cursor"]
result_lines = "\n".join(result_lines_arr)
return result_lines
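A minimal illustration of the header-to-cell pairing performed above, again with invented table content:
# table_header_cell_texts == ['Name', 'Owner']
# a data row with cells ['Ingestion', 'Alice'] is rendered as:
# Name:Ingestion
# Owner:Alice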
def update_last_edited_time(self, document_model: DocumentModel):
if not document_model:
return
last_edited_time = self.get_notion_last_edited_time()
data_source_info = document_model.data_source_info_dict
data_source_info['last_edited_time'] = last_edited_time
update_params = {
DocumentModel.data_source_info: json.dumps(data_source_info)
}
DocumentModel.query.filter_by(id=document_model.id).update(update_params)
db.session.commit()
def get_notion_last_edited_time(self) -> str:
obj_id = self._notion_obj_id
page_type = self._notion_page_type
if page_type == 'database':
retrieve_page_url = RETRIEVE_DATABASE_URL_TMPL.format(database_id=obj_id)
else:
retrieve_page_url = RETRIEVE_PAGE_URL_TMPL.format(page_id=obj_id)
query_dict: dict[str, Any] = {}
res = requests.request(
"GET",
retrieve_page_url,
headers={
"Authorization": "Bearer " + self._notion_access_token,
"Content-Type": "application/json",
"Notion-Version": "2022-06-28",
},
json=query_dict
)
data = res.json()
return data["last_edited_time"]
def _get_access_token(cls, tenant_id: str, notion_workspace_id: str) -> str:
data_source_binding = DataSourceBinding.query.filter(
db.and_(
DataSourceBinding.tenant_id == tenant_id,
DataSourceBinding.provider == 'notion',
DataSourceBinding.disabled == False,
DataSourceBinding.source_info['workspace_id'] == f'"{notion_workspace_id}"'
)
).first()
if not data_source_binding:
raise Exception(f'No notion data source binding found for tenant {tenant_id} '
f'and notion workspace {notion_workspace_id}')
return data_source_binding.access_token
class IndexProcessorFactory:
"""IndexProcessorInit.
"""
def __init__(self, index_type: str):
self._index_type = index_type
def init_index_processor(self) -> BaseIndexProcessor:
"""Init index processor."""
if not self._index_type:
raise ValueError("Index type must be specified.")
if self._index_type == IndexType.PARAGRAPH_INDEX.value:
return ParagraphIndexProcessor()
elif self._index_type == IndexType.QA_INDEX.value:
return QAIndexProcessor()
else:
raise ValueError(f"Index type {self._index_type} is not supported.")
db = SQLAlchemy()
class Dataset(db.Model):
__tablename__ = 'datasets'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='dataset_pkey'),
db.Index('dataset_tenant_idx', 'tenant_id'),
db.Index('retrieval_model_idx', "retrieval_model", postgresql_using='gin')
)
INDEXING_TECHNIQUE_LIST = ['high_quality', 'economy', None]
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text, nullable=True)
provider = db.Column(db.String(255), nullable=False,
server_default=db.text("'vendor'::character varying"))
permission = db.Column(db.String(255), nullable=False,
server_default=db.text("'only_me'::character varying"))
data_source_type = db.Column(db.String(255))
indexing_technique = db.Column(db.String(255), nullable=True)
index_struct = db.Column(db.Text, nullable=True)
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
embedding_model = db.Column(db.String(255), nullable=True)
embedding_model_provider = db.Column(db.String(255), nullable=True)
collection_binding_id = db.Column(UUID, nullable=True)
retrieval_model = db.Column(JSONB, nullable=True)
def dataset_keyword_table(self):
dataset_keyword_table = db.session.query(DatasetKeywordTable).filter(
DatasetKeywordTable.dataset_id == self.id).first()
if dataset_keyword_table:
return dataset_keyword_table
return None
def index_struct_dict(self):
return json.loads(self.index_struct) if self.index_struct else None
def created_by_account(self):
return Account.query.get(self.created_by)
def latest_process_rule(self):
return DatasetProcessRule.query.filter(DatasetProcessRule.dataset_id == self.id) \
.order_by(DatasetProcessRule.created_at.desc()).first()
def app_count(self):
return db.session.query(func.count(AppDatasetJoin.id)).filter(AppDatasetJoin.dataset_id == self.id).scalar()
def document_count(self):
return db.session.query(func.count(Document.id)).filter(Document.dataset_id == self.id).scalar()
def available_document_count(self):
return db.session.query(func.count(Document.id)).filter(
Document.dataset_id == self.id,
Document.indexing_status == 'completed',
Document.enabled == True,
Document.archived == False
).scalar()
def available_segment_count(self):
return db.session.query(func.count(DocumentSegment.id)).filter(
DocumentSegment.dataset_id == self.id,
DocumentSegment.status == 'completed',
DocumentSegment.enabled == True
).scalar()
def word_count(self):
return Document.query.with_entities(func.coalesce(func.sum(Document.word_count), 0)) \
.filter(Document.dataset_id == self.id).scalar()
def doc_form(self):
document = db.session.query(Document).filter(
Document.dataset_id == self.id).first()
if document:
return document.doc_form
return None
def retrieval_model_dict(self):
default_retrieval_model = {
'search_method': 'semantic_search',
'reranking_enable': False,
'reranking_model': {
'reranking_provider_name': '',
'reranking_model_name': ''
},
'top_k': 2,
'score_threshold_enabled': False
}
return self.retrieval_model if self.retrieval_model else default_retrieval_model
def gen_collection_name_by_id(dataset_id: str) -> str:
normalized_dataset_id = dataset_id.replace("-", "_")
return f'Vector_index_{normalized_dataset_id}_Node'
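For example, with a hypothetical dataset id the generated collection name is deterministic:
gen_collection_name_by_id('2d9f6d2e-1111-2222-3333-444455556666')
# -> 'Vector_index_2d9f6d2e_1111_2222_3333_444455556666_Node'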
class Document(db.Model):
__tablename__ = 'documents'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_pkey'),
db.Index('document_dataset_id_idx', 'dataset_id'),
db.Index('document_is_paused_idx', 'is_paused'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
data_source_type = db.Column(db.String(255), nullable=False)
data_source_info = db.Column(db.Text, nullable=True)
dataset_process_rule_id = db.Column(UUID, nullable=True)
batch = db.Column(db.String(255), nullable=False)
name = db.Column(db.String(255), nullable=False)
created_from = db.Column(db.String(255), nullable=False)
created_by = db.Column(UUID, nullable=False)
created_api_request_id = db.Column(UUID, nullable=True)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
# start processing
processing_started_at = db.Column(db.DateTime, nullable=True)
# parsing
file_id = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=True)
parsing_completed_at = db.Column(db.DateTime, nullable=True)
# cleaning
cleaning_completed_at = db.Column(db.DateTime, nullable=True)
# split
splitting_completed_at = db.Column(db.DateTime, nullable=True)
# indexing
tokens = db.Column(db.Integer, nullable=True)
indexing_latency = db.Column(db.Float, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
# pause
is_paused = db.Column(db.Boolean, nullable=True, server_default=db.text('false'))
paused_by = db.Column(UUID, nullable=True)
paused_at = db.Column(db.DateTime, nullable=True)
# error
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
# basic fields
indexing_status = db.Column(db.String(
255), nullable=False, server_default=db.text("'waiting'::character varying"))
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
archived = db.Column(db.Boolean, nullable=False,
server_default=db.text('false'))
archived_reason = db.Column(db.String(255), nullable=True)
archived_by = db.Column(UUID, nullable=True)
archived_at = db.Column(db.DateTime, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
doc_type = db.Column(db.String(40), nullable=True)
doc_metadata = db.Column(db.JSON, nullable=True)
doc_form = db.Column(db.String(
255), nullable=False, server_default=db.text("'text_model'::character varying"))
doc_language = db.Column(db.String(255), nullable=True)
DATA_SOURCES = ['upload_file', 'notion_import']
def display_status(self):
status = None
if self.indexing_status == 'waiting':
status = 'queuing'
elif self.indexing_status not in ['completed', 'error', 'waiting'] and self.is_paused:
status = 'paused'
elif self.indexing_status in ['parsing', 'cleaning', 'splitting', 'indexing']:
status = 'indexing'
elif self.indexing_status == 'error':
status = 'error'
elif self.indexing_status == 'completed' and not self.archived and self.enabled:
status = 'available'
elif self.indexing_status == 'completed' and not self.archived and not self.enabled:
status = 'disabled'
elif self.indexing_status == 'completed' and self.archived:
status = 'archived'
return status
def data_source_info_dict(self):
if self.data_source_info:
try:
data_source_info_dict = json.loads(self.data_source_info)
except JSONDecodeError:
data_source_info_dict = {}
return data_source_info_dict
return None
def data_source_detail_dict(self):
if self.data_source_info:
if self.data_source_type == 'upload_file':
data_source_info_dict = json.loads(self.data_source_info)
file_detail = db.session.query(UploadFile). \
filter(UploadFile.id == data_source_info_dict['upload_file_id']). \
one_or_none()
if file_detail:
return {
'upload_file': {
'id': file_detail.id,
'name': file_detail.name,
'size': file_detail.size,
'extension': file_detail.extension,
'mime_type': file_detail.mime_type,
'created_by': file_detail.created_by,
'created_at': file_detail.created_at.timestamp()
}
}
elif self.data_source_type == 'notion_import':
return json.loads(self.data_source_info)
return {}
def average_segment_length(self):
if self.word_count and self.word_count != 0 and self.segment_count and self.segment_count != 0:
return self.word_count // self.segment_count
return 0
def dataset_process_rule(self):
if self.dataset_process_rule_id:
return DatasetProcessRule.query.get(self.dataset_process_rule_id)
return None
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).one_or_none()
def segment_count(self):
return DocumentSegment.query.filter(DocumentSegment.document_id == self.id).count()
def hit_count(self):
return DocumentSegment.query.with_entities(func.coalesce(func.sum(DocumentSegment.hit_count), 0)) \
.filter(DocumentSegment.document_id == self.id).scalar()
class DocumentSegment(db.Model):
__tablename__ = 'document_segments'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_segment_pkey'),
db.Index('document_segment_dataset_id_idx', 'dataset_id'),
db.Index('document_segment_document_id_idx', 'document_id'),
db.Index('document_segment_tenant_dataset_idx', 'dataset_id', 'tenant_id'),
db.Index('document_segment_tenant_document_idx', 'document_id', 'tenant_id'),
db.Index('document_segment_dataset_node_idx', 'dataset_id', 'index_node_id'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
document_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
content = db.Column(db.Text, nullable=False)
answer = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=False)
tokens = db.Column(db.Integer, nullable=False)
# indexing fields
keywords = db.Column(db.JSON, nullable=True)
index_node_id = db.Column(db.String(255), nullable=True)
index_node_hash = db.Column(db.String(255), nullable=True)
# basic fields
hit_count = db.Column(db.Integer, nullable=False, default=0)
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
status = db.Column(db.String(255), nullable=False,
server_default=db.text("'waiting'::character varying"))
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
indexing_at = db.Column(db.DateTime, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).first()
def document(self):
return db.session.query(Document).filter(Document.id == self.document_id).first()
def previous_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position - 1
).first()
def next_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position + 1
).first()
class DataSourceBinding(db.Model):
__tablename__ = 'data_source_bindings'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='source_binding_pkey'),
db.Index('source_binding_tenant_id_idx', 'tenant_id'),
db.Index('source_info_idx', "source_info", postgresql_using='gin')
)
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
access_token = db.Column(db.String(255), nullable=False)
provider = db.Column(db.String(255), nullable=False)
source_info = db.Column(JSONB, nullable=False)
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
disabled = db.Column(db.Boolean, nullable=True, server_default=db.text('false'))
The provided code snippet includes the necessary dependencies for implementing the `document_indexing_sync_task` function. Write a Python function `def document_indexing_sync_task(dataset_id: str, document_id: str)` to solve the following problem:
Async update document :param dataset_id: :param document_id: Usage: document_indexing_sync_task.delay(dataset_id, document_id)
Here is the function:
def document_indexing_sync_task(dataset_id: str, document_id: str):
"""
Async update document
:param dataset_id:
:param document_id:
Usage: document_indexing_sync_task.delay(dataset_id, document_id)
"""
logging.info(click.style('Start sync document: {}'.format(document_id), fg='green'))
start_at = time.perf_counter()
document = db.session.query(Document).filter(
Document.id == document_id,
Document.dataset_id == dataset_id
).first()
if not document:
raise NotFound('Document not found')
data_source_info = document.data_source_info_dict
if document.data_source_type == 'notion_import':
if not data_source_info or 'notion_page_id' not in data_source_info \
or 'notion_workspace_id' not in data_source_info:
raise ValueError("no notion page found")
workspace_id = data_source_info['notion_workspace_id']
page_id = data_source_info['notion_page_id']
page_type = data_source_info['type']
page_edited_time = data_source_info['last_edited_time']
data_source_binding = DataSourceBinding.query.filter(
db.and_(
DataSourceBinding.tenant_id == document.tenant_id,
DataSourceBinding.provider == 'notion',
DataSourceBinding.disabled == False,
DataSourceBinding.source_info['workspace_id'] == f'"{workspace_id}"'
)
).first()
if not data_source_binding:
raise ValueError('Data source binding not found.')
loader = NotionExtractor(
notion_workspace_id=workspace_id,
notion_obj_id=page_id,
notion_page_type=page_type,
notion_access_token=data_source_binding.access_token,
tenant_id=document.tenant_id
)
last_edited_time = loader.get_notion_last_edited_time()
# check whether the page has been updated since the last sync
if last_edited_time != page_edited_time:
document.indexing_status = 'parsing'
document.processing_started_at = datetime.datetime.utcnow()
db.session.commit()
# delete all document segment and index
try:
dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
if not dataset:
raise Exception('Dataset not found')
index_type = document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document_id).all()
index_node_ids = [segment.index_node_id for segment in segments]
# delete from vector index
index_processor.clean(dataset, index_node_ids)
for segment in segments:
db.session.delete(segment)
end_at = time.perf_counter()
logging.info(
click.style('Cleaned document {} after data source or process rule update, latency: {}'.format(document_id, end_at - start_at), fg='green'))
except Exception:
logging.exception("Failed to clean document after data source or process rule update")
try:
indexing_runner = IndexingRunner()
indexing_runner.run([document])
end_at = time.perf_counter()
logging.info(click.style('update document: {} latency: {}'.format(document.id, end_at - start_at), fg='green'))
except DocumentIsPausedException as ex:
logging.info(click.style(str(ex), fg='yellow'))
except Exception:
pass | Async update document :param dataset_id: :param document_id: Usage: document_indexing_sync_task.delay(dataset_id, document_id) |
17,029 | import logging
import time
import click
from celery import shared_task
from werkzeug.exceptions import NotFound
from core.indexing_runner import DocumentIsPausedException, IndexingRunner
from extensions.ext_database import db
from models.dataset import Document
class IndexingRunner:
def __init__(self):
self.storage = storage
self.model_manager = ModelManager()
def run(self, dataset_documents: list[DatasetDocument]):
"""Run the indexing process."""
for dataset_document in dataset_documents:
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
# extract
text_docs = self._extract(index_processor, dataset_document, processing_rule.to_dict())
# transform
documents = self._transform(index_processor, dataset, text_docs, dataset_document.doc_language,
processing_rule.to_dict())
# save segment
self._load_segments(dataset, dataset_document, documents)
# load
self._load(
index_processor=index_processor,
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except ObjectDeletedError:
logging.warning('Document deleted, document id: {}'.format(dataset_document.id))
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def run_in_splitting_status(self, dataset_document: DatasetDocument):
"""Run the indexing process when the index_status is splitting."""
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get exist document_segment list and delete
document_segments = DocumentSegment.query.filter_by(
dataset_id=dataset.id,
document_id=dataset_document.id
).all()
for document_segment in document_segments:
db.session.delete(document_segment)
db.session.commit()
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
# extract
text_docs = self._extract(index_processor, dataset_document, processing_rule.to_dict())
# transform
documents = self._transform(index_processor, dataset, text_docs, dataset_document.doc_language,
processing_rule.to_dict())
# save segment
self._load_segments(dataset, dataset_document, documents)
# load
self._load(
index_processor=index_processor,
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def run_in_indexing_status(self, dataset_document: DatasetDocument):
"""Run the indexing process when the index_status is indexing."""
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get exist document_segment list and delete
document_segments = DocumentSegment.query.filter_by(
dataset_id=dataset.id,
document_id=dataset_document.id
).all()
documents = []
if document_segments:
for document_segment in document_segments:
# transform segment to node
if document_segment.status != "completed":
document = Document(
page_content=document_segment.content,
metadata={
"doc_id": document_segment.index_node_id,
"doc_hash": document_segment.index_node_hash,
"document_id": document_segment.document_id,
"dataset_id": document_segment.dataset_id,
}
)
documents.append(document)
# build index
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
self._load(
index_processor=index_processor,
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def indexing_estimate(self, tenant_id: str, extract_settings: list[ExtractSetting], tmp_processing_rule: dict,
doc_form: str = None, doc_language: str = 'English', dataset_id: str = None,
indexing_technique: str = 'economy') -> dict:
"""
Estimate the indexing for the document.
"""
# check document limit
features = FeatureService.get_features(tenant_id)
if features.billing.enabled:
count = len(extract_settings)
batch_upload_limit = int(current_app.config['BATCH_UPLOAD_LIMIT'])
if count > batch_upload_limit:
raise ValueError(f"You have reached the batch upload limit of {batch_upload_limit}.")
embedding_model_instance = None
if dataset_id:
dataset = Dataset.query.filter_by(
id=dataset_id
).first()
if not dataset:
raise ValueError('Dataset not found.')
if dataset.indexing_technique == 'high_quality' or indexing_technique == 'high_quality':
if dataset.embedding_model_provider:
embedding_model_instance = self.model_manager.get_model_instance(
tenant_id=tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
else:
embedding_model_instance = self.model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.TEXT_EMBEDDING,
)
else:
if indexing_technique == 'high_quality':
embedding_model_instance = self.model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.TEXT_EMBEDDING,
)
tokens = 0
preview_texts = []
total_segments = 0
total_price = 0
currency = 'USD'
index_type = doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
all_text_docs = []
for extract_setting in extract_settings:
# extract
text_docs = index_processor.extract(extract_setting, process_rule_mode=tmp_processing_rule["mode"])
all_text_docs.extend(text_docs)
processing_rule = DatasetProcessRule(
mode=tmp_processing_rule["mode"],
rules=json.dumps(tmp_processing_rule["rules"])
)
# get splitter
splitter = self._get_splitter(processing_rule, embedding_model_instance)
# split to documents
documents = self._split_to_documents_for_estimate(
text_docs=text_docs,
splitter=splitter,
processing_rule=processing_rule
)
total_segments += len(documents)
for document in documents:
if len(preview_texts) < 5:
preview_texts.append(document.page_content)
if indexing_technique == 'high_quality' or embedding_model_instance:
embedding_model_type_instance = embedding_model_instance.model_type_instance
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_type_instance)
tokens += embedding_model_type_instance.get_num_tokens(
model=embedding_model_instance.model,
credentials=embedding_model_instance.credentials,
texts=[self.filter_string(document.page_content)]
)
if doc_form and doc_form == 'qa_model':
model_instance = self.model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.LLM
)
model_type_instance = model_instance.model_type_instance
model_type_instance = cast(LargeLanguageModel, model_type_instance)
if len(preview_texts) > 0:
# qa model document
response = LLMGenerator.generate_qa_document(current_user.current_tenant_id, preview_texts[0],
doc_language)
document_qa_list = self.format_split_text(response)
price_info = model_type_instance.get_price(
model=model_instance.model,
credentials=model_instance.credentials,
price_type=PriceType.INPUT,
tokens=total_segments * 2000,
)
return {
"total_segments": total_segments * 20,
"tokens": total_segments * 2000,
"total_price": '{:f}'.format(price_info.total_amount),
"currency": price_info.currency,
"qa_preview": document_qa_list,
"preview": preview_texts
}
if embedding_model_instance:
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_instance.model_type_instance)
embedding_price_info = embedding_model_type_instance.get_price(
model=embedding_model_instance.model,
credentials=embedding_model_instance.credentials,
price_type=PriceType.INPUT,
tokens=tokens
)
total_price = '{:f}'.format(embedding_price_info.total_amount)
currency = embedding_price_info.currency
return {
"total_segments": total_segments,
"tokens": tokens,
"total_price": total_price,
"currency": currency,
"preview": preview_texts
}
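A sketch of the non-QA return shape, assuming an 'economy' run in which no embedding model is resolved (all values invented):
# {
#     "total_segments": 12,
#     "tokens": 0,
#     "total_price": 0,
#     "currency": 'USD',
#     "preview": ['first chunk text...', 'second chunk text...']
# }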
def _extract(self, index_processor: BaseIndexProcessor, dataset_document: DatasetDocument, process_rule: dict) \
-> list[Document]:
# load file
if dataset_document.data_source_type not in ["upload_file", "notion_import"]:
return []
data_source_info = dataset_document.data_source_info_dict
text_docs = []
if dataset_document.data_source_type == 'upload_file':
if not data_source_info or 'upload_file_id' not in data_source_info:
raise ValueError("no upload file found")
file_detail = db.session.query(UploadFile). \
filter(UploadFile.id == data_source_info['upload_file_id']). \
one_or_none()
if file_detail:
extract_setting = ExtractSetting(
datasource_type="upload_file",
upload_file=file_detail,
document_model=dataset_document.doc_form
)
text_docs = index_processor.extract(extract_setting, process_rule_mode=process_rule['mode'])
elif dataset_document.data_source_type == 'notion_import':
if (not data_source_info or 'notion_workspace_id' not in data_source_info
or 'notion_page_id' not in data_source_info):
raise ValueError("no notion import info found")
extract_setting = ExtractSetting(
datasource_type="notion_import",
notion_info={
"notion_workspace_id": data_source_info['notion_workspace_id'],
"notion_obj_id": data_source_info['notion_page_id'],
"notion_page_type": data_source_info['type'],
"document": dataset_document,
"tenant_id": dataset_document.tenant_id
},
document_model=dataset_document.doc_form
)
text_docs = index_processor.extract(extract_setting, process_rule_mode=process_rule['mode'])
# update document status to splitting
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="splitting",
extra_update_params={
DatasetDocument.word_count: sum([len(text_doc.page_content) for text_doc in text_docs]),
DatasetDocument.parsing_completed_at: datetime.datetime.utcnow()
}
)
# attach the document model id and dataset id to each extracted doc
text_docs = cast(list[Document], text_docs)
for text_doc in text_docs:
text_doc.metadata['document_id'] = dataset_document.id
text_doc.metadata['dataset_id'] = dataset_document.dataset_id
return text_docs
def filter_string(self, text):
text = re.sub(r'<\|', '<', text)
text = re.sub(r'\|>', '>', text)
text = re.sub(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F\xEF\xBF\xBE]', '', text)
# Unicode U+FFFE
text = re.sub('\uFFFE', '', text)
return text
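A doctest-style illustration of the substitutions above: the '<|' and '|>' fences are rewritten and the NUL byte is dropped:
filter_string('<|system|>\x00 hello')
# -> '<system> hello'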
def _get_splitter(self, processing_rule: DatasetProcessRule,
embedding_model_instance: Optional[ModelInstance]) -> TextSplitter:
"""
Get the TextSplitter object according to the processing rule.
"""
if processing_rule.mode == "custom":
# The user-defined segmentation rule
rules = json.loads(processing_rule.rules)
segmentation = rules["segmentation"]
if segmentation["max_tokens"] < 50 or segmentation["max_tokens"] > 1000:
raise ValueError("Custom segment length should be between 50 and 1000.")
separator = segmentation["separator"]
if separator:
separator = separator.replace('\\n', '\n')
if 'chunk_overlap' in segmentation and segmentation['chunk_overlap']:
chunk_overlap = segmentation['chunk_overlap']
else:
chunk_overlap = 0
character_splitter = FixedRecursiveCharacterTextSplitter.from_encoder(
chunk_size=segmentation["max_tokens"],
chunk_overlap=chunk_overlap,
fixed_separator=separator,
separators=["\n\n", "。", ".", " ", ""],
embedding_model_instance=embedding_model_instance
)
else:
# Automatic segmentation
character_splitter = EnhanceRecursiveCharacterTextSplitter.from_encoder(
chunk_size=DatasetProcessRule.AUTOMATIC_RULES['segmentation']['max_tokens'],
chunk_overlap=DatasetProcessRule.AUTOMATIC_RULES['segmentation']['chunk_overlap'],
separators=["\n\n", "。", ".", " ", ""],
embedding_model_instance=embedding_model_instance
)
return character_splitter
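A hedged example of a custom rule payload that passes the validation above (field values are illustrative, and `runner` stands in for an IndexingRunner instance):
processing_rule = DatasetProcessRule(
mode='custom',
rules=json.dumps({'segmentation': {'max_tokens': 500, 'separator': '\\n\\n', 'chunk_overlap': 50}})
)
splitter = runner._get_splitter(processing_rule, embedding_model_instance=None)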
def _step_split(self, text_docs: list[Document], splitter: TextSplitter,
dataset: Dataset, dataset_document: DatasetDocument, processing_rule: DatasetProcessRule) \
-> list[Document]:
"""
Split the text documents into documents and save them to the document segment.
"""
documents = self._split_to_documents(
text_docs=text_docs,
splitter=splitter,
processing_rule=processing_rule,
tenant_id=dataset.tenant_id,
document_form=dataset_document.doc_form,
document_language=dataset_document.doc_language
)
# save node to document segment
doc_store = DatasetDocumentStore(
dataset=dataset,
user_id=dataset_document.created_by,
document_id=dataset_document.id
)
# add document segments
doc_store.add_documents(documents)
# update document status to indexing
cur_time = datetime.datetime.utcnow()
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="indexing",
extra_update_params={
DatasetDocument.cleaning_completed_at: cur_time,
DatasetDocument.splitting_completed_at: cur_time,
}
)
# update segment status to indexing
self._update_segments_by_document(
dataset_document_id=dataset_document.id,
update_params={
DocumentSegment.status: "indexing",
DocumentSegment.indexing_at: datetime.datetime.utcnow()
}
)
return documents
def _split_to_documents(self, text_docs: list[Document], splitter: TextSplitter,
processing_rule: DatasetProcessRule, tenant_id: str,
document_form: str, document_language: str) -> list[Document]:
"""
Split the text documents into nodes.
"""
all_documents = []
all_qa_documents = []
for text_doc in text_docs:
# document clean
document_text = self._document_clean(text_doc.page_content, processing_rule)
text_doc.page_content = document_text
# parse document to nodes
documents = splitter.split_documents([text_doc])
split_documents = []
for document_node in documents:
if document_node.page_content.strip():
doc_id = str(uuid.uuid4())
doc_hash = helper.generate_text_hash(document_node.page_content)
document_node.metadata['doc_id'] = doc_id
document_node.metadata['doc_hash'] = doc_hash
# remove the leading splitter character, if any
page_content = document_node.page_content
if page_content.startswith(".") or page_content.startswith("。"):
page_content = page_content[1:]
document_node.page_content = page_content
if document_node.page_content:
split_documents.append(document_node)
all_documents.extend(split_documents)
# processing qa document
if document_form == 'qa_model':
for i in range(0, len(all_documents), 10):
threads = []
sub_documents = all_documents[i:i + 10]
for doc in sub_documents:
document_format_thread = threading.Thread(target=self.format_qa_document, kwargs={
'flask_app': current_app._get_current_object(),
'tenant_id': tenant_id, 'document_node': doc, 'all_qa_documents': all_qa_documents,
'document_language': document_language})
threads.append(document_format_thread)
document_format_thread.start()
for thread in threads:
thread.join()
return all_qa_documents
return all_documents
def format_qa_document(self, flask_app: Flask, tenant_id: str, document_node, all_qa_documents, document_language):
format_documents = []
if document_node.page_content is None or not document_node.page_content.strip():
return
with flask_app.app_context():
try:
# qa model document
response = LLMGenerator.generate_qa_document(tenant_id, document_node.page_content, document_language)
document_qa_list = self.format_split_text(response)
qa_documents = []
for result in document_qa_list:
qa_document = Document(page_content=result['question'], metadata=document_node.metadata.copy())
doc_id = str(uuid.uuid4())
doc_hash = helper.generate_text_hash(result['question'])
qa_document.metadata['answer'] = result['answer']
qa_document.metadata['doc_id'] = doc_id
qa_document.metadata['doc_hash'] = doc_hash
qa_documents.append(qa_document)
format_documents.extend(qa_documents)
except Exception as e:
logging.exception(e)
all_qa_documents.extend(format_documents)
def _split_to_documents_for_estimate(self, text_docs: list[Document], splitter: TextSplitter,
processing_rule: DatasetProcessRule) -> list[Document]:
"""
Split the text documents into nodes.
"""
all_documents = []
for text_doc in text_docs:
# document clean
document_text = self._document_clean(text_doc.page_content, processing_rule)
text_doc.page_content = document_text
# parse document to nodes
documents = splitter.split_documents([text_doc])
split_documents = []
for document in documents:
if document.page_content is None or not document.page_content.strip():
continue
doc_id = str(uuid.uuid4())
doc_hash = helper.generate_text_hash(document.page_content)
document.metadata['doc_id'] = doc_id
document.metadata['doc_hash'] = doc_hash
split_documents.append(document)
all_documents.extend(split_documents)
return all_documents
def _document_clean(self, text: str, processing_rule: DatasetProcessRule) -> str:
"""
Clean the document text according to the processing rules.
"""
if processing_rule.mode == "automatic":
rules = DatasetProcessRule.AUTOMATIC_RULES
else:
rules = json.loads(processing_rule.rules) if processing_rule.rules else {}
if 'pre_processing_rules' in rules:
pre_processing_rules = rules["pre_processing_rules"]
for pre_processing_rule in pre_processing_rules:
if pre_processing_rule["id"] == "remove_extra_spaces" and pre_processing_rule["enabled"] is True:
# Remove extra spaces
pattern = r'\n{3,}'
text = re.sub(pattern, '\n\n', text)
pattern = r'[\t\f\r\x20\u00a0\u1680\u180e\u2000-\u200a\u202f\u205f\u3000]{2,}'
text = re.sub(pattern, ' ', text)
elif pre_processing_rule["id"] == "remove_urls_emails" and pre_processing_rule["enabled"] is True:
# Remove email
pattern = r'([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)'
text = re.sub(pattern, '', text)
# Remove URL
pattern = r'https?://[^\s]+'
text = re.sub(pattern, '', text)
return text
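An illustrative rules payload for the non-automatic path above; the rule ids mirror the two checked in the loop:
rules = {'pre_processing_rules': [
{'id': 'remove_extra_spaces', 'enabled': True},
{'id': 'remove_urls_emails', 'enabled': False}
]}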
def format_split_text(self, text):
regex = r"Q\d+:\s*(.*?)\s*A\d+:\s*([\s\S]*?)(?=Q\d+:|$)"
matches = re.findall(regex, text, re.UNICODE)
return [
{
"question": q,
"answer": re.sub(r"\n\s*", "\n", a.strip())
}
for q, a in matches if q and a
]
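A small worked example of the Q/A regex (invented text):
format_split_text('Q1: What is it?\nA1: A tool.\nQ2: Why?\nA2: Because.')
# -> [{'question': 'What is it?', 'answer': 'A tool.'},
#     {'question': 'Why?', 'answer': 'Because.'}]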
def _load(self, index_processor: BaseIndexProcessor, dataset: Dataset,
dataset_document: DatasetDocument, documents: list[Document]) -> None:
"""
insert index and update document/segment status to completed
"""
embedding_model_instance = None
if dataset.indexing_technique == 'high_quality':
embedding_model_instance = self.model_manager.get_model_instance(
tenant_id=dataset.tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
# chunk nodes by chunk size
indexing_start_at = time.perf_counter()
tokens = 0
chunk_size = 100
embedding_model_type_instance = None
if embedding_model_instance:
embedding_model_type_instance = embedding_model_instance.model_type_instance
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_type_instance)
for i in range(0, len(documents), chunk_size):
# check document is paused
self._check_document_paused_status(dataset_document.id)
chunk_documents = documents[i:i + chunk_size]
if dataset.indexing_technique == 'high_quality' or embedding_model_type_instance:
tokens += sum(
embedding_model_type_instance.get_num_tokens(
embedding_model_instance.model,
embedding_model_instance.credentials,
[document.page_content]
)
for document in chunk_documents
)
# load index
index_processor.load(dataset, chunk_documents)
db.session.add(dataset)
document_ids = [document.metadata['doc_id'] for document in chunk_documents]
db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == dataset_document.id,
DocumentSegment.index_node_id.in_(document_ids),
DocumentSegment.status == "indexing"
).update({
DocumentSegment.status: "completed",
DocumentSegment.enabled: True,
DocumentSegment.completed_at: datetime.datetime.utcnow()
})
db.session.commit()
indexing_end_at = time.perf_counter()
# update document status to completed
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="completed",
extra_update_params={
DatasetDocument.tokens: tokens,
DatasetDocument.completed_at: datetime.datetime.utcnow(),
DatasetDocument.indexing_latency: indexing_end_at - indexing_start_at,
}
)
def _check_document_paused_status(self, document_id: str):
indexing_cache_key = 'document_{}_is_paused'.format(document_id)
result = redis_client.get(indexing_cache_key)
if result:
raise DocumentIsPausedException()
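A hedged sketch of how the pause flag gets set from elsewhere; the key format follows the string above, while the use of setnx is an assumption:
indexing_cache_key = 'document_{}_is_paused'.format(document.id)
redis_client.setnx(indexing_cache_key, 'True')  # any stored value makes the check above raise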
def _update_document_index_status(self, document_id: str, after_indexing_status: str,
extra_update_params: Optional[dict] = None) -> None:
"""
Update the document indexing status.
"""
count = DatasetDocument.query.filter_by(id=document_id, is_paused=True).count()
if count > 0:
raise DocumentIsPausedException()
document = DatasetDocument.query.filter_by(id=document_id).first()
if not document:
raise DocumentIsDeletedPausedException()
update_params = {
DatasetDocument.indexing_status: after_indexing_status
}
if extra_update_params:
update_params.update(extra_update_params)
DatasetDocument.query.filter_by(id=document_id).update(update_params)
db.session.commit()
def _update_segments_by_document(self, dataset_document_id: str, update_params: dict) -> None:
"""
Update the document segment by document id.
"""
DocumentSegment.query.filter_by(document_id=dataset_document_id).update(update_params)
db.session.commit()
def batch_add_segments(self, segments: list[DocumentSegment], dataset: Dataset):
"""
Batch add segments index processing
"""
documents = []
for segment in segments:
document = Document(
page_content=segment.content,
metadata={
"doc_id": segment.index_node_id,
"doc_hash": segment.index_node_hash,
"document_id": segment.document_id,
"dataset_id": segment.dataset_id,
}
)
documents.append(document)
# save vector index
index_type = dataset.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
index_processor.load(dataset, documents)
def _transform(self, index_processor: BaseIndexProcessor, dataset: Dataset,
text_docs: list[Document], doc_language: str, process_rule: dict) -> list[Document]:
# get embedding model instance
embedding_model_instance = None
if dataset.indexing_technique == 'high_quality':
if dataset.embedding_model_provider:
embedding_model_instance = self.model_manager.get_model_instance(
tenant_id=dataset.tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
else:
embedding_model_instance = self.model_manager.get_default_model_instance(
tenant_id=dataset.tenant_id,
model_type=ModelType.TEXT_EMBEDDING,
)
documents = index_processor.transform(text_docs, embedding_model_instance=embedding_model_instance,
process_rule=process_rule, tenant_id=dataset.tenant_id,
doc_language=doc_language)
return documents
def _load_segments(self, dataset, dataset_document, documents):
# save node to document segment
doc_store = DatasetDocumentStore(
dataset=dataset,
user_id=dataset_document.created_by,
document_id=dataset_document.id
)
# add document segments
doc_store.add_documents(documents)
# update document status to indexing
cur_time = datetime.datetime.utcnow()
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="indexing",
extra_update_params={
DatasetDocument.cleaning_completed_at: cur_time,
DatasetDocument.splitting_completed_at: cur_time,
}
)
# update segment status to indexing
self._update_segments_by_document(
dataset_document_id=dataset_document.id,
update_params={
DocumentSegment.status: "indexing",
DocumentSegment.indexing_at: datetime.datetime.utcnow()
}
)
class DocumentIsPausedException(Exception):
pass
db = SQLAlchemy()
class Document(db.Model):
__tablename__ = 'documents'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_pkey'),
db.Index('document_dataset_id_idx', 'dataset_id'),
db.Index('document_is_paused_idx', 'is_paused'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
data_source_type = db.Column(db.String(255), nullable=False)
data_source_info = db.Column(db.Text, nullable=True)
dataset_process_rule_id = db.Column(UUID, nullable=True)
batch = db.Column(db.String(255), nullable=False)
name = db.Column(db.String(255), nullable=False)
created_from = db.Column(db.String(255), nullable=False)
created_by = db.Column(UUID, nullable=False)
created_api_request_id = db.Column(UUID, nullable=True)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
# start processing
processing_started_at = db.Column(db.DateTime, nullable=True)
# parsing
file_id = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=True)
parsing_completed_at = db.Column(db.DateTime, nullable=True)
# cleaning
cleaning_completed_at = db.Column(db.DateTime, nullable=True)
# split
splitting_completed_at = db.Column(db.DateTime, nullable=True)
# indexing
tokens = db.Column(db.Integer, nullable=True)
indexing_latency = db.Column(db.Float, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
# pause
is_paused = db.Column(db.Boolean, nullable=True, server_default=db.text('false'))
paused_by = db.Column(UUID, nullable=True)
paused_at = db.Column(db.DateTime, nullable=True)
# error
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
# basic fields
indexing_status = db.Column(db.String(
255), nullable=False, server_default=db.text("'waiting'::character varying"))
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
archived = db.Column(db.Boolean, nullable=False,
server_default=db.text('false'))
archived_reason = db.Column(db.String(255), nullable=True)
archived_by = db.Column(UUID, nullable=True)
archived_at = db.Column(db.DateTime, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
doc_type = db.Column(db.String(40), nullable=True)
doc_metadata = db.Column(db.JSON, nullable=True)
doc_form = db.Column(db.String(
255), nullable=False, server_default=db.text("'text_model'::character varying"))
doc_language = db.Column(db.String(255), nullable=True)
DATA_SOURCES = ['upload_file', 'notion_import']
def display_status(self):
status = None
if self.indexing_status == 'waiting':
status = 'queuing'
elif self.indexing_status not in ['completed', 'error', 'waiting'] and self.is_paused:
status = 'paused'
elif self.indexing_status in ['parsing', 'cleaning', 'splitting', 'indexing']:
status = 'indexing'
elif self.indexing_status == 'error':
status = 'error'
elif self.indexing_status == 'completed' and not self.archived and self.enabled:
status = 'available'
elif self.indexing_status == 'completed' and not self.archived and not self.enabled:
status = 'disabled'
elif self.indexing_status == 'completed' and self.archived:
status = 'archived'
return status
def data_source_info_dict(self):
if self.data_source_info:
try:
data_source_info_dict = json.loads(self.data_source_info)
except JSONDecodeError:
data_source_info_dict = {}
return data_source_info_dict
return None
def data_source_detail_dict(self):
if self.data_source_info:
if self.data_source_type == 'upload_file':
data_source_info_dict = json.loads(self.data_source_info)
file_detail = db.session.query(UploadFile). \
filter(UploadFile.id == data_source_info_dict['upload_file_id']). \
one_or_none()
if file_detail:
return {
'upload_file': {
'id': file_detail.id,
'name': file_detail.name,
'size': file_detail.size,
'extension': file_detail.extension,
'mime_type': file_detail.mime_type,
'created_by': file_detail.created_by,
'created_at': file_detail.created_at.timestamp()
}
}
elif self.data_source_type == 'notion_import':
return json.loads(self.data_source_info)
return {}
def average_segment_length(self):
if self.word_count and self.word_count != 0 and self.segment_count and self.segment_count != 0:
return self.word_count // self.segment_count
return 0
def dataset_process_rule(self):
if self.dataset_process_rule_id:
return DatasetProcessRule.query.get(self.dataset_process_rule_id)
return None
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).one_or_none()
def segment_count(self):
return DocumentSegment.query.filter(DocumentSegment.document_id == self.id).count()
def hit_count(self):
return DocumentSegment.query.with_entities(func.coalesce(func.sum(DocumentSegment.hit_count), 0)) \
.filter(DocumentSegment.document_id == self.id).scalar()
The provided code snippet includes the necessary dependencies for implementing the `recover_document_indexing_task` function. Write a Python function `def recover_document_indexing_task(dataset_id: str, document_id: str)` to solve the following problem:
Async recover document :param dataset_id: :param document_id: Usage: recover_document_indexing_task.delay(dataset_id, document_id)
Here is the function:
def recover_document_indexing_task(dataset_id: str, document_id: str):
"""
Async recover document
:param dataset_id:
:param document_id:
Usage: recover_document_indexing_task.delay(dataset_id, document_id)
"""
logging.info(click.style('Recover document: {}'.format(document_id), fg='green'))
start_at = time.perf_counter()
document = db.session.query(Document).filter(
Document.id == document_id,
Document.dataset_id == dataset_id
).first()
if not document:
raise NotFound('Document not found')
try:
indexing_runner = IndexingRunner()
if document.indexing_status in ["waiting", "parsing", "cleaning"]:
indexing_runner.run([document])
elif document.indexing_status == "splitting":
indexing_runner.run_in_splitting_status(document)
elif document.indexing_status == "indexing":
indexing_runner.run_in_indexing_status(document)
end_at = time.perf_counter()
logging.info(click.style('Processed document: {} latency: {}'.format(document.id, end_at - start_at), fg='green'))
except DocumentIsPausedException as ex:
logging.info(click.style(str(ex), fg='yellow'))
except Exception:
pass | Async recover document :param dataset_id: :param document_id: Usage: recover_document_indexing_task.delay(dataset_id, document_id) |
17,030 | import datetime
import logging
import time
import click
from celery import shared_task
from werkzeug.exceptions import NotFound
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from core.rag.models.document import Document
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import Document as DatasetDocument
from models.dataset import DocumentSegment
class IndexProcessorFactory:
"""IndexProcessorInit.
"""
def __init__(self, index_type: str):
self._index_type = index_type
def init_index_processor(self) -> BaseIndexProcessor:
"""Init index processor."""
if not self._index_type:
raise ValueError("Index type must be specified.")
if self._index_type == IndexType.PARAGRAPH_INDEX.value:
return ParagraphIndexProcessor()
elif self._index_type == IndexType.QA_INDEX.value:
return QAIndexProcessor()
else:
raise ValueError(f"Index type {self._index_type} is not supported.")
class Document(BaseModel):
"""Class for storing a piece of text and associated metadata."""
page_content: str
metadata: Optional[dict] = Field(default_factory=dict)
"""Arbitrary metadata about the page content (e.g., source, relationships to other
documents, etc.).
"""
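Constructing one is straightforward; the metadata keys shown are the ones the indexing code in this document attaches (values hypothetical):
doc = Document(page_content='chunk text', metadata={'doc_id': 'a-uuid', 'doc_hash': 'a-text-hash'})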
db = SQLAlchemy()
redis_client = redis.Redis()
class Document(db.Model):
__tablename__ = 'documents'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_pkey'),
db.Index('document_dataset_id_idx', 'dataset_id'),
db.Index('document_is_paused_idx', 'is_paused'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
data_source_type = db.Column(db.String(255), nullable=False)
data_source_info = db.Column(db.Text, nullable=True)
dataset_process_rule_id = db.Column(UUID, nullable=True)
batch = db.Column(db.String(255), nullable=False)
name = db.Column(db.String(255), nullable=False)
created_from = db.Column(db.String(255), nullable=False)
created_by = db.Column(UUID, nullable=False)
created_api_request_id = db.Column(UUID, nullable=True)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
# start processing
processing_started_at = db.Column(db.DateTime, nullable=True)
# parsing
file_id = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=True)
parsing_completed_at = db.Column(db.DateTime, nullable=True)
# cleaning
cleaning_completed_at = db.Column(db.DateTime, nullable=True)
# split
splitting_completed_at = db.Column(db.DateTime, nullable=True)
# indexing
tokens = db.Column(db.Integer, nullable=True)
indexing_latency = db.Column(db.Float, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
# pause
is_paused = db.Column(db.Boolean, nullable=True, server_default=db.text('false'))
paused_by = db.Column(UUID, nullable=True)
paused_at = db.Column(db.DateTime, nullable=True)
# error
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
# basic fields
indexing_status = db.Column(db.String(
255), nullable=False, server_default=db.text("'waiting'::character varying"))
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
archived = db.Column(db.Boolean, nullable=False,
server_default=db.text('false'))
archived_reason = db.Column(db.String(255), nullable=True)
archived_by = db.Column(UUID, nullable=True)
archived_at = db.Column(db.DateTime, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
doc_type = db.Column(db.String(40), nullable=True)
doc_metadata = db.Column(db.JSON, nullable=True)
doc_form = db.Column(db.String(
255), nullable=False, server_default=db.text("'text_model'::character varying"))
doc_language = db.Column(db.String(255), nullable=True)
DATA_SOURCES = ['upload_file', 'notion_import']
def display_status(self):
status = None
if self.indexing_status == 'waiting':
status = 'queuing'
elif self.indexing_status not in ['completed', 'error', 'waiting'] and self.is_paused:
status = 'paused'
elif self.indexing_status in ['parsing', 'cleaning', 'splitting', 'indexing']:
status = 'indexing'
elif self.indexing_status == 'error':
status = 'error'
elif self.indexing_status == 'completed' and not self.archived and self.enabled:
status = 'available'
elif self.indexing_status == 'completed' and not self.archived and not self.enabled:
status = 'disabled'
elif self.indexing_status == 'completed' and self.archived:
status = 'archived'
return status
def data_source_info_dict(self):
if self.data_source_info:
try:
data_source_info_dict = json.loads(self.data_source_info)
except JSONDecodeError:
data_source_info_dict = {}
return data_source_info_dict
return None
def data_source_detail_dict(self):
if self.data_source_info:
if self.data_source_type == 'upload_file':
data_source_info_dict = json.loads(self.data_source_info)
file_detail = db.session.query(UploadFile). \
filter(UploadFile.id == data_source_info_dict['upload_file_id']). \
one_or_none()
if file_detail:
return {
'upload_file': {
'id': file_detail.id,
'name': file_detail.name,
'size': file_detail.size,
'extension': file_detail.extension,
'mime_type': file_detail.mime_type,
'created_by': file_detail.created_by,
'created_at': file_detail.created_at.timestamp()
}
}
elif self.data_source_type == 'notion_import':
return json.loads(self.data_source_info)
return {}
def average_segment_length(self):
if self.word_count and self.word_count != 0 and self.segment_count and self.segment_count != 0:
return self.word_count // self.segment_count
return 0
def dataset_process_rule(self):
if self.dataset_process_rule_id:
return DatasetProcessRule.query.get(self.dataset_process_rule_id)
return None
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).one_or_none()
def segment_count(self):
return DocumentSegment.query.filter(DocumentSegment.document_id == self.id).count()
def hit_count(self):
return DocumentSegment.query.with_entities(func.coalesce(func.sum(DocumentSegment.hit_count))) \
.filter(DocumentSegment.document_id == self.id).scalar()
class DocumentSegment(db.Model):
__tablename__ = 'document_segments'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_segment_pkey'),
db.Index('document_segment_dataset_id_idx', 'dataset_id'),
db.Index('document_segment_document_id_idx', 'document_id'),
db.Index('document_segment_tenant_dataset_idx', 'dataset_id', 'tenant_id'),
db.Index('document_segment_tenant_document_idx', 'document_id', 'tenant_id'),
db.Index('document_segment_dataset_node_idx', 'dataset_id', 'index_node_id'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
document_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
content = db.Column(db.Text, nullable=False)
answer = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=False)
tokens = db.Column(db.Integer, nullable=False)
# indexing fields
keywords = db.Column(db.JSON, nullable=True)
index_node_id = db.Column(db.String(255), nullable=True)
index_node_hash = db.Column(db.String(255), nullable=True)
# basic fields
hit_count = db.Column(db.Integer, nullable=False, default=0)
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
status = db.Column(db.String(255), nullable=False,
server_default=db.text("'waiting'::character varying"))
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
indexing_at = db.Column(db.DateTime, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).first()
def document(self):
return db.session.query(Document).filter(Document.id == self.document_id).first()
def previous_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position - 1
).first()
def next_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position + 1
).first()
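A hypothetical traversal sketch using the navigation helpers above (written as plain methods in this snippet; positions are assumed to start at 1):
segment = db.session.query(DocumentSegment).filter_by(
    document_id=some_document_id,  # hypothetical id
    position=1
).first()
while segment:
    print(segment.position, segment.content[:40])
    segment = segment.next_segment()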
The provided code snippet includes the necessary dependencies for implementing the `add_document_to_index_task` function. Write a Python function `def add_document_to_index_task(dataset_document_id: str)` to solve the following problem:
Async add a document to the index. :param dataset_document_id: dataset document id Usage: add_document_to_index_task.delay(dataset_document_id)
Here is the function:
@shared_task(queue='dataset')  # queue name assumed; the `shared_task` import and the `.delay()` usage imply a Celery task
def add_document_to_index_task(dataset_document_id: str):
    """
    Async add a document to the index.
    :param dataset_document_id: dataset document id
    Usage: add_document_to_index_task.delay(dataset_document_id)
    """
logging.info(click.style('Start add document to index: {}'.format(dataset_document_id), fg='green'))
start_at = time.perf_counter()
dataset_document = db.session.query(DatasetDocument).filter(DatasetDocument.id == dataset_document_id).first()
if not dataset_document:
raise NotFound('Document not found')
if dataset_document.indexing_status != 'completed':
return
indexing_cache_key = 'document_{}_indexing'.format(dataset_document.id)
try:
segments = db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == dataset_document.id,
DocumentSegment.enabled == True
) \
.order_by(DocumentSegment.position.asc()).all()
documents = []
for segment in segments:
document = Document(
page_content=segment.content,
metadata={
"doc_id": segment.index_node_id,
"doc_hash": segment.index_node_hash,
"document_id": segment.document_id,
"dataset_id": segment.dataset_id,
}
)
documents.append(document)
dataset = dataset_document.dataset
if not dataset:
raise Exception('Document has no dataset')
index_type = dataset.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
index_processor.load(dataset, documents)
end_at = time.perf_counter()
logging.info(
click.style('Document added to index: {} latency: {}'.format(dataset_document.id, end_at - start_at), fg='green'))
except Exception as e:
logging.exception("add document to index failed")
dataset_document.enabled = False
dataset_document.disabled_at = datetime.datetime.utcnow()
dataset_document.status = 'error'
dataset_document.error = str(e)
db.session.commit()
finally:
redis_client.delete(indexing_cache_key) | Async add a document to the index. :param dataset_document_id: dataset document id Usage: add_document_to_index_task.delay(dataset_document_id) |
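A plausible caller sketch (names are illustrative; the `setnx` guard mirrors the `indexing_cache_key` convention the task deletes in its `finally` block):
indexing_cache_key = 'document_{}_indexing'.format(document.id)
redis_client.setnx(indexing_cache_key, str(document.id))  # mark the document as indexing
add_document_to_index_task.delay(document.id)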
17,031 | import logging
import time
import click
from celery import shared_task
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from extensions.ext_database import db
from models.dataset import Dataset, Document, DocumentSegment
class IndexProcessorFactory:
"""IndexProcessorInit.
"""
def __init__(self, index_type: str):
self._index_type = index_type
def init_index_processor(self) -> BaseIndexProcessor:
"""Init index processor."""
if not self._index_type:
raise ValueError("Index type must be specified.")
if self._index_type == IndexType.PARAGRAPH_INDEX.value:
return ParagraphIndexProcessor()
elif self._index_type == IndexType.QA_INDEX.value:
return QAIndexProcessor()
else:
raise ValueError(f"Index type {self._index_type} is not supported.")
db = SQLAlchemy()
class Dataset(db.Model):
__tablename__ = 'datasets'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='dataset_pkey'),
db.Index('dataset_tenant_idx', 'tenant_id'),
db.Index('retrieval_model_idx', "retrieval_model", postgresql_using='gin')
)
INDEXING_TECHNIQUE_LIST = ['high_quality', 'economy', None]
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text, nullable=True)
provider = db.Column(db.String(255), nullable=False,
server_default=db.text("'vendor'::character varying"))
permission = db.Column(db.String(255), nullable=False,
server_default=db.text("'only_me'::character varying"))
data_source_type = db.Column(db.String(255))
indexing_technique = db.Column(db.String(255), nullable=True)
index_struct = db.Column(db.Text, nullable=True)
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
embedding_model = db.Column(db.String(255), nullable=True)
embedding_model_provider = db.Column(db.String(255), nullable=True)
collection_binding_id = db.Column(UUID, nullable=True)
retrieval_model = db.Column(JSONB, nullable=True)
def dataset_keyword_table(self):
dataset_keyword_table = db.session.query(DatasetKeywordTable).filter(
DatasetKeywordTable.dataset_id == self.id).first()
if dataset_keyword_table:
return dataset_keyword_table
return None
def index_struct_dict(self):
return json.loads(self.index_struct) if self.index_struct else None
def created_by_account(self):
return Account.query.get(self.created_by)
def latest_process_rule(self):
return DatasetProcessRule.query.filter(DatasetProcessRule.dataset_id == self.id) \
.order_by(DatasetProcessRule.created_at.desc()).first()
def app_count(self):
return db.session.query(func.count(AppDatasetJoin.id)).filter(AppDatasetJoin.dataset_id == self.id).scalar()
def document_count(self):
return db.session.query(func.count(Document.id)).filter(Document.dataset_id == self.id).scalar()
def available_document_count(self):
return db.session.query(func.count(Document.id)).filter(
Document.dataset_id == self.id,
Document.indexing_status == 'completed',
Document.enabled == True,
Document.archived == False
).scalar()
def available_segment_count(self):
return db.session.query(func.count(DocumentSegment.id)).filter(
DocumentSegment.dataset_id == self.id,
DocumentSegment.status == 'completed',
DocumentSegment.enabled == True
).scalar()
def word_count(self):
return Document.query.with_entities(func.coalesce(func.sum(Document.word_count))) \
.filter(Document.dataset_id == self.id).scalar()
def doc_form(self):
document = db.session.query(Document).filter(
Document.dataset_id == self.id).first()
if document:
return document.doc_form
return None
def retrieval_model_dict(self):
default_retrieval_model = {
'search_method': 'semantic_search',
'reranking_enable': False,
'reranking_model': {
'reranking_provider_name': '',
'reranking_model_name': ''
},
'top_k': 2,
'score_threshold_enabled': False
}
return self.retrieval_model if self.retrieval_model else default_retrieval_model
def gen_collection_name_by_id(dataset_id: str) -> str:
normalized_dataset_id = dataset_id.replace("-", "_")
return f'Vector_index_{normalized_dataset_id}_Node'
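Worked example: dashes in the dataset id are normalized to underscores.
gen_collection_name_by_id("9f8b3c1a-0d2e-4f56-8a7b-1c2d3e4f5a6b")
# -> 'Vector_index_9f8b3c1a_0d2e_4f56_8a7b_1c2d3e4f5a6b_Node'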
class Document(db.Model):
__tablename__ = 'documents'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_pkey'),
db.Index('document_dataset_id_idx', 'dataset_id'),
db.Index('document_is_paused_idx', 'is_paused'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
data_source_type = db.Column(db.String(255), nullable=False)
data_source_info = db.Column(db.Text, nullable=True)
dataset_process_rule_id = db.Column(UUID, nullable=True)
batch = db.Column(db.String(255), nullable=False)
name = db.Column(db.String(255), nullable=False)
created_from = db.Column(db.String(255), nullable=False)
created_by = db.Column(UUID, nullable=False)
created_api_request_id = db.Column(UUID, nullable=True)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
# start processing
processing_started_at = db.Column(db.DateTime, nullable=True)
# parsing
file_id = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=True)
parsing_completed_at = db.Column(db.DateTime, nullable=True)
# cleaning
cleaning_completed_at = db.Column(db.DateTime, nullable=True)
# split
splitting_completed_at = db.Column(db.DateTime, nullable=True)
# indexing
tokens = db.Column(db.Integer, nullable=True)
indexing_latency = db.Column(db.Float, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
# pause
is_paused = db.Column(db.Boolean, nullable=True, server_default=db.text('false'))
paused_by = db.Column(UUID, nullable=True)
paused_at = db.Column(db.DateTime, nullable=True)
# error
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
# basic fields
indexing_status = db.Column(db.String(
255), nullable=False, server_default=db.text("'waiting'::character varying"))
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
archived = db.Column(db.Boolean, nullable=False,
server_default=db.text('false'))
archived_reason = db.Column(db.String(255), nullable=True)
archived_by = db.Column(UUID, nullable=True)
archived_at = db.Column(db.DateTime, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
doc_type = db.Column(db.String(40), nullable=True)
doc_metadata = db.Column(db.JSON, nullable=True)
doc_form = db.Column(db.String(
255), nullable=False, server_default=db.text("'text_model'::character varying"))
doc_language = db.Column(db.String(255), nullable=True)
DATA_SOURCES = ['upload_file', 'notion_import']
def display_status(self):
status = None
if self.indexing_status == 'waiting':
status = 'queuing'
elif self.indexing_status not in ['completed', 'error', 'waiting'] and self.is_paused:
status = 'paused'
elif self.indexing_status in ['parsing', 'cleaning', 'splitting', 'indexing']:
status = 'indexing'
elif self.indexing_status == 'error':
status = 'error'
elif self.indexing_status == 'completed' and not self.archived and self.enabled:
status = 'available'
elif self.indexing_status == 'completed' and not self.archived and not self.enabled:
status = 'disabled'
elif self.indexing_status == 'completed' and self.archived:
status = 'archived'
return status
def data_source_info_dict(self):
if self.data_source_info:
try:
data_source_info_dict = json.loads(self.data_source_info)
except JSONDecodeError:
data_source_info_dict = {}
return data_source_info_dict
return None
def data_source_detail_dict(self):
if self.data_source_info:
if self.data_source_type == 'upload_file':
data_source_info_dict = json.loads(self.data_source_info)
file_detail = db.session.query(UploadFile). \
filter(UploadFile.id == data_source_info_dict['upload_file_id']). \
one_or_none()
if file_detail:
return {
'upload_file': {
'id': file_detail.id,
'name': file_detail.name,
'size': file_detail.size,
'extension': file_detail.extension,
'mime_type': file_detail.mime_type,
'created_by': file_detail.created_by,
'created_at': file_detail.created_at.timestamp()
}
}
elif self.data_source_type == 'notion_import':
return json.loads(self.data_source_info)
return {}
def average_segment_length(self):
if self.word_count and self.word_count != 0 and self.segment_count and self.segment_count != 0:
return self.word_count // self.segment_count
return 0
def dataset_process_rule(self):
if self.dataset_process_rule_id:
return DatasetProcessRule.query.get(self.dataset_process_rule_id)
return None
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).one_or_none()
def segment_count(self):
return DocumentSegment.query.filter(DocumentSegment.document_id == self.id).count()
def hit_count(self):
return DocumentSegment.query.with_entities(func.coalesce(func.sum(DocumentSegment.hit_count))) \
.filter(DocumentSegment.document_id == self.id).scalar()
class DocumentSegment(db.Model):
__tablename__ = 'document_segments'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_segment_pkey'),
db.Index('document_segment_dataset_id_idx', 'dataset_id'),
db.Index('document_segment_document_id_idx', 'document_id'),
db.Index('document_segment_tenant_dataset_idx', 'dataset_id', 'tenant_id'),
db.Index('document_segment_tenant_document_idx', 'document_id', 'tenant_id'),
db.Index('document_segment_dataset_node_idx', 'dataset_id', 'index_node_id'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
document_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
content = db.Column(db.Text, nullable=False)
answer = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=False)
tokens = db.Column(db.Integer, nullable=False)
# indexing fields
keywords = db.Column(db.JSON, nullable=True)
index_node_id = db.Column(db.String(255), nullable=True)
index_node_hash = db.Column(db.String(255), nullable=True)
# basic fields
hit_count = db.Column(db.Integer, nullable=False, default=0)
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
status = db.Column(db.String(255), nullable=False,
server_default=db.text("'waiting'::character varying"))
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
indexing_at = db.Column(db.DateTime, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).first()
def document(self):
return db.session.query(Document).filter(Document.id == self.document_id).first()
def previous_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position - 1
).first()
def next_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position + 1
).first()
The provided code snippet includes the necessary dependencies for implementing the `clean_notion_document_task` function. Write a Python function `def clean_notion_document_task(document_ids: list[str], dataset_id: str)` to solve the following problem:
Clean documents when they are deleted from a Notion import. :param document_ids: document ids :param dataset_id: dataset id Usage: clean_notion_document_task.delay(document_ids, dataset_id)
Here is the function:
@shared_task(queue='dataset')  # queue name assumed; mirrors the Celery registration implied by `.delay()`
def clean_notion_document_task(document_ids: list[str], dataset_id: str):
    """
    Clean documents when they are deleted from a Notion import.
    :param document_ids: document ids
    :param dataset_id: dataset id
    Usage: clean_notion_document_task.delay(document_ids, dataset_id)
    """
logging.info(click.style('Start clean document when import from notion document deleted: {}'.format(dataset_id), fg='green'))
start_at = time.perf_counter()
try:
dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
if not dataset:
raise Exception('Document has no dataset')
index_type = dataset.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
for document_id in document_ids:
document = db.session.query(Document).filter(
    Document.id == document_id
).first()
if document:
    db.session.delete(document)
segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document_id).all()
index_node_ids = [segment.index_node_id for segment in segments]
index_processor.clean(dataset, index_node_ids)
for segment in segments:
db.session.delete(segment)
db.session.commit()
end_at = time.perf_counter()
logging.info(
click.style('Clean document when import from notion document deleted end :: {} latency: {}'.format(
dataset_id, end_at - start_at),
fg='green'))
except Exception:
logging.exception("Cleaned document when import form notion document deleted failed") | Clean document when document deleted. :param document_ids: document ids :param dataset_id: dataset id Usage: clean_notion_document_task.delay(document_ids, dataset_id) |
17,032 | import datetime
import logging
import time
import uuid
from typing import cast
import click
from celery import shared_task
from sqlalchemy import func
from core.indexing_runner import IndexingRunner
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from libs import helper
from models.dataset import Dataset, Document, DocumentSegment
class IndexingRunner:
def __init__(self):
self.storage = storage
self.model_manager = ModelManager()
def run(self, dataset_documents: list[DatasetDocument]):
"""Run the indexing process."""
for dataset_document in dataset_documents:
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
# extract
text_docs = self._extract(index_processor, dataset_document, processing_rule.to_dict())
# transform
documents = self._transform(index_processor, dataset, text_docs, dataset_document.doc_language,
processing_rule.to_dict())
# save segment
self._load_segments(dataset, dataset_document, documents)
# load
self._load(
index_processor=index_processor,
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except ObjectDeletedError:
logging.warning('Document deleted, document id: {}'.format(dataset_document.id))
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def run_in_splitting_status(self, dataset_document: DatasetDocument):
"""Run the indexing process when the index_status is splitting."""
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get exist document_segment list and delete
document_segments = DocumentSegment.query.filter_by(
dataset_id=dataset.id,
document_id=dataset_document.id
).all()
for document_segment in document_segments:
db.session.delete(document_segment)
db.session.commit()
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
# extract
text_docs = self._extract(index_processor, dataset_document, processing_rule.to_dict())
# transform
documents = self._transform(index_processor, dataset, text_docs, dataset_document.doc_language,
processing_rule.to_dict())
# save segment
self._load_segments(dataset, dataset_document, documents)
# load
self._load(
index_processor=index_processor,
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def run_in_indexing_status(self, dataset_document: DatasetDocument):
"""Run the indexing process when the index_status is indexing."""
try:
# get dataset
dataset = Dataset.query.filter_by(
id=dataset_document.dataset_id
).first()
if not dataset:
raise ValueError("no dataset found")
# get exist document_segment list and delete
document_segments = DocumentSegment.query.filter_by(
dataset_id=dataset.id,
document_id=dataset_document.id
).all()
documents = []
if document_segments:
for document_segment in document_segments:
# transform segment to node
if document_segment.status != "completed":
document = Document(
page_content=document_segment.content,
metadata={
"doc_id": document_segment.index_node_id,
"doc_hash": document_segment.index_node_hash,
"document_id": document_segment.document_id,
"dataset_id": document_segment.dataset_id,
}
)
documents.append(document)
# build index
# get the process rule
processing_rule = db.session.query(DatasetProcessRule). \
filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id). \
first()
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
self._load(
index_processor=index_processor,
dataset=dataset,
dataset_document=dataset_document,
documents=documents
)
except DocumentIsPausedException:
raise DocumentIsPausedException('Document paused, document id: {}'.format(dataset_document.id))
except ProviderTokenNotInitError as e:
dataset_document.indexing_status = 'error'
dataset_document.error = str(e.description)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
except Exception as e:
logging.exception("consume document failed")
dataset_document.indexing_status = 'error'
dataset_document.error = str(e)
dataset_document.stopped_at = datetime.datetime.utcnow()
db.session.commit()
def indexing_estimate(self, tenant_id: str, extract_settings: list[ExtractSetting], tmp_processing_rule: dict,
doc_form: str = None, doc_language: str = 'English', dataset_id: str = None,
indexing_technique: str = 'economy') -> dict:
"""
Estimate the indexing for the document.
"""
# check document limit
features = FeatureService.get_features(tenant_id)
if features.billing.enabled:
count = len(extract_settings)
batch_upload_limit = int(current_app.config['BATCH_UPLOAD_LIMIT'])
if count > batch_upload_limit:
raise ValueError(f"You have reached the batch upload limit of {batch_upload_limit}.")
embedding_model_instance = None
if dataset_id:
dataset = Dataset.query.filter_by(
id=dataset_id
).first()
if not dataset:
raise ValueError('Dataset not found.')
if dataset.indexing_technique == 'high_quality' or indexing_technique == 'high_quality':
if dataset.embedding_model_provider:
embedding_model_instance = self.model_manager.get_model_instance(
tenant_id=tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
else:
embedding_model_instance = self.model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.TEXT_EMBEDDING,
)
else:
if indexing_technique == 'high_quality':
embedding_model_instance = self.model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.TEXT_EMBEDDING,
)
tokens = 0
preview_texts = []
total_segments = 0
total_price = 0
currency = 'USD'
index_type = doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
all_text_docs = []
for extract_setting in extract_settings:
# extract
text_docs = index_processor.extract(extract_setting, process_rule_mode=tmp_processing_rule["mode"])
all_text_docs.extend(text_docs)
processing_rule = DatasetProcessRule(
mode=tmp_processing_rule["mode"],
rules=json.dumps(tmp_processing_rule["rules"])
)
# get splitter
splitter = self._get_splitter(processing_rule, embedding_model_instance)
# split to documents
documents = self._split_to_documents_for_estimate(
text_docs=text_docs,
splitter=splitter,
processing_rule=processing_rule
)
total_segments += len(documents)
for document in documents:
if len(preview_texts) < 5:
preview_texts.append(document.page_content)
if indexing_technique == 'high_quality' or embedding_model_instance:
embedding_model_type_instance = embedding_model_instance.model_type_instance
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_type_instance)
tokens += embedding_model_type_instance.get_num_tokens(
model=embedding_model_instance.model,
credentials=embedding_model_instance.credentials,
texts=[self.filter_string(document.page_content)]
)
if doc_form and doc_form == 'qa_model':
model_instance = self.model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.LLM
)
model_type_instance = model_instance.model_type_instance
model_type_instance = cast(LargeLanguageModel, model_type_instance)
if len(preview_texts) > 0:
# qa model document
response = LLMGenerator.generate_qa_document(current_user.current_tenant_id, preview_texts[0],
doc_language)
document_qa_list = self.format_split_text(response)
price_info = model_type_instance.get_price(
model=model_instance.model,
credentials=model_instance.credentials,
price_type=PriceType.INPUT,
tokens=total_segments * 2000,
)
return {
"total_segments": total_segments * 20,
"tokens": total_segments * 2000,
"total_price": '{:f}'.format(price_info.total_amount),
"currency": price_info.currency,
"qa_preview": document_qa_list,
"preview": preview_texts
}
if embedding_model_instance:
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_instance.model_type_instance)
embedding_price_info = embedding_model_type_instance.get_price(
model=embedding_model_instance.model,
credentials=embedding_model_instance.credentials,
price_type=PriceType.INPUT,
tokens=tokens
)
total_price = '{:f}'.format(embedding_price_info.total_amount)
currency = embedding_price_info.currency
return {
"total_segments": total_segments,
"tokens": tokens,
"total_price": total_price,
"currency": currency,
"preview": preview_texts
}
def _extract(self, index_processor: BaseIndexProcessor, dataset_document: DatasetDocument, process_rule: dict) \
-> list[Document]:
# load file
if dataset_document.data_source_type not in ["upload_file", "notion_import"]:
return []
data_source_info = dataset_document.data_source_info_dict
text_docs = []
if dataset_document.data_source_type == 'upload_file':
if not data_source_info or 'upload_file_id' not in data_source_info:
raise ValueError("no upload file found")
file_detail = db.session.query(UploadFile). \
filter(UploadFile.id == data_source_info['upload_file_id']). \
one_or_none()
if file_detail:
extract_setting = ExtractSetting(
datasource_type="upload_file",
upload_file=file_detail,
document_model=dataset_document.doc_form
)
text_docs = index_processor.extract(extract_setting, process_rule_mode=process_rule['mode'])
elif dataset_document.data_source_type == 'notion_import':
if (not data_source_info or 'notion_workspace_id' not in data_source_info
or 'notion_page_id' not in data_source_info):
raise ValueError("no notion import info found")
extract_setting = ExtractSetting(
datasource_type="notion_import",
notion_info={
"notion_workspace_id": data_source_info['notion_workspace_id'],
"notion_obj_id": data_source_info['notion_page_id'],
"notion_page_type": data_source_info['type'],
"document": dataset_document,
"tenant_id": dataset_document.tenant_id
},
document_model=dataset_document.doc_form
)
text_docs = index_processor.extract(extract_setting, process_rule_mode=process_rule['mode'])
# update document status to splitting
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="splitting",
extra_update_params={
DatasetDocument.word_count: sum([len(text_doc.page_content) for text_doc in text_docs]),
DatasetDocument.parsing_completed_at: datetime.datetime.utcnow()
}
)
# replace doc id to document model id
text_docs = cast(list[Document], text_docs)
for text_doc in text_docs:
text_doc.metadata['document_id'] = dataset_document.id
text_doc.metadata['dataset_id'] = dataset_document.dataset_id
return text_docs
def filter_string(self, text):
text = re.sub(r'<\|', '<', text)
text = re.sub(r'\|>', '>', text)
text = re.sub(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F\xEF\xBF\xBE]', '', text)
# Unicode U+FFFE
text = re.sub('\uFFFE', '', text)
return text
def _get_splitter(self, processing_rule: DatasetProcessRule,
embedding_model_instance: Optional[ModelInstance]) -> TextSplitter:
"""
Get the NodeParser object according to the processing rule.
"""
if processing_rule.mode == "custom":
# The user-defined segmentation rule
rules = json.loads(processing_rule.rules)
segmentation = rules["segmentation"]
if segmentation["max_tokens"] < 50 or segmentation["max_tokens"] > 1000:
raise ValueError("Custom segment length should be between 50 and 1000.")
separator = segmentation["separator"]
if separator:
separator = separator.replace('\\n', '\n')
if 'chunk_overlap' in segmentation and segmentation['chunk_overlap']:
chunk_overlap = segmentation['chunk_overlap']
else:
chunk_overlap = 0
character_splitter = FixedRecursiveCharacterTextSplitter.from_encoder(
chunk_size=segmentation["max_tokens"],
chunk_overlap=chunk_overlap,
fixed_separator=separator,
separators=["\n\n", "。", ".", " ", ""],
embedding_model_instance=embedding_model_instance
)
else:
# Automatic segmentation
character_splitter = EnhanceRecursiveCharacterTextSplitter.from_encoder(
chunk_size=DatasetProcessRule.AUTOMATIC_RULES['segmentation']['max_tokens'],
chunk_overlap=DatasetProcessRule.AUTOMATIC_RULES['segmentation']['chunk_overlap'],
separators=["\n\n", "。", ".", " ", ""],
embedding_model_instance=embedding_model_instance
)
return character_splitter
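    # For clarity, a minimal example of the custom-mode `rules` JSON this
    # method expects (illustrative values; max_tokens must be in 50..1000):
    #
    #     {
    #         "segmentation": {
    #             "separator": "\\n",
    #             "max_tokens": 500,
    #             "chunk_overlap": 50
    #         }
    #     }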
def _step_split(self, text_docs: list[Document], splitter: TextSplitter,
dataset: Dataset, dataset_document: DatasetDocument, processing_rule: DatasetProcessRule) \
-> list[Document]:
"""
Split the text documents into documents and save them to the document segment.
"""
documents = self._split_to_documents(
text_docs=text_docs,
splitter=splitter,
processing_rule=processing_rule,
tenant_id=dataset.tenant_id,
document_form=dataset_document.doc_form,
document_language=dataset_document.doc_language
)
# save node to document segment
doc_store = DatasetDocumentStore(
dataset=dataset,
user_id=dataset_document.created_by,
document_id=dataset_document.id
)
# add document segments
doc_store.add_documents(documents)
# update document status to indexing
cur_time = datetime.datetime.utcnow()
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="indexing",
extra_update_params={
DatasetDocument.cleaning_completed_at: cur_time,
DatasetDocument.splitting_completed_at: cur_time,
}
)
# update segment status to indexing
self._update_segments_by_document(
dataset_document_id=dataset_document.id,
update_params={
DocumentSegment.status: "indexing",
DocumentSegment.indexing_at: datetime.datetime.utcnow()
}
)
return documents
def _split_to_documents(self, text_docs: list[Document], splitter: TextSplitter,
processing_rule: DatasetProcessRule, tenant_id: str,
document_form: str, document_language: str) -> list[Document]:
"""
Split the text documents into nodes.
"""
all_documents = []
all_qa_documents = []
for text_doc in text_docs:
# document clean
document_text = self._document_clean(text_doc.page_content, processing_rule)
text_doc.page_content = document_text
# parse document to nodes
documents = splitter.split_documents([text_doc])
split_documents = []
for document_node in documents:
if document_node.page_content.strip():
doc_id = str(uuid.uuid4())
hash = helper.generate_text_hash(document_node.page_content)
document_node.metadata['doc_id'] = doc_id
document_node.metadata['doc_hash'] = hash
# delete splitter character
page_content = document_node.page_content
if page_content.startswith(".") or page_content.startswith("。"):
    page_content = page_content[1:]
document_node.page_content = page_content
if document_node.page_content:
split_documents.append(document_node)
all_documents.extend(split_documents)
# processing qa document
if document_form == 'qa_model':
for i in range(0, len(all_documents), 10):
threads = []
sub_documents = all_documents[i:i + 10]
for doc in sub_documents:
document_format_thread = threading.Thread(target=self.format_qa_document, kwargs={
'flask_app': current_app._get_current_object(),
'tenant_id': tenant_id, 'document_node': doc, 'all_qa_documents': all_qa_documents,
'document_language': document_language})
threads.append(document_format_thread)
document_format_thread.start()
for thread in threads:
thread.join()
return all_qa_documents
return all_documents
def format_qa_document(self, flask_app: Flask, tenant_id: str, document_node, all_qa_documents, document_language):
format_documents = []
if document_node.page_content is None or not document_node.page_content.strip():
return
with flask_app.app_context():
try:
# qa model document
response = LLMGenerator.generate_qa_document(tenant_id, document_node.page_content, document_language)
document_qa_list = self.format_split_text(response)
qa_documents = []
for result in document_qa_list:
qa_document = Document(page_content=result['question'], metadata=document_node.metadata.copy())
doc_id = str(uuid.uuid4())
hash = helper.generate_text_hash(result['question'])
qa_document.metadata['answer'] = result['answer']
qa_document.metadata['doc_id'] = doc_id
qa_document.metadata['doc_hash'] = hash
qa_documents.append(qa_document)
format_documents.extend(qa_documents)
except Exception as e:
logging.exception(e)
all_qa_documents.extend(format_documents)
def _split_to_documents_for_estimate(self, text_docs: list[Document], splitter: TextSplitter,
processing_rule: DatasetProcessRule) -> list[Document]:
"""
Split the text documents into nodes.
"""
all_documents = []
for text_doc in text_docs:
# document clean
document_text = self._document_clean(text_doc.page_content, processing_rule)
text_doc.page_content = document_text
# parse document to nodes
documents = splitter.split_documents([text_doc])
split_documents = []
for document in documents:
if document.page_content is None or not document.page_content.strip():
continue
doc_id = str(uuid.uuid4())
hash = helper.generate_text_hash(document.page_content)
document.metadata['doc_id'] = doc_id
document.metadata['doc_hash'] = hash
split_documents.append(document)
all_documents.extend(split_documents)
return all_documents
def _document_clean(self, text: str, processing_rule: DatasetProcessRule) -> str:
"""
Clean the document text according to the processing rules.
"""
if processing_rule.mode == "automatic":
rules = DatasetProcessRule.AUTOMATIC_RULES
else:
rules = json.loads(processing_rule.rules) if processing_rule.rules else {}
if 'pre_processing_rules' in rules:
pre_processing_rules = rules["pre_processing_rules"]
for pre_processing_rule in pre_processing_rules:
if pre_processing_rule["id"] == "remove_extra_spaces" and pre_processing_rule["enabled"] is True:
# Remove extra spaces
pattern = r'\n{3,}'
text = re.sub(pattern, '\n\n', text)
pattern = r'[\t\f\r\x20\u00a0\u1680\u180e\u2000-\u200a\u202f\u205f\u3000]{2,}'
text = re.sub(pattern, ' ', text)
elif pre_processing_rule["id"] == "remove_urls_emails" and pre_processing_rule["enabled"] is True:
# Remove email
pattern = r'([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)'
text = re.sub(pattern, '', text)
# Remove URL
pattern = r'https?://[^\s]+'
text = re.sub(pattern, '', text)
return text
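    # Illustrative `rules` payload with the pre-processing toggles this
    # method reads (values are hypothetical):
    #
    #     {
    #         "pre_processing_rules": [
    #             {"id": "remove_extra_spaces", "enabled": True},
    #             {"id": "remove_urls_emails", "enabled": False}
    #         ]
    #     }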
def format_split_text(self, text):
regex = r"Q\d+:\s*(.*?)\s*A\d+:\s*([\s\S]*?)(?=Q\d+:|$)"
matches = re.findall(regex, text, re.UNICODE)
return [
{
"question": q,
"answer": re.sub(r"\n\s*", "\n", a.strip())
}
for q, a in matches if q and a
]
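    # Worked example of the Q/A parsing above (illustrative input):
    #     text = "Q1: What is Celery?\nA1: A distributed task queue.\nQ2: Who uses it?\nA2: Many apps."
    #     format_split_text(text)
    #     # -> [{'question': 'What is Celery?', 'answer': 'A distributed task queue.'},
    #     #     {'question': 'Who uses it?', 'answer': 'Many apps.'}]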
def _load(self, index_processor: BaseIndexProcessor, dataset: Dataset,
dataset_document: DatasetDocument, documents: list[Document]) -> None:
"""
insert index and update document/segment status to completed
"""
embedding_model_instance = None
if dataset.indexing_technique == 'high_quality':
embedding_model_instance = self.model_manager.get_model_instance(
tenant_id=dataset.tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
# chunk nodes by chunk size
indexing_start_at = time.perf_counter()
tokens = 0
chunk_size = 100
embedding_model_type_instance = None
if embedding_model_instance:
embedding_model_type_instance = embedding_model_instance.model_type_instance
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_type_instance)
for i in range(0, len(documents), chunk_size):
# check document is paused
self._check_document_paused_status(dataset_document.id)
chunk_documents = documents[i:i + chunk_size]
if dataset.indexing_technique == 'high_quality' or embedding_model_type_instance:
tokens += sum(
embedding_model_type_instance.get_num_tokens(
embedding_model_instance.model,
embedding_model_instance.credentials,
[document.page_content]
)
for document in chunk_documents
)
# load index
index_processor.load(dataset, chunk_documents)
db.session.add(dataset)
document_ids = [document.metadata['doc_id'] for document in chunk_documents]
db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == dataset_document.id,
DocumentSegment.index_node_id.in_(document_ids),
DocumentSegment.status == "indexing"
).update({
DocumentSegment.status: "completed",
DocumentSegment.enabled: True,
DocumentSegment.completed_at: datetime.datetime.utcnow()
})
db.session.commit()
indexing_end_at = time.perf_counter()
# update document status to completed
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="completed",
extra_update_params={
DatasetDocument.tokens: tokens,
DatasetDocument.completed_at: datetime.datetime.utcnow(),
DatasetDocument.indexing_latency: indexing_end_at - indexing_start_at,
}
)
def _check_document_paused_status(self, document_id: str):
indexing_cache_key = 'document_{}_is_paused'.format(document_id)
result = redis_client.get(indexing_cache_key)
if result:
raise DocumentIsPausedException()
def _update_document_index_status(self, document_id: str, after_indexing_status: str,
extra_update_params: Optional[dict] = None) -> None:
"""
Update the document indexing status.
"""
count = DatasetDocument.query.filter_by(id=document_id, is_paused=True).count()
if count > 0:
raise DocumentIsPausedException()
document = DatasetDocument.query.filter_by(id=document_id).first()
if not document:
raise DocumentIsDeletedPausedException()
update_params = {
DatasetDocument.indexing_status: after_indexing_status
}
if extra_update_params:
update_params.update(extra_update_params)
DatasetDocument.query.filter_by(id=document_id).update(update_params)
db.session.commit()
def _update_segments_by_document(self, dataset_document_id: str, update_params: dict) -> None:
"""
Update the document segment by document id.
"""
DocumentSegment.query.filter_by(document_id=dataset_document_id).update(update_params)
db.session.commit()
def batch_add_segments(self, segments: list[DocumentSegment], dataset: Dataset):
"""
Batch add segments index processing
"""
documents = []
for segment in segments:
document = Document(
page_content=segment.content,
metadata={
"doc_id": segment.index_node_id,
"doc_hash": segment.index_node_hash,
"document_id": segment.document_id,
"dataset_id": segment.dataset_id,
}
)
documents.append(document)
# save vector index
index_type = dataset.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
index_processor.load(dataset, documents)
def _transform(self, index_processor: BaseIndexProcessor, dataset: Dataset,
text_docs: list[Document], doc_language: str, process_rule: dict) -> list[Document]:
# get embedding model instance
embedding_model_instance = None
if dataset.indexing_technique == 'high_quality':
if dataset.embedding_model_provider:
embedding_model_instance = self.model_manager.get_model_instance(
tenant_id=dataset.tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
else:
embedding_model_instance = self.model_manager.get_default_model_instance(
tenant_id=dataset.tenant_id,
model_type=ModelType.TEXT_EMBEDDING,
)
documents = index_processor.transform(text_docs, embedding_model_instance=embedding_model_instance,
process_rule=process_rule, tenant_id=dataset.tenant_id,
doc_language=doc_language)
return documents
def _load_segments(self, dataset, dataset_document, documents):
# save node to document segment
doc_store = DatasetDocumentStore(
dataset=dataset,
user_id=dataset_document.created_by,
document_id=dataset_document.id
)
# add document segments
doc_store.add_documents(documents)
# update document status to indexing
cur_time = datetime.datetime.utcnow()
self._update_document_index_status(
document_id=dataset_document.id,
after_indexing_status="indexing",
extra_update_params={
DatasetDocument.cleaning_completed_at: cur_time,
DatasetDocument.splitting_completed_at: cur_time,
}
)
# update segment status to indexing
self._update_segments_by_document(
dataset_document_id=dataset_document.id,
update_params={
DocumentSegment.status: "indexing",
DocumentSegment.indexing_at: datetime.datetime.utcnow()
}
)
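A hypothetical driver for the runner (how the waiting documents are selected is assumed; upstream this would typically run inside a Celery task):
runner = IndexingRunner()
waiting_documents = DatasetDocument.query.filter_by(indexing_status='waiting').all()
if waiting_documents:
    runner.run(waiting_documents)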
class ModelManager:
def __init__(self) -> None:
self._provider_manager = ProviderManager()
def get_model_instance(self, tenant_id: str, provider: str, model_type: ModelType, model: str) -> ModelInstance:
"""
Get model instance
:param tenant_id: tenant id
:param provider: provider name
:param model_type: model type
:param model: model name
:return:
"""
if not provider:
return self.get_default_model_instance(tenant_id, model_type)
provider_model_bundle = self._provider_manager.get_provider_model_bundle(
tenant_id=tenant_id,
provider=provider,
model_type=model_type
)
return ModelInstance(provider_model_bundle, model)
def get_default_model_instance(self, tenant_id: str, model_type: ModelType) -> ModelInstance:
"""
Get default model instance
:param tenant_id: tenant id
:param model_type: model type
:return:
"""
default_model_entity = self._provider_manager.get_default_model(
tenant_id=tenant_id,
model_type=model_type
)
if not default_model_entity:
raise ProviderTokenNotInitError(f"Default model not found for {model_type}")
return self.get_model_instance(
tenant_id=tenant_id,
provider=default_model_entity.provider.provider,
model_type=model_type,
model=default_model_entity.model
)
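Usage sketch, mirroring how the indexing code above resolves an embedding model for a dataset:
manager = ModelManager()
embedding_model_instance = manager.get_model_instance(
    tenant_id=dataset.tenant_id,
    provider=dataset.embedding_model_provider,
    model_type=ModelType.TEXT_EMBEDDING,
    model=dataset.embedding_model
)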
class ModelType(Enum):
"""
Enum class for model type.
"""
LLM = "llm"
TEXT_EMBEDDING = "text-embedding"
RERANK = "rerank"
SPEECH2TEXT = "speech2text"
MODERATION = "moderation"
TTS = "tts"
TEXT2IMG = "text2img"
@classmethod
def value_of(cls, origin_model_type: str) -> "ModelType":
"""
Get model type from origin model type.
:return: model type
"""
if origin_model_type == 'text-generation' or origin_model_type == cls.LLM.value:
return cls.LLM
elif origin_model_type == 'embeddings' or origin_model_type == cls.TEXT_EMBEDDING.value:
return cls.TEXT_EMBEDDING
elif origin_model_type == 'reranking' or origin_model_type == cls.RERANK.value:
return cls.RERANK
elif origin_model_type == 'speech2text' or origin_model_type == cls.SPEECH2TEXT.value:
return cls.SPEECH2TEXT
elif origin_model_type == 'tts' or origin_model_type == cls.TTS.value:
return cls.TTS
elif origin_model_type == 'text2img' or origin_model_type == cls.TEXT2IMG.value:
return cls.TEXT2IMG
elif origin_model_type == cls.MODERATION.value:
return cls.MODERATION
else:
raise ValueError(f'invalid origin model type {origin_model_type}')
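    # Examples of the mapping:
    #     ModelType.value_of('embeddings')      -> ModelType.TEXT_EMBEDDING
    #     ModelType.value_of('text-generation') -> ModelType.LLM
    #     ModelType.value_of('rerank')          -> ModelType.RERANK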
def to_origin_model_type(self) -> str:
"""
Get origin model type from model type.
:return: origin model type
"""
if self == self.LLM:
return 'text-generation'
elif self == self.TEXT_EMBEDDING:
return 'embeddings'
elif self == self.RERANK:
return 'reranking'
elif self == self.SPEECH2TEXT:
return 'speech2text'
elif self == self.TTS:
return 'tts'
elif self == self.MODERATION:
return 'moderation'
elif self == self.TEXT2IMG:
return 'text2img'
else:
raise ValueError(f'invalid model type {self}')
class TextEmbeddingModel(AIModel):
"""
Model class for text embedding model.
"""
model_type: ModelType = ModelType.TEXT_EMBEDDING
def invoke(self, model: str, credentials: dict,
texts: list[str], user: Optional[str] = None) \
-> TextEmbeddingResult:
"""
Invoke large language model
:param model: model name
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
:return: embeddings result
"""
self.started_at = time.perf_counter()
try:
return self._invoke(model, credentials, texts, user)
except Exception as e:
raise self._transform_invoke_error(e)
def _invoke(self, model: str, credentials: dict,
texts: list[str], user: Optional[str] = None) \
-> TextEmbeddingResult:
"""
Invoke large language model
:param model: model name
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
:return: embeddings result
"""
raise NotImplementedError
def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int:
"""
Get number of tokens for given prompt messages
:param model: model name
:param credentials: model credentials
:param texts: texts to embed
:return:
"""
raise NotImplementedError
def _get_context_size(self, model: str, credentials: dict) -> int:
"""
Get context size for given embedding model
:param model: model name
:param credentials: model credentials
:return: context size
"""
model_schema = self.get_model_schema(model, credentials)
if model_schema and ModelPropertyKey.CONTEXT_SIZE in model_schema.model_properties:
return model_schema.model_properties[ModelPropertyKey.CONTEXT_SIZE]
return 1000
def _get_max_chunks(self, model: str, credentials: dict) -> int:
"""
Get max chunks for given embedding model
:param model: model name
:param credentials: model credentials
:return: max chunks
"""
model_schema = self.get_model_schema(model, credentials)
if model_schema and ModelPropertyKey.MAX_CHUNKS in model_schema.model_properties:
return model_schema.model_properties[ModelPropertyKey.MAX_CHUNKS]
return 1
db = SQLAlchemy()
redis_client = redis.Redis()
class Dataset(db.Model):
__tablename__ = 'datasets'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='dataset_pkey'),
db.Index('dataset_tenant_idx', 'tenant_id'),
db.Index('retrieval_model_idx', "retrieval_model", postgresql_using='gin')
)
INDEXING_TECHNIQUE_LIST = ['high_quality', 'economy', None]
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text, nullable=True)
provider = db.Column(db.String(255), nullable=False,
server_default=db.text("'vendor'::character varying"))
permission = db.Column(db.String(255), nullable=False,
server_default=db.text("'only_me'::character varying"))
data_source_type = db.Column(db.String(255))
indexing_technique = db.Column(db.String(255), nullable=True)
index_struct = db.Column(db.Text, nullable=True)
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
embedding_model = db.Column(db.String(255), nullable=True)
embedding_model_provider = db.Column(db.String(255), nullable=True)
collection_binding_id = db.Column(UUID, nullable=True)
retrieval_model = db.Column(JSONB, nullable=True)
def dataset_keyword_table(self):
dataset_keyword_table = db.session.query(DatasetKeywordTable).filter(
DatasetKeywordTable.dataset_id == self.id).first()
if dataset_keyword_table:
return dataset_keyword_table
return None
def index_struct_dict(self):
return json.loads(self.index_struct) if self.index_struct else None
def created_by_account(self):
return Account.query.get(self.created_by)
def latest_process_rule(self):
return DatasetProcessRule.query.filter(DatasetProcessRule.dataset_id == self.id) \
.order_by(DatasetProcessRule.created_at.desc()).first()
def app_count(self):
return db.session.query(func.count(AppDatasetJoin.id)).filter(AppDatasetJoin.dataset_id == self.id).scalar()
def document_count(self):
return db.session.query(func.count(Document.id)).filter(Document.dataset_id == self.id).scalar()
def available_document_count(self):
return db.session.query(func.count(Document.id)).filter(
Document.dataset_id == self.id,
Document.indexing_status == 'completed',
Document.enabled == True,
Document.archived == False
).scalar()
def available_segment_count(self):
return db.session.query(func.count(DocumentSegment.id)).filter(
DocumentSegment.dataset_id == self.id,
DocumentSegment.status == 'completed',
DocumentSegment.enabled == True
).scalar()
def word_count(self):
return Document.query.with_entities(func.coalesce(func.sum(Document.word_count))) \
.filter(Document.dataset_id == self.id).scalar()
def doc_form(self):
document = db.session.query(Document).filter(
Document.dataset_id == self.id).first()
if document:
return document.doc_form
return None
def retrieval_model_dict(self):
default_retrieval_model = {
'search_method': 'semantic_search',
'reranking_enable': False,
'reranking_model': {
'reranking_provider_name': '',
'reranking_model_name': ''
},
'top_k': 2,
'score_threshold_enabled': False
}
return self.retrieval_model if self.retrieval_model else default_retrieval_model
def gen_collection_name_by_id(dataset_id: str) -> str:
normalized_dataset_id = dataset_id.replace("-", "_")
return f'Vector_index_{normalized_dataset_id}_Node'
class Document(db.Model):
__tablename__ = 'documents'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_pkey'),
db.Index('document_dataset_id_idx', 'dataset_id'),
db.Index('document_is_paused_idx', 'is_paused'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
data_source_type = db.Column(db.String(255), nullable=False)
data_source_info = db.Column(db.Text, nullable=True)
dataset_process_rule_id = db.Column(UUID, nullable=True)
batch = db.Column(db.String(255), nullable=False)
name = db.Column(db.String(255), nullable=False)
created_from = db.Column(db.String(255), nullable=False)
created_by = db.Column(UUID, nullable=False)
created_api_request_id = db.Column(UUID, nullable=True)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
# start processing
processing_started_at = db.Column(db.DateTime, nullable=True)
# parsing
file_id = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=True)
parsing_completed_at = db.Column(db.DateTime, nullable=True)
# cleaning
cleaning_completed_at = db.Column(db.DateTime, nullable=True)
# split
splitting_completed_at = db.Column(db.DateTime, nullable=True)
# indexing
tokens = db.Column(db.Integer, nullable=True)
indexing_latency = db.Column(db.Float, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
# pause
is_paused = db.Column(db.Boolean, nullable=True, server_default=db.text('false'))
paused_by = db.Column(UUID, nullable=True)
paused_at = db.Column(db.DateTime, nullable=True)
# error
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
# basic fields
indexing_status = db.Column(db.String(
255), nullable=False, server_default=db.text("'waiting'::character varying"))
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
archived = db.Column(db.Boolean, nullable=False,
server_default=db.text('false'))
archived_reason = db.Column(db.String(255), nullable=True)
archived_by = db.Column(UUID, nullable=True)
archived_at = db.Column(db.DateTime, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
doc_type = db.Column(db.String(40), nullable=True)
doc_metadata = db.Column(db.JSON, nullable=True)
doc_form = db.Column(db.String(
255), nullable=False, server_default=db.text("'text_model'::character varying"))
doc_language = db.Column(db.String(255), nullable=True)
DATA_SOURCES = ['upload_file', 'notion_import']
def display_status(self):
status = None
if self.indexing_status == 'waiting':
status = 'queuing'
elif self.indexing_status not in ['completed', 'error', 'waiting'] and self.is_paused:
status = 'paused'
elif self.indexing_status in ['parsing', 'cleaning', 'splitting', 'indexing']:
status = 'indexing'
elif self.indexing_status == 'error':
status = 'error'
elif self.indexing_status == 'completed' and not self.archived and self.enabled:
status = 'available'
elif self.indexing_status == 'completed' and not self.archived and not self.enabled:
status = 'disabled'
elif self.indexing_status == 'completed' and self.archived:
status = 'archived'
return status
def data_source_info_dict(self):
if self.data_source_info:
try:
data_source_info_dict = json.loads(self.data_source_info)
except JSONDecodeError:
data_source_info_dict = {}
return data_source_info_dict
return None
def data_source_detail_dict(self):
if self.data_source_info:
if self.data_source_type == 'upload_file':
data_source_info_dict = json.loads(self.data_source_info)
file_detail = db.session.query(UploadFile). \
filter(UploadFile.id == data_source_info_dict['upload_file_id']). \
one_or_none()
if file_detail:
return {
'upload_file': {
'id': file_detail.id,
'name': file_detail.name,
'size': file_detail.size,
'extension': file_detail.extension,
'mime_type': file_detail.mime_type,
'created_by': file_detail.created_by,
'created_at': file_detail.created_at.timestamp()
}
}
elif self.data_source_type == 'notion_import':
return json.loads(self.data_source_info)
return {}
def average_segment_length(self):
if self.word_count and self.word_count != 0 and self.segment_count and self.segment_count != 0:
return self.word_count // self.segment_count
return 0
def dataset_process_rule(self):
if self.dataset_process_rule_id:
return DatasetProcessRule.query.get(self.dataset_process_rule_id)
return None
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).one_or_none()
def segment_count(self):
return DocumentSegment.query.filter(DocumentSegment.document_id == self.id).count()
def hit_count(self):
return DocumentSegment.query.with_entities(func.coalesce(func.sum(DocumentSegment.hit_count), 0)) \
.filter(DocumentSegment.document_id == self.id).scalar()
class DocumentSegment(db.Model):
__tablename__ = 'document_segments'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_segment_pkey'),
db.Index('document_segment_dataset_id_idx', 'dataset_id'),
db.Index('document_segment_document_id_idx', 'document_id'),
db.Index('document_segment_tenant_dataset_idx', 'dataset_id', 'tenant_id'),
db.Index('document_segment_tenant_document_idx', 'document_id', 'tenant_id'),
db.Index('document_segment_dataset_node_idx', 'dataset_id', 'index_node_id'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
document_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
content = db.Column(db.Text, nullable=False)
answer = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=False)
tokens = db.Column(db.Integer, nullable=False)
# indexing fields
keywords = db.Column(db.JSON, nullable=True)
index_node_id = db.Column(db.String(255), nullable=True)
index_node_hash = db.Column(db.String(255), nullable=True)
# basic fields
hit_count = db.Column(db.Integer, nullable=False, default=0)
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
status = db.Column(db.String(255), nullable=False,
server_default=db.text("'waiting'::character varying"))
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
indexing_at = db.Column(db.DateTime, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).first()
def document(self):
return db.session.query(Document).filter(Document.id == self.document_id).first()
def previous_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position - 1
).first()
def next_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position + 1
).first()
The provided code snippet includes the necessary dependencies for implementing the `batch_create_segment_to_index_task` function. Write a Python function `def batch_create_segment_to_index_task(job_id: str, content: list, dataset_id: str, document_id: str, tenant_id: str, user_id: str)` to solve the following problem:
Async batch create segment to index :param job_id: :param content: :param dataset_id: :param document_id: :param tenant_id: :param user_id: Usage: batch_create_segment_to_index_task.delay(job_id, content, dataset_id, document_id, tenant_id, user_id)
Here is the function:
def batch_create_segment_to_index_task(job_id: str, content: list, dataset_id: str, document_id: str,
tenant_id: str, user_id: str):
"""
Async batch create segment to index
:param job_id:
:param content:
:param dataset_id:
:param document_id:
:param tenant_id:
:param user_id:
Usage: batch_create_segment_to_index_task.delay(job_id, content, dataset_id, document_id, tenant_id, user_id)
"""
logging.info(click.style('Start batch create segment job: {}'.format(job_id), fg='green'))
start_at = time.perf_counter()
indexing_cache_key = 'segment_batch_import_{}'.format(job_id)
try:
dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
if not dataset:
raise ValueError('Dataset does not exist.')
dataset_document = db.session.query(Document).filter(Document.id == document_id).first()
if not dataset_document:
raise ValueError('Document does not exist.')
if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != 'completed':
raise ValueError('Document is not available.')
document_segments = []
embedding_model = None
if dataset.indexing_technique == 'high_quality':
model_manager = ModelManager()
embedding_model = model_manager.get_model_instance(
tenant_id=dataset.tenant_id,
provider=dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=dataset.embedding_model
)
model_type_instance = embedding_model.model_type_instance
model_type_instance = cast(TextEmbeddingModel, model_type_instance)
for segment in content:
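# Note: the next line rebinds the name 'content' (the list being iterated). Iteration is unaffected because the for-loop already holds its iterator, but a distinct name such as 'segment_content' would be clearer.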
content = segment['content']
doc_id = str(uuid.uuid4())
segment_hash = helper.generate_text_hash(content)
# calc embedding use tokens
tokens = model_type_instance.get_num_tokens(
model=embedding_model.model,
credentials=embedding_model.credentials,
texts=[content]
) if embedding_model else 0
max_position = db.session.query(func.max(DocumentSegment.position)).filter(
DocumentSegment.document_id == dataset_document.id
).scalar()
segment_document = DocumentSegment(
tenant_id=tenant_id,
dataset_id=dataset_id,
document_id=document_id,
index_node_id=doc_id,
index_node_hash=segment_hash,
position=max_position + 1 if max_position else 1,
content=content,
word_count=len(content),
tokens=tokens,
created_by=user_id,
indexing_at=datetime.datetime.utcnow(),
status='completed',
completed_at=datetime.datetime.utcnow()
)
if dataset_document.doc_form == 'qa_model':
segment_document.answer = segment['answer']
db.session.add(segment_document)
document_segments.append(segment_document)
# add index to db
indexing_runner = IndexingRunner()
indexing_runner.batch_add_segments(document_segments, dataset)
db.session.commit()
redis_client.setex(indexing_cache_key, 600, 'completed')
end_at = time.perf_counter()
logging.info(click.style('Segment batch create job {} succeeded, latency: {}'.format(job_id, end_at - start_at), fg='green'))
except Exception as e:
logging.exception("Segments batch created index failed:{}".format(str(e)))
redis_client.setex(indexing_cache_key, 600, 'error') | Async batch create segment to index :param job_id: :param content: :param dataset_id: :param document_id: :param tenant_id: :param user_id: Usage: batch_create_segment_to_index_task.delay(job_id, content, dataset_id, document_id, tenant_id, user_id) |
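For context, here is a minimal sketch of how a task like this is usually registered and enqueued with Celery. The @shared_task queue name and the caller-side Redis seeding are assumptions for illustration, not taken from the snippet above.
import uuid
from celery import shared_task

@shared_task(queue='dataset')  # queue name is an assumption
def batch_create_segment_to_index_task(job_id, content, dataset_id, document_id,
                                       tenant_id, user_id):
    ...  # body as defined above

# Caller side (sketch): seed the status key the task later flips to
# 'completed' or 'error', then enqueue. The ids would come from the request.
job_id = str(uuid.uuid4())
content = [{'content': 'Some segment text.', 'answer': 'Optional QA answer.'}]
redis_client.setex('segment_batch_import_{}'.format(job_id), 600, 'waiting')
batch_create_segment_to_index_task.delay(job_id, content, dataset_id,
                                         document_id, tenant_id, user_id)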
17,033 | import logging
import time
import click
from celery import shared_task
from flask import current_app, render_template
from extensions.ext_mail import mail
mail = Mail()
The provided code snippet includes the necessary dependencies for implementing the `send_invite_member_mail_task` function. Write a Python function `def send_invite_member_mail_task(language: str, to: str, token: str, inviter_name: str, workspace_name: str)` to solve the following problem:
Async send invite member mail :param language :param to :param token :param inviter_name :param workspace_name Usage: send_invite_member_mail_task.delay(language, to, token, inviter_name, workspace_name)
Here is the function:
def send_invite_member_mail_task(language: str, to: str, token: str, inviter_name: str, workspace_name: str):
"""
Async send invite member mail
:param language
:param to
:param token
:param inviter_name
:param workspace_name
Usage: send_invite_member_mail_task.delay(language, to, token, inviter_name, workspace_name)
"""
if not mail.is_inited():
return
logging.info(click.style('Start send invite member mail to {} in workspace {}'.format(to, workspace_name),
fg='green'))
start_at = time.perf_counter()
# TODO send invite member mail using different languages
try:
url = f'{current_app.config.get("CONSOLE_WEB_URL")}/activate?token={token}'
if language == 'zh-Hans':
html_content = render_template('invite_member_mail_template_zh-CN.html',
to=to,
inviter_name=inviter_name,
workspace_name=workspace_name,
url=url)
mail.send(to=to, subject="立即加入 Dify 工作空间", html=html_content)
else:
html_content = render_template('invite_member_mail_template_en-US.html',
to=to,
inviter_name=inviter_name,
workspace_name=workspace_name,
url=url)
mail.send(to=to, subject="Join Dify Workspace Now", html=html_content)
end_at = time.perf_counter()
logging.info(
click.style('Send invite member mail to {} succeeded: latency: {}'.format(to, end_at - start_at),
fg='green'))
except Exception:
logging.exception("Send invite member mail to {} failed".format(to)) | Async Send invite member mail :param language :param to :param token :param inviter_name :param workspace_name Usage: send_invite_member_mail_task.delay(langauge, to, token, inviter_name, workspace_name) |
17,034 | import logging
import time
import click
from celery import shared_task
from werkzeug.exceptions import NotFound
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import DocumentSegment
class IndexProcessorFactory:
"""IndexProcessorInit.
"""
def __init__(self, index_type: str):
self._index_type = index_type
def init_index_processor(self) -> BaseIndexProcessor:
"""Init index processor."""
if not self._index_type:
raise ValueError("Index type must be specified.")
if self._index_type == IndexType.PARAGRAPH_INDEX.value:
return ParagraphIndexProcessor()
elif self._index_type == IndexType.QA_INDEX.value:
return QAIndexProcessor()
else:
raise ValueError(f"Index type {self._index_type} is not supported.")
db = SQLAlchemy()
redis_client = redis.Redis()
class DocumentSegment(db.Model):
__tablename__ = 'document_segments'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_segment_pkey'),
db.Index('document_segment_dataset_id_idx', 'dataset_id'),
db.Index('document_segment_document_id_idx', 'document_id'),
db.Index('document_segment_tenant_dataset_idx', 'dataset_id', 'tenant_id'),
db.Index('document_segment_tenant_document_idx', 'document_id', 'tenant_id'),
db.Index('document_segment_dataset_node_idx', 'dataset_id', 'index_node_id'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
document_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
content = db.Column(db.Text, nullable=False)
answer = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=False)
tokens = db.Column(db.Integer, nullable=False)
# indexing fields
keywords = db.Column(db.JSON, nullable=True)
index_node_id = db.Column(db.String(255), nullable=True)
index_node_hash = db.Column(db.String(255), nullable=True)
# basic fields
hit_count = db.Column(db.Integer, nullable=False, default=0)
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
status = db.Column(db.String(255), nullable=False,
server_default=db.text("'waiting'::character varying"))
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
indexing_at = db.Column(db.DateTime, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).first()
def document(self):
return db.session.query(Document).filter(Document.id == self.document_id).first()
def previous_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position - 1
).first()
def next_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position + 1
).first()
The provided code snippet includes the necessary dependencies for implementing the `disable_segment_from_index_task` function. Write a Python function `def disable_segment_from_index_task(segment_id: str)` to solve the following problem:
Async disable segment from index :param segment_id: Usage: disable_segment_from_index_task.delay(segment_id)
Here is the function:
def disable_segment_from_index_task(segment_id: str):
"""
Async disable segment from index
:param segment_id:
Usage: disable_segment_from_index_task.delay(segment_id)
"""
logging.info(click.style('Start disable segment from index: {}'.format(segment_id), fg='green'))
start_at = time.perf_counter()
segment = db.session.query(DocumentSegment).filter(DocumentSegment.id == segment_id).first()
if not segment:
raise NotFound('Segment not found')
if segment.status != 'completed':
raise NotFound('Segment is not completed, disable action is not allowed.')
indexing_cache_key = 'segment_{}_indexing'.format(segment.id)
try:
dataset = segment.dataset
if not dataset:
logging.info(click.style('Segment {} has no dataset, pass.'.format(segment.id), fg='cyan'))
return
dataset_document = segment.document
if not dataset_document:
logging.info(click.style('Segment {} has no document, pass.'.format(segment.id), fg='cyan'))
return
if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != 'completed':
logging.info(click.style('Segment {} document status is invalid, pass.'.format(segment.id), fg='cyan'))
return
index_type = dataset_document.doc_form
index_processor = IndexProcessorFactory(index_type).init_index_processor()
index_processor.clean(dataset, [segment.index_node_id])
end_at = time.perf_counter()
logging.info(click.style('Segment removed from index: {} latency: {}'.format(segment.id, end_at - start_at), fg='green'))
except Exception:
logging.exception("remove segment from index failed")
segment.enabled = True
db.session.commit()
finally:
redis_client.delete(indexing_cache_key) | Async disable segment from index :param segment_id: Usage: disable_segment_from_index_task.delay(segment_id) |
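A sketch of the service-side caller this task pairs with (assumed for illustration, not part of the snippet): the segment is disabled and committed first, and a short-lived guard key is set so the task's finally block has something to clear.
import datetime

segment.enabled = False
segment.disabled_at = datetime.datetime.utcnow()
db.session.commit()
# Guard key matching the 'segment_{}_indexing' key deleted in the task's finally block.
redis_client.setex('segment_{}_indexing'.format(segment.id), 600, 1)
disable_segment_from_index_task.delay(segment.id)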
17,035 | import datetime
import logging
import time
import click
from celery import shared_task
from werkzeug.exceptions import NotFound
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from core.rag.models.document import Document
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import DocumentSegment
class IndexProcessorFactory:
"""IndexProcessorInit.
"""
def __init__(self, index_type: str):
self._index_type = index_type
def init_index_processor(self) -> BaseIndexProcessor:
"""Init index processor."""
if not self._index_type:
raise ValueError("Index type must be specified.")
if self._index_type == IndexType.PARAGRAPH_INDEX.value:
return ParagraphIndexProcessor()
elif self._index_type == IndexType.QA_INDEX.value:
return QAIndexProcessor()
else:
raise ValueError(f"Index type {self._index_type} is not supported.")
class Document(BaseModel):
"""Class for storing a piece of text and associated metadata."""
page_content: str
"""Arbitrary metadata about the page content (e.g., source, relationships to other
documents, etc.).
"""
metadata: Optional[dict] = Field(default_factory=dict)
db = SQLAlchemy()
redis_client = redis.Redis()
class DocumentSegment(db.Model):
__tablename__ = 'document_segments'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_segment_pkey'),
db.Index('document_segment_dataset_id_idx', 'dataset_id'),
db.Index('document_segment_document_id_idx', 'document_id'),
db.Index('document_segment_tenant_dataset_idx', 'dataset_id', 'tenant_id'),
db.Index('document_segment_tenant_document_idx', 'document_id', 'tenant_id'),
db.Index('document_segment_dataset_node_idx', 'dataset_id', 'index_node_id'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
document_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
content = db.Column(db.Text, nullable=False)
answer = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=False)
tokens = db.Column(db.Integer, nullable=False)
# indexing fields
keywords = db.Column(db.JSON, nullable=True)
index_node_id = db.Column(db.String(255), nullable=True)
index_node_hash = db.Column(db.String(255), nullable=True)
# basic fields
hit_count = db.Column(db.Integer, nullable=False, default=0)
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
status = db.Column(db.String(255), nullable=False,
server_default=db.text("'waiting'::character varying"))
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
indexing_at = db.Column(db.DateTime, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).first()
def document(self):
return db.session.query(Document).filter(Document.id == self.document_id).first()
def previous_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position - 1
).first()
def next_segment(self):
return db.session.query(DocumentSegment).filter(
DocumentSegment.document_id == self.document_id,
DocumentSegment.position == self.position + 1
).first()
The provided code snippet includes the necessary dependencies for implementing the `enable_segment_to_index_task` function. Write a Python function `def enable_segment_to_index_task(segment_id: str)` to solve the following problem:
Async enable segment to index :param segment_id: Usage: enable_segment_to_index_task.delay(segment_id)
Here is the function:
def enable_segment_to_index_task(segment_id: str):
"""
Async enable segment to index
:param segment_id:
Usage: enable_segment_to_index_task.delay(segment_id)
"""
logging.info(click.style('Start enable segment to index: {}'.format(segment_id), fg='green'))
start_at = time.perf_counter()
segment = db.session.query(DocumentSegment).filter(DocumentSegment.id == segment_id).first()
if not segment:
raise NotFound('Segment not found')
if segment.status != 'completed':
raise NotFound('Segment is not completed, enable action is not allowed.')
indexing_cache_key = 'segment_{}_indexing'.format(segment.id)
try:
document = Document(
page_content=segment.content,
metadata={
"doc_id": segment.index_node_id,
"doc_hash": segment.index_node_hash,
"document_id": segment.document_id,
"dataset_id": segment.dataset_id,
}
)
dataset = segment.dataset
if not dataset:
logging.info(click.style('Segment {} has no dataset, pass.'.format(segment.id), fg='cyan'))
return
dataset_document = segment.document
if not dataset_document:
logging.info(click.style('Segment {} has no document, pass.'.format(segment.id), fg='cyan'))
return
if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != 'completed':
logging.info(click.style('Segment {} document status is invalid, pass.'.format(segment.id), fg='cyan'))
return
index_processor = IndexProcessorFactory(dataset_document.doc_form).init_index_processor()
# save vector index
index_processor.load(dataset, [document])
end_at = time.perf_counter()
logging.info(click.style('Segment enabled to index: {} latency: {}'.format(segment.id, end_at - start_at), fg='green'))
except Exception as e:
logging.exception("enable segment to index failed")
segment.enabled = False
segment.disabled_at = datetime.datetime.utcnow()
segment.status = 'error'
segment.error = str(e)
db.session.commit()
finally:
redis_client.delete(indexing_cache_key) | Async enable segment to index :param segment_id: Usage: enable_segment_to_index_task.delay(segment_id) |
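The enable path mirrors the disable path; the same caller-side sketch, under the same assumptions.
segment.enabled = True
segment.disabled_at = None
segment.disabled_by = None
db.session.commit()
redis_client.setex('segment_{}_indexing'.format(segment.id), 600, 1)
enable_segment_to_index_task.delay(segment.id)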
17,036 | import logging
import time
import click
from celery import shared_task
from core.rag.datasource.vdb.vector_factory import Vector
from models.dataset import Dataset
from services.dataset_service import DatasetCollectionBindingService
class Vector:
def __init__(self, dataset: Dataset, attributes: list = None):
if attributes is None:
attributes = ['doc_id', 'dataset_id', 'document_id', 'doc_hash']
self._dataset = dataset
self._embeddings = self._get_embeddings()
self._attributes = attributes
self._vector_processor = self._init_vector()
def _init_vector(self) -> BaseVector:
config = current_app.config
vector_type = config.get('VECTOR_STORE')
if self._dataset.index_struct_dict:
vector_type = self._dataset.index_struct_dict['type']
if not vector_type:
raise ValueError("Vector store must be specified.")
if vector_type == "weaviate":
from core.rag.datasource.vdb.weaviate.weaviate_vector import WeaviateConfig, WeaviateVector
if self._dataset.index_struct_dict:
class_prefix: str = self._dataset.index_struct_dict['vector_store']['class_prefix']
collection_name = class_prefix
else:
dataset_id = self._dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": 'weaviate',
"vector_store": {"class_prefix": collection_name}
}
self._dataset.index_struct = json.dumps(index_struct_dict)
return WeaviateVector(
collection_name=collection_name,
config=WeaviateConfig(
endpoint=config.get('WEAVIATE_ENDPOINT'),
api_key=config.get('WEAVIATE_API_KEY'),
batch_size=int(config.get('WEAVIATE_BATCH_SIZE'))
),
attributes=self._attributes
)
elif vector_type == "qdrant":
from core.rag.datasource.vdb.qdrant.qdrant_vector import QdrantConfig, QdrantVector
if self._dataset.collection_binding_id:
dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
filter(DatasetCollectionBinding.id == self._dataset.collection_binding_id). \
one_or_none()
if dataset_collection_binding:
collection_name = dataset_collection_binding.collection_name
else:
raise ValueError('Dataset Collection Binding does not exist!')
else:
if self._dataset.index_struct_dict:
class_prefix: str = self._dataset.index_struct_dict['vector_store']['class_prefix']
collection_name = class_prefix
else:
dataset_id = self._dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
if not self._dataset.index_struct_dict:
index_struct_dict = {
"type": 'qdrant',
"vector_store": {"class_prefix": collection_name}
}
self._dataset.index_struct = json.dumps(index_struct_dict)
return QdrantVector(
collection_name=collection_name,
group_id=self._dataset.id,
config=QdrantConfig(
endpoint=config.get('QDRANT_URL'),
api_key=config.get('QDRANT_API_KEY'),
root_path=current_app.root_path,
timeout=config.get('QDRANT_CLIENT_TIMEOUT')
)
)
elif vector_type == "milvus":
from core.rag.datasource.vdb.milvus.milvus_vector import MilvusConfig, MilvusVector
if self._dataset.index_struct_dict:
class_prefix: str = self._dataset.index_struct_dict['vector_store']['class_prefix']
collection_name = class_prefix
else:
dataset_id = self._dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": 'milvus',
"vector_store": {"class_prefix": collection_name}
}
self._dataset.index_struct = json.dumps(index_struct_dict)
return MilvusVector(
collection_name=collection_name,
config=MilvusConfig(
host=config.get('MILVUS_HOST'),
port=config.get('MILVUS_PORT'),
user=config.get('MILVUS_USER'),
password=config.get('MILVUS_PASSWORD'),
secure=config.get('MILVUS_SECURE'),
)
)
else:
raise ValueError(f"Vector store {config.get('VECTOR_STORE')} is not supported.")
def create(self, texts: list = None, **kwargs):
if texts:
embeddings = self._embeddings.embed_documents([document.page_content for document in texts])
self._vector_processor.create(
texts=texts,
embeddings=embeddings,
**kwargs
)
def add_texts(self, documents: list[Document], **kwargs):
if kwargs.get('duplicate_check', False):
documents = self._filter_duplicate_texts(documents)
embeddings = self._embeddings.embed_documents([document.page_content for document in documents])
self._vector_processor.add_texts(
documents=documents,
embeddings=embeddings,
**kwargs
)
def text_exists(self, id: str) -> bool:
return self._vector_processor.text_exists(id)
def delete_by_ids(self, ids: list[str]) -> None:
self._vector_processor.delete_by_ids(ids)
def delete_by_metadata_field(self, key: str, value: str) -> None:
self._vector_processor.delete_by_metadata_field(key, value)
def search_by_vector(
self, query: str,
**kwargs: Any
) -> list[Document]:
query_vector = self._embeddings.embed_query(query)
return self._vector_processor.search_by_vector(query_vector, **kwargs)
def search_by_full_text(
self, query: str,
**kwargs: Any
) -> list[Document]:
return self._vector_processor.search_by_full_text(query, **kwargs)
def delete(self) -> None:
self._vector_processor.delete()
def _get_embeddings(self) -> Embeddings:
model_manager = ModelManager()
embedding_model = model_manager.get_model_instance(
tenant_id=self._dataset.tenant_id,
provider=self._dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=self._dataset.embedding_model
)
return CacheEmbedding(embedding_model)
def _filter_duplicate_texts(self, texts: list[Document]) -> list[Document]:
for text in texts:
doc_id = text.metadata['doc_id']
exists_duplicate_node = self.text_exists(doc_id)
if exists_duplicate_node:
texts.remove(text)
return texts
def __getattr__(self, name):
if self._vector_processor is not None:
method = getattr(self._vector_processor, name)
if callable(method):
return method
raise AttributeError(f"'vector_processor' object has no attribute '{name}'")
class Dataset(db.Model):
__tablename__ = 'datasets'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='dataset_pkey'),
db.Index('dataset_tenant_idx', 'tenant_id'),
db.Index('retrieval_model_idx', "retrieval_model", postgresql_using='gin')
)
INDEXING_TECHNIQUE_LIST = ['high_quality', 'economy', None]
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text, nullable=True)
provider = db.Column(db.String(255), nullable=False,
server_default=db.text("'vendor'::character varying"))
permission = db.Column(db.String(255), nullable=False,
server_default=db.text("'only_me'::character varying"))
data_source_type = db.Column(db.String(255))
indexing_technique = db.Column(db.String(255), nullable=True)
index_struct = db.Column(db.Text, nullable=True)
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
embedding_model = db.Column(db.String(255), nullable=True)
embedding_model_provider = db.Column(db.String(255), nullable=True)
collection_binding_id = db.Column(UUID, nullable=True)
retrieval_model = db.Column(JSONB, nullable=True)
def dataset_keyword_table(self):
dataset_keyword_table = db.session.query(DatasetKeywordTable).filter(
DatasetKeywordTable.dataset_id == self.id).first()
if dataset_keyword_table:
return dataset_keyword_table
return None
def index_struct_dict(self):
return json.loads(self.index_struct) if self.index_struct else None
def created_by_account(self):
return Account.query.get(self.created_by)
def latest_process_rule(self):
return DatasetProcessRule.query.filter(DatasetProcessRule.dataset_id == self.id) \
.order_by(DatasetProcessRule.created_at.desc()).first()
def app_count(self):
return db.session.query(func.count(AppDatasetJoin.id)).filter(AppDatasetJoin.dataset_id == self.id).scalar()
def document_count(self):
return db.session.query(func.count(Document.id)).filter(Document.dataset_id == self.id).scalar()
def available_document_count(self):
return db.session.query(func.count(Document.id)).filter(
Document.dataset_id == self.id,
Document.indexing_status == 'completed',
Document.enabled == True,
Document.archived == False
).scalar()
def available_segment_count(self):
return db.session.query(func.count(DocumentSegment.id)).filter(
DocumentSegment.dataset_id == self.id,
DocumentSegment.status == 'completed',
DocumentSegment.enabled == True
).scalar()
def word_count(self):
return Document.query.with_entities(func.coalesce(func.sum(Document.word_count), 0)) \
.filter(Document.dataset_id == self.id).scalar()
def doc_form(self):
document = db.session.query(Document).filter(
Document.dataset_id == self.id).first()
if document:
return document.doc_form
return None
def retrieval_model_dict(self):
default_retrieval_model = {
'search_method': 'semantic_search',
'reranking_enable': False,
'reranking_model': {
'reranking_provider_name': '',
'reranking_model_name': ''
},
'top_k': 2,
'score_threshold_enabled': False
}
return self.retrieval_model if self.retrieval_model else default_retrieval_model
def gen_collection_name_by_id(dataset_id: str) -> str:
normalized_dataset_id = dataset_id.replace("-", "_")
return f'Vector_index_{normalized_dataset_id}_Node'
class DatasetCollectionBindingService:
def get_dataset_collection_binding(cls, provider_name: str, model_name: str,
collection_type: str = 'dataset') -> DatasetCollectionBinding:
dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
filter(DatasetCollectionBinding.provider_name == provider_name,
DatasetCollectionBinding.model_name == model_name,
DatasetCollectionBinding.type == collection_type). \
order_by(DatasetCollectionBinding.created_at). \
first()
if not dataset_collection_binding:
dataset_collection_binding = DatasetCollectionBinding(
provider_name=provider_name,
model_name=model_name,
collection_name=Dataset.gen_collection_name_by_id(str(uuid.uuid4())),
type=collection_type
)
db.session.add(dataset_collection_binding)
db.session.commit()
return dataset_collection_binding
def get_dataset_collection_binding_by_id_and_type(cls, collection_binding_id: str,
collection_type: str = 'dataset') -> DatasetCollectionBinding:
dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
filter(DatasetCollectionBinding.id == collection_binding_id,
DatasetCollectionBinding.type == collection_type). \
order_by(DatasetCollectionBinding.created_at). \
first()
return dataset_collection_binding
The provided code snippet includes the necessary dependencies for implementing the `delete_annotation_index_task` function. Write a Python function `def delete_annotation_index_task(annotation_id: str, app_id: str, tenant_id: str, collection_binding_id: str)` to solve the following problem:
Async delete annotation index task
Here is the function:
def delete_annotation_index_task(annotation_id: str, app_id: str, tenant_id: str,
collection_binding_id: str):
"""
Async delete annotation index task
"""
logging.info(click.style('Start delete app annotation index: {}'.format(app_id), fg='green'))
start_at = time.perf_counter()
try:
dataset_collection_binding = DatasetCollectionBindingService.get_dataset_collection_binding_by_id_and_type(
collection_binding_id,
'annotation'
)
dataset = Dataset(
id=app_id,
tenant_id=tenant_id,
indexing_technique='high_quality',
collection_binding_id=dataset_collection_binding.id
)
try:
vector = Vector(dataset, attributes=['doc_id', 'annotation_id', 'app_id'])
vector.delete_by_metadata_field('annotation_id', annotation_id)
except Exception:
logging.exception("Delete annotation index failed when annotation deleted.")
end_at = time.perf_counter()
logging.info(
click.style('App annotations index deleted : {} latency: {}'.format(app_id, end_at - start_at),
fg='green'))
except Exception as e:
logging.exception("Annotation deleted index failed:{}".format(str(e))) | Async delete annotation index task |
17,037 | import logging
import time
import click
from celery import shared_task
from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.models.document import Document
from models.dataset import Dataset
from services.dataset_service import DatasetCollectionBindingService
class Vector:
def __init__(self, dataset: Dataset, attributes: list = None):
if attributes is None:
attributes = ['doc_id', 'dataset_id', 'document_id', 'doc_hash']
self._dataset = dataset
self._embeddings = self._get_embeddings()
self._attributes = attributes
self._vector_processor = self._init_vector()
def _init_vector(self) -> BaseVector:
config = current_app.config
vector_type = config.get('VECTOR_STORE')
if self._dataset.index_struct_dict:
vector_type = self._dataset.index_struct_dict['type']
if not vector_type:
raise ValueError("Vector store must be specified.")
if vector_type == "weaviate":
from core.rag.datasource.vdb.weaviate.weaviate_vector import WeaviateConfig, WeaviateVector
if self._dataset.index_struct_dict:
class_prefix: str = self._dataset.index_struct_dict['vector_store']['class_prefix']
collection_name = class_prefix
else:
dataset_id = self._dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": 'weaviate',
"vector_store": {"class_prefix": collection_name}
}
self._dataset.index_struct = json.dumps(index_struct_dict)
return WeaviateVector(
collection_name=collection_name,
config=WeaviateConfig(
endpoint=config.get('WEAVIATE_ENDPOINT'),
api_key=config.get('WEAVIATE_API_KEY'),
batch_size=int(config.get('WEAVIATE_BATCH_SIZE'))
),
attributes=self._attributes
)
elif vector_type == "qdrant":
from core.rag.datasource.vdb.qdrant.qdrant_vector import QdrantConfig, QdrantVector
if self._dataset.collection_binding_id:
dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
filter(DatasetCollectionBinding.id == self._dataset.collection_binding_id). \
one_or_none()
if dataset_collection_binding:
collection_name = dataset_collection_binding.collection_name
else:
raise ValueError('Dataset Collection Binding does not exist!')
else:
if self._dataset.index_struct_dict:
class_prefix: str = self._dataset.index_struct_dict['vector_store']['class_prefix']
collection_name = class_prefix
else:
dataset_id = self._dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
if not self._dataset.index_struct_dict:
index_struct_dict = {
"type": 'qdrant',
"vector_store": {"class_prefix": collection_name}
}
self._dataset.index_struct = json.dumps(index_struct_dict)
return QdrantVector(
collection_name=collection_name,
group_id=self._dataset.id,
config=QdrantConfig(
endpoint=config.get('QDRANT_URL'),
api_key=config.get('QDRANT_API_KEY'),
root_path=current_app.root_path,
timeout=config.get('QDRANT_CLIENT_TIMEOUT')
)
)
elif vector_type == "milvus":
from core.rag.datasource.vdb.milvus.milvus_vector import MilvusConfig, MilvusVector
if self._dataset.index_struct_dict:
class_prefix: str = self._dataset.index_struct_dict['vector_store']['class_prefix']
collection_name = class_prefix
else:
dataset_id = self._dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": 'milvus',
"vector_store": {"class_prefix": collection_name}
}
self._dataset.index_struct = json.dumps(index_struct_dict)
return MilvusVector(
collection_name=collection_name,
config=MilvusConfig(
host=config.get('MILVUS_HOST'),
port=config.get('MILVUS_PORT'),
user=config.get('MILVUS_USER'),
password=config.get('MILVUS_PASSWORD'),
secure=config.get('MILVUS_SECURE'),
)
)
else:
raise ValueError(f"Vector store {config.get('VECTOR_STORE')} is not supported.")
def create(self, texts: list = None, **kwargs):
if texts:
embeddings = self._embeddings.embed_documents([document.page_content for document in texts])
self._vector_processor.create(
texts=texts,
embeddings=embeddings,
**kwargs
)
def add_texts(self, documents: list[Document], **kwargs):
if kwargs.get('duplicate_check', False):
documents = self._filter_duplicate_texts(documents)
embeddings = self._embeddings.embed_documents([document.page_content for document in documents])
self._vector_processor.add_texts(
documents=documents,
embeddings=embeddings,
**kwargs
)
def text_exists(self, id: str) -> bool:
return self._vector_processor.text_exists(id)
def delete_by_ids(self, ids: list[str]) -> None:
self._vector_processor.delete_by_ids(ids)
def delete_by_metadata_field(self, key: str, value: str) -> None:
self._vector_processor.delete_by_metadata_field(key, value)
def search_by_vector(
self, query: str,
**kwargs: Any
) -> list[Document]:
query_vector = self._embeddings.embed_query(query)
return self._vector_processor.search_by_vector(query_vector, **kwargs)
def search_by_full_text(
self, query: str,
**kwargs: Any
) -> list[Document]:
return self._vector_processor.search_by_full_text(query, **kwargs)
def delete(self) -> None:
self._vector_processor.delete()
def _get_embeddings(self) -> Embeddings:
model_manager = ModelManager()
embedding_model = model_manager.get_model_instance(
tenant_id=self._dataset.tenant_id,
provider=self._dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=self._dataset.embedding_model
)
return CacheEmbedding(embedding_model)
def _filter_duplicate_texts(self, texts: list[Document]) -> list[Document]:
for text in texts:
doc_id = text.metadata['doc_id']
exists_duplicate_node = self.text_exists(doc_id)
if exists_duplicate_node:
texts.remove(text)
return texts
def __getattr__(self, name):
if self._vector_processor is not None:
method = getattr(self._vector_processor, name)
if callable(method):
return method
raise AttributeError(f"'vector_processor' object has no attribute '{name}'")
class Document(BaseModel):
"""Class for storing a piece of text and associated metadata."""
page_content: str
"""Arbitrary metadata about the page content (e.g., source, relationships to other
documents, etc.).
"""
metadata: Optional[dict] = Field(default_factory=dict)
class Dataset(db.Model):
__tablename__ = 'datasets'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='dataset_pkey'),
db.Index('dataset_tenant_idx', 'tenant_id'),
db.Index('retrieval_model_idx', "retrieval_model", postgresql_using='gin')
)
INDEXING_TECHNIQUE_LIST = ['high_quality', 'economy', None]
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text, nullable=True)
provider = db.Column(db.String(255), nullable=False,
server_default=db.text("'vendor'::character varying"))
permission = db.Column(db.String(255), nullable=False,
server_default=db.text("'only_me'::character varying"))
data_source_type = db.Column(db.String(255))
indexing_technique = db.Column(db.String(255), nullable=True)
index_struct = db.Column(db.Text, nullable=True)
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
embedding_model = db.Column(db.String(255), nullable=True)
embedding_model_provider = db.Column(db.String(255), nullable=True)
collection_binding_id = db.Column(UUID, nullable=True)
retrieval_model = db.Column(JSONB, nullable=True)
def dataset_keyword_table(self):
dataset_keyword_table = db.session.query(DatasetKeywordTable).filter(
DatasetKeywordTable.dataset_id == self.id).first()
if dataset_keyword_table:
return dataset_keyword_table
return None
def index_struct_dict(self):
return json.loads(self.index_struct) if self.index_struct else None
def created_by_account(self):
return Account.query.get(self.created_by)
def latest_process_rule(self):
return DatasetProcessRule.query.filter(DatasetProcessRule.dataset_id == self.id) \
.order_by(DatasetProcessRule.created_at.desc()).first()
def app_count(self):
return db.session.query(func.count(AppDatasetJoin.id)).filter(AppDatasetJoin.dataset_id == self.id).scalar()
def document_count(self):
return db.session.query(func.count(Document.id)).filter(Document.dataset_id == self.id).scalar()
def available_document_count(self):
return db.session.query(func.count(Document.id)).filter(
Document.dataset_id == self.id,
Document.indexing_status == 'completed',
Document.enabled == True,
Document.archived == False
).scalar()
def available_segment_count(self):
return db.session.query(func.count(DocumentSegment.id)).filter(
DocumentSegment.dataset_id == self.id,
DocumentSegment.status == 'completed',
DocumentSegment.enabled == True
).scalar()
def word_count(self):
return Document.query.with_entities(func.coalesce(func.sum(Document.word_count), 0)) \
.filter(Document.dataset_id == self.id).scalar()
def doc_form(self):
document = db.session.query(Document).filter(
Document.dataset_id == self.id).first()
if document:
return document.doc_form
return None
def retrieval_model_dict(self):
default_retrieval_model = {
'search_method': 'semantic_search',
'reranking_enable': False,
'reranking_model': {
'reranking_provider_name': '',
'reranking_model_name': ''
},
'top_k': 2,
'score_threshold_enabled': False
}
return self.retrieval_model if self.retrieval_model else default_retrieval_model
def gen_collection_name_by_id(dataset_id: str) -> str:
normalized_dataset_id = dataset_id.replace("-", "_")
return f'Vector_index_{normalized_dataset_id}_Node'
class DatasetCollectionBindingService:
def get_dataset_collection_binding(cls, provider_name: str, model_name: str,
collection_type: str = 'dataset') -> DatasetCollectionBinding:
dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
filter(DatasetCollectionBinding.provider_name == provider_name,
DatasetCollectionBinding.model_name == model_name,
DatasetCollectionBinding.type == collection_type). \
order_by(DatasetCollectionBinding.created_at). \
first()
if not dataset_collection_binding:
dataset_collection_binding = DatasetCollectionBinding(
provider_name=provider_name,
model_name=model_name,
collection_name=Dataset.gen_collection_name_by_id(str(uuid.uuid4())),
type=collection_type
)
db.session.add(dataset_collection_binding)
db.session.commit()
return dataset_collection_binding
def get_dataset_collection_binding_by_id_and_type(cls, collection_binding_id: str,
collection_type: str = 'dataset') -> DatasetCollectionBinding:
dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
filter(DatasetCollectionBinding.id == collection_binding_id,
DatasetCollectionBinding.type == collection_type). \
order_by(DatasetCollectionBinding.created_at). \
first()
return dataset_collection_binding
The provided code snippet includes the necessary dependencies for implementing the `add_annotation_to_index_task` function. Write a Python function `def add_annotation_to_index_task(annotation_id: str, question: str, tenant_id: str, app_id: str, collection_binding_id: str)` to solve the following problem:
Add annotation to index. :param annotation_id: annotation id :param question: question :param tenant_id: tenant id :param app_id: app id :param collection_binding_id: embedding binding id Usage: add_annotation_to_index_task.delay(annotation_id, question, tenant_id, app_id, collection_binding_id)
Here is the function:
def add_annotation_to_index_task(annotation_id: str, question: str, tenant_id: str, app_id: str,
collection_binding_id: str):
"""
Add annotation to index.
:param annotation_id: annotation id
:param question: question
:param tenant_id: tenant id
:param app_id: app id
:param collection_binding_id: embedding binding id
Usage: add_annotation_to_index_task.delay(annotation_id, question, tenant_id, app_id, collection_binding_id)
"""
logging.info(click.style('Start build index for annotation: {}'.format(annotation_id), fg='green'))
start_at = time.perf_counter()
try:
dataset_collection_binding = DatasetCollectionBindingService.get_dataset_collection_binding_by_id_and_type(
collection_binding_id,
'annotation'
)
dataset = Dataset(
id=app_id,
tenant_id=tenant_id,
indexing_technique='high_quality',
embedding_model_provider=dataset_collection_binding.provider_name,
embedding_model=dataset_collection_binding.model_name,
collection_binding_id=dataset_collection_binding.id
)
document = Document(
page_content=question,
metadata={
"annotation_id": annotation_id,
"app_id": app_id,
"doc_id": annotation_id
}
)
vector = Vector(dataset, attributes=['doc_id', 'annotation_id', 'app_id'])
vector.create([document], duplicate_check=True)
end_at = time.perf_counter()
logging.info(
click.style(
'Build index successful for annotation: {} latency: {}'.format(annotation_id, end_at - start_at),
fg='green'))
except Exception:
logging.exception("Build index for annotation failed") | Add annotation to index. :param annotation_id: annotation id :param question: question :param tenant_id: tenant id :param app_id: app id :param collection_binding_id: embedding binding id Usage: clean_dataset_task.delay(dataset_id, tenant_id, indexing_technique, index_struct) |
17,038 | import datetime
import logging
import time
import click
from celery import shared_task
from werkzeug.exceptions import NotFound
from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.models.document import Document
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import Dataset
from models.model import App, AppAnnotationSetting, MessageAnnotation
from services.dataset_service import DatasetCollectionBindingService
class Vector:
def __init__(self, dataset: Dataset, attributes: list = None):
if attributes is None:
attributes = ['doc_id', 'dataset_id', 'document_id', 'doc_hash']
self._dataset = dataset
self._embeddings = self._get_embeddings()
self._attributes = attributes
self._vector_processor = self._init_vector()
def _init_vector(self) -> BaseVector:
config = current_app.config
vector_type = config.get('VECTOR_STORE')
if self._dataset.index_struct_dict:
vector_type = self._dataset.index_struct_dict['type']
if not vector_type:
raise ValueError("Vector store must be specified.")
if vector_type == "weaviate":
from core.rag.datasource.vdb.weaviate.weaviate_vector import WeaviateConfig, WeaviateVector
if self._dataset.index_struct_dict:
class_prefix: str = self._dataset.index_struct_dict['vector_store']['class_prefix']
collection_name = class_prefix
else:
dataset_id = self._dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": 'weaviate',
"vector_store": {"class_prefix": collection_name}
}
self._dataset.index_struct = json.dumps(index_struct_dict)
return WeaviateVector(
collection_name=collection_name,
config=WeaviateConfig(
endpoint=config.get('WEAVIATE_ENDPOINT'),
api_key=config.get('WEAVIATE_API_KEY'),
batch_size=int(config.get('WEAVIATE_BATCH_SIZE'))
),
attributes=self._attributes
)
elif vector_type == "qdrant":
from core.rag.datasource.vdb.qdrant.qdrant_vector import QdrantConfig, QdrantVector
if self._dataset.collection_binding_id:
dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
filter(DatasetCollectionBinding.id == self._dataset.collection_binding_id). \
one_or_none()
if dataset_collection_binding:
collection_name = dataset_collection_binding.collection_name
else:
raise ValueError('Dataset Collection Binding does not exist!')
else:
if self._dataset.index_struct_dict:
class_prefix: str = self._dataset.index_struct_dict['vector_store']['class_prefix']
collection_name = class_prefix
else:
dataset_id = self._dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
if not self._dataset.index_struct_dict:
index_struct_dict = {
"type": 'qdrant',
"vector_store": {"class_prefix": collection_name}
}
self._dataset.index_struct = json.dumps(index_struct_dict)
return QdrantVector(
collection_name=collection_name,
group_id=self._dataset.id,
config=QdrantConfig(
endpoint=config.get('QDRANT_URL'),
api_key=config.get('QDRANT_API_KEY'),
root_path=current_app.root_path,
timeout=config.get('QDRANT_CLIENT_TIMEOUT')
)
)
elif vector_type == "milvus":
from core.rag.datasource.vdb.milvus.milvus_vector import MilvusConfig, MilvusVector
if self._dataset.index_struct_dict:
class_prefix: str = self._dataset.index_struct_dict['vector_store']['class_prefix']
collection_name = class_prefix
else:
dataset_id = self._dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": 'milvus',
"vector_store": {"class_prefix": collection_name}
}
self._dataset.index_struct = json.dumps(index_struct_dict)
return MilvusVector(
collection_name=collection_name,
config=MilvusConfig(
host=config.get('MILVUS_HOST'),
port=config.get('MILVUS_PORT'),
user=config.get('MILVUS_USER'),
password=config.get('MILVUS_PASSWORD'),
secure=config.get('MILVUS_SECURE'),
)
)
else:
raise ValueError(f"Vector store {config.get('VECTOR_STORE')} is not supported.")
def create(self, texts: list = None, **kwargs):
if texts:
embeddings = self._embeddings.embed_documents([document.page_content for document in texts])
self._vector_processor.create(
texts=texts,
embeddings=embeddings,
**kwargs
)
def add_texts(self, documents: list[Document], **kwargs):
if kwargs.get('duplicate_check', False):
documents = self._filter_duplicate_texts(documents)
embeddings = self._embeddings.embed_documents([document.page_content for document in documents])
self._vector_processor.add_texts(
documents=documents,
embeddings=embeddings,
**kwargs
)
def text_exists(self, id: str) -> bool:
return self._vector_processor.text_exists(id)
def delete_by_ids(self, ids: list[str]) -> None:
self._vector_processor.delete_by_ids(ids)
def delete_by_metadata_field(self, key: str, value: str) -> None:
self._vector_processor.delete_by_metadata_field(key, value)
def search_by_vector(
self, query: str,
**kwargs: Any
) -> list[Document]:
query_vector = self._embeddings.embed_query(query)
return self._vector_processor.search_by_vector(query_vector, **kwargs)
def search_by_full_text(
self, query: str,
**kwargs: Any
) -> list[Document]:
return self._vector_processor.search_by_full_text(query, **kwargs)
def delete(self) -> None:
self._vector_processor.delete()
def _get_embeddings(self) -> Embeddings:
model_manager = ModelManager()
embedding_model = model_manager.get_model_instance(
tenant_id=self._dataset.tenant_id,
provider=self._dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=self._dataset.embedding_model
)
return CacheEmbedding(embedding_model)
def _filter_duplicate_texts(self, texts: list[Document]) -> list[Document]:
for text in texts:
doc_id = text.metadata['doc_id']
exists_duplicate_node = self.text_exists(doc_id)
if exists_duplicate_node:
texts.remove(text)
return texts
def __getattr__(self, name):
if self._vector_processor is not None:
method = getattr(self._vector_processor, name)
if callable(method):
return method
raise AttributeError(f"'vector_processor' object has no attribute '{name}'")
class Document(BaseModel):
"""Class for storing a piece of text and associated metadata."""
page_content: str
"""Arbitrary metadata about the page content (e.g., source, relationships to other
documents, etc.).
"""
metadata: Optional[dict] = Field(default_factory=dict)
db = SQLAlchemy()
redis_client = redis.Redis()
class Dataset(db.Model):
__tablename__ = 'datasets'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='dataset_pkey'),
db.Index('dataset_tenant_idx', 'tenant_id'),
db.Index('retrieval_model_idx', "retrieval_model", postgresql_using='gin')
)
INDEXING_TECHNIQUE_LIST = ['high_quality', 'economy', None]
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text, nullable=True)
provider = db.Column(db.String(255), nullable=False,
server_default=db.text("'vendor'::character varying"))
permission = db.Column(db.String(255), nullable=False,
server_default=db.text("'only_me'::character varying"))
data_source_type = db.Column(db.String(255))
indexing_technique = db.Column(db.String(255), nullable=True)
index_struct = db.Column(db.Text, nullable=True)
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
embedding_model = db.Column(db.String(255), nullable=True)
embedding_model_provider = db.Column(db.String(255), nullable=True)
collection_binding_id = db.Column(UUID, nullable=True)
retrieval_model = db.Column(JSONB, nullable=True)
def dataset_keyword_table(self):
dataset_keyword_table = db.session.query(DatasetKeywordTable).filter(
DatasetKeywordTable.dataset_id == self.id).first()
if dataset_keyword_table:
return dataset_keyword_table
return None
def index_struct_dict(self):
return json.loads(self.index_struct) if self.index_struct else None
def created_by_account(self):
return Account.query.get(self.created_by)
def latest_process_rule(self):
return DatasetProcessRule.query.filter(DatasetProcessRule.dataset_id == self.id) \
.order_by(DatasetProcessRule.created_at.desc()).first()
def app_count(self):
return db.session.query(func.count(AppDatasetJoin.id)).filter(AppDatasetJoin.dataset_id == self.id).scalar()
def document_count(self):
return db.session.query(func.count(Document.id)).filter(Document.dataset_id == self.id).scalar()
def available_document_count(self):
return db.session.query(func.count(Document.id)).filter(
Document.dataset_id == self.id,
Document.indexing_status == 'completed',
Document.enabled == True,
Document.archived == False
).scalar()
def available_segment_count(self):
return db.session.query(func.count(DocumentSegment.id)).filter(
DocumentSegment.dataset_id == self.id,
DocumentSegment.status == 'completed',
DocumentSegment.enabled == True
).scalar()
def word_count(self):
return Document.query.with_entities(func.coalesce(func.sum(Document.word_count), 0)) \
.filter(Document.dataset_id == self.id).scalar()
def doc_form(self):
document = db.session.query(Document).filter(
Document.dataset_id == self.id).first()
if document:
return document.doc_form
return None
def retrieval_model_dict(self):
default_retrieval_model = {
'search_method': 'semantic_search',
'reranking_enable': False,
'reranking_model': {
'reranking_provider_name': '',
'reranking_model_name': ''
},
'top_k': 2,
'score_threshold_enabled': False
}
return self.retrieval_model if self.retrieval_model else default_retrieval_model
def gen_collection_name_by_id(dataset_id: str) -> str:
normalized_dataset_id = dataset_id.replace("-", "_")
return f'Vector_index_{normalized_dataset_id}_Node'
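As a quick worked example of the naming scheme above (the dataset id below is made up for illustration, and calling the helper on the class assumes the staticmethod decorator stripped from this listing), gen_collection_name_by_id only swaps hyphens for underscores and adds a fixed prefix and suffix:
# illustration only: a hypothetical dataset id
example_id = '123e4567-e89b-42d3-a456-426614174000'
assert Dataset.gen_collection_name_by_id(example_id) == \
    'Vector_index_123e4567_e89b_42d3_a456_426614174000_Node'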
class App(db.Model):
__tablename__ = 'apps'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='app_pkey'),
db.Index('app_tenant_id_idx', 'tenant_id')
)
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
name = db.Column(db.String(255), nullable=False)
mode = db.Column(db.String(255), nullable=False)
icon = db.Column(db.String(255))
icon_background = db.Column(db.String(255))
app_model_config_id = db.Column(UUID, nullable=True)
status = db.Column(db.String(255), nullable=False, server_default=db.text("'normal'::character varying"))
enable_site = db.Column(db.Boolean, nullable=False)
enable_api = db.Column(db.Boolean, nullable=False)
api_rpm = db.Column(db.Integer, nullable=False)
api_rph = db.Column(db.Integer, nullable=False)
is_demo = db.Column(db.Boolean, nullable=False, server_default=db.text('false'))
is_public = db.Column(db.Boolean, nullable=False, server_default=db.text('false'))
is_universal = db.Column(db.Boolean, nullable=False, server_default=db.text('false'))
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
def site(self):
site = db.session.query(Site).filter(Site.app_id == self.id).first()
return site
def app_model_config(self):
app_model_config = db.session.query(AppModelConfig).filter(
AppModelConfig.id == self.app_model_config_id).first()
return app_model_config
def api_base_url(self):
return (current_app.config['SERVICE_API_URL'] if current_app.config['SERVICE_API_URL']
else request.host_url.rstrip('/')) + '/v1'
def tenant(self):
tenant = db.session.query(Tenant).filter(Tenant.id == self.tenant_id).first()
return tenant
def is_agent(self) -> bool:
app_model_config = self.app_model_config
if not app_model_config:
return False
if not app_model_config.agent_mode:
return False
if self.app_model_config.agent_mode_dict.get('enabled', False) \
and self.app_model_config.agent_mode_dict.get('strategy', '') in ['function_call', 'react']:
return True
return False
def deleted_tools(self) -> list:
# get agent mode tools
app_model_config = self.app_model_config
if not app_model_config:
return []
if not app_model_config.agent_mode:
return []
agent_mode = app_model_config.agent_mode_dict
tools = agent_mode.get('tools', [])
provider_ids = []
for tool in tools:
keys = list(tool.keys())
if len(keys) >= 4:
provider_type = tool.get('provider_type', '')
provider_id = tool.get('provider_id', '')
if provider_type == 'api':
# check if provider id is a uuid string, if not, skip
try:
uuid.UUID(provider_id)
except Exception:
continue
provider_ids.append(provider_id)
if not provider_ids:
return []
api_providers = db.session.execute(
text('SELECT id FROM tool_api_providers WHERE id IN :provider_ids'),
{'provider_ids': tuple(provider_ids)}
).fetchall()
deleted_tools = []
current_api_provider_ids = [str(api_provider.id) for api_provider in api_providers]
for tool in tools:
keys = list(tool.keys())
if len(keys) >= 4:
provider_type = tool.get('provider_type', '')
provider_id = tool.get('provider_id', '')
if provider_type == 'api' and provider_id not in current_api_provider_ids:
deleted_tools.append(tool['tool_name'])
return deleted_tools
class MessageAnnotation(db.Model):
__tablename__ = 'message_annotations'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='message_annotation_pkey'),
db.Index('message_annotation_app_idx', 'app_id'),
db.Index('message_annotation_conversation_idx', 'conversation_id'),
db.Index('message_annotation_message_idx', 'message_id')
)
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
app_id = db.Column(UUID, nullable=False)
conversation_id = db.Column(UUID, db.ForeignKey('conversations.id'), nullable=True)
message_id = db.Column(UUID, nullable=True)
question = db.Column(db.Text, nullable=True)
content = db.Column(db.Text, nullable=False)
hit_count = db.Column(db.Integer, nullable=False, server_default=db.text('0'))
account_id = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
def account(self):
account = db.session.query(Account).filter(Account.id == self.account_id).first()
return account
def annotation_create_account(self):
account = db.session.query(Account).filter(Account.id == self.account_id).first()
return account
class AppAnnotationSetting(db.Model):
__tablename__ = 'app_annotation_settings'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='app_annotation_settings_pkey'),
db.Index('app_annotation_settings_app_idx', 'app_id')
)
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
app_id = db.Column(UUID, nullable=False)
score_threshold = db.Column(Float, nullable=False, server_default=db.text('0'))
collection_binding_id = db.Column(UUID, nullable=False)
created_user_id = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_user_id = db.Column(UUID, nullable=False)
updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
def created_account(self):
account = (db.session.query(Account)
.join(AppAnnotationSetting, AppAnnotationSetting.created_user_id == Account.id)
.filter(AppAnnotationSetting.id == self.id).first())
return account
def updated_account(self):
account = (db.session.query(Account)
.join(AppAnnotationSetting, AppAnnotationSetting.updated_user_id == Account.id)
.filter(AppAnnotationSetting.id == self.id).first())
return account
def collection_binding_detail(self):
from .dataset import DatasetCollectionBinding
collection_binding_detail = (db.session.query(DatasetCollectionBinding)
.filter(DatasetCollectionBinding.id == self.collection_binding_id).first())
return collection_binding_detail
class DatasetCollectionBindingService:
def get_dataset_collection_binding(cls, provider_name: str, model_name: str,
collection_type: str = 'dataset') -> DatasetCollectionBinding:
dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
filter(DatasetCollectionBinding.provider_name == provider_name,
DatasetCollectionBinding.model_name == model_name,
DatasetCollectionBinding.type == collection_type). \
order_by(DatasetCollectionBinding.created_at). \
first()
if not dataset_collection_binding:
dataset_collection_binding = DatasetCollectionBinding(
provider_name=provider_name,
model_name=model_name,
collection_name=Dataset.gen_collection_name_by_id(str(uuid.uuid4())),
type=collection_type
)
db.session.add(dataset_collection_binding)
db.session.commit()
return dataset_collection_binding
def get_dataset_collection_binding_by_id_and_type(cls, collection_binding_id: str,
collection_type: str = 'dataset') -> DatasetCollectionBinding:
dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
filter(DatasetCollectionBinding.id == collection_binding_id,
DatasetCollectionBinding.type == collection_type). \
order_by(DatasetCollectionBinding.created_at). \
first()
return dataset_collection_binding
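A hedged usage sketch of the get-or-create helper above; the provider and model names are hypothetical placeholders, not values required by the code:
# creates the binding (with a freshly generated collection name) on first call,
# and returns the existing row on subsequent calls
binding = DatasetCollectionBindingService.get_dataset_collection_binding(
    'openai', 'text-embedding-ada-002', 'annotation')
print(binding.collection_name)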
The provided code snippet includes necessary dependencies for implementing the `enable_annotation_reply_task` function. Write a Python function `def enable_annotation_reply_task(job_id: str, app_id: str, user_id: str, tenant_id: str, score_threshold: float, embedding_provider_name: str, embedding_model_name: str)` to solve the following problem:
Async enable annotation reply task
Here is the function:
def enable_annotation_reply_task(job_id: str, app_id: str, user_id: str, tenant_id: str, score_threshold: float,
embedding_provider_name: str, embedding_model_name: str):
"""
Async enable annotation reply task
"""
logging.info(click.style('Start adding app annotations to index: {}'.format(app_id), fg='green'))
start_at = time.perf_counter()
# get app info
app = db.session.query(App).filter(
App.id == app_id,
App.tenant_id == tenant_id,
App.status == 'normal'
).first()
if not app:
raise NotFound("App not found")
annotations = db.session.query(MessageAnnotation).filter(MessageAnnotation.app_id == app_id).all()
enable_app_annotation_key = 'enable_app_annotation_{}'.format(str(app_id))
enable_app_annotation_job_key = 'enable_app_annotation_job_{}'.format(str(job_id))
try:
documents = []
dataset_collection_binding = DatasetCollectionBindingService.get_dataset_collection_binding(
embedding_provider_name,
embedding_model_name,
'annotation'
)
annotation_setting = db.session.query(AppAnnotationSetting).filter(
AppAnnotationSetting.app_id == app_id).first()
if annotation_setting:
annotation_setting.score_threshold = score_threshold
annotation_setting.collection_binding_id = dataset_collection_binding.id
annotation_setting.updated_user_id = user_id
annotation_setting.updated_at = datetime.datetime.utcnow()
db.session.add(annotation_setting)
else:
new_app_annotation_setting = AppAnnotationSetting(
app_id=app_id,
score_threshold=score_threshold,
collection_binding_id=dataset_collection_binding.id,
created_user_id=user_id,
updated_user_id=user_id
)
db.session.add(new_app_annotation_setting)
dataset = Dataset(
id=app_id,
tenant_id=tenant_id,
indexing_technique='high_quality',
embedding_model_provider=embedding_provider_name,
embedding_model=embedding_model_name,
collection_binding_id=dataset_collection_binding.id
)
if annotations:
for annotation in annotations:
document = Document(
page_content=annotation.question,
metadata={
"annotation_id": annotation.id,
"app_id": app_id,
"doc_id": annotation.id
}
)
documents.append(document)
vector = Vector(dataset, attributes=['doc_id', 'annotation_id', 'app_id'])
try:
vector.delete_by_metadata_field('app_id', app_id)
except Exception as e:
logging.info(
click.style('Delete annotation index error: {}'.format(str(e)),
fg='red'))
vector.add_texts(documents)
db.session.commit()
redis_client.setex(enable_app_annotation_job_key, 600, 'completed')
end_at = time.perf_counter()
logging.info(
click.style('App annotations added to index: {} latency: {}'.format(app_id, end_at - start_at),
fg='green'))
except Exception as e:
logging.exception("Annotation batch created index failed:{}".format(str(e)))
redis_client.setex(enable_app_annotation_job_key, 600, 'error')
enable_app_annotation_error_key = 'enable_app_annotation_error_{}'.format(str(job_id))
redis_client.setex(enable_app_annotation_error_key, 600, str(e))
db.session.rollback()
finally:
redis_client.delete(enable_app_annotation_key) | Async enable annotation reply task |
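Since this row's imports include celery's shared_task (the decorator itself is stripped from the listing), a minimal dispatch sketch could look like the following; every argument value is a hypothetical example:
enable_annotation_reply_task.delay(
    job_id=str(uuid.uuid4()),        # hypothetical job id, polled later via the redis job key
    app_id='app-uuid',               # hypothetical ids
    user_id='user-uuid',
    tenant_id='tenant-uuid',
    score_threshold=0.8,             # hypothetical threshold
    embedding_provider_name='openai',               # hypothetical provider/model pair
    embedding_model_name='text-embedding-ada-002'
)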
17,039 | import logging
import time
import click
from celery import shared_task
from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.models.document import Document
from models.dataset import Dataset
from services.dataset_service import DatasetCollectionBindingService
class Vector:
def __init__(self, dataset: Dataset, attributes: list = None):
if attributes is None:
attributes = ['doc_id', 'dataset_id', 'document_id', 'doc_hash']
self._dataset = dataset
self._embeddings = self._get_embeddings()
self._attributes = attributes
self._vector_processor = self._init_vector()
def _init_vector(self) -> BaseVector:
config = current_app.config
vector_type = config.get('VECTOR_STORE')
if self._dataset.index_struct_dict:
vector_type = self._dataset.index_struct_dict['type']
if not vector_type:
raise ValueError("Vector store must be specified.")
if vector_type == "weaviate":
from core.rag.datasource.vdb.weaviate.weaviate_vector import WeaviateConfig, WeaviateVector
if self._dataset.index_struct_dict:
class_prefix: str = self._dataset.index_struct_dict['vector_store']['class_prefix']
collection_name = class_prefix
else:
dataset_id = self._dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": 'weaviate',
"vector_store": {"class_prefix": collection_name}
}
self._dataset.index_struct = json.dumps(index_struct_dict)
return WeaviateVector(
collection_name=collection_name,
config=WeaviateConfig(
endpoint=config.get('WEAVIATE_ENDPOINT'),
api_key=config.get('WEAVIATE_API_KEY'),
batch_size=int(config.get('WEAVIATE_BATCH_SIZE'))
),
attributes=self._attributes
)
elif vector_type == "qdrant":
from core.rag.datasource.vdb.qdrant.qdrant_vector import QdrantConfig, QdrantVector
if self._dataset.collection_binding_id:
dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
filter(DatasetCollectionBinding.id == self._dataset.collection_binding_id). \
one_or_none()
if dataset_collection_binding:
collection_name = dataset_collection_binding.collection_name
else:
raise ValueError('Dataset collection binding does not exist!')
else:
if self._dataset.index_struct_dict:
class_prefix: str = self._dataset.index_struct_dict['vector_store']['class_prefix']
collection_name = class_prefix
else:
dataset_id = self._dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
if not self._dataset.index_struct_dict:
index_struct_dict = {
"type": 'qdrant',
"vector_store": {"class_prefix": collection_name}
}
self._dataset.index_struct = json.dumps(index_struct_dict)
return QdrantVector(
collection_name=collection_name,
group_id=self._dataset.id,
config=QdrantConfig(
endpoint=config.get('QDRANT_URL'),
api_key=config.get('QDRANT_API_KEY'),
root_path=current_app.root_path,
timeout=config.get('QDRANT_CLIENT_TIMEOUT')
)
)
elif vector_type == "milvus":
from core.rag.datasource.vdb.milvus.milvus_vector import MilvusConfig, MilvusVector
if self._dataset.index_struct_dict:
class_prefix: str = self._dataset.index_struct_dict['vector_store']['class_prefix']
collection_name = class_prefix
else:
dataset_id = self._dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": 'milvus',
"vector_store": {"class_prefix": collection_name}
}
self._dataset.index_struct = json.dumps(index_struct_dict)
return MilvusVector(
collection_name=collection_name,
config=MilvusConfig(
host=config.get('MILVUS_HOST'),
port=config.get('MILVUS_PORT'),
user=config.get('MILVUS_USER'),
password=config.get('MILVUS_PASSWORD'),
secure=config.get('MILVUS_SECURE'),
)
)
else:
raise ValueError(f"Vector store {config.get('VECTOR_STORE')} is not supported.")
def create(self, texts: list = None, **kwargs):
if texts:
embeddings = self._embeddings.embed_documents([document.page_content for document in texts])
self._vector_processor.create(
texts=texts,
embeddings=embeddings,
**kwargs
)
def add_texts(self, documents: list[Document], **kwargs):
if kwargs.get('duplicate_check', False):
documents = self._filter_duplicate_texts(documents)
embeddings = self._embeddings.embed_documents([document.page_content for document in documents])
self._vector_processor.add_texts(
documents=documents,
embeddings=embeddings,
**kwargs
)
def text_exists(self, id: str) -> bool:
return self._vector_processor.text_exists(id)
def delete_by_ids(self, ids: list[str]) -> None:
self._vector_processor.delete_by_ids(ids)
def delete_by_metadata_field(self, key: str, value: str) -> None:
self._vector_processor.delete_by_metadata_field(key, value)
def search_by_vector(
self, query: str,
**kwargs: Any
) -> list[Document]:
query_vector = self._embeddings.embed_query(query)
return self._vector_processor.search_by_vector(query_vector, **kwargs)
def search_by_full_text(
self, query: str,
**kwargs: Any
) -> list[Document]:
return self._vector_processor.search_by_full_text(query, **kwargs)
def delete(self) -> None:
self._vector_processor.delete()
def _get_embeddings(self) -> Embeddings:
model_manager = ModelManager()
embedding_model = model_manager.get_model_instance(
tenant_id=self._dataset.tenant_id,
provider=self._dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=self._dataset.embedding_model
)
return CacheEmbedding(embedding_model)
def _filter_duplicate_texts(self, texts: list[Document]) -> list[Document]:
# iterate over a copy: removing items from the list being iterated would skip elements
for text in list(texts):
doc_id = text.metadata['doc_id']
exists_duplicate_node = self.text_exists(doc_id)
if exists_duplicate_node:
texts.remove(text)
return texts
def __getattr__(self, name):
if self._vector_processor is not None:
method = getattr(self._vector_processor, name)
if callable(method):
return method
raise AttributeError(f"'vector_processor' object has no attribute '{name}'")
class Document(BaseModel):
"""Class for storing a piece of text and associated metadata."""
page_content: str
"""Arbitrary metadata about the page content (e.g., source, relationships to other
documents, etc.).
"""
metadata: Optional[dict] = Field(default_factory=dict)
class Dataset(db.Model):
__tablename__ = 'datasets'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='dataset_pkey'),
db.Index('dataset_tenant_idx', 'tenant_id'),
db.Index('retrieval_model_idx', "retrieval_model", postgresql_using='gin')
)
INDEXING_TECHNIQUE_LIST = ['high_quality', 'economy', None]
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text, nullable=True)
provider = db.Column(db.String(255), nullable=False,
server_default=db.text("'vendor'::character varying"))
permission = db.Column(db.String(255), nullable=False,
server_default=db.text("'only_me'::character varying"))
data_source_type = db.Column(db.String(255))
indexing_technique = db.Column(db.String(255), nullable=True)
index_struct = db.Column(db.Text, nullable=True)
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
embedding_model = db.Column(db.String(255), nullable=True)
embedding_model_provider = db.Column(db.String(255), nullable=True)
collection_binding_id = db.Column(UUID, nullable=True)
retrieval_model = db.Column(JSONB, nullable=True)
def dataset_keyword_table(self):
dataset_keyword_table = db.session.query(DatasetKeywordTable).filter(
DatasetKeywordTable.dataset_id == self.id).first()
if dataset_keyword_table:
return dataset_keyword_table
return None
def index_struct_dict(self):
return json.loads(self.index_struct) if self.index_struct else None
def created_by_account(self):
return Account.query.get(self.created_by)
def latest_process_rule(self):
return DatasetProcessRule.query.filter(DatasetProcessRule.dataset_id == self.id) \
.order_by(DatasetProcessRule.created_at.desc()).first()
def app_count(self):
return db.session.query(func.count(AppDatasetJoin.id)).filter(AppDatasetJoin.dataset_id == self.id).scalar()
def document_count(self):
return db.session.query(func.count(Document.id)).filter(Document.dataset_id == self.id).scalar()
def available_document_count(self):
return db.session.query(func.count(Document.id)).filter(
Document.dataset_id == self.id,
Document.indexing_status == 'completed',
Document.enabled == True,
Document.archived == False
).scalar()
def available_segment_count(self):
return db.session.query(func.count(DocumentSegment.id)).filter(
DocumentSegment.dataset_id == self.id,
DocumentSegment.status == 'completed',
DocumentSegment.enabled == True
).scalar()
def word_count(self):
return Document.query.with_entities(func.coalesce(func.sum(Document.word_count))) \
.filter(Document.dataset_id == self.id).scalar()
def doc_form(self):
document = db.session.query(Document).filter(
Document.dataset_id == self.id).first()
if document:
return document.doc_form
return None
def retrieval_model_dict(self):
default_retrieval_model = {
'search_method': 'semantic_search',
'reranking_enable': False,
'reranking_model': {
'reranking_provider_name': '',
'reranking_model_name': ''
},
'top_k': 2,
'score_threshold_enabled': False
}
return self.retrieval_model if self.retrieval_model else default_retrieval_model
def gen_collection_name_by_id(dataset_id: str) -> str:
normalized_dataset_id = dataset_id.replace("-", "_")
return f'Vector_index_{normalized_dataset_id}_Node'
class DatasetCollectionBindingService:
def get_dataset_collection_binding(cls, provider_name: str, model_name: str,
collection_type: str = 'dataset') -> DatasetCollectionBinding:
dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
filter(DatasetCollectionBinding.provider_name == provider_name,
DatasetCollectionBinding.model_name == model_name,
DatasetCollectionBinding.type == collection_type). \
order_by(DatasetCollectionBinding.created_at). \
first()
if not dataset_collection_binding:
dataset_collection_binding = DatasetCollectionBinding(
provider_name=provider_name,
model_name=model_name,
collection_name=Dataset.gen_collection_name_by_id(str(uuid.uuid4())),
type=collection_type
)
db.session.add(dataset_collection_binding)
db.session.commit()
return dataset_collection_binding
def get_dataset_collection_binding_by_id_and_type(cls, collection_binding_id: str,
collection_type: str = 'dataset') -> DatasetCollectionBinding:
dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
filter(DatasetCollectionBinding.id == collection_binding_id,
DatasetCollectionBinding.type == collection_type). \
order_by(DatasetCollectionBinding.created_at). \
first()
return dataset_collection_binding
The provided code snippet includes necessary dependencies for implementing the `update_annotation_to_index_task` function. Write a Python function `def update_annotation_to_index_task(annotation_id: str, question: str, tenant_id: str, app_id: str, collection_binding_id: str)` to solve the following problem:
Update annotation to index. :param annotation_id: annotation id :param question: question :param tenant_id: tenant id :param app_id: app id :param collection_binding_id: embedding binding id Usage: update_annotation_to_index_task.delay(annotation_id, question, tenant_id, app_id, collection_binding_id)
Here is the function:
def update_annotation_to_index_task(annotation_id: str, question: str, tenant_id: str, app_id: str,
collection_binding_id: str):
"""
Update annotation to index.
:param annotation_id: annotation id
:param question: question
:param tenant_id: tenant id
:param app_id: app id
:param collection_binding_id: embedding binding id
Usage: update_annotation_to_index_task.delay(annotation_id, question, tenant_id, app_id, collection_binding_id)
"""
logging.info(click.style('Start updating index for annotation: {}'.format(annotation_id), fg='green'))
start_at = time.perf_counter()
try:
dataset_collection_binding = DatasetCollectionBindingService.get_dataset_collection_binding_by_id_and_type(
collection_binding_id,
'annotation'
)
dataset = Dataset(
id=app_id,
tenant_id=tenant_id,
indexing_technique='high_quality',
embedding_model_provider=dataset_collection_binding.provider_name,
embedding_model=dataset_collection_binding.model_name,
collection_binding_id=dataset_collection_binding.id
)
document = Document(
page_content=question,
metadata={
"annotation_id": annotation_id,
"app_id": app_id,
"doc_id": annotation_id
}
)
vector = Vector(dataset, attributes=['doc_id', 'annotation_id', 'app_id'])
vector.delete_by_metadata_field('annotation_id', annotation_id)
vector.add_texts([document])
end_at = time.perf_counter()
logging.info(
click.style(
'Built index successfully for annotation: {} latency: {}'.format(annotation_id, end_at - start_at),
fg='green'))
except Exception:
logging.exception("Build index for annotation failed") | Update annotation to index. :param annotation_id: annotation id :param question: question :param tenant_id: tenant id :param app_id: app id :param collection_binding_id: embedding binding id Usage: clean_dataset_task.delay(dataset_id, tenant_id, indexing_technique, index_struct) |
17,040 | import logging
import time
import click
from celery import shared_task
from werkzeug.exceptions import NotFound
from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.models.document import Document
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import Dataset
from models.model import App, AppAnnotationSetting, MessageAnnotation
from services.dataset_service import DatasetCollectionBindingService
class Vector:
def __init__(self, dataset: Dataset, attributes: list = None):
if attributes is None:
attributes = ['doc_id', 'dataset_id', 'document_id', 'doc_hash']
self._dataset = dataset
self._embeddings = self._get_embeddings()
self._attributes = attributes
self._vector_processor = self._init_vector()
def _init_vector(self) -> BaseVector:
config = current_app.config
vector_type = config.get('VECTOR_STORE')
if self._dataset.index_struct_dict:
vector_type = self._dataset.index_struct_dict['type']
if not vector_type:
raise ValueError("Vector store must be specified.")
if vector_type == "weaviate":
from core.rag.datasource.vdb.weaviate.weaviate_vector import WeaviateConfig, WeaviateVector
if self._dataset.index_struct_dict:
class_prefix: str = self._dataset.index_struct_dict['vector_store']['class_prefix']
collection_name = class_prefix
else:
dataset_id = self._dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": 'weaviate',
"vector_store": {"class_prefix": collection_name}
}
self._dataset.index_struct = json.dumps(index_struct_dict)
return WeaviateVector(
collection_name=collection_name,
config=WeaviateConfig(
endpoint=config.get('WEAVIATE_ENDPOINT'),
api_key=config.get('WEAVIATE_API_KEY'),
batch_size=int(config.get('WEAVIATE_BATCH_SIZE'))
),
attributes=self._attributes
)
elif vector_type == "qdrant":
from core.rag.datasource.vdb.qdrant.qdrant_vector import QdrantConfig, QdrantVector
if self._dataset.collection_binding_id:
dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
filter(DatasetCollectionBinding.id == self._dataset.collection_binding_id). \
one_or_none()
if dataset_collection_binding:
collection_name = dataset_collection_binding.collection_name
else:
raise ValueError('Dataset collection binding does not exist!')
else:
if self._dataset.index_struct_dict:
class_prefix: str = self._dataset.index_struct_dict['vector_store']['class_prefix']
collection_name = class_prefix
else:
dataset_id = self._dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
if not self._dataset.index_struct_dict:
index_struct_dict = {
"type": 'qdrant',
"vector_store": {"class_prefix": collection_name}
}
self._dataset.index_struct = json.dumps(index_struct_dict)
return QdrantVector(
collection_name=collection_name,
group_id=self._dataset.id,
config=QdrantConfig(
endpoint=config.get('QDRANT_URL'),
api_key=config.get('QDRANT_API_KEY'),
root_path=current_app.root_path,
timeout=config.get('QDRANT_CLIENT_TIMEOUT')
)
)
elif vector_type == "milvus":
from core.rag.datasource.vdb.milvus.milvus_vector import MilvusConfig, MilvusVector
if self._dataset.index_struct_dict:
class_prefix: str = self._dataset.index_struct_dict['vector_store']['class_prefix']
collection_name = class_prefix
else:
dataset_id = self._dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": 'milvus',
"vector_store": {"class_prefix": collection_name}
}
self._dataset.index_struct = json.dumps(index_struct_dict)
return MilvusVector(
collection_name=collection_name,
config=MilvusConfig(
host=config.get('MILVUS_HOST'),
port=config.get('MILVUS_PORT'),
user=config.get('MILVUS_USER'),
password=config.get('MILVUS_PASSWORD'),
secure=config.get('MILVUS_SECURE'),
)
)
else:
raise ValueError(f"Vector store {config.get('VECTOR_STORE')} is not supported.")
def create(self, texts: list = None, **kwargs):
if texts:
embeddings = self._embeddings.embed_documents([document.page_content for document in texts])
self._vector_processor.create(
texts=texts,
embeddings=embeddings,
**kwargs
)
def add_texts(self, documents: list[Document], **kwargs):
if kwargs.get('duplicate_check', False):
documents = self._filter_duplicate_texts(documents)
embeddings = self._embeddings.embed_documents([document.page_content for document in documents])
self._vector_processor.add_texts(
documents=documents,
embeddings=embeddings,
**kwargs
)
def text_exists(self, id: str) -> bool:
return self._vector_processor.text_exists(id)
def delete_by_ids(self, ids: list[str]) -> None:
self._vector_processor.delete_by_ids(ids)
def delete_by_metadata_field(self, key: str, value: str) -> None:
self._vector_processor.delete_by_metadata_field(key, value)
def search_by_vector(
self, query: str,
**kwargs: Any
) -> list[Document]:
query_vector = self._embeddings.embed_query(query)
return self._vector_processor.search_by_vector(query_vector, **kwargs)
def search_by_full_text(
self, query: str,
**kwargs: Any
) -> list[Document]:
return self._vector_processor.search_by_full_text(query, **kwargs)
def delete(self) -> None:
self._vector_processor.delete()
def _get_embeddings(self) -> Embeddings:
model_manager = ModelManager()
embedding_model = model_manager.get_model_instance(
tenant_id=self._dataset.tenant_id,
provider=self._dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=self._dataset.embedding_model
)
return CacheEmbedding(embedding_model)
def _filter_duplicate_texts(self, texts: list[Document]) -> list[Document]:
# iterate over a copy: removing items from the list being iterated would skip elements
for text in list(texts):
doc_id = text.metadata['doc_id']
exists_duplicate_node = self.text_exists(doc_id)
if exists_duplicate_node:
texts.remove(text)
return texts
def __getattr__(self, name):
if self._vector_processor is not None:
method = getattr(self._vector_processor, name)
if callable(method):
return method
raise AttributeError(f"'vector_processor' object has no attribute '{name}'")
class Document(BaseModel):
"""Class for storing a piece of text and associated metadata."""
page_content: str
"""Arbitrary metadata about the page content (e.g., source, relationships to other
documents, etc.).
"""
metadata: Optional[dict] = Field(default_factory=dict)
db = SQLAlchemy()
redis_client = redis.Redis()
class Dataset(db.Model):
__tablename__ = 'datasets'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='dataset_pkey'),
db.Index('dataset_tenant_idx', 'tenant_id'),
db.Index('retrieval_model_idx', "retrieval_model", postgresql_using='gin')
)
INDEXING_TECHNIQUE_LIST = ['high_quality', 'economy', None]
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text, nullable=True)
provider = db.Column(db.String(255), nullable=False,
server_default=db.text("'vendor'::character varying"))
permission = db.Column(db.String(255), nullable=False,
server_default=db.text("'only_me'::character varying"))
data_source_type = db.Column(db.String(255))
indexing_technique = db.Column(db.String(255), nullable=True)
index_struct = db.Column(db.Text, nullable=True)
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
embedding_model = db.Column(db.String(255), nullable=True)
embedding_model_provider = db.Column(db.String(255), nullable=True)
collection_binding_id = db.Column(UUID, nullable=True)
retrieval_model = db.Column(JSONB, nullable=True)
def dataset_keyword_table(self):
dataset_keyword_table = db.session.query(DatasetKeywordTable).filter(
DatasetKeywordTable.dataset_id == self.id).first()
if dataset_keyword_table:
return dataset_keyword_table
return None
def index_struct_dict(self):
return json.loads(self.index_struct) if self.index_struct else None
def created_by_account(self):
return Account.query.get(self.created_by)
def latest_process_rule(self):
return DatasetProcessRule.query.filter(DatasetProcessRule.dataset_id == self.id) \
.order_by(DatasetProcessRule.created_at.desc()).first()
def app_count(self):
return db.session.query(func.count(AppDatasetJoin.id)).filter(AppDatasetJoin.dataset_id == self.id).scalar()
def document_count(self):
return db.session.query(func.count(Document.id)).filter(Document.dataset_id == self.id).scalar()
def available_document_count(self):
return db.session.query(func.count(Document.id)).filter(
Document.dataset_id == self.id,
Document.indexing_status == 'completed',
Document.enabled == True,
Document.archived == False
).scalar()
def available_segment_count(self):
return db.session.query(func.count(DocumentSegment.id)).filter(
DocumentSegment.dataset_id == self.id,
DocumentSegment.status == 'completed',
DocumentSegment.enabled == True
).scalar()
def word_count(self):
return Document.query.with_entities(func.coalesce(func.sum(Document.word_count))) \
.filter(Document.dataset_id == self.id).scalar()
def doc_form(self):
document = db.session.query(Document).filter(
Document.dataset_id == self.id).first()
if document:
return document.doc_form
return None
def retrieval_model_dict(self):
default_retrieval_model = {
'search_method': 'semantic_search',
'reranking_enable': False,
'reranking_model': {
'reranking_provider_name': '',
'reranking_model_name': ''
},
'top_k': 2,
'score_threshold_enabled': False
}
return self.retrieval_model if self.retrieval_model else default_retrieval_model
def gen_collection_name_by_id(dataset_id: str) -> str:
normalized_dataset_id = dataset_id.replace("-", "_")
return f'Vector_index_{normalized_dataset_id}_Node'
class App(db.Model):
__tablename__ = 'apps'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='app_pkey'),
db.Index('app_tenant_id_idx', 'tenant_id')
)
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
name = db.Column(db.String(255), nullable=False)
mode = db.Column(db.String(255), nullable=False)
icon = db.Column(db.String(255))
icon_background = db.Column(db.String(255))
app_model_config_id = db.Column(UUID, nullable=True)
status = db.Column(db.String(255), nullable=False, server_default=db.text("'normal'::character varying"))
enable_site = db.Column(db.Boolean, nullable=False)
enable_api = db.Column(db.Boolean, nullable=False)
api_rpm = db.Column(db.Integer, nullable=False)
api_rph = db.Column(db.Integer, nullable=False)
is_demo = db.Column(db.Boolean, nullable=False, server_default=db.text('false'))
is_public = db.Column(db.Boolean, nullable=False, server_default=db.text('false'))
is_universal = db.Column(db.Boolean, nullable=False, server_default=db.text('false'))
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
def site(self):
site = db.session.query(Site).filter(Site.app_id == self.id).first()
return site
def app_model_config(self):
app_model_config = db.session.query(AppModelConfig).filter(
AppModelConfig.id == self.app_model_config_id).first()
return app_model_config
def api_base_url(self):
return (current_app.config['SERVICE_API_URL'] if current_app.config['SERVICE_API_URL']
else request.host_url.rstrip('/')) + '/v1'
def tenant(self):
tenant = db.session.query(Tenant).filter(Tenant.id == self.tenant_id).first()
return tenant
def is_agent(self) -> bool:
app_model_config = self.app_model_config
if not app_model_config:
return False
if not app_model_config.agent_mode:
return False
if self.app_model_config.agent_mode_dict.get('enabled', False) \
and self.app_model_config.agent_mode_dict.get('strategy', '') in ['function_call', 'react']:
return True
return False
def deleted_tools(self) -> list:
# get agent mode tools
app_model_config = self.app_model_config
if not app_model_config:
return []
if not app_model_config.agent_mode:
return []
agent_mode = app_model_config.agent_mode_dict
tools = agent_mode.get('tools', [])
provider_ids = []
for tool in tools:
keys = list(tool.keys())
if len(keys) >= 4:
provider_type = tool.get('provider_type', '')
provider_id = tool.get('provider_id', '')
if provider_type == 'api':
# check if provider id is a uuid string, if not, skip
try:
uuid.UUID(provider_id)
except Exception:
continue
provider_ids.append(provider_id)
if not provider_ids:
return []
api_providers = db.session.execute(
text('SELECT id FROM tool_api_providers WHERE id IN :provider_ids'),
{'provider_ids': tuple(provider_ids)}
).fetchall()
deleted_tools = []
current_api_provider_ids = [str(api_provider.id) for api_provider in api_providers]
for tool in tools:
keys = list(tool.keys())
if len(keys) >= 4:
provider_type = tool.get('provider_type', '')
provider_id = tool.get('provider_id', '')
if provider_type == 'api' and provider_id not in current_api_provider_ids:
deleted_tools.append(tool['tool_name'])
return deleted_tools
class MessageAnnotation(db.Model):
__tablename__ = 'message_annotations'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='message_annotation_pkey'),
db.Index('message_annotation_app_idx', 'app_id'),
db.Index('message_annotation_conversation_idx', 'conversation_id'),
db.Index('message_annotation_message_idx', 'message_id')
)
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
app_id = db.Column(UUID, nullable=False)
conversation_id = db.Column(UUID, db.ForeignKey('conversations.id'), nullable=True)
message_id = db.Column(UUID, nullable=True)
question = db.Column(db.Text, nullable=True)
content = db.Column(db.Text, nullable=False)
hit_count = db.Column(db.Integer, nullable=False, server_default=db.text('0'))
account_id = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
def account(self):
account = db.session.query(Account).filter(Account.id == self.account_id).first()
return account
def annotation_create_account(self):
account = db.session.query(Account).filter(Account.id == self.account_id).first()
return account
class AppAnnotationSetting(db.Model):
__tablename__ = 'app_annotation_settings'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='app_annotation_settings_pkey'),
db.Index('app_annotation_settings_app_idx', 'app_id')
)
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
app_id = db.Column(UUID, nullable=False)
score_threshold = db.Column(Float, nullable=False, server_default=db.text('0'))
collection_binding_id = db.Column(UUID, nullable=False)
created_user_id = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_user_id = db.Column(UUID, nullable=False)
updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
def created_account(self):
account = (db.session.query(Account)
.join(AppAnnotationSetting, AppAnnotationSetting.created_user_id == Account.id)
.filter(AppAnnotationSetting.id == self.id).first())
return account
def updated_account(self):
account = (db.session.query(Account)
.join(AppAnnotationSetting, AppAnnotationSetting.updated_user_id == Account.id)
.filter(AppAnnotationSetting.id == self.id).first())
return account
def collection_binding_detail(self):
from .dataset import DatasetCollectionBinding
collection_binding_detail = (db.session.query(DatasetCollectionBinding)
.filter(DatasetCollectionBinding.id == self.collection_binding_id).first())
return collection_binding_detail
class DatasetCollectionBindingService:
def get_dataset_collection_binding(cls, provider_name: str, model_name: str,
collection_type: str = 'dataset') -> DatasetCollectionBinding:
dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
filter(DatasetCollectionBinding.provider_name == provider_name,
DatasetCollectionBinding.model_name == model_name,
DatasetCollectionBinding.type == collection_type). \
order_by(DatasetCollectionBinding.created_at). \
first()
if not dataset_collection_binding:
dataset_collection_binding = DatasetCollectionBinding(
provider_name=provider_name,
model_name=model_name,
collection_name=Dataset.gen_collection_name_by_id(str(uuid.uuid4())),
type=collection_type
)
db.session.add(dataset_collection_binding)
db.session.commit()
return dataset_collection_binding
def get_dataset_collection_binding_by_id_and_type(cls, collection_binding_id: str,
collection_type: str = 'dataset') -> DatasetCollectionBinding:
dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
filter(DatasetCollectionBinding.id == collection_binding_id,
DatasetCollectionBinding.type == collection_type). \
order_by(DatasetCollectionBinding.created_at). \
first()
return dataset_collection_binding
The provided code snippet includes necessary dependencies for implementing the `batch_import_annotations_task` function. Write a Python function `def batch_import_annotations_task(job_id: str, content_list: list[dict], app_id: str, tenant_id: str, user_id: str)` to solve the following problem:
Add annotation to index. :param job_id: job_id :param content_list: content list :param tenant_id: tenant id :param app_id: app id :param user_id: user_id
Here is the function:
def batch_import_annotations_task(job_id: str, content_list: list[dict], app_id: str, tenant_id: str,
user_id: str):
"""
Add annotation to index.
:param job_id: job_id
:param content_list: content list
:param tenant_id: tenant id
:param app_id: app id
:param user_id: user_id
"""
logging.info(click.style('Start batch importing annotations: {}'.format(job_id), fg='green'))
start_at = time.perf_counter()
indexing_cache_key = 'app_annotation_batch_import_{}'.format(str(job_id))
# get app info
app = db.session.query(App).filter(
App.id == app_id,
App.tenant_id == tenant_id,
App.status == 'normal'
).first()
if app:
try:
documents = []
for content in content_list:
annotation = MessageAnnotation(
app_id=app.id,
content=content['answer'],
question=content['question'],
account_id=user_id
)
db.session.add(annotation)
db.session.flush()
document = Document(
page_content=content['question'],
metadata={
"annotation_id": annotation.id,
"app_id": app_id,
"doc_id": annotation.id
}
)
documents.append(document)
# if annotation reply is enabled, batch-add the annotations' index
app_annotation_setting = db.session.query(AppAnnotationSetting).filter(
AppAnnotationSetting.app_id == app_id
).first()
if app_annotation_setting:
dataset_collection_binding = DatasetCollectionBindingService.get_dataset_collection_binding_by_id_and_type(
app_annotation_setting.collection_binding_id,
'annotation'
)
if not dataset_collection_binding:
raise NotFound("App annotation setting not found")
dataset = Dataset(
id=app_id,
tenant_id=tenant_id,
indexing_technique='high_quality',
embedding_model_provider=dataset_collection_binding.provider_name,
embedding_model=dataset_collection_binding.model_name,
collection_binding_id=dataset_collection_binding.id
)
vector = Vector(dataset, attributes=['doc_id', 'annotation_id', 'app_id'])
vector.create(documents, duplicate_check=True)
db.session.commit()
redis_client.setex(indexing_cache_key, 600, 'completed')
end_at = time.perf_counter()
logging.info(
click.style(
'Built index successfully for batch imported annotations: {} latency: {}'.format(job_id, end_at - start_at),
fg='green'))
except Exception as e:
db.session.rollback()
redis_client.setex(indexing_cache_key, 600, 'error')
indexing_error_msg_key = 'app_annotation_batch_import_error_msg_{}'.format(str(job_id))
redis_client.setex(indexing_error_msg_key, 600, str(e))
logging.exception("Build index for batch import annotations failed") | Add annotation to index. :param job_id: job_id :param content_list: content list :param tenant_id: tenant id :param app_id: app id :param user_id: user_id |
17,041 | import logging
import time
import click
from celery import shared_task
from werkzeug.exceptions import NotFound
from core.rag.datasource.vdb.vector_factory import Vector
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import Dataset
from models.model import App, AppAnnotationSetting
class Vector:
def __init__(self, dataset: Dataset, attributes: list = None):
if attributes is None:
attributes = ['doc_id', 'dataset_id', 'document_id', 'doc_hash']
self._dataset = dataset
self._embeddings = self._get_embeddings()
self._attributes = attributes
self._vector_processor = self._init_vector()
def _init_vector(self) -> BaseVector:
config = current_app.config
vector_type = config.get('VECTOR_STORE')
if self._dataset.index_struct_dict:
vector_type = self._dataset.index_struct_dict['type']
if not vector_type:
raise ValueError("Vector store must be specified.")
if vector_type == "weaviate":
from core.rag.datasource.vdb.weaviate.weaviate_vector import WeaviateConfig, WeaviateVector
if self._dataset.index_struct_dict:
class_prefix: str = self._dataset.index_struct_dict['vector_store']['class_prefix']
collection_name = class_prefix
else:
dataset_id = self._dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": 'weaviate',
"vector_store": {"class_prefix": collection_name}
}
self._dataset.index_struct = json.dumps(index_struct_dict)
return WeaviateVector(
collection_name=collection_name,
config=WeaviateConfig(
endpoint=config.get('WEAVIATE_ENDPOINT'),
api_key=config.get('WEAVIATE_API_KEY'),
batch_size=int(config.get('WEAVIATE_BATCH_SIZE'))
),
attributes=self._attributes
)
elif vector_type == "qdrant":
from core.rag.datasource.vdb.qdrant.qdrant_vector import QdrantConfig, QdrantVector
if self._dataset.collection_binding_id:
dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
filter(DatasetCollectionBinding.id == self._dataset.collection_binding_id). \
one_or_none()
if dataset_collection_binding:
collection_name = dataset_collection_binding.collection_name
else:
raise ValueError('Dataset collection binding does not exist!')
else:
if self._dataset.index_struct_dict:
class_prefix: str = self._dataset.index_struct_dict['vector_store']['class_prefix']
collection_name = class_prefix
else:
dataset_id = self._dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
if not self._dataset.index_struct_dict:
index_struct_dict = {
"type": 'qdrant',
"vector_store": {"class_prefix": collection_name}
}
self._dataset.index_struct = json.dumps(index_struct_dict)
return QdrantVector(
collection_name=collection_name,
group_id=self._dataset.id,
config=QdrantConfig(
endpoint=config.get('QDRANT_URL'),
api_key=config.get('QDRANT_API_KEY'),
root_path=current_app.root_path,
timeout=config.get('QDRANT_CLIENT_TIMEOUT')
)
)
elif vector_type == "milvus":
from core.rag.datasource.vdb.milvus.milvus_vector import MilvusConfig, MilvusVector
if self._dataset.index_struct_dict:
class_prefix: str = self._dataset.index_struct_dict['vector_store']['class_prefix']
collection_name = class_prefix
else:
dataset_id = self._dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": 'milvus',
"vector_store": {"class_prefix": collection_name}
}
self._dataset.index_struct = json.dumps(index_struct_dict)
return MilvusVector(
collection_name=collection_name,
config=MilvusConfig(
host=config.get('MILVUS_HOST'),
port=config.get('MILVUS_PORT'),
user=config.get('MILVUS_USER'),
password=config.get('MILVUS_PASSWORD'),
secure=config.get('MILVUS_SECURE'),
)
)
else:
raise ValueError(f"Vector store {config.get('VECTOR_STORE')} is not supported.")
def create(self, texts: list = None, **kwargs):
if texts:
embeddings = self._embeddings.embed_documents([document.page_content for document in texts])
self._vector_processor.create(
texts=texts,
embeddings=embeddings,
**kwargs
)
def add_texts(self, documents: list[Document], **kwargs):
if kwargs.get('duplicate_check', False):
documents = self._filter_duplicate_texts(documents)
embeddings = self._embeddings.embed_documents([document.page_content for document in documents])
self._vector_processor.add_texts(
documents=documents,
embeddings=embeddings,
**kwargs
)
def text_exists(self, id: str) -> bool:
return self._vector_processor.text_exists(id)
def delete_by_ids(self, ids: list[str]) -> None:
self._vector_processor.delete_by_ids(ids)
def delete_by_metadata_field(self, key: str, value: str) -> None:
self._vector_processor.delete_by_metadata_field(key, value)
def search_by_vector(
self, query: str,
**kwargs: Any
) -> list[Document]:
query_vector = self._embeddings.embed_query(query)
return self._vector_processor.search_by_vector(query_vector, **kwargs)
def search_by_full_text(
self, query: str,
**kwargs: Any
) -> list[Document]:
return self._vector_processor.search_by_full_text(query, **kwargs)
def delete(self) -> None:
self._vector_processor.delete()
def _get_embeddings(self) -> Embeddings:
model_manager = ModelManager()
embedding_model = model_manager.get_model_instance(
tenant_id=self._dataset.tenant_id,
provider=self._dataset.embedding_model_provider,
model_type=ModelType.TEXT_EMBEDDING,
model=self._dataset.embedding_model
)
return CacheEmbedding(embedding_model)
def _filter_duplicate_texts(self, texts: list[Document]) -> list[Document]:
# iterate over a copy: removing items from the list being iterated would skip elements
for text in list(texts):
doc_id = text.metadata['doc_id']
exists_duplicate_node = self.text_exists(doc_id)
if exists_duplicate_node:
texts.remove(text)
return texts
def __getattr__(self, name):
if self._vector_processor is not None:
method = getattr(self._vector_processor, name)
if callable(method):
return method
raise AttributeError(f"'vector_processor' object has no attribute '{name}'")
db = SQLAlchemy()
redis_client = redis.Redis()
class Dataset(db.Model):
__tablename__ = 'datasets'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='dataset_pkey'),
db.Index('dataset_tenant_idx', 'tenant_id'),
db.Index('retrieval_model_idx', "retrieval_model", postgresql_using='gin')
)
INDEXING_TECHNIQUE_LIST = ['high_quality', 'economy', None]
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text, nullable=True)
provider = db.Column(db.String(255), nullable=False,
server_default=db.text("'vendor'::character varying"))
permission = db.Column(db.String(255), nullable=False,
server_default=db.text("'only_me'::character varying"))
data_source_type = db.Column(db.String(255))
indexing_technique = db.Column(db.String(255), nullable=True)
index_struct = db.Column(db.Text, nullable=True)
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
embedding_model = db.Column(db.String(255), nullable=True)
embedding_model_provider = db.Column(db.String(255), nullable=True)
collection_binding_id = db.Column(UUID, nullable=True)
retrieval_model = db.Column(JSONB, nullable=True)
def dataset_keyword_table(self):
dataset_keyword_table = db.session.query(DatasetKeywordTable).filter(
DatasetKeywordTable.dataset_id == self.id).first()
if dataset_keyword_table:
return dataset_keyword_table
return None
def index_struct_dict(self):
return json.loads(self.index_struct) if self.index_struct else None
def created_by_account(self):
return Account.query.get(self.created_by)
def latest_process_rule(self):
return DatasetProcessRule.query.filter(DatasetProcessRule.dataset_id == self.id) \
.order_by(DatasetProcessRule.created_at.desc()).first()
def app_count(self):
return db.session.query(func.count(AppDatasetJoin.id)).filter(AppDatasetJoin.dataset_id == self.id).scalar()
def document_count(self):
return db.session.query(func.count(Document.id)).filter(Document.dataset_id == self.id).scalar()
def available_document_count(self):
return db.session.query(func.count(Document.id)).filter(
Document.dataset_id == self.id,
Document.indexing_status == 'completed',
Document.enabled == True,
Document.archived == False
).scalar()
def available_segment_count(self):
return db.session.query(func.count(DocumentSegment.id)).filter(
DocumentSegment.dataset_id == self.id,
DocumentSegment.status == 'completed',
DocumentSegment.enabled == True
).scalar()
def word_count(self):
return Document.query.with_entities(func.coalesce(func.sum(Document.word_count))) \
.filter(Document.dataset_id == self.id).scalar()
def doc_form(self):
document = db.session.query(Document).filter(
Document.dataset_id == self.id).first()
if document:
return document.doc_form
return None
def retrieval_model_dict(self):
default_retrieval_model = {
'search_method': 'semantic_search',
'reranking_enable': False,
'reranking_model': {
'reranking_provider_name': '',
'reranking_model_name': ''
},
'top_k': 2,
'score_threshold_enabled': False
}
return self.retrieval_model if self.retrieval_model else default_retrieval_model
def gen_collection_name_by_id(dataset_id: str) -> str:
normalized_dataset_id = dataset_id.replace("-", "_")
return f'Vector_index_{normalized_dataset_id}_Node'
class App(db.Model):
__tablename__ = 'apps'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='app_pkey'),
db.Index('app_tenant_id_idx', 'tenant_id')
)
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
name = db.Column(db.String(255), nullable=False)
mode = db.Column(db.String(255), nullable=False)
icon = db.Column(db.String(255))
icon_background = db.Column(db.String(255))
app_model_config_id = db.Column(UUID, nullable=True)
status = db.Column(db.String(255), nullable=False, server_default=db.text("'normal'::character varying"))
enable_site = db.Column(db.Boolean, nullable=False)
enable_api = db.Column(db.Boolean, nullable=False)
api_rpm = db.Column(db.Integer, nullable=False)
api_rph = db.Column(db.Integer, nullable=False)
is_demo = db.Column(db.Boolean, nullable=False, server_default=db.text('false'))
is_public = db.Column(db.Boolean, nullable=False, server_default=db.text('false'))
is_universal = db.Column(db.Boolean, nullable=False, server_default=db.text('false'))
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
def site(self):
site = db.session.query(Site).filter(Site.app_id == self.id).first()
return site
def app_model_config(self):
app_model_config = db.session.query(AppModelConfig).filter(
AppModelConfig.id == self.app_model_config_id).first()
return app_model_config
def api_base_url(self):
return (current_app.config['SERVICE_API_URL'] if current_app.config['SERVICE_API_URL']
else request.host_url.rstrip('/')) + '/v1'
def tenant(self):
tenant = db.session.query(Tenant).filter(Tenant.id == self.tenant_id).first()
return tenant
def is_agent(self) -> bool:
app_model_config = self.app_model_config
if not app_model_config:
return False
if not app_model_config.agent_mode:
return False
if self.app_model_config.agent_mode_dict.get('enabled', False) \
and self.app_model_config.agent_mode_dict.get('strategy', '') in ['function_call', 'react']:
return True
return False
def deleted_tools(self) -> list:
# get agent mode tools
app_model_config = self.app_model_config
if not app_model_config:
return []
if not app_model_config.agent_mode:
return []
agent_mode = app_model_config.agent_mode_dict
tools = agent_mode.get('tools', [])
provider_ids = []
for tool in tools:
keys = list(tool.keys())
if len(keys) >= 4:
provider_type = tool.get('provider_type', '')
provider_id = tool.get('provider_id', '')
if provider_type == 'api':
# check if provider id is a uuid string, if not, skip
try:
uuid.UUID(provider_id)
except Exception:
continue
provider_ids.append(provider_id)
if not provider_ids:
return []
api_providers = db.session.execute(
text('SELECT id FROM tool_api_providers WHERE id IN :provider_ids'),
{'provider_ids': tuple(provider_ids)}
).fetchall()
deleted_tools = []
current_api_provider_ids = [str(api_provider.id) for api_provider in api_providers]
for tool in tools:
keys = list(tool.keys())
if len(keys) >= 4:
provider_type = tool.get('provider_type', '')
provider_id = tool.get('provider_id', '')
if provider_type == 'api' and provider_id not in current_api_provider_ids:
deleted_tools.append(tool['tool_name'])
return deleted_tools
class AppAnnotationSetting(db.Model):
__tablename__ = 'app_annotation_settings'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='app_annotation_settings_pkey'),
db.Index('app_annotation_settings_app_idx', 'app_id')
)
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
app_id = db.Column(UUID, nullable=False)
score_threshold = db.Column(Float, nullable=False, server_default=db.text('0'))
collection_binding_id = db.Column(UUID, nullable=False)
created_user_id = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_user_id = db.Column(UUID, nullable=False)
updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
def created_account(self):
account = (db.session.query(Account)
.join(AppAnnotationSetting, AppAnnotationSetting.created_user_id == Account.id)
.filter(AppAnnotationSetting.id == self.id).first())
return account
def updated_account(self):
account = (db.session.query(Account)
.join(AppAnnotationSetting, AppAnnotationSetting.updated_user_id == Account.id)
.filter(AppAnnotationSetting.id == self.id).first())
return account
def collection_binding_detail(self):
from .dataset import DatasetCollectionBinding
collection_binding_detail = (db.session.query(DatasetCollectionBinding)
.filter(DatasetCollectionBinding.id == self.collection_binding_id).first())
return collection_binding_detail
The provided code snippet includes the necessary dependencies for implementing the `disable_annotation_reply_task` function. Write a Python function `def disable_annotation_reply_task(job_id: str, app_id: str, tenant_id: str)` to solve the following problem:
Async disable annotation reply task
Here is the function:
def disable_annotation_reply_task(job_id: str, app_id: str, tenant_id: str):
"""
Async disable annotation reply task
"""
logging.info(click.style('Start delete app annotations index: {}'.format(app_id), fg='green'))
start_at = time.perf_counter()
# get app info
app = db.session.query(App).filter(
App.id == app_id,
App.tenant_id == tenant_id,
App.status == 'normal'
).first()
if not app:
raise NotFound("App not found")
app_annotation_setting = db.session.query(AppAnnotationSetting).filter(
AppAnnotationSetting.app_id == app_id
).first()
if not app_annotation_setting:
raise NotFound("App annotation setting not found")
disable_app_annotation_key = 'disable_app_annotation_{}'.format(str(app_id))
disable_app_annotation_job_key = 'disable_app_annotation_job_{}'.format(str(job_id))
try:
dataset = Dataset(
id=app_id,
tenant_id=tenant_id,
indexing_technique='high_quality',
collection_binding_id=app_annotation_setting.collection_binding_id
)
try:
vector = Vector(dataset, attributes=['doc_id', 'annotation_id', 'app_id'])
vector.delete_by_metadata_field('app_id', app_id)
except Exception:
logging.exception("Delete annotation index failed when annotation deleted.")
redis_client.setex(disable_app_annotation_job_key, 600, 'completed')
# delete annotation setting
db.session.delete(app_annotation_setting)
db.session.commit()
end_at = time.perf_counter()
logging.info(
click.style('App annotations index deleted : {} latency: {}'.format(app_id, end_at - start_at),
fg='green'))
except Exception as e:
logging.exception("Annotation batch deleted index failed:{}".format(str(e)))
redis_client.setex(disable_app_annotation_job_key, 600, 'error')
disable_app_annotation_error_key = 'disable_app_annotation_error_{}'.format(str(job_id))
redis_client.setex(disable_app_annotation_error_key, 600, str(e))
finally:
redis_client.delete(disable_app_annotation_key) | Async disable annotation reply task |
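A minimal caller-side sketch (not part of the original source) showing how the Redis status keys written by this task could be polled; `redis_client` and the key naming come from the task above, while the polling loop itself is an assumption:
import time

def wait_for_disable_annotation_job(redis_client, job_id: str, timeout: int = 600) -> None:
    # the task stores 'completed' or 'error' under this key with a 600s TTL
    job_key = 'disable_app_annotation_job_{}'.format(job_id)
    error_key = 'disable_app_annotation_error_{}'.format(job_id)
    deadline = time.time() + timeout
    while time.time() < deadline:
        status = redis_client.get(job_key)
        if status == b'completed':
            return
        if status == b'error':
            raise RuntimeError(redis_client.get(error_key))
        time.sleep(1)
    raise TimeoutError('annotation disable job {} timed out'.format(job_id))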
17,042 | import datetime
import time
import click
from flask import current_app
from werkzeug.exceptions import NotFound
import app
from extensions.ext_database import db
from models.dataset import Embedding
db = SQLAlchemy()
class Embedding(db.Model):
def set_embedding(self, embedding_data: list[float]): ...  # implementation elided
def get_embedding(self) -> list[float]: ...  # implementation elided
def clean_embedding_cache_task():
click.echo(click.style('Start clean embedding cache.', fg='green'))
clean_days = int(current_app.config.get('CLEAN_DAY_SETTING'))
start_at = time.perf_counter()
thirty_days_ago = datetime.datetime.now() - datetime.timedelta(days=clean_days)
page = 1
while True:
try:
embeddings = db.session.query(Embedding).filter(Embedding.created_at < thirty_days_ago) \
.order_by(Embedding.created_at.desc()).paginate(page=page, per_page=100)
except NotFound:
break
for embedding in embeddings:
db.session.delete(embedding)
db.session.commit()
page += 1
end_at = time.perf_counter()
click.echo(click.style('Cleaned embedding cache from db success latency: {}'.format(end_at - start_at), fg='green')) | null |
17,043 | import datetime
import time
import click
from flask import current_app
from werkzeug.exceptions import NotFound
import app
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from extensions.ext_database import db
from models.dataset import Dataset, DatasetQuery, Document
class IndexProcessorFactory:
"""IndexProcessorInit.
"""
def __init__(self, index_type: str):
self._index_type = index_type
def init_index_processor(self) -> BaseIndexProcessor:
"""Init index processor."""
if not self._index_type:
raise ValueError("Index type must be specified.")
if self._index_type == IndexType.PARAGRAPH_INDEX.value:
return ParagraphIndexProcessor()
elif self._index_type == IndexType.QA_INDEX.value:
return QAIndexProcessor()
else:
raise ValueError(f"Index type {self._index_type} is not supported.")
db = SQLAlchemy()
class Dataset(db.Model):
__tablename__ = 'datasets'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='dataset_pkey'),
db.Index('dataset_tenant_idx', 'tenant_id'),
db.Index('retrieval_model_idx', "retrieval_model", postgresql_using='gin')
)
INDEXING_TECHNIQUE_LIST = ['high_quality', 'economy', None]
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
name = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text, nullable=True)
provider = db.Column(db.String(255), nullable=False,
server_default=db.text("'vendor'::character varying"))
permission = db.Column(db.String(255), nullable=False,
server_default=db.text("'only_me'::character varying"))
data_source_type = db.Column(db.String(255))
indexing_technique = db.Column(db.String(255), nullable=True)
index_struct = db.Column(db.Text, nullable=True)
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_by = db.Column(UUID, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
embedding_model = db.Column(db.String(255), nullable=True)
embedding_model_provider = db.Column(db.String(255), nullable=True)
collection_binding_id = db.Column(UUID, nullable=True)
retrieval_model = db.Column(JSONB, nullable=True)
def dataset_keyword_table(self):
dataset_keyword_table = db.session.query(DatasetKeywordTable).filter(
DatasetKeywordTable.dataset_id == self.id).first()
if dataset_keyword_table:
return dataset_keyword_table
return None
def index_struct_dict(self):
return json.loads(self.index_struct) if self.index_struct else None
def created_by_account(self):
return Account.query.get(self.created_by)
def latest_process_rule(self):
return DatasetProcessRule.query.filter(DatasetProcessRule.dataset_id == self.id) \
.order_by(DatasetProcessRule.created_at.desc()).first()
def app_count(self):
return db.session.query(func.count(AppDatasetJoin.id)).filter(AppDatasetJoin.dataset_id == self.id).scalar()
def document_count(self):
return db.session.query(func.count(Document.id)).filter(Document.dataset_id == self.id).scalar()
def available_document_count(self):
return db.session.query(func.count(Document.id)).filter(
Document.dataset_id == self.id,
Document.indexing_status == 'completed',
Document.enabled == True,
Document.archived == False
).scalar()
def available_segment_count(self):
return db.session.query(func.count(DocumentSegment.id)).filter(
DocumentSegment.dataset_id == self.id,
DocumentSegment.status == 'completed',
DocumentSegment.enabled == True
).scalar()
def word_count(self):
return Document.query.with_entities(func.coalesce(func.sum(Document.word_count))) \
.filter(Document.dataset_id == self.id).scalar()
def doc_form(self):
document = db.session.query(Document).filter(
Document.dataset_id == self.id).first()
if document:
return document.doc_form
return None
def retrieval_model_dict(self):
default_retrieval_model = {
'search_method': 'semantic_search',
'reranking_enable': False,
'reranking_model': {
'reranking_provider_name': '',
'reranking_model_name': ''
},
'top_k': 2,
'score_threshold_enabled': False
}
return self.retrieval_model if self.retrieval_model else default_retrieval_model
def gen_collection_name_by_id(dataset_id: str) -> str:
normalized_dataset_id = dataset_id.replace("-", "_")
return f'Vector_index_{normalized_dataset_id}_Node'
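# e.g. gen_collection_name_by_id('1a2b-3c4d') -> 'Vector_index_1a2b_3c4d_Node'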
class Document(db.Model):
__tablename__ = 'documents'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='document_pkey'),
db.Index('document_dataset_id_idx', 'dataset_id'),
db.Index('document_is_paused_idx', 'is_paused'),
)
# initial fields
id = db.Column(UUID, nullable=False,
server_default=db.text('uuid_generate_v4()'))
tenant_id = db.Column(UUID, nullable=False)
dataset_id = db.Column(UUID, nullable=False)
position = db.Column(db.Integer, nullable=False)
data_source_type = db.Column(db.String(255), nullable=False)
data_source_info = db.Column(db.Text, nullable=True)
dataset_process_rule_id = db.Column(UUID, nullable=True)
batch = db.Column(db.String(255), nullable=False)
name = db.Column(db.String(255), nullable=False)
created_from = db.Column(db.String(255), nullable=False)
created_by = db.Column(UUID, nullable=False)
created_api_request_id = db.Column(UUID, nullable=True)
created_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
# start processing
processing_started_at = db.Column(db.DateTime, nullable=True)
# parsing
file_id = db.Column(db.Text, nullable=True)
word_count = db.Column(db.Integer, nullable=True)
parsing_completed_at = db.Column(db.DateTime, nullable=True)
# cleaning
cleaning_completed_at = db.Column(db.DateTime, nullable=True)
# split
splitting_completed_at = db.Column(db.DateTime, nullable=True)
# indexing
tokens = db.Column(db.Integer, nullable=True)
indexing_latency = db.Column(db.Float, nullable=True)
completed_at = db.Column(db.DateTime, nullable=True)
# pause
is_paused = db.Column(db.Boolean, nullable=True, server_default=db.text('false'))
paused_by = db.Column(UUID, nullable=True)
paused_at = db.Column(db.DateTime, nullable=True)
# error
error = db.Column(db.Text, nullable=True)
stopped_at = db.Column(db.DateTime, nullable=True)
# basic fields
indexing_status = db.Column(db.String(
255), nullable=False, server_default=db.text("'waiting'::character varying"))
enabled = db.Column(db.Boolean, nullable=False,
server_default=db.text('true'))
disabled_at = db.Column(db.DateTime, nullable=True)
disabled_by = db.Column(UUID, nullable=True)
archived = db.Column(db.Boolean, nullable=False,
server_default=db.text('false'))
archived_reason = db.Column(db.String(255), nullable=True)
archived_by = db.Column(UUID, nullable=True)
archived_at = db.Column(db.DateTime, nullable=True)
updated_at = db.Column(db.DateTime, nullable=False,
server_default=db.text('CURRENT_TIMESTAMP(0)'))
doc_type = db.Column(db.String(40), nullable=True)
doc_metadata = db.Column(db.JSON, nullable=True)
doc_form = db.Column(db.String(
255), nullable=False, server_default=db.text("'text_model'::character varying"))
doc_language = db.Column(db.String(255), nullable=True)
DATA_SOURCES = ['upload_file', 'notion_import']
def display_status(self):
status = None
if self.indexing_status == 'waiting':
status = 'queuing'
elif self.indexing_status not in ['completed', 'error', 'waiting'] and self.is_paused:
status = 'paused'
elif self.indexing_status in ['parsing', 'cleaning', 'splitting', 'indexing']:
status = 'indexing'
elif self.indexing_status == 'error':
status = 'error'
elif self.indexing_status == 'completed' and not self.archived and self.enabled:
status = 'available'
elif self.indexing_status == 'completed' and not self.archived and not self.enabled:
status = 'disabled'
elif self.indexing_status == 'completed' and self.archived:
status = 'archived'
return status
def data_source_info_dict(self):
if self.data_source_info:
try:
data_source_info_dict = json.loads(self.data_source_info)
except JSONDecodeError:
data_source_info_dict = {}
return data_source_info_dict
return None
def data_source_detail_dict(self):
if self.data_source_info:
if self.data_source_type == 'upload_file':
data_source_info_dict = json.loads(self.data_source_info)
file_detail = db.session.query(UploadFile). \
filter(UploadFile.id == data_source_info_dict['upload_file_id']). \
one_or_none()
if file_detail:
return {
'upload_file': {
'id': file_detail.id,
'name': file_detail.name,
'size': file_detail.size,
'extension': file_detail.extension,
'mime_type': file_detail.mime_type,
'created_by': file_detail.created_by,
'created_at': file_detail.created_at.timestamp()
}
}
elif self.data_source_type == 'notion_import':
return json.loads(self.data_source_info)
return {}
def average_segment_length(self):
if self.word_count and self.segment_count:
return self.word_count // self.segment_count
return 0
def dataset_process_rule(self):
if self.dataset_process_rule_id:
return DatasetProcessRule.query.get(self.dataset_process_rule_id)
return None
def dataset(self):
return db.session.query(Dataset).filter(Dataset.id == self.dataset_id).one_or_none()
def segment_count(self):
return DocumentSegment.query.filter(DocumentSegment.document_id == self.id).count()
def hit_count(self):
return DocumentSegment.query.with_entities(func.coalesce(func.sum(DocumentSegment.hit_count))) \
.filter(DocumentSegment.document_id == self.id).scalar()
class DatasetQuery(db.Model):
__tablename__ = 'dataset_queries'
__table_args__ = (
db.PrimaryKeyConstraint('id', name='dataset_query_pkey'),
db.Index('dataset_query_dataset_id_idx', 'dataset_id'),
)
id = db.Column(UUID, primary_key=True, nullable=False, server_default=db.text('uuid_generate_v4()'))
dataset_id = db.Column(UUID, nullable=False)
content = db.Column(db.Text, nullable=False)
source = db.Column(db.String(255), nullable=False)
source_app_id = db.Column(UUID, nullable=True)
created_by_role = db.Column(db.String, nullable=False)
created_by = db.Column(UUID, nullable=False)
created_at = db.Column(db.DateTime, nullable=False, server_default=db.func.current_timestamp())
def clean_unused_datasets_task():
click.echo(click.style('Start clean unused datasets indexes.', fg='green'))
clean_days = int(current_app.config.get('CLEAN_DAY_SETTING'))
start_at = time.perf_counter()
thirty_days_ago = datetime.datetime.now() - datetime.timedelta(days=clean_days)
page = 1
while True:
try:
datasets = db.session.query(Dataset).filter(Dataset.created_at < thirty_days_ago) \
.order_by(Dataset.created_at.desc()).paginate(page=page, per_page=50)
except NotFound:
break
page += 1
for dataset in datasets:
dataset_query = db.session.query(DatasetQuery).filter(
DatasetQuery.created_at > thirty_days_ago,
DatasetQuery.dataset_id == dataset.id
).all()
if not dataset_query:
documents = db.session.query(Document).filter(
Document.dataset_id == dataset.id,
Document.indexing_status == 'completed',
Document.enabled == True,
Document.archived == False,
Document.updated_at > thirty_days_ago
).all()
if not documents:
try:
# remove index
index_processor = IndexProcessorFactory(dataset.doc_form).init_index_processor()
index_processor.clean(dataset, None)
# update document
update_params = {
Document.enabled: False
}
Document.query.filter_by(dataset_id=dataset.id).update(update_params)
db.session.commit()
click.echo(click.style('Cleaned unused dataset {} from db success!'.format(dataset.id),
fg='green'))
except Exception as e:
click.echo(
click.style('clean dataset index error: {} {}'.format(e.__class__.__name__, str(e)),
fg='red'))
end_at = time.perf_counter()
click.echo(click.style('Cleaned unused dataset from db success latency: {}'.format(end_at - start_at), fg='green')) | null |
17,044 | import nbformat
from nbformat import v4 as nbf
import ansi2html
import os
import argparse
nb = nbf.new_notebook()
def write_to_notebook():
if args.notebook:
with open(notebook_path, 'w', encoding='utf-8') as f:
nbformat.write(nb, f)
def add_code_cell_to_notebook(code):
code_cell = nbf.new_code_cell(source=code)
nb['cells'].append(code_cell)
write_to_notebook() | null |
17,045 | import nbformat
from nbformat import v4 as nbf
import ansi2html
import os
import argparse
nb = nbf.new_notebook()
def write_to_notebook():
if args.notebook:
with open(notebook_path, 'w', encoding='utf-8') as f:
nbformat.write(nb, f)
def add_markdown_to_notebook(content, title=None):
if title:
content = "##### " + title + ":\n" + content
markdown_cell = nbf.new_markdown_cell(content)
nb['cells'].append(markdown_cell)
write_to_notebook() | null |
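A minimal, self-contained sketch of the nbformat calls these helpers wrap (the file name is illustrative):
import nbformat
from nbformat import v4 as nbf

nb = nbf.new_notebook()
nb['cells'].append(nbf.new_markdown_cell('##### Demo:\nhello'))
nb['cells'].append(nbf.new_code_cell(source='print("hi")'))
with open('demo.ipynb', 'w', encoding='utf-8') as f:
    nbformat.write(nb, f)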
17,046 | from response_parser import *
import copy
import json
from tqdm import tqdm
import logging
import argparse
import os
def initialization(state_dict: Dict) -> None:
if not os.path.exists('cache'):
os.mkdir('cache')
if state_dict["bot_backend"] is None:
state_dict["bot_backend"] = BotBackend()
if 'OPENAI_API_KEY' in os.environ:
del os.environ['OPENAI_API_KEY'] | null |
17,047 | from response_parser import *
import copy
import json
from tqdm import tqdm
import logging
import argparse
import os
def get_bot_backend(state_dict: Dict) -> BotBackend:
return state_dict["bot_backend"]
def switch_to_gpt4(state_dict: Dict, whether_switch: bool) -> None:
bot_backend = get_bot_backend(state_dict)
if whether_switch:
bot_backend.update_gpt_model_choice("GPT-4")
else:
bot_backend.update_gpt_model_choice("GPT-3.5") | null |
17,048 | from response_parser import *
import copy
import json
from tqdm import tqdm
import logging
import argparse
import os
def get_bot_backend(state_dict: Dict) -> BotBackend:
return state_dict["bot_backend"]
def add_text(state_dict, history, text):
bot_backend = get_bot_backend(state_dict)
bot_backend.add_text_message(user_text=text)
history = history + [[text, None]]
return history, state_dict | null |
17,049 | from response_parser import *
import copy
import json
from tqdm import tqdm
import logging
import argparse
import os
def get_bot_backend(state_dict: Dict) -> BotBackend:
return state_dict["bot_backend"]
def bot(state_dict, history):
bot_backend = get_bot_backend(state_dict)
while bot_backend.finish_reason in ('new_input', 'function_call'):
if history[-1][1]:
history.append([None, ""])
else:
history[-1][1] = ""
logging.info("Start chat completion")
response = chat_completion(bot_backend=bot_backend)
logging.info(f"End chat completion, response: {response}")
logging.info("Start parse response")
history, _ = parse_response(
chunk=response,
history=history,
bot_backend=bot_backend
)
logging.info("End parse response")
return history | null |
17,050 | import json
import copy
import shutil
from jupyter_backend import *
from tools import *
from typing import *
from notebook_serializer import add_markdown_to_notebook, add_code_cell_to_notebook
if not config['API_KEY']:
config['API_KEY'] = os.getenv('OPENAI_API_KEY')
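# note: os.unsetenv() does not update the os.environ mapping itself;
# `del os.environ['OPENAI_API_KEY']` would remove the key from both places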
os.unsetenv('OPENAI_API_KEY')
def get_config():
return config | null |
17,051 | import json
import copy
import shutil
from jupyter_backend import *
from tools import *
from typing import *
from notebook_serializer import add_markdown_to_notebook, add_code_cell_to_notebook
def config_openai_api(api_type, api_base, api_version, api_key):
openai.api_type = api_type
openai.api_base = api_base
openai.api_version = api_version
openai.api_key = api_key | null |
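A hypothetical call for an Azure deployment using the legacy openai<1.0 module-level settings shown above (all values are placeholders, not from the original):
config_openai_api(
    api_type='azure',
    api_base='https://example-resource.openai.azure.com',
    api_version='2023-07-01-preview',
    api_key='<your-key>'
)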
17,052 | import openai
import base64
import os
import io
import time
from PIL import Image
from abc import ABCMeta, abstractmethod
def create_vision_chat_completion(vision_model, base64_image, prompt):
try:
response = openai.ChatCompletion.create(
model=vision_model,
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": prompt},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{base64_image}",
},
},
],
}
],
max_tokens=1000,
)
return response.choices[0].message.content
except Exception:
return None
def image_to_base64(path):
try:
_, suffix = os.path.splitext(path)
if suffix not in {'.jpg', '.jpeg', '.png', '.webp'}:
img = Image.open(path)
img_png = img.convert('RGB')
img_png.tobytes()
byte_buffer = io.BytesIO()
img_png.save(byte_buffer, 'PNG')
encoded_string = base64.b64encode(byte_buffer.getvalue()).decode('utf-8')
else:
with open(path, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read()).decode('utf-8')
return encoded_string
except Exception:
return None
def inquire_image(work_dir, vision_model, path, prompt):
image_base64 = image_to_base64(f'{work_dir}/{path}')
hypertext_to_display = None
if image_base64 is None:
return "Error: Image transform error", None
else:
response = create_vision_chat_completion(vision_model, image_base64, prompt)
if response is None:
return "Model response error", None
else:
return response, hypertext_to_display | null |
17,053 | import openai
import base64
import os
import io
import time
from PIL import Image
from abc import ABCMeta, abstractmethod
def create_image(prompt):
try:
response = openai.Image.create(
model="dall-e-3",
prompt=prompt,
response_format="b64_json"
)
return response.data[0]['b64_json']
except Exception:
return None
def base64_to_image_bytes(image_base64):
try:
return base64.b64decode(image_base64)
except Exception:
return None
def dalle(unique_id, prompt):
img_base64 = create_image(prompt)
text_to_gpt = "Image has been successfully generated and displayed to user."
if img_base64 is None:
return "Error: Model response error", None
img_bytes = base64_to_image_bytes(img_base64)
if img_bytes is None:
return "Error: Image transform error", None
temp_path = f'cache/temp_{unique_id}'
if not os.path.exists(temp_path):
os.mkdir(temp_path)
path = f'{temp_path}/{hash(time.time())}.png'
with open(path, 'wb') as f:
f.write(img_bytes)
hypertext_to_display = f'<img src=\"file={path}\" width="50%" style=\'max-width:none; max-height:none\'>'
return text_to_gpt, hypertext_to_display | null |
17,054 | import openai
import base64
import os
import io
import time
from PIL import Image
from abc import ABCMeta, abstractmethod
class ImageInquireTool(Tool):
def support(self):
return self.config['model']['GPT-4V']['available']
def get_tool_data(self):
return {
"tool_name": "inquire_image",
"tool": inquire_image,
"system_prompt": "If necessary, utilize the 'inquire_image' tool to query an AI model regarding the "
"content of images uploaded by users. Avoid phrases like\"based on the analysis\"; "
"instead, respond as if you viewed the image by yourself. Keep in mind that not every"
"tasks related to images require knowledge of the image content, such as converting "
"an image format or extracting image file attributes, which should use `execute_code` "
"tool instead. Use the tool only when understanding the image content is necessary.",
"tool_description": {
"name": "inquire_image",
"description": "This function enables you to inquire with an AI model about the contents of an image "
"and receive the model's response.",
"parameters": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "File path of the image"
},
"prompt": {
"type": "string",
"description": "The question you want to pose to the AI model about the image"
}
},
"required": ["path", "prompt"]
}
},
"additional_parameters": {
"work_dir": lambda bot_backend: bot_backend.jupyter_work_dir,
"vision_model": self.config['model']['GPT-4V']['model_name']
}
}
def get_available_tools(config):
tools = [ImageInquireTool]
available_tools = []
for tool in tools:
tool_instance = tool(config)
if tool_instance.support():
available_tools.append(tool_instance.get_tool_data())
return available_tools | null |
17,055 | from bot_backend import *
import base64
import time
import tiktoken
from notebook_serializer import add_code_cell_error_to_notebook, add_image_to_notebook, add_code_cell_output_to_notebook
def get_conversation_slice(conversation, model, encoding_for_which_model, min_output_tokens_count=500): ...  # implementation elided
def chat_completion(bot_backend: BotBackend):
model_choice = bot_backend.gpt_model_choice
model_name = bot_backend.config['model'][model_choice]['model_name']
kwargs_for_chat_completion = copy.deepcopy(bot_backend.kwargs_for_chat_completion)
if bot_backend.config['API_TYPE'] == "azure":
kwargs_for_chat_completion['messages'], nb_tokens, sliced = \
get_conversation_slice(
conversation=kwargs_for_chat_completion['messages'],
model=model_name,
encoding_for_which_model='gpt-3.5-turbo' if model_choice == 'GPT-3.5' else 'gpt-4'
)
else:
kwargs_for_chat_completion['messages'], nb_tokens, sliced = \
get_conversation_slice(
conversation=kwargs_for_chat_completion['messages'],
model=model_name,
encoding_for_which_model=model_name
)
bot_backend.update_token_count(num_tokens=nb_tokens)
bot_backend.update_sliced_state(sliced=sliced)
assert config['model'][model_choice]['available'], f"{model_choice} is not available for your API key"
assert model_name in config['model_context_window'], \
f"{model_name} lacks context window information. Please check the config.json file."
response = openai.ChatCompletion.create(**kwargs_for_chat_completion)
return response | null |
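The body of `get_conversation_slice` is elided in this row; a minimal sketch of its token-counting side, assuming messages are dicts with a 'content' field and using tiktoken's public API:
import tiktoken

def count_conversation_tokens(messages, model_name='gpt-3.5-turbo'):
    enc = tiktoken.encoding_for_model(model_name)
    # rough count: sums message contents only, ignoring per-message overhead tokens
    return sum(len(enc.encode(m.get('content') or '')) for m in messages)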
17,056 | from bot_backend import *
import base64
import time
import tiktoken
from notebook_serializer import add_code_cell_error_to_notebook, add_image_to_notebook, add_code_cell_output_to_notebook
def get_image_size(image_path):
with Image.open(image_path) as img:
width, height = img.size
return width, height
def add_code_cell_output_to_notebook(output):
html_content = ansi_to_html(output)
cell_output = nbf.new_output(output_type='display_data', data={'text/html': html_content})
nb['cells'][-1]['outputs'].append(cell_output)
write_to_notebook()
def add_code_cell_error_to_notebook(error):
nbf_error_output = nbf.new_output(
output_type='error',
ename='Error',
evalue='Error message',
traceback=[error]
)
nb['cells'][-1]['outputs'].append(nbf_error_output)
write_to_notebook()
def add_image_to_notebook(image, mime_type):
image_output = nbf.new_output(output_type='display_data', data={mime_type: image})
nb['cells'][-1]['outputs'].append(image_output)
write_to_notebook()
def add_code_execution_result_to_bot_history(content_to_display, history, unique_id):
images, text = [], []
# terminal output
error_occurred = False
for mark, out_str in content_to_display:
if mark in ('stdout', 'execute_result_text', 'display_text'):
text.append(out_str)
add_code_cell_output_to_notebook(out_str)
elif mark in ('execute_result_png', 'execute_result_jpeg', 'display_png', 'display_jpeg'):
if 'png' in mark:
images.append(('png', out_str))
add_image_to_notebook(out_str, 'image/png')
else:
add_image_to_notebook(out_str, 'image/jpeg')
images.append(('jpg', out_str))
elif mark == 'error':
# Set output type to error
text.append(delete_color_control_char(out_str))
error_occurred = True
add_code_cell_error_to_notebook(out_str)
text = '\n'.join(text).strip('\n')
if error_occurred:
history.append([None, f'❌Terminal output:\n```shell\n\n{text}\n```'])
else:
history.append([None, f'✔️Terminal output:\n```shell\n{text}\n```'])
# image output
for filetype, img in images:
image_bytes = base64.b64decode(img)
temp_path = f'cache/temp_{unique_id}'
if not os.path.exists(temp_path):
os.mkdir(temp_path)
path = f'{temp_path}/{hash(time.time())}.{filetype}'
with open(path, 'wb') as f:
f.write(image_bytes)
width, height = get_image_size(path)
history.append(
[
None,
f'<img src=\"file={path}\" style=\'{"" if width < 800 else "width: 800px;"} max-width:none; '
f'max-height:none\'> '
]
) | null |
17,057 | from bot_backend import *
import base64
import time
import tiktoken
from notebook_serializer import add_code_cell_error_to_notebook, add_image_to_notebook, add_code_cell_output_to_notebook
def add_function_response_to_bot_history(hypertext_to_display, history):
if hypertext_to_display is not None:
if history[-1][1]:
history.append([None, hypertext_to_display])
else:
history[-1][1] = hypertext_to_display | null |
17,058 | from bot_backend import *
import base64
import time
import tiktoken
from notebook_serializer import add_code_cell_error_to_notebook, add_image_to_notebook, add_code_cell_output_to_notebook
The provided code snippet includes the necessary dependencies for implementing the `parse_json` function. Write a Python function `def parse_json(function_args: str, finished: bool)` to solve the following problem:
GPT may generate a non-standard JSON string that contains raw '\n' characters inside a string value, causing `json.loads()` to fail. Here we implement a parser that extracts the code directly from such non-standard JSON. :return: the code string if successfully parsed, otherwise None
Here is the function:
def parse_json(function_args: str, finished: bool):
"""
GPT may generate a non-standard JSON string that contains raw '\n' characters inside a string
value, causing `json.loads()` to fail.
Here we implement a parser that extracts the code directly from such non-standard JSON.
:return: the code string if successfully parsed, otherwise None
"""
parser_log = {
'met_begin_{': False,
'begin_"code"': False,
'end_"code"': False,
'met_:': False,
'met_end_}': False,
'met_end_code_"': False,
"code_begin_index": 0,
"code_end_index": 0
}
try:
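# forward scan: find the index just past the opening quote of the "code" value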
for index, char in enumerate(function_args):
if char == '{':
parser_log['met_begin_{'] = True
elif parser_log['met_begin_{'] and char == '"':
if parser_log['met_:']:
if finished:
parser_log['code_begin_index'] = index + 1
break
else:
if index + 1 == len(function_args):
return None
else:
temp_code_str = function_args[index + 1:]
if '\n' in temp_code_str:
try:
return json.loads(function_args + '"}')['code']
except json.JSONDecodeError:
try:
return json.loads(function_args + '}')['code']
except json.JSONDecodeError:
try:
return json.loads(function_args)['code']
except json.JSONDecodeError:
if temp_code_str[-1] in ('"', '\n'):
return None
else:
return temp_code_str.strip('\n')
else:
return json.loads(function_args + '"}')['code']
elif parser_log['begin_"code"']:
parser_log['end_"code"'] = True
else:
parser_log['begin_"code"'] = True
elif parser_log['end_"code"'] and char == ':':
parser_log['met_:'] = True
else:
continue
if finished:
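# backward scan: find the closing quote of the "code" value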
for index, char in enumerate(function_args[::-1]):
back_index = -1 - index
if char == '}':
parser_log['met_end_}'] = True
elif parser_log['met_end_}'] and char == '"':
parser_log['code_end_index'] = back_index - 1
break
else:
continue
code_str = function_args[parser_log['code_begin_index']: parser_log['code_end_index'] + 1]
if '\n' in code_str:
return code_str.strip('\n')
else:
return json.loads(function_args)['code']
except Exception:
return None | GPT may generate non-standard JSON format string, which contains '\n' in string value, leading to error when using `json.loads()`. Here we implement a parser to extract code directly from non-standard JSON string. :return: code string if successfully parsed otherwise None |
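A quick illustration (not part of the original row): a raw newline inside a JSON string value makes `json.loads` raise, while `parse_json` still recovers the code.
args = '{"code": "print(1)\nprint(2)"}'  # the \n is a real newline character
# json.loads(args) raises json.JSONDecodeError ('Invalid control character')
print(parse_json(args, finished=True))  # -> print(1)\nprint(2)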
17,059 | import os
from response_parser import *
def initialization(state_dict: Dict) -> None:
if not os.path.exists('cache'):
os.mkdir('cache')
if state_dict["bot_backend"] is None:
state_dict["bot_backend"] = BotBackend()
if 'OPENAI_API_KEY' in os.environ:
del os.environ['OPENAI_API_KEY'] | null |
17,060 | import gradio as gr
from response_parser import *
def get_bot_backend(state_dict: Dict) -> BotBackend:
return state_dict["bot_backend"]
if __name__ == '__main__':
config = get_config()
with gr.Blocks(theme=gr.themes.Base()) as block:
"""
Reference: https://www.gradio.app/guides/creating-a-chatbot-fast
"""
# UI components
state = gr.State(value={"bot_backend": None})
with gr.Tab("Chat"):
chatbot = gr.Chatbot([], elem_id="chatbot", label="Local Code Interpreter", height=750)
with gr.Row():
with gr.Column(scale=0.85):
text_box = gr.Textbox(
show_label=False,
placeholder="Enter text and press enter, or upload a file",
container=False
)
with gr.Column(scale=0.15, min_width=0):
file_upload_button = gr.UploadButton("📁", file_count='multiple', file_types=['file'])
with gr.Row(equal_height=True):
with gr.Column(scale=0.08, min_width=0):
check_box = gr.Checkbox(label="Use GPT-4", interactive=config['model']['GPT-4']['available'])
with gr.Column(scale=0.314, min_width=0):
model_token_limit = config['model_context_window'][config['model']['GPT-3.5']['model_name']]
token_count_display_text = f"**Context token:** 0/{model_token_limit}"
token_monitor = gr.Markdown(value=token_count_display_text)
with gr.Column(scale=0.15, min_width=0):
retry_button = gr.Button(value='🔂OpenAI Error, click here to retry', visible=False)
with gr.Column(scale=0.15, min_width=0):
stop_generation_button = gr.Button(value='⏹️ Stop generating', interactive=False)
with gr.Column(scale=0.15, min_width=0):
restart_button = gr.Button(value='🔄 Restart')
with gr.Column(scale=0.15, min_width=0):
undo_file_button = gr.Button(value="↩️Undo upload file", interactive=False)
with gr.Tab("Files"):
file_output = gr.Files()
# Components function binding
txt_msg = text_box.submit(add_text, [state, chatbot, text_box], [chatbot, text_box], queue=False).then(
lambda: gr.Button.update(interactive=False), None, [undo_file_button], queue=False
).then(
bot, [state, chatbot], [chatbot, stop_generation_button, retry_button]
)
txt_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])
txt_msg.then(lambda: gr.update(interactive=True), None, [text_box], queue=False)
txt_msg.then(fn=refresh_token_count, inputs=[state], outputs=[token_monitor])
retry_button.click(lambda: gr.Button.update(visible=False), None, [retry_button], queue=False).then(
bot, [state, chatbot], [chatbot, stop_generation_button, retry_button]
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
).then(
lambda: gr.update(interactive=True), None, [text_box], queue=False
).then(
fn=refresh_token_count, inputs=[state], outputs=[token_monitor]
)
check_box.change(fn=switch_to_gpt4, inputs=[state, check_box]).then(
fn=refresh_token_count, inputs=[state], outputs=[token_monitor]
)
file_msg = file_upload_button.upload(
add_file, [state, chatbot, file_upload_button], [chatbot], queue=False
)
file_msg.then(lambda: gr.Button.update(interactive=True), None, [undo_file_button], queue=False)
file_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])
undo_file_button.click(
fn=undo_upload_file, inputs=[state, chatbot], outputs=[chatbot, undo_file_button]
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
)
stop_generation_button.click(fn=stop_generating, inputs=[state], queue=False).then(
fn=lambda: gr.Button.update(interactive=False), inputs=None, outputs=[stop_generation_button], queue=False
)
restart_button.click(
fn=restart_ui, inputs=[chatbot],
outputs=[
chatbot, text_box, restart_button, file_upload_button, undo_file_button, stop_generation_button,
retry_button
]
).then(
fn=restart_bot_backend, inputs=[state], queue=False
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
).then(
fn=lambda: (gr.Textbox.update(interactive=True), gr.Button.update(interactive=True),
gr.Button.update(interactive=True)),
inputs=None, outputs=[text_box, restart_button, file_upload_button], queue=False
).then(
fn=refresh_token_count,
inputs=[state], outputs=[token_monitor]
)
block.load(fn=initialization, inputs=[state])
block.queue()
block.launch(inbrowser=True)
def switch_to_gpt4(state_dict: Dict, whether_switch: bool) -> None:
bot_backend = get_bot_backend(state_dict)
if whether_switch:
bot_backend.update_gpt_model_choice("GPT-4")
else:
bot_backend.update_gpt_model_choice("GPT-3.5") | null |
17,061 | import gradio as gr
from response_parser import *
def get_bot_backend(state_dict: Dict) -> BotBackend:
return state_dict["bot_backend"]
if __name__ == '__main__':
config = get_config()
with gr.Blocks(theme=gr.themes.Base()) as block:
"""
Reference: https://www.gradio.app/guides/creating-a-chatbot-fast
"""
# UI components
state = gr.State(value={"bot_backend": None})
with gr.Tab("Chat"):
chatbot = gr.Chatbot([], elem_id="chatbot", label="Local Code Interpreter", height=750)
with gr.Row():
with gr.Column(scale=0.85):
text_box = gr.Textbox(
show_label=False,
placeholder="Enter text and press enter, or upload a file",
container=False
)
with gr.Column(scale=0.15, min_width=0):
file_upload_button = gr.UploadButton("📁", file_count='multiple', file_types=['file'])
with gr.Row(equal_height=True):
with gr.Column(scale=0.08, min_width=0):
check_box = gr.Checkbox(label="Use GPT-4", interactive=config['model']['GPT-4']['available'])
with gr.Column(scale=0.314, min_width=0):
model_token_limit = config['model_context_window'][config['model']['GPT-3.5']['model_name']]
token_count_display_text = f"**Context token:** 0/{model_token_limit}"
token_monitor = gr.Markdown(value=token_count_display_text)
with gr.Column(scale=0.15, min_width=0):
retry_button = gr.Button(value='🔂OpenAI Error, click here to retry', visible=False)
with gr.Column(scale=0.15, min_width=0):
stop_generation_button = gr.Button(value='⏹️ Stop generating', interactive=False)
with gr.Column(scale=0.15, min_width=0):
restart_button = gr.Button(value='🔄 Restart')
with gr.Column(scale=0.15, min_width=0):
undo_file_button = gr.Button(value="↩️Undo upload file", interactive=False)
with gr.Tab("Files"):
file_output = gr.Files()
# Components function binding
txt_msg = text_box.submit(add_text, [state, chatbot, text_box], [chatbot, text_box], queue=False).then(
lambda: gr.Button.update(interactive=False), None, [undo_file_button], queue=False
).then(
bot, [state, chatbot], [chatbot, stop_generation_button, retry_button]
)
txt_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])
txt_msg.then(lambda: gr.update(interactive=True), None, [text_box], queue=False)
txt_msg.then(fn=refresh_token_count, inputs=[state], outputs=[token_monitor])
retry_button.click(lambda: gr.Button.update(visible=False), None, [retry_button], queue=False).then(
bot, [state, chatbot], [chatbot, stop_generation_button, retry_button]
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
).then(
lambda: gr.update(interactive=True), None, [text_box], queue=False
).then(
fn=refresh_token_count, inputs=[state], outputs=[token_monitor]
)
check_box.change(fn=switch_to_gpt4, inputs=[state, check_box]).then(
fn=refresh_token_count, inputs=[state], outputs=[token_monitor]
)
file_msg = file_upload_button.upload(
add_file, [state, chatbot, file_upload_button], [chatbot], queue=False
)
file_msg.then(lambda: gr.Button.update(interactive=True), None, [undo_file_button], queue=False)
file_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])
undo_file_button.click(
fn=undo_upload_file, inputs=[state, chatbot], outputs=[chatbot, undo_file_button]
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
)
stop_generation_button.click(fn=stop_generating, inputs=[state], queue=False).then(
fn=lambda: gr.Button.update(interactive=False), inputs=None, outputs=[stop_generation_button], queue=False
)
restart_button.click(
fn=restart_ui, inputs=[chatbot],
outputs=[
chatbot, text_box, restart_button, file_upload_button, undo_file_button, stop_generation_button,
retry_button
]
).then(
fn=restart_bot_backend, inputs=[state], queue=False
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
).then(
fn=lambda: (gr.Textbox.update(interactive=True), gr.Button.update(interactive=True),
gr.Button.update(interactive=True)),
inputs=None, outputs=[text_box, restart_button, file_upload_button], queue=False
).then(
fn=refresh_token_count,
inputs=[state], outputs=[token_monitor]
)
block.load(fn=initialization, inputs=[state])
block.queue()
block.launch(inbrowser=True)
def add_text(state_dict: Dict, history: List, text: str) -> Tuple[List, Dict]:
bot_backend = get_bot_backend(state_dict)
bot_backend.add_text_message(user_text=text)
history = history + [(text, None)]
return history, gr.update(value="", interactive=False) | null |
17,062 | import gradio as gr
from response_parser import *
def get_bot_backend(state_dict: Dict) -> BotBackend:
return state_dict["bot_backend"]
if __name__ == '__main__':
config = get_config()
with gr.Blocks(theme=gr.themes.Base()) as block:
"""
Reference: https://www.gradio.app/guides/creating-a-chatbot-fast
"""
# UI components
state = gr.State(value={"bot_backend": None})
with gr.Tab("Chat"):
chatbot = gr.Chatbot([], elem_id="chatbot", label="Local Code Interpreter", height=750)
with gr.Row():
with gr.Column(scale=0.85):
text_box = gr.Textbox(
show_label=False,
placeholder="Enter text and press enter, or upload a file",
container=False
)
with gr.Column(scale=0.15, min_width=0):
file_upload_button = gr.UploadButton("📁", file_count='multiple', file_types=['file'])
with gr.Row(equal_height=True):
with gr.Column(scale=0.08, min_width=0):
check_box = gr.Checkbox(label="Use GPT-4", interactive=config['model']['GPT-4']['available'])
with gr.Column(scale=0.314, min_width=0):
model_token_limit = config['model_context_window'][config['model']['GPT-3.5']['model_name']]
token_count_display_text = f"**Context token:** 0/{model_token_limit}"
token_monitor = gr.Markdown(value=token_count_display_text)
with gr.Column(scale=0.15, min_width=0):
retry_button = gr.Button(value='🔂OpenAI Error, click here to retry', visible=False)
with gr.Column(scale=0.15, min_width=0):
stop_generation_button = gr.Button(value='⏹️ Stop generating', interactive=False)
with gr.Column(scale=0.15, min_width=0):
restart_button = gr.Button(value='🔄 Restart')
with gr.Column(scale=0.15, min_width=0):
undo_file_button = gr.Button(value="↩️Undo upload file", interactive=False)
with gr.Tab("Files"):
file_output = gr.Files()
# Components function binding
txt_msg = text_box.submit(add_text, [state, chatbot, text_box], [chatbot, text_box], queue=False).then(
lambda: gr.Button.update(interactive=False), None, [undo_file_button], queue=False
).then(
bot, [state, chatbot], [chatbot, stop_generation_button, retry_button]
)
txt_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])
txt_msg.then(lambda: gr.update(interactive=True), None, [text_box], queue=False)
txt_msg.then(fn=refresh_token_count, inputs=[state], outputs=[token_monitor])
retry_button.click(lambda: gr.Button.update(visible=False), None, [retry_button], queue=False).then(
bot, [state, chatbot], [chatbot, stop_generation_button, retry_button]
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
).then(
lambda: gr.update(interactive=True), None, [text_box], queue=False
).then(
fn=refresh_token_count, inputs=[state], outputs=[token_monitor]
)
check_box.change(fn=switch_to_gpt4, inputs=[state, check_box]).then(
fn=refresh_token_count, inputs=[state], outputs=[token_monitor]
)
file_msg = file_upload_button.upload(
add_file, [state, chatbot, file_upload_button], [chatbot], queue=False
)
file_msg.then(lambda: gr.Button.update(interactive=True), None, [undo_file_button], queue=False)
file_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])
undo_file_button.click(
fn=undo_upload_file, inputs=[state, chatbot], outputs=[chatbot, undo_file_button]
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
)
stop_generation_button.click(fn=stop_generating, inputs=[state], queue=False).then(
fn=lambda: gr.Button.update(interactive=False), inputs=None, outputs=[stop_generation_button], queue=False
)
restart_button.click(
fn=restart_ui, inputs=[chatbot],
outputs=[
chatbot, text_box, restart_button, file_upload_button, undo_file_button, stop_generation_button,
retry_button
]
).then(
fn=restart_bot_backend, inputs=[state], queue=False
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
).then(
fn=lambda: (gr.Textbox.update(interactive=True), gr.Button.update(interactive=True),
gr.Button.update(interactive=True)),
inputs=None, outputs=[text_box, restart_button, file_upload_button], queue=False
).then(
fn=refresh_token_count,
inputs=[state], outputs=[token_monitor]
)
block.load(fn=initialization, inputs=[state])
block.queue()
block.launch(inbrowser=True)
def add_file(state_dict: Dict, history: List, files) -> List:
bot_backend = get_bot_backend(state_dict)
for file in files:
path = file.name
filename = os.path.basename(path)
bot_msg = [f'📁[{filename}]', None]
history.append(bot_msg)
bot_backend.add_file_message(path=path, bot_msg=bot_msg)
_, suffix = os.path.splitext(filename)
if suffix in {'.jpg', '.jpeg', '.png', '.bmp', '.webp'}:
copied_file_path = f'{bot_backend.jupyter_work_dir}/{filename}'
width, height = get_image_size(copied_file_path)
bot_msg[0] += \
f'\n<img src=\"file={copied_file_path}\" style=\'{"" if width < 800 else "width: 800px;"} max-width' \
f':none; max-height:none\'> '
return history | null |
17,063 | import gradio as gr
from response_parser import *
def get_bot_backend(state_dict: Dict) -> BotBackend:
return state_dict["bot_backend"]
if __name__ == '__main__':
config = get_config()
with gr.Blocks(theme=gr.themes.Base()) as block:
"""
Reference: https://www.gradio.app/guides/creating-a-chatbot-fast
"""
# UI components
state = gr.State(value={"bot_backend": None})
with gr.Tab("Chat"):
chatbot = gr.Chatbot([], elem_id="chatbot", label="Local Code Interpreter", height=750)
with gr.Row():
with gr.Column(scale=0.85):
text_box = gr.Textbox(
show_label=False,
placeholder="Enter text and press enter, or upload a file",
container=False
)
with gr.Column(scale=0.15, min_width=0):
file_upload_button = gr.UploadButton("📁", file_count='multiple', file_types=['file'])
with gr.Row(equal_height=True):
with gr.Column(scale=0.08, min_width=0):
check_box = gr.Checkbox(label="Use GPT-4", interactive=config['model']['GPT-4']['available'])
with gr.Column(scale=0.314, min_width=0):
model_token_limit = config['model_context_window'][config['model']['GPT-3.5']['model_name']]
token_count_display_text = f"**Context token:** 0/{model_token_limit}"
token_monitor = gr.Markdown(value=token_count_display_text)
with gr.Column(scale=0.15, min_width=0):
retry_button = gr.Button(value='🔂OpenAI Error, click here to retry', visible=False)
with gr.Column(scale=0.15, min_width=0):
stop_generation_button = gr.Button(value='⏹️ Stop generating', interactive=False)
with gr.Column(scale=0.15, min_width=0):
restart_button = gr.Button(value='🔄 Restart')
with gr.Column(scale=0.15, min_width=0):
undo_file_button = gr.Button(value="↩️Undo upload file", interactive=False)
with gr.Tab("Files"):
file_output = gr.Files()
# Components function binding
txt_msg = text_box.submit(add_text, [state, chatbot, text_box], [chatbot, text_box], queue=False).then(
lambda: gr.Button.update(interactive=False), None, [undo_file_button], queue=False
).then(
bot, [state, chatbot], [chatbot, stop_generation_button, retry_button]
)
txt_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])
txt_msg.then(lambda: gr.update(interactive=True), None, [text_box], queue=False)
txt_msg.then(fn=refresh_token_count, inputs=[state], outputs=[token_monitor])
retry_button.click(lambda: gr.Button.update(visible=False), None, [retry_button], queue=False).then(
bot, [state, chatbot], [chatbot, stop_generation_button, retry_button]
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
).then(
lambda: gr.update(interactive=True), None, [text_box], queue=False
).then(
fn=refresh_token_count, inputs=[state], outputs=[token_monitor]
)
check_box.change(fn=switch_to_gpt4, inputs=[state, check_box]).then(
fn=refresh_token_count, inputs=[state], outputs=[token_monitor]
)
file_msg = file_upload_button.upload(
add_file, [state, chatbot, file_upload_button], [chatbot], queue=False
)
file_msg.then(lambda: gr.Button.update(interactive=True), None, [undo_file_button], queue=False)
file_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])
undo_file_button.click(
fn=undo_upload_file, inputs=[state, chatbot], outputs=[chatbot, undo_file_button]
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
)
stop_generation_button.click(fn=stop_generating, inputs=[state], queue=False).then(
fn=lambda: gr.Button.update(interactive=False), inputs=None, outputs=[stop_generation_button], queue=False
)
restart_button.click(
fn=restart_ui, inputs=[chatbot],
outputs=[
chatbot, text_box, restart_button, file_upload_button, undo_file_button, stop_generation_button,
retry_button
]
).then(
fn=restart_bot_backend, inputs=[state], queue=False
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
).then(
fn=lambda: (gr.Textbox.update(interactive=True), gr.Button.update(interactive=True),
gr.Button.update(interactive=True)),
inputs=None, outputs=[text_box, restart_button, file_upload_button], queue=False
).then(
fn=refresh_token_count,
inputs=[state], outputs=[token_monitor]
)
block.load(fn=initialization, inputs=[state])
block.queue()
block.launch(inbrowser=True)
def undo_upload_file(state_dict: Dict, history: List) -> Tuple[List, Dict]:
bot_backend = get_bot_backend(state_dict)
bot_msg = bot_backend.revoke_file()
if bot_msg is None:
return history, gr.Button.update(interactive=False)
else:
assert history[-1] == bot_msg
del history[-1]
if bot_backend.revocable_files:
return history, gr.Button.update(interactive=True)
else:
return history, gr.Button.update(interactive=False) | null |
17,064 | import gradio as gr
from response_parser import *
def get_bot_backend(state_dict: Dict) -> BotBackend:
return state_dict["bot_backend"]
if __name__ == '__main__':
config = get_config()
with gr.Blocks(theme=gr.themes.Base()) as block:
"""
Reference: https://www.gradio.app/guides/creating-a-chatbot-fast
"""
# UI components
state = gr.State(value={"bot_backend": None})
with gr.Tab("Chat"):
chatbot = gr.Chatbot([], elem_id="chatbot", label="Local Code Interpreter", height=750)
with gr.Row():
with gr.Column(scale=0.85):
text_box = gr.Textbox(
show_label=False,
placeholder="Enter text and press enter, or upload a file",
container=False
)
with gr.Column(scale=0.15, min_width=0):
file_upload_button = gr.UploadButton("📁", file_count='multiple', file_types=['file'])
with gr.Row(equal_height=True):
with gr.Column(scale=0.08, min_width=0):
check_box = gr.Checkbox(label="Use GPT-4", interactive=config['model']['GPT-4']['available'])
with gr.Column(scale=0.314, min_width=0):
model_token_limit = config['model_context_window'][config['model']['GPT-3.5']['model_name']]
token_count_display_text = f"**Context token:** 0/{model_token_limit}"
token_monitor = gr.Markdown(value=token_count_display_text)
with gr.Column(scale=0.15, min_width=0):
retry_button = gr.Button(value='🔂OpenAI Error, click here to retry', visible=False)
with gr.Column(scale=0.15, min_width=0):
stop_generation_button = gr.Button(value='⏹️ Stop generating', interactive=False)
with gr.Column(scale=0.15, min_width=0):
restart_button = gr.Button(value='🔄 Restart')
with gr.Column(scale=0.15, min_width=0):
undo_file_button = gr.Button(value="↩️Undo upload file", interactive=False)
with gr.Tab("Files"):
file_output = gr.Files()
# Components function binding
txt_msg = text_box.submit(add_text, [state, chatbot, text_box], [chatbot, text_box], queue=False).then(
lambda: gr.Button.update(interactive=False), None, [undo_file_button], queue=False
).then(
bot, [state, chatbot], [chatbot, stop_generation_button, retry_button]
)
txt_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])
txt_msg.then(lambda: gr.update(interactive=True), None, [text_box], queue=False)
txt_msg.then(fn=refresh_token_count, inputs=[state], outputs=[token_monitor])
retry_button.click(lambda: gr.Button.update(visible=False), None, [retry_button], queue=False).then(
bot, [state, chatbot], [chatbot, stop_generation_button, retry_button]
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
).then(
lambda: gr.update(interactive=True), None, [text_box], queue=False
).then(
fn=refresh_token_count, inputs=[state], outputs=[token_monitor]
)
check_box.change(fn=switch_to_gpt4, inputs=[state, check_box]).then(
fn=refresh_token_count, inputs=[state], outputs=[token_monitor]
)
file_msg = file_upload_button.upload(
add_file, [state, chatbot, file_upload_button], [chatbot], queue=False
)
file_msg.then(lambda: gr.Button.update(interactive=True), None, [undo_file_button], queue=False)
file_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])
undo_file_button.click(
fn=undo_upload_file, inputs=[state, chatbot], outputs=[chatbot, undo_file_button]
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
)
stop_generation_button.click(fn=stop_generating, inputs=[state], queue=False).then(
fn=lambda: gr.Button.update(interactive=False), inputs=None, outputs=[stop_generation_button], queue=False
)
restart_button.click(
fn=restart_ui, inputs=[chatbot],
outputs=[
chatbot, text_box, restart_button, file_upload_button, undo_file_button, stop_generation_button,
retry_button
]
).then(
fn=restart_bot_backend, inputs=[state], queue=False
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
).then(
fn=lambda: (gr.Textbox.update(interactive=True), gr.Button.update(interactive=True),
gr.Button.update(interactive=True)),
inputs=None, outputs=[text_box, restart_button, file_upload_button], queue=False
).then(
fn=refresh_token_count,
inputs=[state], outputs=[token_monitor]
)
block.load(fn=initialization, inputs=[state])
block.queue()
block.launch(inbrowser=True)
def refresh_file_display(state_dict: Dict) -> List[str]:
bot_backend = get_bot_backend(state_dict)
work_dir = bot_backend.jupyter_work_dir
filenames = os.listdir(work_dir)
paths = []
for filename in filenames:
path = os.path.join(work_dir, filename)
if not os.path.isdir(path):
paths.append(path)
return paths | null |
17,065 | import gradio as gr
from response_parser import *
def get_bot_backend(state_dict: Dict) -> BotBackend:
return state_dict["bot_backend"]
if __name__ == '__main__':
config = get_config()
with gr.Blocks(theme=gr.themes.Base()) as block:
"""
Reference: https://www.gradio.app/guides/creating-a-chatbot-fast
"""
# UI components
state = gr.State(value={"bot_backend": None})
with gr.Tab("Chat"):
chatbot = gr.Chatbot([], elem_id="chatbot", label="Local Code Interpreter", height=750)
with gr.Row():
with gr.Column(scale=0.85):
text_box = gr.Textbox(
show_label=False,
placeholder="Enter text and press enter, or upload a file",
container=False
)
with gr.Column(scale=0.15, min_width=0):
file_upload_button = gr.UploadButton("📁", file_count='multiple', file_types=['file'])
with gr.Row(equal_height=True):
with gr.Column(scale=0.08, min_width=0):
check_box = gr.Checkbox(label="Use GPT-4", interactive=config['model']['GPT-4']['available'])
with gr.Column(scale=0.314, min_width=0):
model_token_limit = config['model_context_window'][config['model']['GPT-3.5']['model_name']]
token_count_display_text = f"**Context token:** 0/{model_token_limit}"
token_monitor = gr.Markdown(value=token_count_display_text)
with gr.Column(scale=0.15, min_width=0):
retry_button = gr.Button(value='🔂OpenAI Error, click here to retry', visible=False)
with gr.Column(scale=0.15, min_width=0):
stop_generation_button = gr.Button(value='⏹️ Stop generating', interactive=False)
with gr.Column(scale=0.15, min_width=0):
restart_button = gr.Button(value='🔄 Restart')
with gr.Column(scale=0.15, min_width=0):
undo_file_button = gr.Button(value="↩️Undo upload file", interactive=False)
with gr.Tab("Files"):
file_output = gr.Files()
# Components function binding
txt_msg = text_box.submit(add_text, [state, chatbot, text_box], [chatbot, text_box], queue=False).then(
lambda: gr.Button.update(interactive=False), None, [undo_file_button], queue=False
).then(
bot, [state, chatbot], [chatbot, stop_generation_button, retry_button]
)
txt_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])
txt_msg.then(lambda: gr.update(interactive=True), None, [text_box], queue=False)
txt_msg.then(fn=refresh_token_count, inputs=[state], outputs=[token_monitor])
retry_button.click(lambda: gr.Button.update(visible=False), None, [retry_button], queue=False).then(
bot, [state, chatbot], [chatbot, stop_generation_button, retry_button]
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
).then(
lambda: gr.update(interactive=True), None, [text_box], queue=False
).then(
fn=refresh_token_count, inputs=[state], outputs=[token_monitor]
)
check_box.change(fn=switch_to_gpt4, inputs=[state, check_box]).then(
fn=refresh_token_count, inputs=[state], outputs=[token_monitor]
)
file_msg = file_upload_button.upload(
add_file, [state, chatbot, file_upload_button], [chatbot], queue=False
)
file_msg.then(lambda: gr.Button.update(interactive=True), None, [undo_file_button], queue=False)
file_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])
undo_file_button.click(
fn=undo_upload_file, inputs=[state, chatbot], outputs=[chatbot, undo_file_button]
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
)
stop_generation_button.click(fn=stop_generating, inputs=[state], queue=False).then(
fn=lambda: gr.Button.update(interactive=False), inputs=None, outputs=[stop_generation_button], queue=False
)
restart_button.click(
fn=restart_ui, inputs=[chatbot],
outputs=[
chatbot, text_box, restart_button, file_upload_button, undo_file_button, stop_generation_button,
retry_button
]
).then(
fn=restart_bot_backend, inputs=[state], queue=False
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
).then(
fn=lambda: (gr.Textbox.update(interactive=True), gr.Button.update(interactive=True),
gr.Button.update(interactive=True)),
inputs=None, outputs=[text_box, restart_button, file_upload_button], queue=False
).then(
fn=refresh_token_count,
inputs=[state], outputs=[token_monitor]
)
block.load(fn=initialization, inputs=[state])
block.queue()
block.launch(inbrowser=True)
def refresh_token_count(state_dict: Dict):
bot_backend = get_bot_backend(state_dict)
model_choice = bot_backend.gpt_model_choice
sliced = bot_backend.sliced
token_count = bot_backend.context_window_tokens
token_limit = config['model_context_window'][config['model'][model_choice]['model_name']]
display_text = f"**Context token:** {token_count}/{token_limit}"
if sliced:
display_text += '\\\nToken limit exceeded, conversation has been sliced.'
return gr.Markdown.update(value=display_text) | null |
17,066 |
def restart_ui(history: List) -> Tuple[List, Dict, Dict, Dict, Dict, Dict, Dict]:
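# Clear the chat history and disable every control until the backend has finished restarting.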
history.clear()
return (
history,
gr.Textbox.update(value="", interactive=False),
gr.Button.update(interactive=False),
gr.Button.update(interactive=False),
gr.Button.update(interactive=False),
gr.Button.update(interactive=False),
gr.Button.update(visible=False)
) | null |
17,067 | import gradio as gr
from response_parser import *
def get_bot_backend(state_dict: Dict) -> BotBackend:
if __name__ == '__main__':
config = get_config()
with gr.Blocks(theme=gr.themes.Base()) as block:
"""
Reference: https://www.gradio.app/guides/creating-a-chatbot-fast
"""
# UI components
state = gr.State(value={"bot_backend": None})
with gr.Tab("Chat"):
chatbot = gr.Chatbot([], elem_id="chatbot", label="Local Code Interpreter", height=750)
with gr.Row():
with gr.Column(scale=0.85):
text_box = gr.Textbox(
show_label=False,
placeholder="Enter text and press enter, or upload a file",
container=False
)
with gr.Column(scale=0.15, min_width=0):
file_upload_button = gr.UploadButton("📁", file_count='multiple', file_types=['file'])
with gr.Row(equal_height=True):
with gr.Column(scale=0.08, min_width=0):
check_box = gr.Checkbox(label="Use GPT-4", interactive=config['model']['GPT-4']['available'])
with gr.Column(scale=0.314, min_width=0):
model_token_limit = config['model_context_window'][config['model']['GPT-3.5']['model_name']]
token_count_display_text = f"**Context token:** 0/{model_token_limit}"
token_monitor = gr.Markdown(value=token_count_display_text)
with gr.Column(scale=0.15, min_width=0):
retry_button = gr.Button(value='🔂OpenAI Error, click here to retry', visible=False)
with gr.Column(scale=0.15, min_width=0):
stop_generation_button = gr.Button(value='⏹️ Stop generating', interactive=False)
with gr.Column(scale=0.15, min_width=0):
restart_button = gr.Button(value='🔄 Restart')
with gr.Column(scale=0.15, min_width=0):
undo_file_button = gr.Button(value="↩️Undo upload file", interactive=False)
with gr.Tab("Files"):
file_output = gr.Files()
# Components function binding
txt_msg = text_box.submit(add_text, [state, chatbot, text_box], [chatbot, text_box], queue=False).then(
lambda: gr.Button.update(interactive=False), None, [undo_file_button], queue=False
).then(
bot, [state, chatbot], [chatbot, stop_generation_button, retry_button]
)
txt_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])
txt_msg.then(lambda: gr.update(interactive=True), None, [text_box], queue=False)
txt_msg.then(fn=refresh_token_count, inputs=[state], outputs=[token_monitor])
retry_button.click(lambda: gr.Button.update(visible=False), None, [retry_button], queue=False).then(
bot, [state, chatbot], [chatbot, stop_generation_button, retry_button]
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
).then(
lambda: gr.update(interactive=True), None, [text_box], queue=False
).then(
fn=refresh_token_count, inputs=[state], outputs=[token_monitor]
)
check_box.change(fn=switch_to_gpt4, inputs=[state, check_box]).then(
fn=refresh_token_count, inputs=[state], outputs=[token_monitor]
)
file_msg = file_upload_button.upload(
add_file, [state, chatbot, file_upload_button], [chatbot], queue=False
)
file_msg.then(lambda: gr.Button.update(interactive=True), None, [undo_file_button], queue=False)
file_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])
undo_file_button.click(
fn=undo_upload_file, inputs=[state, chatbot], outputs=[chatbot, undo_file_button]
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
)
stop_generation_button.click(fn=stop_generating, inputs=[state], queue=False).then(
fn=lambda: gr.Button.update(interactive=False), inputs=None, outputs=[stop_generation_button], queue=False
)
restart_button.click(
fn=restart_ui, inputs=[chatbot],
outputs=[
chatbot, text_box, restart_button, file_upload_button, undo_file_button, stop_generation_button,
retry_button
]
).then(
fn=restart_bot_backend, inputs=[state], queue=False
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
).then(
fn=lambda: (gr.Textbox.update(interactive=True), gr.Button.update(interactive=True),
gr.Button.update(interactive=True)),
inputs=None, outputs=[text_box, restart_button, file_upload_button], queue=False
).then(
fn=refresh_token_count,
inputs=[state], outputs=[token_monitor]
)
block.load(fn=initialization, inputs=[state])
block.queue()
block.launch(inbrowser=True)
def restart_bot_backend(state_dict: Dict) -> None:
bot_backend = get_bot_backend(state_dict)
bot_backend.restart() | null |
17,068 | import gradio as gr
from response_parser import *
def get_bot_backend(state_dict: Dict) -> BotBackend:
return state_dict["bot_backend"]
def stop_generating(state_dict: Dict) -> None:
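# If code is currently executing, interrupt the Jupyter kernel; otherwise just flag generation to stop.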
bot_backend = get_bot_backend(state_dict)
if bot_backend.code_executing:
bot_backend.send_interrupt_signal()
else:
bot_backend.update_stop_generating_state(stop_generating=True)
if __name__ == '__main__':
config = get_config()
with gr.Blocks(theme=gr.themes.Base()) as block:
"""
Reference: https://www.gradio.app/guides/creating-a-chatbot-fast
"""
# UI components
state = gr.State(value={"bot_backend": None})
with gr.Tab("Chat"):
chatbot = gr.Chatbot([], elem_id="chatbot", label="Local Code Interpreter", height=750)
with gr.Row():
with gr.Column(scale=0.85):
text_box = gr.Textbox(
show_label=False,
placeholder="Enter text and press enter, or upload a file",
container=False
)
with gr.Column(scale=0.15, min_width=0):
file_upload_button = gr.UploadButton("📁", file_count='multiple', file_types=['file'])
with gr.Row(equal_height=True):
with gr.Column(scale=0.08, min_width=0):
check_box = gr.Checkbox(label="Use GPT-4", interactive=config['model']['GPT-4']['available'])
with gr.Column(scale=0.314, min_width=0):
model_token_limit = config['model_context_window'][config['model']['GPT-3.5']['model_name']]
token_count_display_text = f"**Context token:** 0/{model_token_limit}"
token_monitor = gr.Markdown(value=token_count_display_text)
with gr.Column(scale=0.15, min_width=0):
retry_button = gr.Button(value='🔂OpenAI Error, click here to retry', visible=False)
with gr.Column(scale=0.15, min_width=0):
stop_generation_button = gr.Button(value='⏹️ Stop generating', interactive=False)
with gr.Column(scale=0.15, min_width=0):
restart_button = gr.Button(value='🔄 Restart')
with gr.Column(scale=0.15, min_width=0):
undo_file_button = gr.Button(value="↩️Undo upload file", interactive=False)
with gr.Tab("Files"):
file_output = gr.Files()
# Components function binding
txt_msg = text_box.submit(add_text, [state, chatbot, text_box], [chatbot, text_box], queue=False).then(
lambda: gr.Button.update(interactive=False), None, [undo_file_button], queue=False
).then(
bot, [state, chatbot], [chatbot, stop_generation_button, retry_button]
)
txt_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])
txt_msg.then(lambda: gr.update(interactive=True), None, [text_box], queue=False)
txt_msg.then(fn=refresh_token_count, inputs=[state], outputs=[token_monitor])
retry_button.click(lambda: gr.Button.update(visible=False), None, [retry_button], queue=False).then(
bot, [state, chatbot], [chatbot, stop_generation_button, retry_button]
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
).then(
lambda: gr.update(interactive=True), None, [text_box], queue=False
).then(
fn=refresh_token_count, inputs=[state], outputs=[token_monitor]
)
check_box.change(fn=switch_to_gpt4, inputs=[state, check_box]).then(
fn=refresh_token_count, inputs=[state], outputs=[token_monitor]
)
file_msg = file_upload_button.upload(
add_file, [state, chatbot, file_upload_button], [chatbot], queue=False
)
file_msg.then(lambda: gr.Button.update(interactive=True), None, [undo_file_button], queue=False)
file_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])
undo_file_button.click(
fn=undo_upload_file, inputs=[state, chatbot], outputs=[chatbot, undo_file_button]
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
)
stop_generation_button.click(fn=stop_generating, inputs=[state], queue=False).then(
fn=lambda: gr.Button.update(interactive=False), inputs=None, outputs=[stop_generation_button], queue=False
)
restart_button.click(
fn=restart_ui, inputs=[chatbot],
outputs=[
chatbot, text_box, restart_button, file_upload_button, undo_file_button, stop_generation_button,
retry_button
]
).then(
fn=restart_bot_backend, inputs=[state], queue=False
).then(
fn=refresh_file_display, inputs=[state], outputs=[file_output]
).then(
fn=lambda: (gr.Textbox.update(interactive=True), gr.Button.update(interactive=True),
gr.Button.update(interactive=True)),
inputs=None, outputs=[text_box, restart_button, file_upload_button], queue=False
).then(
fn=refresh_token_count,
inputs=[state], outputs=[token_monitor]
)
block.load(fn=initialization, inputs=[state])
block.queue()
block.launch(inbrowser=True)
def bot(state_dict: Dict, history: List) -> List:
bot_backend = get_bot_backend(state_dict)
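# Keep generating while the model asked for another round: fresh user input or a pending function call.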
while bot_backend.finish_reason in ('new_input', 'function_call'):
if history[-1][1]:
history.append([None, ""])
else:
history[-1][1] = ""
try:
response = chat_completion(bot_backend=bot_backend)
for chunk in response:
if chunk['choices'] and chunk['choices'][0]['finish_reason'] == 'function_call':
if bot_backend.function_name in bot_backend.jupyter_kernel.available_functions:
yield history, gr.Button.update(value='⏹️ Interrupt execution'), gr.Button.update(visible=False)
else:
yield history, gr.Button.update(interactive=False), gr.Button.update(visible=False)
if bot_backend.stop_generating:
response.close()
if bot_backend.content:
bot_backend.add_gpt_response_content_message()
if bot_backend.display_code_block:
bot_backend.update_display_code_block(
display_code_block="\n⚫Stopped:\n```python\n{}\n```".format(bot_backend.code_str)
)
history = copy.deepcopy(bot_backend.bot_history)
history[-1][1] += bot_backend.display_code_block
bot_backend.add_function_call_response_message(function_response=None)
bot_backend.reset_gpt_response_log_values()
break
history, whether_exit = parse_response(
chunk=chunk,
history=history,
bot_backend=bot_backend
)
yield (
history,
gr.Button.update(
interactive=False if bot_backend.stop_generating else True,
value='⏹️ Stop generating'
),
gr.Button.update(visible=False)
)
if whether_exit:
exit(-1)
except openai.OpenAIError as openai_error:
bot_backend.reset_gpt_response_log_values(exclude=['finish_reason'])
yield history, gr.Button.update(interactive=False), gr.Button.update(visible=True)
raise openai_error
yield history, gr.Button.update(interactive=False, value='⏹️ Stop generating'), gr.Button.update(visible=False) | null |
17,069 | import jupyter_client
import re
def delete_color_control_char(string):
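# Regex for ANSI escape sequences (CSI codes, e.g. terminal colors) so they can be stripped from output.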
ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]')
return ansi_escape.sub('', string) | null |
17,070 | from functional import *
class ChoiceHandler:
strategies = [
RoleChoiceStrategy, ContentChoiceStrategy, NameFunctionCallChoiceStrategy,
ArgumentsFunctionCallChoiceStrategy, FinishReasonChoiceStrategy
]
def __init__(self, choice):
self.choice = choice
def handle(self, bot_backend: BotBackend, history: List, whether_exit: bool):
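        # Try each strategy in registration order; only strategies that declare support for this choice run.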
for Strategy in self.strategies:
strategy_instance = Strategy(choice=self.choice)
if not strategy_instance.support():
continue
history, whether_exit = strategy_instance.execute(
bot_backend=bot_backend,
history=history,
whether_exit=whether_exit
)
return history, whether_exit
The provided code snippet includes necessary dependencies for implementing the `parse_response` function. Write a Python function `def parse_response(chunk, history: List, bot_backend: BotBackend)` to solve the following problem:
:return: history, whether_exit
Here is the function:
def parse_response(chunk, history: List, bot_backend: BotBackend):
"""
:return: history, whether_exit
"""
whether_exit = False
if chunk['choices']:
choice = chunk['choices'][0]
choice_handler = ChoiceHandler(choice=choice)
history, whether_exit = choice_handler.handle(
history=history,
bot_backend=bot_backend,
whether_exit=whether_exit
)
return history, whether_exit | :return: history, whether_exit |
17,071 | import argparse
import os
import torch
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM
import logging
from evalplus.data import get_human_eval_plus,get_mbpp_plus, write_jsonl
def generate_one(example, lang, tokenizer, model, name, flags):
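# Build the dataset-specific instruction prompt, then decode greedily (do_sample=False, temperature=0).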
if flags.dataset=="humaneval":
prompt = build_humaneval_instruction(lang, example['prompt'])
else:
prompt = example['prompt']
prompt = build_mbpp_instruction.strip().format(prompt,lang,lang)
inputs = tokenizer.apply_chat_template(
[{'role': 'user', 'content': prompt }],
return_tensors="pt"
).to(model.device)
stop_id = tokenizer.convert_tokens_to_ids("<|EOT|>")
assert isinstance(stop_id, int), "Invalid tokenizer, EOT id not found"
max_new_tokens=1024
outputs = model.generate(
inputs,
max_new_tokens=max_new_tokens,
do_sample=False,
pad_token_id=tokenizer.eos_token_id,
eos_token_id=tokenizer.eos_token_id,
temperature=0,
)
output = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)
return output
def gen_solution(args):
os.environ["TOKENIZERS_PARALLELISM"] = "false"
model_path = args.model
logging.info(f"model:{model_path}")
model_name = model_path.replace("/", "_")
lang = "python"
os.makedirs(os.path.join(args.output_path,model_name),exist_ok=True)
output_file = os.path.join(args.output_path,model_name,f"single_{args.dataset}_plus_solutions.jsonl")
if os.path.exists(output_file):
logging.info(f"Old sample jsonl file exists, remove it. {output_file}")
os.remove(output_file)
tokenizer = AutoTokenizer.from_pretrained(model_path)
logging.info("load tokenizer {} from {} over.".format(tokenizer.__class__, model_path))
model = AutoModelForCausalLM.from_pretrained(
model_path,
torch_dtype=torch.bfloat16,
device_map="auto",
)
model.eval()
model_name = model_path.replace("/", "_")
if args.dataset=="humaneval":
examples = get_human_eval_plus().items()
else:
examples = get_mbpp_plus().items()
logging.info("Read {} examples for evaluation over.".format(len(examples)))
for task_id,example in tqdm(examples, desc='Generating'):
code = generate_one(example, lang, tokenizer, model,model_name,args)
gen_sample=[dict(task_id=task_id, solution=code)]
write_jsonl(output_file, gen_sample ,append=True) | null |
17,072 | import argparse
import os
import torch
from pathlib import Path
from tqdm import tqdm
import logging
from evalplus.data import (get_human_eval_plus,
write_jsonl,
get_human_eval_plus_hash,
get_mbpp_plus,
get_mbpp_plus_hash,
)
from utils import sanitize_solution,check_correctness,get_groundtruth,SUCCESS
from evalplus.eval._special_oracle import MBPP_OUTPUT_NOT_NONE_TASKS
from copy import deepcopy
from chat_with_gpt import gpt_predict
import json
def generate_multi_round(problem,expected_output, example, lang,name,flags):
if flags.dataset=="humaneval":
prompt = humaneval_build_instruction(lang, example['prompt'])
elif flags.dataset=="mbpp":
prompt = mbpp_build_deepseekcoder_instruction.strip().format(example['prompt'],"python","python")
inputs=[{'role': 'user', 'content': prompt}]
output = gpt_predict(inputs,model=name)
solution = {k:v for k,v in example.items()}
solution["solution"]=output
sanitized_solution = sanitize_solution(deepcopy(solution),flags.eofs)
attempt = 1
judge = False
modify = False
code = sanitized_solution["solution"]
while attempt==1 or sanitized_solution["solution"]!="":
args = (
flags.dataset,
0,
problem,
sanitized_solution["solution"],
expected_output,
flags.version,
True, # fast_check
example["task_id"]+f'_{attempt}',
flags.min_time_limit,
flags.gt_time_limit_factor,
)
result = check_correctness(*args)
if flags.version=="base" and result["base"][0]==SUCCESS:
code = sanitized_solution["solution"]
if attempt==2:
modify = True
judge = True
break
elif flags.version=="plus" and result["plus"][0]==result["base"][0]==SUCCESS:
code = sanitized_solution["solution"]
if attempt==2:
modify = True
judge = True
break
else:
attempt += 1
if attempt > MAX_TRY:
code = sanitized_solution["solution"]
break
execution_feedback=""
if flags.version=="base":
execution_feedback=result["base"][2]
elif flags.version=="plus":
if result["base"][0]!=SUCCESS:
execution_feedback+=result["base"][2]
if "The results aren't as expected." in execution_feedback:
if result["plus"][0]!=SUCCESS:
execution_feedback+="\n"+result["plus"][2]
else:
execution_feedback=result["plus"][2]
prompt +="""
{}
Execution result:
{}
""".format(solution["solution"],execution_feedback)
inputs = [{'role': 'user', 'content': prompt}]
output = gpt_predict(inputs,model=name)
solution = {k:v for k,v in example.items()}
solution["solution"]=output
sanitized_solution = sanitize_solution(deepcopy(solution),flags.eofs)
return code,judge,modify
def get_groundtruth(problems, hashcode, tasks_only_output_not_none):
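# Expected outputs are cached on disk keyed by the dataset hash, so they are only computed once.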
cache_file = os.path.join(CACHE_DIR, f"{hashcode}.pkl")
if os.path.exists(cache_file):
print(f"Load from ground-truth from {cache_file}")
with open(cache_file, "rb") as f:
return pickle.load(f)
os.makedirs(CACHE_DIR, exist_ok=True)
print("Computing expected output...")
tbegin = time.time()
expected_output = {}
for task_id, problem in problems.items():
oracle = {}
oracle["base"], oracle["base_time"] = trusted_exec(
problem["prompt"] + problem["canonical_solution"],
problem["base_input"],
problem["entry_point"],
record_time=True,
output_not_none=problem["entry_point"] in tasks_only_output_not_none,
)
oracle["plus"], oracle["plus_time"] = trusted_exec(
problem["prompt"] + problem["canonical_solution"],
problem["plus_input"],
problem["entry_point"],
record_time=True,
output_not_none=problem["entry_point"] in tasks_only_output_not_none,
)
expected_output[task_id] = oracle
print(f"Expected outputs computed in {time.time() - tbegin:.2f}s")
with open(cache_file, "wb") as f:
pickle.dump(expected_output, f)
return expected_output
def gen_solution(args):
a,b = 0,0
total_modify = 0
os.environ["TOKENIZERS_PARALLELISM"] = "false"
fail_list=[]
model_path = args.model
logging.info(f"model:{model_path}")
model_name =args.model
lang = "python"
os.makedirs(os.path.join(args.output_path,model_name),exist_ok=True)
output_file = os.path.join(args.output_path,model_name,f"multiround_{args.dataset}_{args.version}_solutions-sanitized.jsonl")
if not args.resume:
if os.path.exists(output_file):
logging.info(f"Old sample jsonl file exists, remove it. {output_file}")
os.remove(output_file)
else:
existing_task_ids = set()
if os.path.exists(output_file):
with open(output_file, 'r') as file:
for line in file:
data = json.loads(line)
if 'task_id' in data:
existing_task_ids.add(data['task_id'])
a=data["pass_num"]
total_modify=data["total_modify"]
b=data["total_num"]-a
fail_list=data["fail_list"]
if args.dataset=="humaneval":
problems = get_human_eval_plus()
examples = problems.items()
dataset_hash = get_human_eval_plus_hash()
expected_outputs = get_groundtruth(problems, dataset_hash, [])
else:
problems = get_mbpp_plus()
examples = problems.items()
dataset_hash = get_mbpp_plus_hash()
expected_outputs = get_groundtruth(
problems,
dataset_hash,
MBPP_OUTPUT_NOT_NONE_TASKS,
)
logging.info("Read {} examples for evaluation over.".format(len(examples)))
for task_id,example in tqdm(examples, desc='Generating'):
if args.resume:
if task_id in existing_task_ids:
continue
problem = problems[task_id]
expected_output = expected_outputs[task_id]
code,judge,modify = generate_multi_round(problem,expected_output,example, lang, model_name,args)
if modify:
total_modify += 1
if judge:
a += 1
else:
b += 1
fail_list.append(task_id)
result = a/(a+b)
print ("pass num",a)
print ("total num",a+b)
print ('rate: '+str(result))
print ("judge: ",judge)
print ('modify: '+str(modify))
print ("num modify: "+str(total_modify))
print("fail list",fail_list)
gen_sample=[dict(task_id=task_id, solution=code, pass_num=a, total_modify=total_modify, total_num=a+b, fail_list=fail_list)]
write_jsonl(output_file, gen_sample ,append=True) | null |
17,073 | import argparse
import os
import torch
from pathlib import Path
from tqdm import tqdm
import json
from transformers import AutoTokenizer, AutoModelForCausalLM
import logging
from evalplus.data import (get_human_eval_plus,
write_jsonl,
get_human_eval_plus_hash,
get_mbpp_plus,
get_mbpp_plus_hash,
)
from utils import sanitize_solution,check_correctness,get_groundtruth,SUCCESS
from evalplus.eval._special_oracle import MBPP_OUTPUT_NOT_NONE_TASKS
from copy import deepcopy
from chat_with_gpt import gpt_predict
def generate_multi_round(problem, expected_output, example, lang, tokenizer, model,name,flags):
pre_messages=[]
if flags.dataset=="humaneval":
prompt = humaneval_build_instruction(lang, example['prompt'])
elif flags.dataset=="mbpp":
prompt = mbpp_build_deepseekcoder_instruction.strip().format(example['prompt'],"python","python")
pre_messages.append({"role":"user","content":prompt})
inputs = tokenizer.apply_chat_template(
[{'role': 'user', 'content': prompt}],
return_tensors="pt"
).to(model.device)
stop_id = tokenizer.convert_tokens_to_ids("<|EOT|>")
assert isinstance(stop_id, int), "Invalid tokenizer, EOT id not found"
max_new_tokens=1024
logging.info(f"max_new_tokens{max_new_tokens}")
outputs = model.generate(
inputs,
max_new_tokens=max_new_tokens,
do_sample=False,
pad_token_id=tokenizer.eos_token_id,
eos_token_id=tokenizer.eos_token_id,
temperature=0,
)
output = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)
canonical_solution = example["canonical_solution"]
solution = {k:v for k,v in example.items()}
solution["solution"]=output
sanitized_solution = sanitize_solution(deepcopy(solution),flags.eofs)
attempt = 1
judge = False
modify = False
code = sanitized_solution["solution"]
while attempt==1 or sanitized_solution["solution"]!="":
pre_messages.append({"role":"assistant","content":solution["solution"]})
args = (
flags.dataset,
0,
problem,
sanitized_solution["solution"],
expected_output,
flags.version,
True, # fast_check
example["task_id"]+f'_{attempt}',
flags.min_time_limit,
flags.gt_time_limit_factor,
)
result = check_correctness(*args)
if flags.version=="base" and result["base"][0]==SUCCESS:
code = sanitized_solution["solution"]
if attempt==2:
modify = True
judge = True
break
elif flags.version=="plus" and result["plus"][0]==result["base"][0]==SUCCESS:
code = sanitized_solution["solution"]
if attempt==2:
modify = True
judge = True
break
else:
attempt += 1
if attempt > MAX_TRY:
code = sanitized_solution["solution"]
break
execution_feedback=""
if flags.version=="base":
execution_feedback=result["base"][2]
elif flags.version=="plus":
if result["base"][0]!=SUCCESS:
execution_feedback+=result["base"][2]
if "The results aren't as expected." in execution_feedback:
if result["plus"][0]!=SUCCESS:
execution_feedback+="\n"+result["plus"][2]
else:
execution_feedback=result["plus"][2]
gpt_messages=[{"role":"system","content":"You are a helpful assistant."}]
gpt_messages.append({"role":"user","content":build_gpt_prompt(\
pre_messages, example['prompt'], sanitized_solution["solution"], execution_feedback)})
gpt_feedback=None
trytime=0
while gpt_feedback is None and trytime<5:
gpt_feedback=gpt_predict(gpt_messages)
trytime+=1
if gpt_feedback is None:
raise BaseException("Please resume.")
# Append the latest solution and the GPT feedback to the running prompt.
prompt += """
{}
{}
""".format(solution["solution"], gpt_feedback)
pre_messages.append({"role":"user","content":gpt_feedback})
inputs = tokenizer.apply_chat_template(
[{'role': 'user', 'content': prompt }],
return_tensors="pt"
).to(model.device)
outputs = model.generate(
inputs,
max_new_tokens=max_new_tokens,  # TBD: the EvalPlus paper used 1024 for GPT and 512 for all other models
do_sample=False,
pad_token_id=tokenizer.eos_token_id,
eos_token_id=tokenizer.eos_token_id,
temperature=0,
)
output = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)
solution = {k:v for k,v in example.items()}
solution["solution"]=output
sanitized_solution = sanitize_solution(deepcopy(solution),flags.eofs)
return code,judge,modify
def get_groundtruth(problems, hashcode, tasks_only_output_not_none):
cache_file = os.path.join(CACHE_DIR, f"{hashcode}.pkl")
if os.path.exists(cache_file):
print(f"Load from ground-truth from {cache_file}")
with open(cache_file, "rb") as f:
return pickle.load(f)
os.makedirs(CACHE_DIR, exist_ok=True)
print("Computing expected output...")
tbegin = time.time()
expected_output = {}
for task_id, problem in problems.items():
oracle = {}
oracle["base"], oracle["base_time"] = trusted_exec(
problem["prompt"] + problem["canonical_solution"],
problem["base_input"],
problem["entry_point"],
record_time=True,
output_not_none=problem["entry_point"] in tasks_only_output_not_none,
)
oracle["plus"], oracle["plus_time"] = trusted_exec(
problem["prompt"] + problem["canonical_solution"],
problem["plus_input"],
problem["entry_point"],
record_time=True,
output_not_none=problem["entry_point"] in tasks_only_output_not_none,
)
expected_output[task_id] = oracle
print(f"Expected outputs computed in {time.time() - tbegin:.2f}s")
with open(cache_file, "wb") as f:
pickle.dump(expected_output, f)
return expected_output
def gen_solution(args):
a,b = 0,0
total_modify = 0
os.environ["TOKENIZERS_PARALLELISM"] = "false"
fail_list=[]
model_path = args.model
logging.info(f"model:{model_path}")
model_name =model_path.replace("/", "_")
lang = "python"
os.makedirs(os.path.join(args.output_path,model_name),exist_ok=True)
output_file = os.path.join(args.output_path,model_name,f"multiround_{args.dataset}_{args.version}_solutions-sanitized.jsonl")
if not args.resume:
if os.path.exists(output_file):
logging.info(f"Old sample jsonl file exists, remove it. {output_file}")
os.remove(output_file)
else:
existing_task_ids = set()
if os.path.exists(output_file):
with open(output_file, 'r') as file:
for line in file:
data = json.loads(line)
if 'task_id' in data:
existing_task_ids.add(data['task_id'])
a=data["pass_num"]
total_modify=data["total_modify"]
b=data["total_num"]-a
fail_list=data["fail_list"]
tokenizer = AutoTokenizer.from_pretrained(model_path)
logging.info("load tokenizer {} from {} over.".format(tokenizer.__class__, model_path))
model = AutoModelForCausalLM.from_pretrained(
model_path,
torch_dtype=torch.bfloat16,
device_map="auto",
)
model.eval()
modelname=model_path.replace("/", "_")
if args.dataset=="humaneval":
problems = get_human_eval_plus()
examples = problems.items()
dataset_hash = get_human_eval_plus_hash()
expected_outputs = get_groundtruth(problems, dataset_hash, [])
else:
problems = get_mbpp_plus()
examples = problems.items()
dataset_hash = get_mbpp_plus_hash()
expected_outputs = get_groundtruth(
problems,
dataset_hash,
MBPP_OUTPUT_NOT_NONE_TASKS,
)
logging.info("Read {} examples for evaluation over.".format(len(examples)))
for task_id,example in tqdm(examples, desc='Generating'):
if args.resume:
if task_id in existing_task_ids:
continue
problem = problems[task_id]
expected_output = expected_outputs[task_id]
code,judge,modify = generate_multi_round(problem,expected_output,example, lang, tokenizer, model, modelname,args)
if modify:
total_modify += 1
if judge:
a += 1
else:
b += 1
fail_list.append(task_id)
result = a/(a+b)
single_result = (a-total_modify)/(a+b)
print ("pass num ",a)
print ("total num",a+b)
print ("judge: ",judge)
print ('modify: '+str(modify))
print ("num modify: "+str(total_modify))
print ("fail list: ",fail_list)
print('multi-round rate: ' + str(result))
print('single-round rate: ' + str(single_result))
gen_sample=[dict(task_id=task_id, solution=code, pass_num=a, total_modify=total_modify, total_num=a+b, fail_list=fail_list)]
write_jsonl(output_file, gen_sample ,append=True) | null |
17,074 | import argparse
import os
import torch
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM
import logging
from evalplus.data import (get_human_eval_plus,
write_jsonl,
get_human_eval_plus_hash,
get_mbpp_plus,
get_mbpp_plus_hash,
)
from utils import sanitize_solution,check_correctness,get_groundtruth,SUCCESS
from evalplus.eval._special_oracle import MBPP_OUTPUT_NOT_NONE_TASKS
from copy import deepcopy
def generate_multi_round(problem, expected_output, example, lang, tokenizer, model, name, flags):
def get_groundtruth(problems, hashcode, tasks_only_output_not_none):
def gen_solution(args):
os.environ["TOKENIZERS_PARALLELISM"] = "false"
fail_list=[]
model_path = args.model
logging.info(f"model:{model_path}")
model_name =model_path.replace("/", "_")
lang = "python"
os.makedirs(os.path.join(args.output_path,model_name),exist_ok=True)
output_file = os.path.join(args.output_path,model_name,f"multiround_{args.dataset}_{args.version}_solutions-sanitized.jsonl")
if os.path.exists(output_file):
logging.info(f"Old sample jsonl file exists, remove it. {output_file}")
os.remove(output_file)
tokenizer = AutoTokenizer.from_pretrained(model_path)
logging.info("load tokenizer {} from {} over.".format(tokenizer.__class__, model_path))
model = AutoModelForCausalLM.from_pretrained(
model_path,
torch_dtype=torch.bfloat16,
device_map="auto",
)
model.eval()
modelname=model_path.replace("/", "_")
if args.dataset=="humaneval":
problems = get_human_eval_plus()
examples = problems.items()
dataset_hash = get_human_eval_plus_hash()
expected_outputs = get_groundtruth(problems, dataset_hash, [])
else:
problems = get_mbpp_plus()
examples = problems.items()
dataset_hash = get_mbpp_plus_hash()
expected_outputs = get_groundtruth(
problems,
dataset_hash,
MBPP_OUTPUT_NOT_NONE_TASKS,
)
logging.info("Read {} examples for evaluation over.".format(len(examples)))
a,b = 0,0
total_modify = 0
for task_id,example in tqdm(examples, desc='Generating'):
problem = problems[task_id]
expected_output = expected_outputs[task_id]
code,judge,modify = generate_multi_round(problem,expected_output,example, lang, tokenizer, model, modelname,args)
gen_sample=[dict(task_id=task_id, solution=code)]
write_jsonl(output_file, gen_sample ,append=True)
if modify:
total_modify += 1
if judge:
a += 1
else:
b += 1
fail_list.append(task_id)
result = a/(a+b)
print ("pass num :",a)
print ("total num:",a+b)
print ('pass rate: '+str(result))
print ("num modify: "+str(total_modify))
print ("judge:",judge)
print ('modify: '+str(modify))
print ("fail list:",fail_list) | null |
17,075 | import argparse
import os
import torch
from pathlib import Path
from tqdm import tqdm
import json
from transformers import AutoTokenizer, AutoModelForCausalLM
import logging
from evalplus.data import (get_human_eval_plus,
write_jsonl,
get_human_eval_plus_hash,
get_mbpp_plus,
get_mbpp_plus_hash,
)
from utils import sanitize_solution,check_correctness,get_groundtruth,SUCCESS
from evalplus.eval._special_oracle import MBPP_OUTPUT_NOT_NONE_TASKS
from copy import deepcopy
from chat_with_gpt import gpt_predict
def generate_multi_round(problem,expected_output, example, lang, tokenizer, model,name,flags):
def get_groundtruth(problems, hashcode, tasks_only_output_not_none):
def gen_solution(args):
a,b = 0,0
total_modify = 0
os.environ["TOKENIZERS_PARALLELISM"] = "false"
fail_list=[]
model_path = args.model
logging.info(f"model:{model_path}")
model_name =model_path.replace("/", "_")
lang = "python"
os.makedirs(os.path.join(args.output_path,model_name),exist_ok=True)
output_file = os.path.join(args.output_path,model_name,f"multiround_{args.dataset}_{args.version}_solutions-sanitized.jsonl")
if not args.resume:
if os.path.exists(output_file):
logging.info(f"Old sample jsonl file exists, remove it. {output_file}")
os.remove(output_file)
else:
existing_task_ids = set()
if os.path.exists(output_file):
with open(output_file, 'r') as file:
for line in file:
data = json.loads(line)
if 'task_id' in data:
existing_task_ids.add(data['task_id'])
a=data["pass_num"]
total_modify=data["total_modify"]
b=data["total_num"]-a
fail_list=data["fail_list"]
tokenizer = AutoTokenizer.from_pretrained(model_path)
logging.info("load tokenizer {} from {} over.".format(tokenizer.__class__, model_path))
model = AutoModelForCausalLM.from_pretrained(
model_path,
torch_dtype=torch.bfloat16,
device_map="auto",
)
model.eval()
modelname=model_path.replace("/", "_")
if args.dataset=="humaneval":
problems = get_human_eval_plus()
examples = problems.items()
dataset_hash = get_human_eval_plus_hash()
expected_outputs = get_groundtruth(problems, dataset_hash, [])
else:
problems = get_mbpp_plus()
examples = problems.items()
dataset_hash = get_mbpp_plus_hash()
expected_outputs = get_groundtruth(
problems,
dataset_hash,
MBPP_OUTPUT_NOT_NONE_TASKS,
)
logging.info("Read {} examples for evaluation over.".format(len(examples)))
for task_id,example in tqdm(examples, desc='Generating'):
if args.resume:
if task_id in existing_task_ids:
continue
problem = problems[task_id]
# print("problem:\n",problem.keys())
# print("\n",problem)
expected_output = expected_outputs[task_id]
code,judge,modify = generate_multi_round(problem,expected_output,example, lang, tokenizer, model, modelname,args)
if modify:
total_modify += 1
if judge:
a += 1
else:
b += 1
fail_list.append(task_id)
result = a/(a+b)
single_result = (a-total_modify)/(a+b)
print ("pass num ",a)
print ("totalnum ",a+b)
print ("judge: ",judge)
print ('modify: '+str(modify))
print ("num modify: "+str(total_modify))
print ("fail list: ",fail_list)
print('multi-round rate: ' + str(result))
print('single-round rate: ' + str(single_result))
gen_sample=[dict(task_id=task_id, solution=code, pass_num=a, total_modify=total_modify, total_num=a+b, fail_list=fail_list)]
write_jsonl(output_file, gen_sample ,append=True) | null |
17,076 | import os
from evalplus.sanitize import sanitize
from typing import Any, Dict, List, Optional, Tuple, Union
import itertools
import multiprocessing
import time
from multiprocessing import Array, Value
from typing import Any, Dict, List, Tuple, Union
import numpy as np
import pickle
from evalplus.data.utils import CACHE_DIR
from evalplus.eval import *
from evalplus.gen.util import trusted_exec
from evalplus.eval._special_oracle import MBPP_OUTPUT_NOT_NONE_TASKS, _poly
from evalplus.eval.utils import TimeoutException
from evalplus.eval.utils import (
create_tempdir,
reliability_guard,
swallow_io,
time_limit,
)
import re
import resource
import traceback
def remove_unindented_lines(code, protect_before, exceptions, trim_tails):
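# Drop top-level (unindented) lines that appear after the protected prefix, keeping listed exceptions;
# once a trim-tail marker is seen, everything after it is cut off.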
lines = code.splitlines()
cut_idx = []
cut_enabled = False
for i, line in enumerate(lines):
if not cut_enabled and line.startswith(protect_before):
cut_enabled = True
continue
if line.strip() == "":
continue
if any(line.startswith(e) for e in exceptions):
continue
lspace = len(line) - len(line.lstrip())
if lspace == 0:
cut_idx.append(i)
if any(line.rstrip().startswith(t) for t in trim_tails):
# cut off everything behind
cut_idx.extend(list(range(i, len(lines))))
break
return "\n".join([line for i, line in enumerate(lines) if i not in cut_idx]) | null |
17,077 | import os
from evalplus.sanitize import sanitize
from typing import Any, Dict, List, Optional, Tuple, Union
import itertools
import multiprocessing
import time
from multiprocessing import Array, Value
from typing import Any, Dict, List, Tuple, Union
import numpy as np
import pickle
from evalplus.data.utils import CACHE_DIR
from evalplus.eval import *
from evalplus.gen.util import trusted_exec
from evalplus.eval._special_oracle import MBPP_OUTPUT_NOT_NONE_TASKS, _poly
from evalplus.eval.utils import TimeoutException
from evalplus.eval.utils import (
create_tempdir,
reliability_guard,
swallow_io,
time_limit,
)
import re
import resource
import traceback
def to_four_space_indents(old_code):
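# Lines indented with exactly 3 spaces get one extra leading space, normalizing them to 4-space indents.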
new_code = ""
for line in old_code.splitlines():
lspace = len(line) - len(line.lstrip())
if lspace == 3:
new_code += " "
new_code += line + "\n"
return new_code | null |
17,078 | import json
import os
from abc import ABC, abstractmethod
from typing import List
from warnings import warn
import openai
import torch
from transformers import (
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
AutoTokenizer,
StoppingCriteria,
StoppingCriteriaList,
)
from vllm import LLM, SamplingParams
from evalplus.gen.util.api_request import make_auto_request
class VLlmDecoder(DecoderBase):
def __init__(self, name: str, **kwargs) -> None:
super().__init__(name, **kwargs)
kwargs = {"tensor_parallel_size": int(os.getenv("VLLM_N_GPUS", "1"))}
if "CodeLlama" in name:
kwargs["dtype"] = "bfloat16"
elif "code-millenials" in name:
kwargs["dtype"] = "float16"
elif "uukuguy/speechless-code-mistral-7b-v1.0" == name:
kwargs["dtype"] = "float16"
elif "uukuguy/speechless-codellama-34b-v2.0" == name:
kwargs["dtype"] = "float16"
elif "CodeBooga" in name:
kwargs["dtype"] = "float16"
elif "WizardCoder" in name:
kwargs["dtype"] = "float16"
elif "deepseek" in name:
kwargs["dtype"] = "bfloat16"
elif "mixtral" in name.lower():
kwargs["dtype"] = "bfloat16"
elif "solar" in name:
kwargs["dtype"] = "float16"
elif "mistral" in name.lower():
kwargs["dtype"] = "bfloat16"
elif "phi" in name.lower():
kwargs["dtype"] = "float16"
kwargs["trust_remote_code"] = True
elif "openchat" in name.lower():
kwargs["dtype"] = "bfloat16"
self.llm = LLM(model=name, max_model_len=2048, **kwargs)
def codegen(
self, prompt: str, do_sample: bool = True, num_samples: int = 200
) -> List[str]:
if do_sample:
assert self.temperature > 0, "Temperature must be greater than 0!"
batch_size = min(self.batch_size, num_samples)
vllm_outputs = self.llm.generate(
[prompt] * batch_size,
SamplingParams(
temperature=self.temperature,
max_tokens=self.max_new_tokens,
top_p=0.95 if do_sample else 1.0,
stop=self.eos,
),
use_tqdm=False,
)
gen_strs = [x.outputs[0].text.replace("\t", " ") for x in vllm_outputs]
return gen_strs
class ChatML(VLlmDecoder):
def __init__(self, name: str, **kwargs) -> None:
kwargs["conversational"] = True
super().__init__(name, **kwargs)
self.eos += ["\n```"]
def codegen(
self, prompt: str, do_sample: bool = True, num_samples: int = 200
) -> List[str]:
if do_sample:
assert self.temperature > 0, "Temperature must be greater than 0!"
input = f"""<|im_start|>system
You are an intelligent programming assistant to produce Python algorithmic solutions<|im_end|>
<|im_start|>user
Can you complete the following Python function?
```python
{prompt}
```
<|im_end|>
<|im_start|>assistant
```python
"""
return VLlmDecoder.codegen(self, input, do_sample, num_samples)
class CodeLlamaInstruct(VLlmDecoder):
def __init__(self, name: str, **kwargs) -> None:
kwargs["conversational"] = True
super().__init__(name, **kwargs)
self.eos += ["\n```"]
def codegen(
self, prompt: str, do_sample: bool = True, num_samples: int = 200
) -> List[str]:
if do_sample:
assert self.temperature > 0, "Temperature must be greater than 0!"
input = f"""[INST] Write code to solve the following coding problem that obeys the constraints and passes the example test cases. Please wrap your code answer using ```:
```python
{prompt}
```
[/INST]
```python
"""
return VLlmDecoder.codegen(self, input, do_sample, num_samples)
class Zyte(VLlmDecoder):
def __init__(self, name: str, **kwargs) -> None:
kwargs["conversational"] = True
super().__init__(name, **kwargs)
self.eos += ["\n```"]
def codegen(
self, prompt: str, do_sample: bool = True, num_samples: int = 200
) -> List[str]:
if do_sample:
assert self.temperature > 0, "Temperature must be greater than 0!"
input = f"""<|system|>You are an intelligent programming assistant to produce Python algorithmic solutions</s>
<|user|>Can you complete the following Python function?
```python
{prompt}
```
</s>
<|assistant|>
```python
"""
return VLlmDecoder.codegen(self, input, do_sample, num_samples)
class OpenChat(VLlmDecoder):
def __init__(self, name: str, **kwargs) -> None:
kwargs["conversational"] = True
super().__init__(name, **kwargs)
self.eos += ["\n```"]
def codegen(
self, prompt: str, do_sample: bool = True, num_samples: int = 200
) -> List[str]:
if do_sample:
assert self.temperature > 0, "Temperature must be greater than 0!"
input = f"""GPT4 Correct User: Can you complete the following Python function?
```python
{prompt}
```
<|end_of_turn|>GPT4 Correct Assistant:
```python
"""
return VLlmDecoder.codegen(self, input, do_sample, num_samples)
class Solar(VLlmDecoder):
def __init__(self, name: str, **kwargs) -> None:
super().__init__(name, **kwargs)
self.eos += ["\n```"]
def codegen(
self, prompt: str, do_sample: bool = True, num_samples: int = 200
) -> List[str]:
if do_sample:
assert self.temperature > 0, "Temperature must be greater than 0!"
input = f"""<s> ### User:
Can you solve and complete the Python function below?
```python
{prompt}
```
### Assistant:
Sure!
```python
"""
return VLlmDecoder.codegen(self, input, do_sample, num_samples)
class Alpaca(VLlmDecoder):
def __init__(self, name: str, **kwargs) -> None:
kwargs["conversational"] = True
super().__init__(name, **kwargs)
self.eos += ["\n```"]
def codegen(
self, prompt: str, do_sample: bool = True, num_samples: int = 200
) -> List[str]:
prompt = f"""Below is an instruction that describes a task. Write a response that appropriately completes request.
### Instruction:
Create a Python script for this problem:
{prompt}
### Response:
```python
"""
return VLlmDecoder.codegen(self, prompt, do_sample, num_samples)
class HFTorchDecoder(DecoderBase):
def __init__(self, name: str, **kwargs):
super().__init__(name=name, **kwargs)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
kwargs = {
"trust_remote_code": name
in {
"bigcode/santacoder",
"Salesforce/codegen2-1B",
"Salesforce/codegen2-3_7B",
"Salesforce/codegen2-7B",
"Salesforce/codegen2-16B",
"deepseek-ai/deepseek-coder-6.7b-base",
"deepseek-ai/deepseek-coder-33b-base",
"stabilityai/stable-code-3b",
}
}
if "codegen-" in name: # use fp16 for codegen models
kwargs["torch_dtype"] = torch.float16
if "codegen2-" in name: # avoid warning of trust remote code
kwargs["revision"] = "main"
if "16b" in name.lower():
kwargs["device_map"] = "auto"
if "starcoder" in name:
kwargs["torch_dtype"] = torch.bfloat16
if "CodeLlama" in name:
if "34b" in name.lower():
kwargs["device_map"] = "auto"
kwargs["torch_dtype"] = torch.bfloat16
self.skip_special_tokens = True
if "CodeBooga" in name:
kwargs["torch_dtype"] = torch.float16
kwargs["device_map"] = "auto"
self.skip_special_tokens = True
if "Mistral-7B-codealpaca-lora" == name:
kwargs["torch_dtype"] = torch.float16
self.skip_special_tokens = True
elif "Mistral" in name or "zephyr-7b-beta" in name:
kwargs["torch_dtype"] = torch.bfloat16
if "deepseek" in name:
kwargs["torch_dtype"] = torch.bfloat16
self.skip_special_tokens = True
if "/phi" in name:
kwargs["torch_dtype"] = torch.float16
kwargs["trust_remote_code"] = True
self.skip_special_tokens = True
print(f"{kwargs = }")
self.tokenizer = AutoTokenizer.from_pretrained(name)
self.model = AutoModelForCausalLM.from_pretrained(name, **kwargs)
if name in {"StabilityAI/stablelm-base-alpha-7b"}:
print("Switching to float16 ...")
self.model = self.model.half()
self.skip_special_tokens = True
self.model = self.model.to(self.device)
def codegen(
self, prompt: str, do_sample: bool = True, num_samples: int = 200
) -> List[str]:
if self.temperature == 0:
assert not do_sample
assert num_samples == 1
input_tokens = self.tokenizer.encode(prompt, return_tensors="pt").to(
self.device
)
scores = StoppingCriteriaList(
[
EndOfFunctionCriteria(
start_length=len(input_tokens[0]),
eos=self.eos,
tokenizer=self.tokenizer,
)
]
)
kwargs = {}
if do_sample:
kwargs["top_p"] = 0.95
kwargs["temperature"] = self.temperature
raw_outputs = self.model.generate(
input_tokens,
max_new_tokens=self.max_new_tokens,
stopping_criteria=scores,
do_sample=do_sample,
output_scores=True,
return_dict_in_generate=True,
num_return_sequences=min(self.batch_size, num_samples),
pad_token_id=self.tokenizer.eos_token_id,
**kwargs,
) # remove warning
gen_seqs = raw_outputs.sequences[:, len(input_tokens[0]) :]
gen_strs = self.tokenizer.batch_decode(
gen_seqs, skip_special_tokens=self.skip_special_tokens
)
outputs = []
# removes eos tokens.
for output in gen_strs:
min_index = 10000
for eos in self.eos:
if eos in output:
# could be multiple eos in outputs, better pick minimum one
min_index = min(min_index, output.index(eos))
outputs.append(output[:min_index])
return outputs
class DeepSeekInstruct(VLlmDecoder):
def __init__(self, name: str, **kwargs) -> None:
kwargs["conversational"] = True
super().__init__(name, **kwargs)
self.eos += ["\n```"]
def codegen(
self, prompt: str, do_sample: bool = True, num_samples: int = 200
) -> List[str]:
prompt = f"""You are an AI programming assistant, utilizing the DeepSeek Coder model, developed by DeepSeek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.
### Instruction:
Please complete the following Python function in a markdown style code block:
```python
{prompt}
```
### Response:
```python
"""
return VLlmDecoder.codegen(self, prompt, do_sample, num_samples)
class OpenAIChatDecoder(DecoderBase):
def __init__(self, name: str, **kwargs) -> None:
super().__init__(name, **kwargs)
self.client = openai.OpenAI()
def codegen(
self, prompt: str, do_sample: bool = True, num_samples: int = 200
) -> List[str]:
if do_sample:
assert self.temperature > 0, "Temperature must be positive for sampling"
batch_size = min(self.batch_size, num_samples)
        assert batch_size <= 20, "Using a larger batch size could blow up the memory!"
# construct prompt
fmt = "json_object" if self.name == "gpt-4-1106-preview" else "text"
if fmt == "json_object":
message = r'Please complete the following code snippet by generating JSON like {"code": ""}'
else:
message = r"Please generate code to complete the following problem:"
message += f"\n```python\n{prompt.strip()}\n```"
ret = make_auto_request(
self.client,
message=message,
model=self.name,
max_tokens=self.max_new_tokens,
temperature=self.temperature,
n=batch_size,
response_format={"type": fmt},
)
outputs = []
for item in ret.choices:
content = item.message.content
# if json serializable
if fmt == "json_object":
try:
json_data = json.loads(content)
if json_data.get("code", None) is not None:
outputs.append(prompt + "\n" + json_data["code"])
continue
print(f"'code' field not found in: {json_data}")
except Exception as e:
print(e)
outputs.append(content)
return outputs
class IncoderDecoder(HFTorchDecoder):
def __init__(self, name: str, **kwargs) -> None:
super().__init__(name, **kwargs)
self.infill_ph = "<|mask:0|>"
self.extra_end = "<|mask:1|><|mask:0|>"
self.extra_eos = [
"<|endofmask|>",
"<|/ file",
"</cell>",
"</text>",
"</code>",
"<|",
"</CODE>",
]
self.eos = self.eos + self.extra_eos
def codegen(
self, prompt: str, do_sample: bool = True, num_samples: int = 200
) -> List[str]:
input = prompt + self.infill_ph + self.extra_end
input_tokens = self.tokenizer.encode(input, return_tensors="pt").to(self.device)
scores = StoppingCriteriaList(
[
EndOfFunctionCriteria(
start_length=len(input_tokens[0]),
eos=self.eos,
tokenizer=self.tokenizer,
)
]
)
raw_outputs = self.model.generate(
input_tokens,
max_new_tokens=self.max_new_tokens,
stopping_criteria=scores,
do_sample=do_sample,
top_p=0.95,
top_k=None,
temperature=self.temperature,
num_return_sequences=min(self.batch_size, num_samples),
output_scores=True,
return_dict_in_generate=True,
)
gen_seqs = raw_outputs.sequences[:, len(input_tokens[0]) :]
gen_strs = self.tokenizer.batch_decode(
gen_seqs, skip_special_tokens=self.skip_special_tokens
)
outputs = []
# removes eos tokens.
for output in gen_strs:
min_index = 10000
for eos in self.eos:
if eos in output:
min_index = min(min_index, output.index(eos))
outputs.append(output[:min_index])
return outputs
class Codegen2Decoder(HFTorchDecoder):
def __init__(self, name: str, **kwargs) -> None:
super().__init__(name, **kwargs)
self.infill_ph = "<mask_1>"
# taken from: https://huggingface.co/Salesforce/codegen2-16B
self.extra_end = "<|endoftext|><sep><mask_1>"
self.extra_eos = ["<eom>"]
self.eos = self.eos + self.extra_eos
def codegen(
self, prompt: str, do_sample: bool = True, num_samples: int = 200
) -> List[str]:
if self.temperature == 0:
assert not do_sample
assert num_samples == 1
input = prompt + self.infill_ph + self.extra_end
input_tokens = self.tokenizer.encode(input, return_tensors="pt").to(self.device)
scores = StoppingCriteriaList(
[
EndOfFunctionCriteria(
start_length=len(input_tokens[0]),
eos=self.eos,
tokenizer=self.tokenizer,
)
]
)
raw_outputs = self.model.generate(
input_tokens,
max_new_tokens=self.max_new_tokens,
stopping_criteria=scores,
do_sample=do_sample,
top_p=0.95,
top_k=None,
temperature=self.temperature,
output_scores=True,
return_dict_in_generate=True,
num_return_sequences=min(self.batch_size, num_samples),
pad_token_id=self.tokenizer.eos_token_id,
)
gen_seqs = raw_outputs.sequences[:, len(input_tokens[0]) :]
gen_strs = self.tokenizer.batch_decode(
gen_seqs, skip_special_tokens=self.skip_special_tokens
)
outputs = []
# removes eos tokens.
for output in gen_strs:
min_index = 10000
for eos in self.eos:
if eos in output:
min_index = min(min_index, output.index(eos))
outputs.append(output[:min_index])
return outputs
class SantaCoder(HFTorchDecoder):
def __init__(self, name: str, **kwargs) -> None:
super().__init__(name, **kwargs)
self.prefix_token = "<fim-prefix>"
self.suffix_token = "<fim-suffix>\n<fim-middle>"
self.extra_eos = ["<|endofmask|>"]
self.eos = self.eos + self.extra_eos
def codegen(
self, prompt: str, do_sample: bool = True, num_samples: int = 200
) -> List[str]:
if self.temperature == 0:
assert not do_sample
assert num_samples == 1
input = self.prefix_token + prompt + self.suffix_token
input_tokens = self.tokenizer.encode(input, return_tensors="pt").to(self.device)
scores = StoppingCriteriaList(
[
EndOfFunctionCriteria(
start_length=len(input_tokens[0]),
eos=self.eos,
tokenizer=self.tokenizer,
)
]
)
raw_outputs = self.model.generate(
input_tokens,
max_new_tokens=self.max_new_tokens,
stopping_criteria=scores,
do_sample=do_sample,
top_p=0.95,
top_k=None,
temperature=self.temperature,
num_return_sequences=min(self.batch_size, num_samples),
output_scores=True,
return_dict_in_generate=True,
pad_token_id=self.tokenizer.eos_token_id,
)
gen_seqs = raw_outputs.sequences[:, len(input_tokens[0]) :]
gen_strs = self.tokenizer.batch_decode(
gen_seqs,
skip_special_tokens=self.skip_special_tokens,
truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"],
)
outputs = []
# removes eos tokens.
for output in gen_strs:
min_index = 10000
for eos in self.eos:
if eos in output:
min_index = min(min_index, output.index(eos))
outputs.append(output[:min_index])
return outputs
class StarCoderInfill(HFTorchDecoder):
def __init__(self, name: str, **kwargs) -> None:
super().__init__(name, **kwargs)
self.prefix_token = "<fim_prefix>"
self.suffix_token = "<fim_suffix><fim_middle>"
def codegen(
self, prompt: str, do_sample: bool = True, num_samples: int = 200
) -> List[str]:
if self.temperature == 0:
assert not do_sample
assert num_samples == 1
input = self.prefix_token + prompt + self.suffix_token
input_tokens = self.tokenizer.encode(input, return_tensors="pt").to(self.device)
scores = StoppingCriteriaList(
[
EndOfFunctionCriteria(
start_length=len(input_tokens[0]),
eos=self.eos,
tokenizer=self.tokenizer,
)
]
)
kwargs = {}
if do_sample:
kwargs["top_p"] = 0.95
kwargs["temperature"] = max(self.temperature, 1e-2)
raw_outputs = self.model.generate(
input_tokens,
max_new_tokens=self.max_new_tokens,
stopping_criteria=scores,
do_sample=do_sample,
num_return_sequences=min(self.batch_size, num_samples),
output_scores=True,
return_dict_in_generate=True,
repetition_penalty=1.0,
pad_token_id=self.tokenizer.eos_token_id,
)
gen_seqs = raw_outputs.sequences[:, len(input_tokens[0]) :]
gen_strs = self.tokenizer.batch_decode(
gen_seqs, skip_special_tokens=self.skip_special_tokens
)
outputs = []
# removes eos tokens.
for output in gen_strs:
min_index = 10000
for eos in self.eos:
if eos in output:
min_index = min(min_index, output.index(eos))
outputs.append(output[:min_index])
return outputs
class CodeT5P(DecoderBase):
def __init__(self, name: str, **kwargs):
super().__init__(name=name, **kwargs)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
assert name in {
"Salesforce/codet5p-2b",
"Salesforce/codet5p-6b",
"Salesforce/codet5p-16b",
"Salesforce/instructcodet5p-16b",
}
self.tokenizer = AutoTokenizer.from_pretrained(name)
self.model = AutoModelForSeq2SeqLM.from_pretrained(
name,
trust_remote_code=True, # False for 220m and 770m models
torch_dtype=torch.float16,
low_cpu_mem_usage=True,
)
self.model.eval()
self.model.to(self.device)
self.skip_special_tokens = True
def codegen(
self, prompt: str, do_sample: bool = True, num_samples: int = 200
) -> List[str]:
if self.temperature == 0:
assert not do_sample
assert num_samples == 1
prompt = prompt.replace(" ", "\t")
input_tokens = self.tokenizer(prompt, return_tensors="pt").to(self.device)
scores = StoppingCriteriaList(
[
EndOfFunctionCriteria(
start_length=len(input_tokens[0]),
eos=self.eos,
tokenizer=self.tokenizer,
)
]
)
max_new_tokens = self.max_new_tokens
while max_new_tokens > 0:
try:
raw_outputs = self.model.generate(
**input_tokens,
decoder_input_ids=input_tokens["input_ids"],
max_new_tokens=max_new_tokens,
stopping_criteria=scores,
do_sample=do_sample,
top_p=0.95,
top_k=None,
temperature=self.temperature,
output_scores=True,
return_dict_in_generate=True,
num_return_sequences=min(self.batch_size, num_samples),
pad_token_id=self.tokenizer.eos_token_id,
decoder_start_token_id=self.tokenizer.pad_token_id,
) # remove warning
except RuntimeError as e: # catch torch OOM
if "CUDA out of memory" in str(e):
old_max_new_tokens = max_new_tokens
max_new_tokens = int(max_new_tokens * 0.8)
print(
f"OOM, reducing max_new_tokens from {old_max_new_tokens} to {max_new_tokens}"
)
continue
else:
raise e
break
gen_seqs = raw_outputs.sequences[:, len(input_tokens[0]) :]
gen_strs = self.tokenizer.batch_decode(
gen_seqs, skip_special_tokens=self.skip_special_tokens
)
outputs = []
# removes eos tokens.
for output in gen_strs:
min_index = 10000
for eos in self.eos:
if eos in output:
# could be multiple eos in outputs, better pick minimum one
min_index = min(min_index, output.index(eos))
outputs.append(output[:min_index].replace("\t", " "))
return outputs
def make_model(name: str, batch_size: int = 1, temperature: float = 0.8):
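    # Factory mapping short model aliases to a decoder class and its Hugging Face checkpoint.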
if name == "codegen-2b":
return HFTorchDecoder(
batch_size=batch_size,
name="Salesforce/codegen-2B-mono",
temperature=temperature,
)
elif name == "codegen-6b":
return HFTorchDecoder(
batch_size=batch_size,
name="Salesforce/codegen-6B-mono",
temperature=temperature,
)
elif name == "codegen-16b":
return HFTorchDecoder(
batch_size=batch_size,
name="Salesforce/codegen-16B-mono",
temperature=temperature,
)
elif name == "codegen2-1b":
return Codegen2Decoder(
batch_size=batch_size,
name="Salesforce/codegen2-1B",
temperature=temperature,
)
elif name == "codegen2-3b":
return Codegen2Decoder(
batch_size=batch_size,
name="Salesforce/codegen2-3_7B",
temperature=temperature,
)
elif name == "codegen2-7b":
return Codegen2Decoder(
batch_size=batch_size,
name="Salesforce/codegen2-7B",
temperature=temperature,
)
elif name == "codegen2-16b":
warn(
"codegen2-16b checkpoint is `unfinished` at this point (05/11/2023) according to their paper. "
"So it might not make sense to use it."
)
return Codegen2Decoder(
batch_size=batch_size,
name="Salesforce/codegen2-16B",
temperature=temperature,
)
elif name == "polycoder":
return HFTorchDecoder(
batch_size=batch_size,
name="NinedayWang/PolyCoder-2.7B",
temperature=temperature,
)
elif name == "santacoder":
return SantaCoder(
batch_size=batch_size, name="bigcode/santacoder", temperature=temperature
)
elif name == "incoder-1b":
return IncoderDecoder(
batch_size=batch_size, name="facebook/incoder-1B", temperature=temperature
)
elif name == "incoder-6b":
return IncoderDecoder(
batch_size=batch_size, name="facebook/incoder-6B", temperature=temperature
)
elif name == "stablelm-7b":
return HFTorchDecoder(
batch_size=batch_size,
name="StabilityAI/stablelm-base-alpha-7b",
temperature=temperature,
)
elif name.startswith("gpt-3.5-") or name.startswith("gpt-4-"):
return OpenAIChatDecoder(
batch_size=batch_size,
name=name,
temperature=temperature,
conversational=True,
)
elif name == "gptneo-2b":
return HFTorchDecoder(
batch_size=batch_size,
name="EleutherAI/gpt-neo-2.7B",
temperature=temperature,
)
elif name == "gpt-j":
return HFTorchDecoder(
batch_size=batch_size, name="EleutherAI/gpt-j-6B", temperature=temperature
)
elif name.startswith("starcoder"):
return StarCoderInfill(
batch_size=batch_size, name=f"bigcode/{name}", temperature=temperature
)
elif name == "codet5p-2b":
return CodeT5P(
batch_size=batch_size,
name="Salesforce/codet5p-2b",
temperature=temperature,
)
elif name == "codet5p-6b":
return CodeT5P(
batch_size=batch_size,
name="Salesforce/codet5p-6b",
temperature=temperature,
)
elif name == "codet5p-16b":
return CodeT5P(
batch_size=batch_size,
name="Salesforce/codet5p-16b",
temperature=temperature,
)
elif name.startswith("code-llama-"):
if name.endswith("instruct"):
nb = name.split("-")[2]
assert nb.endswith("b")
return CodeLlamaInstruct(
batch_size=batch_size,
name=f"codellama/CodeLlama-{nb}-Instruct-hf",
temperature=temperature,
)
assert name.endswith("b")
nb = name.split("-")[-1]
return VLlmDecoder(
batch_size=batch_size,
name=f"codellama/CodeLlama-{nb}-Python-hf",
temperature=temperature,
)
elif name.startswith("deepseek-coder"):
import re
# format deepseek-coder-{nb}b*
pattern = re.compile(r"deepseek-coder-(\d+\.?\d*)b(.*)")
matches = pattern.findall(name)[0]
nb = float(matches[0])
if nb.is_integer():
nb = int(nb)
if "instruct" in name:
return DeepSeekInstruct(
batch_size=batch_size,
name=f"deepseek-ai/deepseek-coder-{nb}b-instruct",
temperature=temperature,
conversational=True,
)
else:
return VLlmDecoder(
batch_size=batch_size,
name=f"deepseek-ai/deepseek-coder-{nb}b-base",
temperature=temperature,
)
elif name == "wizardcoder-34b":
return Alpaca(
batch_size=batch_size,
name="WizardLM/WizardCoder-Python-34B-V1.0",
temperature=temperature,
conversational=True,
)
elif name == "wizardcoder-15b":
return Alpaca(
batch_size=batch_size,
name="WizardLM/WizardCoder-15B-V1.0",
temperature=temperature,
conversational=True,
)
elif name == "wizardcoder-7b":
return Alpaca(
batch_size=batch_size,
name="WizardLM/WizardCoder-Python-7B-V1.0",
temperature=temperature,
conversational=True,
)
elif name == "mistral-7b-codealpaca":
return HFTorchDecoder(
batch_size=batch_size,
name="Nondzu/Mistral-7B-codealpaca-lora",
temperature=temperature,
)
elif name == "zephyr-7b":
return HFTorchDecoder(
batch_size=batch_size,
name="HuggingFaceH4/zephyr-7b-beta",
temperature=temperature,
)
elif name == "codebooga-34b":
return HFTorchDecoder(
batch_size=batch_size,
name="oobabooga/CodeBooga-34B-v0.1",
temperature=temperature,
)
elif name == "phind-code-llama-34b-v2":
return HFTorchDecoder(
batch_size=batch_size,
name="Phind/Phind-CodeLlama-34B-v2",
temperature=temperature,
)
elif name == "mistral-7b":
return HFTorchDecoder(
batch_size=batch_size,
name="mistralai/Mistral-7B-v0.1",
temperature=temperature,
)
elif name == "dolphin-2.6":
return ChatML(
batch_size=batch_size,
name="cognitivecomputations/dolphin-2.6-mixtral-8x7b",
temperature=temperature,
max_new_tokens=512 + 256,
)
elif name == "solar-10.7b-instruct":
return Solar(
batch_size=batch_size,
name="upstage/SOLAR-10.7B-Instruct-v1.0",
temperature=temperature,
conversational=True,
)
elif name == "mistral-hermes-codepro-7b":
return ChatML(
batch_size=batch_size,
name="beowolx/MistralHermes-CodePro-7B-v1",
temperature=temperature,
max_new_tokens=512 + 256,
)
elif name == "phi-2":
return VLlmDecoder(
batch_size=batch_size,
name="microsoft/phi-2",
temperature=temperature,
)
elif name == "openchat":
return OpenChat(
batch_size=batch_size,
name="openchat/openchat-3.5-0106",
temperature=temperature,
conversational=True,
)
elif name == "speechless-codellama-34b":
return Alpaca(
batch_size=batch_size,
name="uukuguy/speechless-codellama-34b-v2.0",
temperature=temperature,
conversational=True,
)
elif name == "speechless-mistral-7b":
return Alpaca(
batch_size=batch_size,
name="uukuguy/speechless-code-mistral-7b-v1.0",
temperature=temperature,
conversational=True,
)
elif name == "code-millenials-34b":
return Alpaca(
batch_size=batch_size,
name="budecosystem/code-millenials-34b",
temperature=temperature,
conversational=True,
)
elif name == "xdan-l1-chat":
return Alpaca(
batch_size=batch_size,
name="xDAN-AI/xDAN-L1-Chat-dpo-qlora-v1",
temperature=temperature,
conversational=True,
)
elif name == "stable-code-3b":
return HFTorchDecoder(
batch_size=batch_size,
name="stabilityai/stable-code-3b",
temperature=temperature,
)
elif name == "zyte-1b":
return Zyte(
batch_size=batch_size,
name="aihub-app/zyte-1B",
temperature=temperature,
conversational=True,
)
raise ValueError(f"Invalid model name: {name}") | null |
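A minimal usage sketch of make_model (the prompt below is made up; any name handled in the chain above works the same way):
# Hypothetical usage: build a decoder and draw a few samples.
model = make_model("codet5p-2b", batch_size=4, temperature=0.8)
samples = model.codegen("def add(a, b):\n    ", do_sample=True, num_samples=4)
for sample in samples:
    print(sample)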
17,079 | import argparse
import os
from abc import ABC, abstractmethod  # needed by DecoderBase below
from os import PathLike
from typing import List  # needed by DecoderBase.codegen's signature
from model import DecoderBase, make_model  # DecoderBase is re-declared below in this snippet
from rich.progress import (
BarColumn,
MofNCompleteColumn,
Progress,
TextColumn,
TimeElapsedColumn,
)
def construct_contract_prompt(prompt: str, contract_type: str, contract: str) -> str:
if contract_type == "none":
return prompt
elif contract_type == "docstring":
# embed within the docstring
sep = ""
if '"""' in prompt:
sep = '"""'
elif "'''" in prompt:
sep = "'''"
assert sep != ""
l = prompt.split(sep)
contract = "\n".join([x.split("#")[0] for x in contract.splitlines()])
l[1] = (
l[1] + contract + "\n" + " " * (len(contract) - len(contract.lstrip()) - 1)
)
return sep.join(l)
elif contract_type == "code":
# at the beginning of the function
contract = "\n".join([x.split("#")[0] for x in contract.splitlines()])
return prompt + contract
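For illustration, the two embedding modes differ as follows on a toy prompt (all values made up):
prompt = 'def inc(x):\n    """Increment x."""\n'
contract = '    assert isinstance(x, int)  # $_CONTRACT_$\n'
# "code" mode appends the (comment-stripped) contract right after the prompt:
print(construct_contract_prompt(prompt, "code", contract))
# "docstring" mode instead splices the contract into the docstring, so the
# model sees the constraints as part of the task description.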
class DecoderBase(ABC):
def __init__(
self,
name: str,
batch_size: int = 1,
temperature: float = 0.8,
max_new_tokens: int = 512,
conversational: bool = False,
max_conversational_new_tokens: int = 1024,
) -> None:
print("Initializing a decoder model: {} ...".format(name))
self.name = name
self.batch_size = batch_size
self.temperature = temperature
        self.eos = EOS  # EOS: module-level stop-string list (not included in this snippet)
self.skip_special_tokens = False
self.max_new_tokens = (
max_conversational_new_tokens if conversational else max_new_tokens
)
self.conversational = conversational
    @abstractmethod
    def codegen(
        self, prompt: str, do_sample: bool = True, num_samples: int = 200
    ) -> List[str]:
        pass
def __repr__(self) -> str:
return self.name
def __str__(self) -> str:
return self.name
def code_generate(args, workdir: PathLike, model: DecoderBase, id_range=None):
with Progress(
TextColumn(
f"{args.dataset} •" + "[progress.percentage]{task.percentage:>3.0f}%"
),
BarColumn(),
MofNCompleteColumn(),
TextColumn("•"),
TimeElapsedColumn(),
) as p:
if args.dataset == "humaneval":
from evalplus.data import get_human_eval_plus
dataset = get_human_eval_plus()
elif args.dataset == "mbpp":
from evalplus.data import get_mbpp_plus
dataset = get_mbpp_plus()
for task_id, task in p.track(dataset.items()):
if id_range is not None:
id_num = int(task_id.split("/")[1])
low, high = id_range
if id_num < low or id_num >= high:
p.console.print(f"Skipping {task_id} as it is not in {id_range}")
continue
p_name = task_id.replace("/", "_")
if args.contract_type != "none" and task["contract"] == "":
continue
os.makedirs(os.path.join(workdir, p_name), exist_ok=True)
log = f"Codegen: {p_name} @ {model}"
n_existing = 0
if args.resume:
# count existing .py files
n_existing = len(
[
f
for f in os.listdir(os.path.join(workdir, p_name))
if f.endswith(".py")
]
)
if n_existing > 0:
log += f" (resuming from {n_existing})"
nsamples = args.n_samples - n_existing
p.console.print(log)
sidx = args.n_samples - nsamples
while sidx < args.n_samples:
outputs = model.codegen(
construct_contract_prompt(
task["prompt"], args.contract_type, task["contract"]
),
do_sample=not args.greedy,
num_samples=args.n_samples - sidx,
)
assert outputs, "No outputs from model!"
for impl in outputs:
try:
with open(
os.path.join(workdir, p_name, f"{sidx}.py"),
"w",
encoding="utf-8",
) as f:
if model.conversational:
f.write(impl)
else:
f.write(task["prompt"] + impl)
except UnicodeEncodeError:
continue
sidx += 1 | null |
17,080 | import json
import os
import pathlib
import shutil
from importlib import util
from inspect import getmembers, isfunction
from typing import Tuple
from tempdir import TempDir
from evalplus.data.mbpp import get_mbpp, mbpp_serialize_inputs
GROUNDTRUTH_MBPP_PATH = pathlib.Path(__file__).parent.parent.parent / "groundtruth/mbpp"
def get_entry_point(task_id: int, assertion: str) -> str:
py_file_path = str(GROUNDTRUTH_MBPP_PATH) + f"/{str(task_id).zfill(3)}.py"
spec = util.spec_from_file_location("inspect_module", py_file_path)
module = util.module_from_spec(spec)
spec.loader.exec_module(module)
functions = [name for name, value in getmembers(module, isfunction)]
    # helper functions may also be defined; keep only the one named in the assertion
functions = [func for func in functions if func in assertion]
if len(functions) > 1:
print("more than one function: ", functions)
return functions[0] if len(functions) > 0 else None | null |
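A hypothetical call (the ground-truth file must exist locally; MBPP task 2 is used only as an example):
# Assuming groundtruth/mbpp/002.py defines similar_elements (plus helpers):
entry = get_entry_point(2, "assert similar_elements((3, 4, 5), (5, 7, 4)) == (4, 5)")
# -> "similar_elements"; helpers not named in the assertion are filtered out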
17,081 | import json
import os
import pathlib
import shutil
from importlib import util
from inspect import getmembers, isfunction
from typing import Tuple
from tempdir import TempDir
from evalplus.data.mbpp import get_mbpp, mbpp_serialize_inputs
GROUNDTRUTH_MBPP_PATH = pathlib.Path(__file__).parent.parent.parent / "groundtruth/mbpp"
def get_code_and_contract_and_assertion(task_id: int) -> Tuple[str, str, str]:
py_file_path = str(GROUNDTRUTH_MBPP_PATH) + f"/{str(task_id).zfill(3)}.py"
with open(py_file_path) as reader:
text = reader.read()
# remove docstring
start_index = text.find('"""')
end_index = text.find('"""', start_index + 3)
if start_index != -1 and end_index != -1:
text = text[:start_index] + text[end_index + 3 :]
lines = text.splitlines()
assertion = ""
contract = ""
for i in range(len(lines)):
if "$_CONTRACT_$" in lines[i]:
contract += lines[i] + "\n"
elif lines[i].startswith("assert"):
assertion += lines[i] + "\n"
for i in range(len(lines) - 1, -1, -1):
if (
"$_CONTRACT_$" in lines[i]
or lines[i].startswith("assert")
or lines[i] == ""
):
del lines[i]
for i in range(len(lines) - 1, -1, -1):
if lines[i].startswith("import"):
del lines[i]
else:
break
code = "\n".join(lines)
return "\n" + code + "\n", "\n" + contract, "\n" + assertion | null |
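A hypothetical call illustrating the three-way split (requires the corresponding groundtruth/mbpp/<task>.py on disk):
code, contract, assertion = get_code_and_contract_and_assertion(2)
# code      -> the implementation with docstring, contract lines, and asserts stripped
# contract  -> only the lines tagged # $_CONTRACT_$
# assertion -> only the top-level assert statements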
17,082 | import json
import os
import pathlib
import shutil
from importlib import util
from inspect import getmembers, isfunction
from typing import Tuple
from tempdir import TempDir
from evalplus.data.mbpp import get_mbpp, mbpp_serialize_inputs
def _ret(entry_point) -> str:
    # The original body is missing from this snippet; as a placeholder
    # (assumption), return a dummy value so the instrumented stub can run.
    return "1"
def instrument_inputs(code, entry_point, test_code) -> str:
globals()["_inputs"] = []
fn_text = f"""{code.split(f"def {entry_point}")[0]}
def {entry_point}(*args):
_inputs.append(args)
return {_ret(entry_point)}
"""
exec(fn_text + "\n" + test_code.replace("assert ", ""), globals())
print(fn_text + "\n" + test_code.replace("assert ", ""))
print(globals()["_inputs"])
return globals()["_inputs"] | null |
17,083 | import json
import os
import pathlib
import shutil
from importlib import util
from inspect import getmembers, isfunction
from typing import Tuple
from tempdir import TempDir
from evalplus.data.mbpp import get_mbpp, mbpp_serialize_inputs
def get_atol(task_id: int) -> float:
float_ans_list = [
82,
85,
98,
120,
124,
137,
139,
163,
233,
246,
248,
276,
293,
300,
312,
442,
574,
742,
746,
]
if task_id in float_ans_list:
return 1e-4
return 0 | null |
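In effect (illustrative):
get_atol(82)  # -> 1e-4: task 82 produces float answers, compared approximately
get_atol(1)   # -> 0: exact comparison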
17,084 | import ast
import inspect
import json
import multiprocessing
import sys
from concurrent.futures import ProcessPoolExecutor, as_completed
from tqdm import tqdm
from evalplus.data.mbpp import (
MBPP_PLUS_VERSION,
get_mbpp,
get_mbpp_plus,
get_mbpp_plus_hash,
)
from evalplus.eval import is_floats
from evalplus.eval._special_oracle import MBPP_OUTPUT_NOT_NONE_TASKS
from evalplus.evaluate import get_groundtruth
MBPP_TEST_TEMPLATE = """\
{imports}
{aux_fn}
inputs = {inputs}
results = {results}
for i, (inp, exp) in enumerate(zip(inputs, results)):
    assertion({entry_point}(*inp), exp, {atol})
"""
MBPP_CROSSCHECK_TEMPLATE = """\
{aux_fn}
{ref_func}
inputs = {inputs}
for i, inp in enumerate(inputs):
    assertion({entry_point}(*inp), ref_func(*inp), {atol})
"""
ASSERTION_FN = f"""\
import numpy as np
{inspect.getsource(is_floats)}
def assertion(out, exp, atol):
    exact_match = out == exp
    if atol == 0 and is_floats(exp):
        atol = 1e-6
    if not exact_match and atol != 0:
        np.testing.assert_allclose(out, exp, atol=atol)
    else:
        assert exact_match
"""
def synthesize_test_code(task_id, entry_point, inputs, results, ref_func, atol):
    # for tasks with huge expected outputs, cross-check against the reference
    # implementation instead of embedding the outputs (keeps the dataset small)
if entry_point in ("combinations_colors", "freq_count", "get_coordinates"):
return task_id, MBPP_CROSSCHECK_TEMPLATE.format(
aux_fn=ASSERTION_FN,
inputs=inputs,
ref_func=ref_func.replace(f" {entry_point}(", " ref_func("),
entry_point=entry_point,
atol=atol,
)
# default settings
imports = set()
aux_fn = ASSERTION_FN
# inf exists in inputs/results
if entry_point in ("zero_count", "minimum"):
imports.add("from math import inf")
test_code = MBPP_TEST_TEMPLATE.format(
imports="\n".join(imports),
aux_fn=aux_fn,
inputs=inputs,
results=results,
entry_point=entry_point,
atol=atol,
)
return task_id, test_code | null |
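Rendering the default template with toy values (made up) shows the shape of the generated test file:
print(MBPP_TEST_TEMPLATE.format(
    imports="",
    aux_fn="# assertion() as defined by ASSERTION_FN",
    inputs=[[2, 3]],
    results=[5],
    entry_point="add",
    atol=0,
))
# -> a script that iterates over the (input, result) pairs and runs
#    assertion(add(2, 3), 5, 0)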
17,085 | import ast
import inspect
import json
import multiprocessing
import sys
from concurrent.futures import ProcessPoolExecutor, as_completed
from tqdm import tqdm
from evalplus.data.mbpp import (
MBPP_PLUS_VERSION,
get_mbpp,
get_mbpp_plus,
get_mbpp_plus_hash,
)
from evalplus.eval import is_floats
from evalplus.eval._special_oracle import MBPP_OUTPUT_NOT_NONE_TASKS
from evalplus.evaluate import get_groundtruth
def deduplicate(inputs, results):
assert len(inputs) == len(results)
unique_input_strs = set([f"{x}" for x in inputs])
new_inputs, new_results = [], []
for inp, res in zip(inputs, results):
inp_str = f"{inp}"
if inp_str in unique_input_strs:
new_inputs.append(inp)
new_results.append(res)
unique_input_strs.remove(inp_str)
return new_inputs, new_results | null |
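For example (illustrative), duplicates are detected by string equality of the inputs, and the first occurrence wins:
inputs = [[1, 2], [3], [1, 2]]
results = ["a", "b", "a"]
deduplicate(inputs, results)  # -> ([[1, 2], [3]], ["a", "b"])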
17,086 | import os
from tqdm import tqdm
from evalplus.data import (
get_human_eval_plus,
get_mbpp_plus,
load_solutions,
write_directory,
write_jsonl,
)
from evalplus.sanitize import sanitize
def remove_unindented_lines(code, protect_before, exceptions, trim_tails):
lines = code.splitlines()
cut_idx = []
cut_enabled = False
for i, line in enumerate(lines):
if not cut_enabled and line.startswith(protect_before):
cut_enabled = True
continue
if line.strip() == "":
continue
        if any(line.startswith(e) for e in exceptions):
continue
lspace = len(line) - len(line.lstrip())
if lspace == 0:
cut_idx.append(i)
if any(line.rstrip().startswith(t) for t in trim_tails):
            # cut off everything from this line onward
cut_idx.extend(list(range(i, len(lines))))
break
return "\n".join([line for i, line in enumerate(lines) if i not in cut_idx]) | null |
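A toy call (argument values are made up) showing the intended behavior:
code = "def f(x):\n    return x + 1\nprint(f(1))\n"
remove_unindented_lines(code, "def f", ["def", "import", "from"], ['"""'])
# -> 'def f(x):\n    return x + 1' : the unindented print(...) line is cut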
17,087 | import os
from tqdm import tqdm
from evalplus.data import (
get_human_eval_plus,
get_mbpp_plus,
load_solutions,
write_directory,
write_jsonl,
)
from evalplus.sanitize import sanitize
def to_four_space_indents(old_code):
new_code = ""
for line in old_code.splitlines():
lspace = len(line) - len(line.lstrip())
if lspace == 3:
new_code += " "
new_code += line + "\n"
return new_code | null |
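For example:
to_four_space_indents("def f():\n   return 1\n")
# -> "def f():\n    return 1\n" : 3-space indents are widened to 4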
17,088 | import json
import os
from rich.progress import track
from evalplus.data import get_human_eval_plus, get_human_eval_plus_inputs
LLM_HOME_PATH = "/JawTitan/EvalPlus/humaneval"
model_paths = os.listdir(LLM_HOME_PATH)
cover_info = {f"HumanEval_{i}": {} for i in range(164)}
def get_cover_info():
for model_path in track(model_paths, description="Collecting sets..."):
if not model_path[-1].isdigit():
continue
eval_json_path = os.path.join(LLM_HOME_PATH, model_path, "eval_results.json")
if not os.path.exists(eval_json_path):
continue
with open(eval_json_path, "r") as f:
res = json.load(f)["eval"]
for task_id, v in res.items():
for i_code, (status, res_list) in enumerate(v["base"]):
if status == "success":
continue
code_id = hash(v["files"][i_code])
                for i_test, test_passed in enumerate(res_list):
                    test_id = f"base_{i_test}"
                    if test_passed == False:  # explicit comparison: entries may be ints or bools
                        cover_info[task_id].setdefault(test_id, []).append(code_id)
for i_code, (status, res_list) in enumerate(v["plus"]):
if status == "success":
continue
code_id = hash(v["files"][i_code])
                for i_test, test_passed in enumerate(res_list):
                    test_id = f"plus_{i_test}"
                    if test_passed == False:  # explicit comparison: entries may be ints or bools
                        cover_info[task_id].setdefault(test_id, []).append(code_id)
17,089 |
def fix(data):
# fix 140 https://github.com/evalplus/evalplus/issues/3
assert data[140]["task_id"] == "HumanEval/140"
data[140]["canonical_solution"] = data[140]["canonical_solution"].replace(
"range(len(text)-1, 2, -1)", "range(len(text), 2, -1)"
)
# fix 75 https://github.com/evalplus/evalplus/issues/4
assert data[75]["task_id"] == "HumanEval/75"
org_contract = '\n assert type(a) == int, "invalid inputs" # $_CONTRACT_$\n'
assert org_contract in data[75]["contract"]
data[75]["contract"] = (
org_contract + ' assert a < 100, "invalid inputs" # $_CONTRACT_$\n'
)
data[75]["base_input"] = [x for x in data[75]["base_input"] if x[0] < 100]
data[75]["plus_input"] = [x for x in data[75]["plus_input"] if x[0] < 100]
# fix 129 https://github.com/evalplus/evalplus/issues/4
assert data[129]["task_id"] == "HumanEval/129"
data[129][
"contract"
] = R"""
assert type(k) == int, "invalid inputs" # $_CONTRACT_$
assert k > 0, "invalid inputs" # $_CONTRACT_$
assert len(grid) >= 2, "invalid inputs" # $_CONTRACT_$
assert all(len(l) == len(grid) for l in grid), "invalid inputs" # $_CONTRACT_$
assert {x for l in grid for x in l} == set(range(1, len(grid) ** 2 + 1)), "invalid inputs" # $_CONTRACT_$
"""
def check_unique(grid):
return {x for l in grid for x in l} == set(range(1, len(grid) ** 2 + 1))
data[129]["base_input"] = [x for x in data[129]["base_input"] if check_unique(x[0])]
data[129]["plus_input"] = [x for x in data[129]["plus_input"] if check_unique(x[0])]
return data | null |
17,090 | import json
import os
import pathlib
from importlib import import_module
from inspect import getsource
from typing import Tuple
from tempdir import TempDir
from evalplus.data.humaneval import get_human_eval
def _ret(entry_point) -> str:
"""This is a hacky function to return some garbages so that we can
successfully run the function .
"""
if entry_point == "sort_third" or entry_point == "sort_even":
return [1, 2, 3]
elif entry_point == "bf":
return ()
return "1"
def instrument_inputs(entry_point, prompt, test) -> str:
globals()["_inputs"] = []
fn_text = f"""{prompt.split(f"def {entry_point}")[0]}
def {entry_point}(*args):
_inputs.append(args)
return {_ret(entry_point)}
"""
exec(fn_text, globals())
exec(test.replace("assert ", ""), globals())
exec(f"check({entry_point})", globals())
exec(fn_text, globals())
return globals()["_inputs"] | null |
17,091 | import json
import os
import pathlib
from importlib import import_module
from inspect import getsource
from typing import Tuple
from tempdir import TempDir
from evalplus.data.humaneval import get_human_eval
def get_contract_and_ref(task_id: int, entry_point) -> Tuple[str, str]:
mod = import_module(f"groundtruth.humaneval.{str(task_id).zfill(3)}_{entry_point}")
fn = getattr(mod, entry_point)
doc = fn.__doc__
if task_id == 51:
doc = doc.replace("bcdf\nghjklm", r"bcdf\nghjklm").replace(
"abcdef\nghijklm", r"abcdef\nghijklm"
)
code = (
getsource(fn).replace(doc, "").replace("''''''", '""""""').split('""""""\n')[-1]
)
assert code, f"Something wrong with {task_id}!"
assert code[:3] != "def", f"Something wrong with the {task_id}!"
# split code to contract and impl
contract = ""
impl = ""
reading_contract = True
for line in code.strip("\n").split("\n"):
if reading_contract and "$_CONTRACT_$" in line:
contract += line + "\n"
else:
reading_contract = False
impl += line + "\n"
if contract:
contract = "\n" + contract
return contract, "\n" + impl + "\n" | null |
17,092 | import json
import os
import pathlib
from importlib import import_module
from inspect import getsource
from typing import Tuple
from tempdir import TempDir
from evalplus.data.humaneval import get_human_eval
def get_atol(task_id: int) -> float:
if task_id == 2 or task_id == 4:
return 1e-6
elif task_id == 32:
return 1e-4
return 0 | null |
17,093 | def check_id(data, task_id):
assert data[task_id]["task_id"] == f"HumanEval/{task_id}"
def fix(data):
# fix 53 https://github.com/evalplus/evalplus/issues/8
check_id(data, 53)
data[53]["contract"] = (
'\n assert isinstance(x, int), "invalid inputs" # $_CONTRACT_$'
+ '\n assert isinstance(y, int), "invalid inputs" # $_CONTRACT_$\n'
)
data[53]["plus_input"] = [
x
for x in data[53]["plus_input"]
if isinstance(x[0], int) and isinstance(x[1], int)
]
# fix 0
check_id(data, 0)
data[0]["contract"] = (
'\n assert isinstance(threshold, float) and threshold > 0, "invalid inputs" # $_CONTRACT_$'
+ '\n assert isinstance(numbers, list), "invalid inputs" # $_CONTRACT_$'
+ '\n assert all([isinstance(v, (int, float)) for v in numbers]), "invalid inputs" # $_CONTRACT_$\n'
)
data[0]["plus_input"] = [
x
for x in data[0]["plus_input"]
if isinstance(x[1], float) and x[1] > 0 and isinstance(x[0], list)
]
# fix 3
check_id(data, 3)
data[3]["contract"] = (
'\n assert type(operations) == list, "invalid inputs" # $_CONTRACT_$'
+ '\n assert all([isinstance(v, int) for v in operations]), "invalid inputs" # $_CONTRACT_$\n'
)
data[3]["plus_input"] = [x for x in data[3]["plus_input"] if isinstance(x[0], list)]
# fix 9
check_id(data, 9)
data[9]["contract"] = (
'\n assert isinstance(numbers, list), "invalid inputs" # $_CONTRACT_$'
+ '\n assert all([isinstance(v, int) for v in numbers]), "invalid inputs" # $_CONTRACT_$\n'
)
data[9]["plus_input"] = [x for x in data[9]["plus_input"] if isinstance(x[0], list)]
# fix 148
check_id(data, 148)
data[148][
"contract"
] = '\n assert isinstance(planet1, str) and isinstance(planet2, str), "invalid inputs" # $_CONTRACT_$\n'
data[148]["plus_input"] = [
x
for x in data[148]["plus_input"]
if isinstance(x[0], str) and isinstance(x[1], str)
]
# minor format fix 75
check_id(data, 75)
data[75]["contract"] = (
'\n assert type(a) == int, "invalid inputs" # $_CONTRACT_$'
+ '\n assert a < 100, "invalid inputs" # $_CONTRACT_$\n'
)
return data | null |
import json
import math


# check_id is used below but was not included in this snippet; it matches the
# definition in the sibling fix scripts.
def check_id(data, task_id):
    assert data[task_id]["task_id"] == f"HumanEval/{task_id}"
def fix(data):
# https://github.com/evalplus/evalplus/issues/29
check_id(data, 35)
data[35]["contract"] += ' assert len(l) != 0, "invalid inputs" # $_CONTRACT_$\n'
# https://github.com/evalplus/evalplus/issues/28
check_id(data, 2)
data[2][
"contract"
] += ' assert number != float("+inf"), "invalid inputs" # $_CONTRACT_$\n'
check_id(data, 99)
data[99][
"contract"
] += r""" import math # $_CONTRACT_$
assert not (math.isinf(value) or math.isnan(value)), "invalid inputs" # $_CONTRACT_$
"""
# https://github.com/evalplus/evalplus/issues/27
check_id(data, 1)
data[1]["contract"] += ' assert cnt == 0, "invalid inputs" # $_CONTRACT_$\n'
return data
def evolve(src_file, tgt_file):
with open(src_file) as f:
data = [json.loads(line) for line in f.readlines() if line]
data = fix(data)
with open(tgt_file, "wb") as f:
for x in data:
f.write((json.dumps(x) + "\n").encode("utf-8")) | null |
17,095 | import math
def check_id(data, task_id):
assert data[task_id]["task_id"] == f"HumanEval/{task_id}"
def check_valid(xs):
if not (isinstance(xs, list) and len(xs) > 0 and len(xs) % 2 == 0):
return False
if not all(type(x) == int for x in xs):
return False
dxs = [xs[i] * i for i in range(1, len(xs))]
def func(x):
return poly(xs, x)
def derivative(x):
return poly(dxs, x)
x, tol = 0, 1e-5
for _ in range(1000):
fx = func(x)
dfx = derivative(x)
if abs(fx) < tol:
break
x = x - fx / dfx
if abs(poly(xs, x)) >= tol:
return False
return True
def fix(data):
check_id(data, 32)
data[32]["contract"] = (
'\n assert isinstance(xs, list) and len(xs) > 0 and len(xs) % 2 == 0, "invalid inputs" # $_CONTRACT_$'
+ '\n assert all(type(x) == int for x in xs), "invalid inputs" # $_CONTRACT_$'
+ "\n dxs = [xs[i] * i for i in range(1, len(xs))] # $_CONTRACT_$"
+ "\n def func(x): # $_CONTRACT_$"
+ "\n return poly(xs, x) # $_CONTRACT_$"
+ "\n def derivative(x): # $_CONTRACT_$"
+ "\n return poly(dxs, x) # $_CONTRACT_$"
+ "\n x, tol = 0, 1e-5 # $_CONTRACT_$"
+ "\n for _ in range(1000): # $_CONTRACT_$"
+ "\n fx = func(x) # $_CONTRACT_$"
+ "\n dfx = derivative(x) # $_CONTRACT_$"
+ "\n if abs(fx) < tol: break # $_CONTRACT_$"
+ "\n x = x - fx / dfx # $_CONTRACT_$"
+ '\n assert abs(poly(xs, x)) < tol, "invalid inputs" # $_CONTRACT_$\n'
)
data[32]["plus_input"] = [l for l in data[32]["plus_input"] if check_valid(l[0])]
return data | null |
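check_valid assumes HumanEval's poly helper is in scope; under that assumption (sketched below), a quick sanity check:
import math

def poly(xs, x):  # HumanEval's polynomial evaluator, assumed available here
    return sum(coeff * math.pow(x, i) for i, coeff in enumerate(xs))

check_valid([1, 2])     # True: 1 + 2x has a root Newton's method finds
check_valid([1, 2, 3])  # False: odd number of coefficients is rejected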
17,096 | import math
def check_id(data, task_id):
assert data[task_id]["task_id"] == f"HumanEval/{task_id}"
def check_valid(s: str):
cnt = 0
for ch in s:
if ch == "(":
cnt += 1
elif ch == ")":
cnt -= 1
else:
return False
if cnt < 0:
return False
return cnt == 0
def fix(data):
check_id(data, 126)
data[126]["contract"] = (
'\n assert type(lst) == list, "invalid inputs" # $_CONTRACT_$'
+ '\n assert all(type(x) == int and x >= 0 for x in lst), "invalid inputs" # $_CONTRACT_$\n'
)
data[126]["plus_input"] = [
l
for l in data[126]["plus_input"]
if type(l[0]) == list and all(type(x) == int and x >= 0 for x in l[0])
]
check_id(data, 6)
data[6]["contract"] += ' assert cnt == 0, "invalid inputs"\n'
data[6]["plus_input"] = [l for l in data[6]["plus_input"] if check_valid(l[0])]
return data | null |
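The parenthesis validator accepts only strings built from balanced parentheses, e.g.:
check_valid("(())")  # True
check_valid(")(")    # False: depth dips below zero
check_valid("(a)")   # False: non-parenthesis character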
import json
import math


# check_id is used below but was not included in this snippet; it matches the
# definition in the sibling fix scripts.
def check_id(data, task_id):
    assert data[task_id]["task_id"] == f"HumanEval/{task_id}"
def fix(data):
# https://github.com/evalplus/evalplus/issues/44
check_id(data, 115)
data[115]["prompt"] = "import math\n" + data[115]["prompt"].replace(
" import math\n", ""
)
check_id(data, 114)
data[114]["prompt"] = data[114]["prompt"].replace("import math\n", "")
# https://github.com/evalplus/evalplus/issues/30#issue-1944054257
check_id(data, 35)
data[35][
"contract"
] += ' assert all(type(x) in [int, float] for x in l), "invalid inputs" # $_CONTRACT_$\n'
data[35]["canonical_solution"] = data[35]["canonical_solution"].replace(
' assert all(type(x) in [int, float] for x in l), "invalid inputs"\n', ""
)
# https://github.com/evalplus/evalplus/issues/30#issuecomment-1763502126
check_id(data, 28)
data[28][
"contract"
] += ' assert isinstance(strings, list), "invalid inputs" # $_CONTRACT_$\n'
# https://github.com/evalplus/evalplus/issues/34
check_id(data, 32)
terms = data[32]["contract"].splitlines()
terms.insert(2, ' assert xs[-1] != 0, "invalid inputs" # $_CONTRACT_$')
data[32]["contract"] = "\n".join(terms)
# https://github.com/evalplus/evalplus/issues/35
check_id(data, 160)
terms = data[160]["contract"].splitlines()
terms.insert(
len(terms),
' assert not any([operand[i-1] == 0 and operator[i] == "//" for i in range(1, len(operand))]), "invalid inputs" # $_CONTRACT_$',
)
data[160]["contract"] = "\n".join(terms)
return data
def evolve(src_file, tgt_file):
with open(src_file) as f:
data = [json.loads(line) for line in f.readlines() if line]
data = fix(data)
with open(tgt_file, "wb") as f:
for x in data:
f.write((json.dumps(x) + "\n").encode("utf-8")) | null |
17,098 | import math
def check_id(data, task_id):
assert data[task_id]["task_id"] == f"HumanEval/{task_id}"
def check_valid(op, num):
try:
exp = ""
for i in range(len(op)):
exp += str(num[i]) + str(op[i])
exp += str(num[-1])
        exp = str(eval(exp))  # evaluate only to surface errors such as ZeroDivisionError
    except Exception:
        return False
return True
def fix(data):
check_id(data, 160)
data[160]["plus_input"] = [
l for l in data[160]["plus_input"] if check_valid(l[0], l[1])
]
return data | null |
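The filter simply tries to evaluate the assembled expression and drops inputs that raise, e.g. division by zero:
check_valid(["+"], [1, 2])   # True: "1+2" evaluates fine
check_valid(["//"], [7, 0])  # False: "7//0" raises ZeroDivisionError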
17,099 | import ast
import inspect
import json
import multiprocessing
import sys
from concurrent.futures import ProcessPoolExecutor, as_completed
from tqdm import tqdm
from evalplus.data.humaneval import (
HUMANEVAL_PLUS_VERSION,
get_human_eval_plus,
get_human_eval_plus_hash,
)
from evalplus.eval import is_floats
from evalplus.eval._special_oracle import _poly
from evalplus.evaluate import get_groundtruth
HUMANEVAL_TEST_TEMPLATE = """\
{imports}
{aux_fn}
def check(candidate):
    inputs = {inputs}
    results = {results}
    for i, (inp, exp) in enumerate(zip(inputs, results)):
        {assertion}
"""
HUMANEVAL_CROSSCHECK_TEMPLATE = """\
{aux_fn}
{ref_func}
def check(candidate):
    inputs = {inputs}
    for i, inp in enumerate(inputs):
        assertion(candidate(*inp), ref_func(*inp), {atol})
"""
ASSERTION_FN = f"""\
import numpy as np
{inspect.getsource(is_floats)}
def assertion(out, exp, atol):
    exact_match = out == exp
    if atol == 0 and is_floats(exp):
        atol = 1e-6
    if not exact_match and atol != 0:
        np.testing.assert_allclose(out, exp, atol=atol)
    else:
        assert exact_match
"""
def synthesize_test_code(task_id, entry_point, inputs, results, ref_func, atol):
    # for tasks with huge expected outputs, cross-check against the reference
    # implementation instead of embedding the outputs (keeps the dataset small)
if entry_point in (
"tri",
"string_sequence",
"starts_one_ends",
"make_a_pile",
"special_factorial",
"all_prefixes",
):
return task_id, HUMANEVAL_CROSSCHECK_TEMPLATE.format(
aux_fn=ASSERTION_FN,
inputs=inputs,
ref_func=ref_func.replace(f" {entry_point}(", " ref_func("),
atol=atol,
)
# default settings
imports = set()
aux_fn = ASSERTION_FN
assertion = f"assertion(candidate(*inp), exp, {atol})"
# special case: poly
if entry_point == "find_zero":
imports.add("import math")
aux_fn = inspect.getsource(_poly) + "\n"
assertion = f"assert _poly(*candidate(*inp), inp) <= {atol}"
return task_id, HUMANEVAL_TEST_TEMPLATE.format(
imports="\n".join(imports),
aux_fn=aux_fn,
inputs=inputs,
results=results,
assertion=assertion,
) | null |
17,100 | import ast
import inspect
import json
import multiprocessing
import sys
from concurrent.futures import ProcessPoolExecutor, as_completed
from tqdm import tqdm
from evalplus.data.humaneval import (
HUMANEVAL_PLUS_VERSION,
get_human_eval_plus,
get_human_eval_plus_hash,
)
from evalplus.eval import is_floats
from evalplus.eval._special_oracle import _poly
from evalplus.evaluate import get_groundtruth
def deduplicate(inputs, results):
assert len(inputs) == len(results)
unique_input_strs = set([f"{x}" for x in inputs])
new_inputs, new_results = [], []
for inp, res in zip(inputs, results):
inp_str = f"{inp}"
if inp_str in unique_input_strs:
new_inputs.append(inp)
new_results.append(res)
unique_input_strs.remove(inp_str)
return new_inputs, new_results | null |