import json
import logging
import math
import os
import sys
import uuid
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Set

# -----------------------------------------------------------------------------
# Imports & Dependency Checks
# -----------------------------------------------------------------------------
try:
    import yaml
except ImportError:
    print("Error: 'PyYAML' is required. Install via 'pip install pyyaml'.")
    sys.exit(1)

try:
    from openai import OpenAI
except ImportError:
    print("Error: 'openai' is required. Install via 'pip install openai'.")
    sys.exit(1)

# We check for transformers at import time so the class can fall back to
# heuristic mode if the user doesn't have transformers installed.
try:
    from transformers import AutoTokenizer
    TRANSFORMERS_AVAILABLE = True
except ImportError:
    TRANSFORMERS_AVAILABLE = False

# -----------------------------------------------------------------------------
# Logging
# -----------------------------------------------------------------------------
logging.basicConfig(
    level=logging.DEBUG,
    format='[%(levelname)s] %(asctime)s - %(funcName)s:%(lineno)d - %(message)s',
    datefmt='%H:%M:%S'
)
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Configuration
# -----------------------------------------------------------------------------
@dataclass
class GroupInterval:
    start: int
    end: int
    line_numbers: Set[int]


@dataclass
class ChunkingConfig:
    """Configuration object loaded from YAML."""
    api_key: str
    llm_model_name: str
    temperature: float
    # Tokenization
    tokenizer_method: str
    hf_model_name: str
    heuristic_chars_per_token: int
    # Limits
    llm_token_limit: int
    overlap_token_count: int
    model_token_limit: int
    # Prompts
    system_prompt_base: str

    @classmethod
    def from_yaml(cls, path: str) -> 'ChunkingConfig':
        if not os.path.exists(path):
            raise FileNotFoundError(f"Config file not found at: {path}")
        logger.info(f"Loading configuration from {path}...")
        with open(path, 'r') as f:
            data = yaml.safe_load(f)
        oa = data.get('openai', {})
        tok = data.get('tokenization', {})
        tok_heu = tok.get('heuristic', {})
        tok_hf = tok.get('huggingface', {})
        lim = data.get('limits', {})
        prompts = data.get('prompts', {})
        # The literal value "ENV" means: read the key from OPENAI_API_KEY.
        raw_key = oa.get('api_key', 'ENV')
        api_key = os.getenv("OPENAI_API_KEY") if raw_key == "ENV" else raw_key
        return cls(
            api_key=api_key or "MISSING_KEY",
            llm_model_name=oa.get('model_name', 'gpt-4o-mini'),
            temperature=oa.get('temperature', 0.0),
            # Tokenizer Config
            tokenizer_method=tok.get('method', 'heuristic'),
            hf_model_name=tok_hf.get('model_name', 'gpt2'),
            heuristic_chars_per_token=tok_heu.get('chars_per_token', 4),
            llm_token_limit=lim.get('llm_context_window', 300),
            overlap_token_count=lim.get('window_overlap', 50),
            model_token_limit=lim.get('target_chunk_size', 100),
            system_prompt_base=prompts.get('system_instructions', '')
        )
# -----------------------------------------------------------------------------
# Data Structures
# -----------------------------------------------------------------------------
@dataclass
class Line:
    number: int
    text: str
    token_count: int


@dataclass
class PreChunkSegment:
    lines: List[Line]
    segment_id: str = field(default_factory=lambda: str(uuid.uuid4()))

    @property
    def formatted_text(self) -> str:
        # Renders each line as "<number> | <text>" for the LLM prompt
        return "\n".join(f"{line.number} | {line.text}" for line in self.lines)


@dataclass
class SemanticGroup:
    line_numbers: Set[int]
# -----------------------------------------------------------------------------
# Service Implementation
# -----------------------------------------------------------------------------
class DocumentChunkingService:
    def __init__(self, config_path: str = "config.yaml"):
        # 1. Load Config
        try:
            self.config = ChunkingConfig.from_yaml(config_path)
        except Exception as e:
            logger.critical(f"Failed to load config: {e}")
            sys.exit(1)

        # 2. Setup Tokenizer based on Method
        self.hf_tokenizer = None
        if self.config.tokenizer_method == "huggingface":
            if not TRANSFORMERS_AVAILABLE:
                logger.critical("Config requests 'huggingface', but library is missing. Install 'transformers'.")
                sys.exit(1)
            try:
                logger.info(f"Initializing HuggingFace Tokenizer: {self.config.hf_model_name}")
                os.environ["TOKENIZERS_PARALLELISM"] = "false"
                self.hf_tokenizer = AutoTokenizer.from_pretrained(self.config.hf_model_name)
            except Exception as e:
                logger.critical(f"Failed to load HF Tokenizer: {e}")
                sys.exit(1)
        elif self.config.tokenizer_method == "heuristic":
            logger.info(f"Using Heuristic Tokenizer ({self.config.heuristic_chars_per_token} chars/token)")
        else:
            logger.warning(f"Unknown tokenizer method '{self.config.tokenizer_method}'. Defaulting to heuristic.")

        # 3. Setup OpenAI (client stays None without a key so callers can skip API work)
        if self.config.api_key == "MISSING_KEY":
            logger.critical("No valid API Key found.")
            self.client = None
        else:
            try:
                self.client = OpenAI(api_key=self.config.api_key)
            except Exception as e:
                logger.error(f"Failed to initialize OpenAI Client: {e}")
                sys.exit(1)
    def _count_tokens(self, text: str) -> int:
        """Determines token count based on the configured method."""
        if not text:
            return 0
        if self.config.tokenizer_method == "huggingface" and self.hf_tokenizer:
            # HuggingFace count: encode without special tokens and count the IDs
            return len(self.hf_tokenizer.encode(text, add_special_tokens=False))
        # Heuristic count (also the fallback for unknown methods)
        return math.ceil(len(text) / self.config.heuristic_chars_per_token)
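    # Worked example (illustrative): with the default chars_per_token = 4,
    # "The history of Artificial Intelligence is fascinating." has 54 chars,
    # so the heuristic estimate is ceil(54 / 4) = 14 tokens.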
    def _prepare_lines(self, document_text: str) -> List[Line]:
        logger.debug(f"Preparing lines using {self.config.tokenizer_method} method...")
        raw_lines = document_text.split('\n')
        processed_lines = []
        for idx, text in enumerate(raw_lines, start=1):
            # Skip blank lines; numbering still reflects the original document
            if not text.strip():
                continue
            count = self._count_tokens(text)
            processed_lines.append(Line(idx, text, count))
        return processed_lines
    def _create_pre_chunks(self, lines: List[Line]) -> List[PreChunkSegment]:
        logger.debug(f"Segmenting lines (Limit: {self.config.llm_token_limit})...")
        segments = []
        current_segment_lines = []
        current_tokens = 0
        i = 0
        while i < len(lines):
            line = lines[i]
            if current_tokens + line.token_count > self.config.llm_token_limit and current_segment_lines:
                segments.append(PreChunkSegment(list(current_segment_lines)))
                # Overlap Logic: walk backwards through the segment we just
                # closed, collecting trailing lines until the overlap budget is
                # met (it may overshoot the budget by at most one line).
                overlap_buffer = []
                overlap_tokens = 0
                back_idx = i - 1
                while back_idx >= 0:
                    prev_line = lines[back_idx]
                    if prev_line in current_segment_lines:
                        overlap_buffer.insert(0, prev_line)
                        overlap_tokens += prev_line.token_count
                        if overlap_tokens >= self.config.overlap_token_count:
                            break
                    else:
                        break
                    back_idx -= 1
                # Start the next segment seeded with the overlap lines
                current_segment_lines = list(overlap_buffer)
                current_tokens = overlap_tokens
            current_segment_lines.append(line)
            current_tokens += line.token_count
            i += 1
        if current_segment_lines:
            segments.append(PreChunkSegment(current_segment_lines))
        return segments
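    # Windowing example (illustrative numbers): with llm_context_window = 300
    # and window_overlap = 50, a ~500-token document yields two segments; the
    # second re-includes roughly the last 50 tokens of the first, so lines near
    # the boundary are seen with context on both sides.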
    def _call_openai(self, segment_text: str, available_lines: List[int]) -> List[List[int]]:
        runtime_constraint = f"\nCRITICAL CONSTRAINT: Only use the line numbers provided in this specific range: {available_lines}"
        full_system_prompt = self.config.system_prompt_base + runtime_constraint
        user_prompt = f"Input Lines:\n{segment_text}\n\nOutput JSON:"
        try:
            logger.debug(f"Calling OpenAI (Lines {available_lines[0]}-{available_lines[-1]})...")
            response = self.client.chat.completions.create(
                model=self.config.llm_model_name,
                messages=[
                    {"role": "system", "content": full_system_prompt},
                    {"role": "user", "content": user_prompt}
                ],
                response_format={"type": "json_object"},
                temperature=self.config.temperature
            )
            parsed = json.loads(response.choices[0].message.content)
            groups = parsed.get("groups", [])
            if isinstance(groups, list) and all(isinstance(g, list) for g in groups):
                return groups
            # Malformed response: fall back to one group per line
            return [[n] for n in available_lines]
        except Exception as e:
            logger.error(f"OpenAI Call Failed: {e}")
            return [[n] for n in available_lines]
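    # Expected reply shape (illustrative): for available_lines [1, 2, 3, 4, 5]
    # the model might return {"groups": [[1, 2], [3, 4, 5]]}.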
    def _get_semantic_groupings(self, segments: List[PreChunkSegment]) -> List[List[int]]:
        all_raw_groups = []
        for seg in segments:
            available_lines = [line.number for line in seg.lines]
            response_groups = self._call_openai(seg.formatted_text, available_lines)
            all_raw_groups.extend(response_groups)
        return all_raw_groups
    def _resolve_overlaps(self, raw_groups: List[List[int]], all_lines_map: Dict[int, Line]) -> List[SemanticGroup]:
        """
        Merges groups based on overlapping line number ranges.
        Uses a standard 'Merge Intervals' algorithm.
        """
        intervals: List[GroupInterval] = []
        # 1. Convert raw groups to Intervals
        for group in raw_groups:
            if not group:
                continue
            # Filter for valid lines only
            valid_lines = {g for g in group if g in all_lines_map}
            if not valid_lines:
                continue
            # Define range based on min and max line numbers in the group
            intervals.append(GroupInterval(
                start=min(valid_lines),
                end=max(valid_lines),
                line_numbers=valid_lines
            ))
        if not intervals:
            return []
        # 2. Sort by start line
        intervals.sort(key=lambda x: x.start)
        # 3. Merge overlapping intervals
        merged: List[GroupInterval] = []
        for current in intervals:
            if not merged:
                merged.append(current)
                continue
            last = merged[-1]
            # Check for overlap:
            # If current starts before (or exactly when) last ends, they overlap.
            # e.g. [84, 795] and [788, 887] -> 788 <= 795, so merge.
            if current.start <= last.end:
                # Merge logic:
                # 1. Extend the end if needed
                last.end = max(last.end, current.end)
                # 2. Combine the sets of line numbers
                last.line_numbers.update(current.line_numbers)
            else:
                # No overlap, start a new cluster
                merged.append(current)
        # 4. Convert back to SemanticGroups
        results = [SemanticGroup(group.line_numbers) for group in merged]
        return sorted(results, key=lambda x: min(x.line_numbers) if x.line_numbers else 0)
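    # Worked example (illustrative): raw groups [[1, 2], [2, 3], [6, 7]] become
    # intervals [1-2], [2-3], [6-7]; the first two overlap and merge into
    # {1, 2, 3}, while {6, 7} stays a separate group.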
    def _finalize_chunk(self, content: str, line_numbers: List[int], parent_id: Optional[str] = None) -> List[Dict[str, Any]]:
        count = self._count_tokens(content)
        if count <= self.config.model_token_limit:
            return [{
                "content": content,
                "line_numbers": line_numbers,
                "token_estimate": count,
                "metadata": {"parent_id": parent_id}
            }]
        # A single line that exceeds the budget cannot be split further
        if len(line_numbers) <= 1:
            return [{
                "content": content,
                "line_numbers": line_numbers,
                "token_estimate": count,
                "metadata": {"parent_id": parent_id, "warning": "oversized"}
            }]
        # Otherwise split the group in half and recurse on each side
        mid = len(line_numbers) // 2
        left_lines = line_numbers[:mid]
        right_lines = line_numbers[mid:]
        left_text = "\n".join(self.current_doc_map[n].text for n in left_lines)
        right_text = "\n".join(self.current_doc_map[n].text for n in right_lines)
        cid = parent_id if parent_id else str(uuid.uuid4())[:8]
        results = []
        results.extend(self._finalize_chunk(left_text, left_lines, parent_id=cid))
        results.extend(self._finalize_chunk(right_text, right_lines, parent_id=cid))
        return results
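    # A finalized chunk (illustrative shape):
    #   {"content": "...", "line_numbers": [3, 4], "token_estimate": 42,
    #    "metadata": {"parent_id": None}}
    # Split siblings share the same parent_id so they can be re-joined later.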
    def process_document(self, plaintext: str) -> str:
        logger.info(f">>> Processing Document [Mode: {self.config.tokenizer_method.upper()}]")
        lines = self._prepare_lines(plaintext)
        self.current_doc_map = {line.number: line for line in lines}
        pre_chunks = self._create_pre_chunks(lines)
        raw_groups = self._get_semantic_groupings(pre_chunks)
        merged_groups = self._resolve_overlaps(raw_groups, self.current_doc_map)
        final_output = []
        logger.info("Finalizing chunks...")
        for group in merged_groups:
            sorted_nums = sorted(group.line_numbers)
            text_content = "\n".join(self.current_doc_map[n].text for n in sorted_nums)
            chunks = self._finalize_chunk(text_content, sorted_nums)
            final_output.extend(chunks)
        logger.info(f"<<< Done. Generated {len(final_output)} chunks.")
        return json.dumps(final_output, indent=2)
# -----------------------------------------------------------------------------
# Main Execution
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    sample_text = """The history of Artificial Intelligence is fascinating.
It begins with the Turing Test proposed by Alan Turing.
Early AI research focused on symbolic logic and problem solving.
However, computing power was limited in the 1950s.
Decades later, machine learning emerged as a dominant paradigm.
Neural networks, inspired by the human brain, gained popularity.
Deep learning revolutionized the field in the 2010s.
Transformers, introduced by Google, changed NLP forever.
Large Language Models like GPT-4 are now commonplace.
Retrieval Augmented Generation allows LLMs to use external data.
Chunking documents is essential for RAG systems.
It preserves semantic meaning during retrieval.
This specific code implements a rigorous chunking strategy.
It uses heuristic strategies for token estimation.
The end goal is high quality embeddings."""

    service = DocumentChunkingService("config.yaml")
    if service.client:
        result = service.process_document(sample_text)
        print("\n--- Final Output JSON ---")
        print(result)
# -----------------------------------------------------------------------------
# config.yaml (companion configuration file for the script above)
# -----------------------------------------------------------------------------
openai:
  api_key: "ENV"  # "ENV" means: read the key from the OPENAI_API_KEY env var
  model_name: "gpt-4o-mini"
  temperature: 0.0

tokenization:
  # MASTER SWITCH: Choose "heuristic" or "huggingface"
  # - "heuristic": Uses simple math (chars / chars_per_token). Fast, no dependencies.
  # - "huggingface": Uses a real tokenizer (e.g., gpt2). Precise, requires the 'transformers' lib.
  method: "heuristic"

  # Settings for the "heuristic" method
  heuristic:
    chars_per_token: 4

  # Settings for the "huggingface" method
  huggingface:
    # "gpt2" is a standard proxy for general LLM token counting
    model_name: "gpt2"

limits:
  # Max tokens to send to OpenAI in one request (chunk context window)
  llm_context_window: 300
  # Overlap between context windows to prevent cutting sentences
  window_overlap: 50
  # The target max size for a final, atomic chunk
  target_chunk_size: 100

prompts:
  system_instructions: |
    You are a document chunking assistant. Your goal is to group lines of text into semantically coherent chunks.
    Strict Rules:
    1. Every line number provided in the input must appear exactly once in your output.
    2. Group line numbers that belong together conceptually.
    3. Return a JSON object with a single key 'groups' containing a list of lists of integers.
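
# A valid model reply for input lines 1-5 might look like (illustrative):
#   {"groups": [[1, 2], [3, 4, 5]]}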