Spaces:
Sleeping
Sleeping
File size: 10,774 Bytes
7d5083d b3e0a65 7d5083d b3e0a65 7d5083d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 |
"""
Pydantic models for API request validation.
This module defines the input models for the topic segmentation API,
including transcript data, prompt requests, and validation constraints.
"""
from typing import List, Optional, Dict, Any, Union
from datetime import datetime
from enum import Enum
from pydantic import BaseModel, Field, field_validator, ConfigDict
from pydantic.types import PositiveInt, NonNegativeFloat
from config.settings import AnthropicModel
class SpeakerRole(str, Enum):
    """Enumeration of possible speaker roles in transcripts.

    Subclasses str so members serialize/compare directly as their
    string values in JSON payloads.
    """
    INTERVIEWER = "interviewer"  # asks the questions in an interview
    INTERVIEWEE = "interviewee"  # answers the questions in an interview
    MODERATOR = "moderator"      # steers a group discussion
    PARTICIPANT = "participant"  # generic discussion participant
    CUSTOMER = "customer"        # customer side of a support/sales call
    AGENT = "agent"              # company side of a support/sales call
    UNKNOWN = "unknown"          # role not identified; default in TranscriptSentence
class LanguageCode(str, Enum):
    """Supported language codes for processing.

    ISO 639-1 codes plus a sentinel value for automatic detection.
    """
    ENGLISH = "en"
    CZECH = "cs"
    SLOVAK = "sk"
    AUTO_DETECT = "auto"  # let the service detect the language itself
class PromptTemplate(str, Enum):
    """Pre-built prompt templates for different business scenarios."""
    INTERVIEW = "interview"                # 1:1 interview transcripts (default)
    CUSTOMER_CALL = "customer_call"        # support/sales call transcripts
    FEEDBACK_TICKET = "feedback_ticket"    # written customer feedback
    GENERAL_COMMENTARY = "general_commentary"  # unstructured commentary
    CUSTOM = "custom"  # caller supplies PromptConfiguration.custom_prompt
class TranscriptSentence(BaseModel):
    """
    Individual sentence in a transcript with metadata.

    Represents a single sentence or utterance in the transcript
    with timing information, speaker details, and content.

    Validation guarantees: non-empty stripped text, end_time strictly
    greater than start_time, and (when given) a confidence score in
    [0.0, 1.0].
    """
    model_config = ConfigDict(
        str_strip_whitespace=True,  # strip all str fields before validation
        validate_assignment=True,   # re-validate on attribute assignment
        extra="forbid"              # reject unknown fields
    )

    # Core content
    text: str = Field(
        ...,
        min_length=1,
        max_length=2000,
        description="The actual text content of the sentence"
    )

    # Indexing and identification; PositiveInt enforces >= 1 (1-based)
    sentence_index: PositiveInt = Field(
        ...,
        description="Sequential index of the sentence in the transcript (1-based)"
    )

    # Timing information, in seconds. end_time is cross-checked against
    # start_time by validate_end_time_after_start below.
    start_time: NonNegativeFloat = Field(
        ...,
        description="Start time of the sentence in seconds"
    )
    end_time: NonNegativeFloat = Field(
        ...,
        description="End time of the sentence in seconds"
    )

    # Speaker information
    speaker: str = Field(
        ...,
        min_length=1,
        max_length=100,
        description="Speaker identifier or name"
    )
    speaker_role: Optional[SpeakerRole] = Field(
        default=SpeakerRole.UNKNOWN,
        description="Role of the speaker in the conversation"
    )

    # Optional metadata
    confidence_score: Optional[float] = Field(
        default=None,
        ge=0.0,
        le=1.0,
        description="Transcription confidence score (0.0 to 1.0)"
    )
    language: Optional[LanguageCode] = Field(
        default=None,
        description="Detected or specified language of the sentence"
    )
    metadata: Optional[Dict[str, Any]] = Field(
        default=None,
        description="Additional metadata for the sentence"
    )

    @field_validator('end_time')
    @classmethod
    def validate_end_time_after_start(cls, v, info):
        """Ensure end_time is after start_time.

        Skipped when start_time itself failed validation (and is thus
        absent from info.data). Zero-duration sentences are rejected.
        """
        if 'start_time' in info.data and v <= info.data['start_time']:
            raise ValueError('end_time must be greater than start_time')
        return v

    @field_validator('text')
    @classmethod
    def validate_text_content(cls, v):
        """Validate text content is meaningful.

        NOTE(review): str_strip_whitespace=True strips the value before this
        validator runs, so whitespace-only input is normally rejected by
        min_length first; this acts as a defensive backstop — confirm
        before removing.
        """
        if not v or v.isspace():
            raise ValueError('text cannot be empty or only whitespace')
        return v.strip()
class PromptConfiguration(BaseModel):
    """
    Configuration for dynamic prompt injection.

    Allows customization of the topic extraction prompt
    while maintaining output format consistency.
    """
    model_config = ConfigDict(
        str_strip_whitespace=True,
        validate_assignment=True,
        extra="forbid"
    )

    # Template selection. Declared before custom_prompt so the validator
    # below can read the chosen template from info.data.
    template: PromptTemplate = Field(
        default=PromptTemplate.INTERVIEW,
        description="Pre-built prompt template to use"
    )

    # Custom prompt (only meaningful when template is CUSTOM; length
    # constraints apply only when a value is actually provided)
    custom_prompt: Optional[str] = Field(
        default=None,
        min_length=10,
        max_length=5000,
        description="Custom prompt text (required when template is CUSTOM)"
    )

    # Language specification
    language: LanguageCode = Field(
        default=LanguageCode.AUTO_DETECT,
        description="Language for processing and prompts"
    )

    # Business context
    business_domain: Optional[str] = Field(
        default=None,
        max_length=200,
        description="Business domain or industry context"
    )

    # Additional instructions
    additional_instructions: Optional[str] = Field(
        default=None,
        max_length=1000,
        description="Additional instructions to append to the prompt"
    )

    # Output format preferences
    include_confidence_scores: bool = Field(
        default=True,
        description="Whether to include confidence scores in output"
    )
    include_speaker_analysis: bool = Field(
        default=True,
        description="Whether to include speaker-specific analysis"
    )

    @field_validator('custom_prompt')
    @classmethod
    def validate_custom_prompt(cls, v, info):
        """Require a custom prompt whenever the CUSTOM template is chosen.

        The check is skipped if 'template' failed its own validation and
        is therefore missing from info.data.
        """
        if 'template' in info.data and info.data['template'] == PromptTemplate.CUSTOM:
            if not v:
                raise ValueError('custom_prompt is required when template is CUSTOM')
        return v
class ModelConfiguration(BaseModel):
    """
    Configuration for Anthropic model selection and parameters.

    Allows fine-tuning of model behavior for specific use cases.
    """
    model_config = ConfigDict(
        validate_assignment=True,
        extra="forbid"
    )

    # Model selection; None means "use the service default"
    model: Optional[AnthropicModel] = Field(
        default=None,
        description="Specific Anthropic model to use (uses default if not specified)"
    )

    # Generation parameters
    max_tokens: PositiveInt = Field(
        default=4000,
        le=8000,  # hard cap per request
        description="Maximum tokens to generate"
    )
    temperature: float = Field(
        default=0.0,  # deterministic by default
        ge=0.0,
        le=1.0,
        description="Sampling temperature (0.0 for deterministic, 1.0 for creative)"
    )

    # Fallback configuration
    enable_fallback: bool = Field(
        default=True,
        description="Whether to enable automatic fallback to alternative models"
    )

    # Timeout settings (capped at 10 minutes)
    timeout_seconds: PositiveInt = Field(
        default=300,
        le=600,
        description="Request timeout in seconds"
    )
class TranscriptRequest(BaseModel):
    """
    Main request model for transcript topic segmentation.

    Contains the transcript data and all configuration options
    for processing and analysis.

    Raises pydantic.ValidationError when sentences are not in ascending
    index/chronological order, or when indices are not exactly 1..N.
    """
    model_config = ConfigDict(
        str_strip_whitespace=True,
        validate_assignment=True,
        extra="forbid",
        # Permit the "model_config_override" field name despite pydantic's
        # reserved "model_" namespace.
        protected_namespaces=()
    )

    # Core transcript data (1 to 1500 sentences)
    sentences: List[TranscriptSentence] = Field(
        ...,
        min_length=1,
        max_length=1500,
        description="List of transcript sentences with metadata"
    )

    # Request metadata
    transcript_id: Optional[str] = Field(
        default=None,
        max_length=100,
        description="Optional identifier for the transcript"
    )
    transcript_title: Optional[str] = Field(
        default=None,
        max_length=200,
        description="Optional title or description of the transcript"
    )

    # Processing configuration; defaults to a fresh PromptConfiguration,
    # but an explicit None is also accepted.
    prompt_config: Optional[PromptConfiguration] = Field(
        default_factory=PromptConfiguration,
        description="Prompt configuration for topic extraction"
    )
    # NOTE(review): the alias equals the field name, so it is a no-op;
    # kept for wire-format compatibility.
    model_config_override: Optional[ModelConfiguration] = Field(
        default=None,
        description="Model configuration overrides",
        alias="model_config_override"
    )

    # Processing options
    merge_similar_topics: bool = Field(
        default=True,
        description="Whether to merge similar or duplicate topics"
    )
    min_topic_length: PositiveInt = Field(
        default=2,
        le=10,
        description="Minimum number of sentences for a topic"
    )
    include_metadata: bool = Field(
        default=True,
        description="Whether to include detailed metadata in response"
    )

    # Client information
    client_info: Optional[Dict[str, str]] = Field(
        default=None,
        description="Optional client information for logging and analytics"
    )

    @field_validator('sentences')
    @classmethod
    def validate_sentences_order(cls, v):
        """Validate sentences are in chronological order.

        Indices must be strictly increasing; start times must be
        non-decreasing (equal start times are allowed).
        """
        # Compare each adjacent pair; zip handles len(v) < 2 naturally.
        for prev, curr in zip(v, v[1:]):
            if curr.sentence_index <= prev.sentence_index:
                raise ValueError('Sentences must be in ascending order by sentence_index')
            if curr.start_time < prev.start_time:
                raise ValueError('Sentences must be in chronological order by start_time')
        return v

    @field_validator('sentences')
    @classmethod
    def validate_sentence_indices(cls, v):
        """Validate sentence indices are exactly 1..len(v) with no gaps."""
        actual_indices = [s.sentence_index for s in v]
        if actual_indices != list(range(1, len(v) + 1)):
            raise ValueError('Sentence indices must be sequential starting from 1')
        return v
class HealthCheckRequest(BaseModel):
    """
    Request model for health check with optional detailed checks.

    Both flags default to False so a bare request performs only the
    basic liveness check.
    """
    model_config = ConfigDict(extra="forbid")

    include_model_health: bool = Field(
        default=False,
        description="Whether to include detailed model health checks"
    )
    include_performance_stats: bool = Field(
        default=False,
        description="Whether to include performance statistics"
    )
class ModelSwitchRequest(BaseModel):
    """
    Request model for switching the active model.
    """
    model_config = ConfigDict(extra="forbid")

    # Target model; required — there is no default
    model: AnthropicModel = Field(
        ...,
        description="Model to switch to"
    )
    # Free-text audit note recorded with the switch
    reason: Optional[str] = Field(
        default=None,
        max_length=200,
        description="Optional reason for the model switch"
    )
# Type aliases for convenience
TranscriptData = List[TranscriptSentence]  # a full transcript as an ordered sentence list
RequestMetadata = Dict[str, Union[str, int, float, bool]]  # flat scalar metadata map