"""Core analysis orchestrator combining all services."""
import time
from typing import Dict, Any, Tuple
from writing_studio.core.config import settings
from writing_studio.core.exceptions import ValidationError, TextGenerationError
from writing_studio.services.diff_service import DiffService
from writing_studio.services.model_service import get_model_service
from writing_studio.services.prompt_service import PromptService
from writing_studio.services.rubric_service import RubricService
from writing_studio.utils.logging import logger
from writing_studio.utils.metrics import (
request_count,
request_duration,
generation_duration,
error_count,
active_requests,
)
from writing_studio.utils.validation import validate_text_input, validate_model_name


class WritingAnalyzer:
    """Main analyzer orchestrating all writing analysis services."""

    def __init__(self):
"""Initialize the analyzer with all required services."""
self.model_service = get_model_service()
self.rubric_service = RubricService()
self.diff_service = DiffService()
        self.prompt_service = PromptService()

    def analyze_and_compare(
self,
user_text: str,
        model_name: Optional[str] = None,
prompt_pack: str = "General",
) -> Tuple[str, str, str, str, Dict[str, Any]]:
"""
Analyze text and generate comprehensive feedback.
Args:
user_text: User's input text
model_name: Model to use (default: from settings)
prompt_pack: Prompt pack to use
Returns:
Tuple of (original, revision, feedback, diff_html, metadata)
Raises:
ValidationError: If input validation fails
TextGenerationError: If text generation fails
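
        Example:
            A minimal usage sketch (illustrative; assumes the default model
            can be loaded and the "General" prompt pack exists):

            >>> analyzer = WritingAnalyzer()
            >>> original, revision, feedback, diff_html, meta = (
            ...     analyzer.analyze_and_compare("Their going to the store tomorow.")
            ... )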
"""
active_requests.inc()
start_time = time.time()
try:
# Validate and sanitize input
logger.info("Starting text analysis")
user_text = validate_text_input(user_text)
# Load model if different from current
model_name = model_name or settings.default_model
model_name = validate_model_name(model_name)
if self.model_service._current_model_name != model_name:
logger.info(f"Loading new model: {model_name}")
self.model_service.load_model(model_name)
# Generate prompt using selected pack
prompt = self.prompt_service.generate_prompt(user_text, prompt_pack)
# Generate AI revision
logger.info("Generating AI revision...")
with generation_duration.time():
revision = self.model_service.generate_text(
prompt,
max_length=min(len(user_text.split()) * 2 + 100, settings.max_model_length),
use_cache=True
)
# Clean up revision (remove any prompt artifacts)
if prompt_pack in revision:
revision = revision.split(prompt_pack)[-1].strip()
if "Revised text:" in revision:
revision = revision.split("Revised text:")[-1].strip()
if user_text in revision:
# Model might include original text, extract just the revision
revision = revision.replace(user_text, "").strip()
            # If the revision is empty or unchanged, fall back to the original with a note
            if not revision or revision == user_text:
                revision = user_text + "\n\n[Note: The AI model kept the text as-is, suggesting it's already well-written!]"
# Analyze with rubric
rubric_results = self.rubric_service.analyze_text(user_text)
feedback = self.rubric_service.format_feedback(rubric_results)
# Generate diff if enabled
diff_html = ""
if settings.enable_diff_highlighting:
diff_html = self.diff_service.generate_html_diff(user_text, revision)
            # Gather metadata and record metrics from a single duration measurement,
            # so the observed duration matches the one reported in metadata
            duration = time.time() - start_time
            metadata = {
                "model": model_name,
                "prompt_pack": prompt_pack,
                "duration": duration,
                "rubric_scores": rubric_results,
                "diff_stats": self.diff_service.get_change_summary(user_text, revision),
            }

            request_duration.labels(operation="analyze").observe(duration)
            request_count.labels(status="success").inc()
            logger.info(f"Analysis completed in {duration:.2f}s")
return user_text, revision, feedback, diff_html, metadata
except ValidationError as e:
logger.error(f"Validation error: {e}")
error_count.labels(error_type="validation").inc()
request_count.labels(status="validation_error").inc()
raise
except TextGenerationError as e:
logger.error(f"Generation error: {e}")
error_count.labels(error_type="generation").inc()
request_count.labels(status="generation_error").inc()
raise
except Exception as e:
logger.error(f"Unexpected error: {e}")
error_count.labels(error_type="unexpected").inc()
request_count.labels(status="error").inc()
raise
finally:
            active_requests.dec()

    def get_available_prompt_packs(self) -> list:
"""Get list of available prompt packs."""
        return self.prompt_service.get_available_packs()

    def clear_cache(self) -> None:
"""Clear the model generation cache."""
self.model_service.clear_cache()
logger.info("Cache cleared")