Spaces:
Sleeping
Sleeping
File size: 5,718 Bytes
aeb3f7c f277022 aeb3f7c f277022 aeb3f7c f277022 aeb3f7c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 |
"""Core analysis orchestrator combining all services."""
import time
from typing import Dict, Any, Tuple
from writing_studio.core.config import settings
from writing_studio.core.exceptions import ValidationError, TextGenerationError
from writing_studio.services.diff_service import DiffService
from writing_studio.services.model_service import get_model_service
from writing_studio.services.prompt_service import PromptService
from writing_studio.services.rubric_service import RubricService
from writing_studio.utils.logging import logger
from writing_studio.utils.metrics import (
request_count,
request_duration,
generation_duration,
error_count,
active_requests,
)
from writing_studio.utils.validation import validate_text_input, validate_model_name
class WritingAnalyzer:
"""Main analyzer orchestrating all writing analysis services."""
def __init__(self):
"""Initialize the analyzer with all required services."""
self.model_service = get_model_service()
self.rubric_service = RubricService()
self.diff_service = DiffService()
self.prompt_service = PromptService()
def analyze_and_compare(
self,
user_text: str,
model_name: str = None,
prompt_pack: str = "General",
) -> Tuple[str, str, str, str, Dict[str, Any]]:
"""
Analyze text and generate comprehensive feedback.
Args:
user_text: User's input text
model_name: Model to use (default: from settings)
prompt_pack: Prompt pack to use
Returns:
Tuple of (original, revision, feedback, diff_html, metadata)
Raises:
ValidationError: If input validation fails
TextGenerationError: If text generation fails
"""
active_requests.inc()
start_time = time.time()
try:
# Validate and sanitize input
logger.info("Starting text analysis")
user_text = validate_text_input(user_text)
# Load model if different from current
model_name = model_name or settings.default_model
model_name = validate_model_name(model_name)
if self.model_service._current_model_name != model_name:
logger.info(f"Loading new model: {model_name}")
self.model_service.load_model(model_name)
# Generate prompt using selected pack
prompt = self.prompt_service.generate_prompt(user_text, prompt_pack)
# Generate AI revision
logger.info("Generating AI revision...")
with generation_duration.time():
revision = self.model_service.generate_text(
prompt,
max_length=min(len(user_text.split()) * 2 + 100, settings.max_model_length),
use_cache=True
)
# Clean up revision (remove any prompt artifacts)
if prompt_pack in revision:
revision = revision.split(prompt_pack)[-1].strip()
if "Revised text:" in revision:
revision = revision.split("Revised text:")[-1].strip()
if user_text in revision:
# Model might include original text, extract just the revision
revision = revision.replace(user_text, "").strip()
# If revision is empty or too similar, provide a note
if not revision or revision == user_text:
revision = user_text + "\n\n[Note: The AI model kept the text as-is, suggesting it's already well-written!]"
# Analyze with rubric
rubric_results = self.rubric_service.analyze_text(user_text)
feedback = self.rubric_service.format_feedback(rubric_results)
# Generate diff if enabled
diff_html = ""
if settings.enable_diff_highlighting:
diff_html = self.diff_service.generate_html_diff(user_text, revision)
# Gather metadata
metadata = {
"model": model_name,
"prompt_pack": prompt_pack,
"duration": time.time() - start_time,
"rubric_scores": rubric_results,
"diff_stats": self.diff_service.get_change_summary(user_text, revision),
}
duration = time.time() - start_time
request_duration.labels(operation="analyze").observe(duration)
request_count.labels(status="success").inc()
logger.info(f"Analysis completed in {duration:.2f}s")
return user_text, revision, feedback, diff_html, metadata
except ValidationError as e:
logger.error(f"Validation error: {e}")
error_count.labels(error_type="validation").inc()
request_count.labels(status="validation_error").inc()
raise
except TextGenerationError as e:
logger.error(f"Generation error: {e}")
error_count.labels(error_type="generation").inc()
request_count.labels(status="generation_error").inc()
raise
except Exception as e:
logger.error(f"Unexpected error: {e}")
error_count.labels(error_type="unexpected").inc()
request_count.labels(status="error").inc()
raise
finally:
active_requests.dec()
def get_available_prompt_packs(self) -> list:
"""Get list of available prompt packs."""
return self.prompt_service.get_available_packs()
def clear_cache(self) -> None:
"""Clear the model generation cache."""
self.model_service.clear_cache()
logger.info("Cache cleared")
|