# Smart-Auto-Complete / src/autocomplete.py
# Author: Sandipan Haldar
# feat: Replace general context with LinkedIn-specific context (commit 770544d)
"""
Smart Auto-Complete Core Logic
Handles text completion with context awareness
"""
import hashlib
import logging
import time
from dataclasses import dataclass
from typing import List

from .api_client import APIClient
from .cache import CacheManager
from .utils import sanitize_input
logger = logging.getLogger(__name__)
@dataclass
class CompletionRequest:
    """Parameters for a single completion request.

    Bundles the input text with its context type and the generation
    knobs forwarded to the API client.
    """

    text: str                  # input text to be completed
    context: str               # context type, e.g. "email", "creative", "linkedin"
    max_suggestions: int = 3   # number of suggestions requested
    temperature: float = 0.7   # sampling temperature for generation
    max_tokens: int = 150      # target length of the completion
class SmartAutoComplete:
"""Main autocomplete engine with context awareness"""
# Context-specific prompts and configurations
CONTEXT_PROMPTS = {
"email": {
"system_prompt": """You are an expert email writing assistant. Generate professional,
contextually appropriate email completions. Focus on:
- Professional tone and structure
- Clear, concise communication
- Appropriate greetings and closings
- Business communication best practices
IMPORTANT: Generate a completion that is approximately {max_tokens} tokens long.
Adjust your response length accordingly - shorter for fewer tokens, longer for more tokens.""",
"user_template": "Complete this email text naturally and professionally with approximately {max_tokens} tokens: {text}",
"temperature": 0.6,
},
"creative": {
"system_prompt": """You are a creative writing assistant. Generate engaging,
imaginative story continuations. Focus on:
- Narrative consistency and flow
- Character development
- Descriptive and engaging language
- Plot advancement
IMPORTANT: Generate a completion that is approximately {max_tokens} tokens long.
Adjust your response length accordingly - shorter for fewer tokens, longer for more tokens.""",
"user_template": "Continue this creative writing piece naturally with approximately {max_tokens} tokens: {text}",
"temperature": 0.8,
},
"linkedin": {
"system_prompt": """You are a LinkedIn writing assistant specialized in professional networking content. Generate engaging,
professional LinkedIn-appropriate text completions. Focus on:
- Professional networking tone
- Industry-relevant language
- Engaging and authentic voice
- LinkedIn best practices (hashtags, mentions, professional insights)
- Career development and business communication
IMPORTANT: Generate a completion that is approximately {max_tokens} tokens long.
Adjust your response length accordingly - shorter for fewer tokens, longer for more tokens.""",
"user_template": "Complete this LinkedIn post/content naturally and professionally with approximately {max_tokens} tokens: {text}",
"temperature": 0.7,
},
}
def __init__(self, settings=None):
"""Initialize the autocomplete engine"""
self.settings = settings
self.api_client = APIClient(settings)
self.cache_manager = CacheManager(settings)
self.request_history = []
logger.info("SmartAutoComplete engine initialized")
def get_suggestions(
self,
text: str,
context: str = "linkedin",
max_tokens: int = 150,
user_context: str = "",
) -> List[str]:
"""
Get auto-complete suggestions for the given text and context
Args:
text: Input text to complete
context: Context type (email, creative, linkedin)
max_tokens: Maximum tokens in the response
user_context: Additional context provided by the user
Returns:
List of suggestion strings (typically 1 suggestion)
"""
try:
# Input validation and sanitization
text = sanitize_input(text)
if not text or len(text.strip()) < 2:
return []
# Check cache first
cache_key = self._generate_cache_key(
text, context, max_tokens, user_context
)
cached_suggestions = self.cache_manager.get(cache_key)
if cached_suggestions:
logger.debug(f"Cache hit for key: {cache_key}")
return cached_suggestions
# Create completion request
request = CompletionRequest(
text=text,
context=context,
max_suggestions=1, # Always return 1 suggestion
temperature=self.CONTEXT_PROMPTS[context]["temperature"],
max_tokens=max_tokens,
)
# Get suggestions from API
suggestions = self._get_suggestions_from_api(request, user_context)
# Process and filter suggestions
suggestions = self._process_suggestions(suggestions, text, context)
# Cache the results
if suggestions:
self.cache_manager.set(cache_key, suggestions)
# Track request for analytics
self._track_request(request, suggestions)
return suggestions
except Exception as e:
logger.error(f"Error getting suggestions: {str(e)}")
return []
def _get_suggestions_from_api(
self, request: CompletionRequest, user_context: str = ""
) -> List[str]:
"""Get suggestions from the API client"""
try:
context_config = self.CONTEXT_PROMPTS.get(
request.context, self.CONTEXT_PROMPTS["linkedin"]
)
# Format system prompt with max_tokens and user context
system_prompt = context_config["system_prompt"].format(
max_tokens=request.max_tokens
)
if user_context and user_context.strip():
system_prompt += f"\n\nIMPORTANT CONTEXT: Please consider this background information when generating the completion: {user_context.strip()}"
logger.info(f"Using user context: {user_context.strip()[:100]}...")
# Format user message with max_tokens and context awareness
user_message = context_config["user_template"].format(
text=request.text, max_tokens=request.max_tokens
)
if user_context and user_context.strip():
user_message = (
f"Given the context: {user_context.strip()}\n\n{user_message}"
)
logger.info(f"Requesting {request.max_tokens} tokens from API")
# Add additional length instruction to user message
length_instruction = f"\n\nIMPORTANT: Please generate approximately {request.max_tokens} tokens. "
if request.max_tokens <= 100:
length_instruction += "Keep it concise and brief."
elif request.max_tokens <= 200:
length_instruction += "Provide a moderate length response."
elif request.max_tokens <= 300:
length_instruction += "Provide a detailed response."
else:
length_instruction += "Provide a comprehensive and detailed response."
user_message += length_instruction
# Prepare messages for API
messages = [
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_message},
]
# Get single completion
try:
completion = self.api_client.get_completion(
messages=messages,
temperature=request.temperature,
max_tokens=request.max_tokens,
)
if completion:
return [completion.strip()]
else:
return []
except Exception as e:
logger.warning(f"Failed to get suggestion: {str(e)}")
return []
except Exception as e:
logger.error(f"Error getting suggestions from API: {str(e)}")
return []
def _process_suggestions(
self, suggestions: List[str], text: str, context: str
) -> List[str]:
"""Process and filter the suggestions"""
try:
# Basic filtering based on context
processed_suggestions = []
for suggestion in suggestions:
if suggestion and suggestion not in text:
processed_suggestions.append(suggestion)
return processed_suggestions
except Exception as e:
logger.error(f"Error processing suggestions: {str(e)}")
return []
def _track_request(self, request: CompletionRequest, suggestions: List[str]):
"""Track the request for analytics"""
try:
self.request_history.append(
{
"text": request.text,
"context": request.context,
"suggestions": suggestions,
"timestamp": time.time(),
}
)
except Exception as e:
logger.error(f"Error tracking request: {str(e)}")
def _generate_cache_key(
self, text: str, context: str, max_tokens: int, user_context: str = ""
) -> str:
"""Generate a unique cache key for the request"""
return f"{text}_{context}_{max_tokens}_{user_context}"