"""Configuration constants for the application."""

from __future__ import annotations

import os
from typing import Any, Dict
# Agent Configuration
class AgentConfig:
    """Tunable knobs controlling agent behavior, overridable via environment variables."""

    # Number of optimization passes the agent performs.
    OPTIMIZATION_CYCLES: int = int(os.getenv("OPTIMIZATION_CYCLES", "3"))

    # Upper bounds (in characters) on generated documents.
    RESUME_MAX_CHARS: int = int(os.getenv("RESUME_MAX_CHARS", "8000"))
    COVER_LETTER_MAX_CHARS: int = int(os.getenv("COVER_LETTER_MAX_CHARS", "4000"))

    # Keyword-extraction budgets per document type.
    JOB_KEYWORDS_COUNT: int = int(os.getenv("JOB_KEYWORDS_COUNT", "40"))
    RESUME_KEYWORDS_COUNT: int = int(os.getenv("RESUME_KEYWORDS_COUNT", "25"))
    COVER_KEYWORDS_COUNT: int = int(os.getenv("COVER_KEYWORDS_COUNT", "20"))
    MAX_NEW_KEYWORDS: int = int(os.getenv("MAX_NEW_KEYWORDS", "30"))

    # Cap on fixes applied during consistency checking.
    MAX_CONTRADICTION_FIXES: int = int(os.getenv("MAX_CONTRADICTION_FIXES", "8"))

    # Text-processing display/summarization limits.
    SKILL_DISPLAY_LIMIT: int = int(os.getenv("SKILL_DISPLAY_LIMIT", "8"))
    DISTILL_MAX_POINTS: int = int(os.getenv("DISTILL_MAX_POINTS", "12"))
# LLM Configuration
class LLMConfig:
    """Provider, model, and sampling settings for the LLM backend."""

    # Active provider key, normalized to lowercase.
    PROVIDER: str = os.getenv("LLM_PROVIDER", "openai").lower()
    # Explicit model override; None when LLM_MODEL is unset
    # (presumably resolved against DEFAULT_MODELS by the caller — not shown here).
    MODEL = os.getenv("LLM_MODEL")

    # Fallback model name per provider.
    DEFAULT_MODELS: Dict[str, str] = {
        "openai": "gpt-4o-mini",
        "anthropic": "claude-3-5-sonnet-latest",
        "gemini": "gemini-1.5-flash",
    }

    # Generation token budgets per document type.
    RESUME_MAX_TOKENS: int = int(os.getenv("RESUME_MAX_TOKENS", "1200"))
    COVER_MAX_TOKENS: int = int(os.getenv("COVER_MAX_TOKENS", "800"))
    DEFAULT_MAX_TOKENS: int = int(os.getenv("DEFAULT_MAX_TOKENS", "1200"))

    # Sampling temperature.
    TEMPERATURE: float = float(os.getenv("LLM_TEMPERATURE", "0.4"))
# API Configuration
class APIConfig:
    """Credentials, timeouts, and retry policy for external services."""

    # LinkedIn OAuth credentials (empty string when not configured).
    LINKEDIN_CLIENT_ID: str = os.getenv("LINKEDIN_CLIENT_ID", "")
    LINKEDIN_CLIENT_SECRET: str = os.getenv("LINKEDIN_CLIENT_SECRET", "")
    LINKEDIN_REDIRECT_URI: str = os.getenv("LINKEDIN_REDIRECT_URI", "http://localhost:8501")
    # Mocking is opt-out: any value other than "true" (case-insensitive) disables it.
    MOCK_MODE: bool = os.getenv("MOCK_MODE", "true").lower() == "true"

    # Tavily research API (key is None when unset).
    TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
    TAVILY_MAX_RESULTS: int = int(os.getenv("TAVILY_MAX_RESULTS", "5"))

    # HTTP request timeout in seconds.
    HTTP_TIMEOUT: float = float(os.getenv("HTTP_TIMEOUT", "20.0"))

    # Retry policy: attempt count and backoff base in seconds.
    MAX_RETRIES: int = int(os.getenv("MAX_RETRIES", "3"))
    RETRY_BACKOFF: float = float(os.getenv("RETRY_BACKOFF", "1.0"))
# Memory Configuration
class MemoryConfig:
    """Filesystem-backed memory storage settings."""

    # Root directory for persisted memory files.
    BASE_DIR: str = os.getenv("MEMORY_BASE_DIR", "/workspace/memory/data")

    # Maximum allowed path length for stored files.
    MAX_PATH_LENGTH: int = int(os.getenv("MAX_PATH_LENGTH", "255"))

    # Entries older than this many days are eligible for cleanup.
    AUTO_CLEANUP_DAYS: int = int(os.getenv("AUTO_CLEANUP_DAYS", "30"))
# Security Configuration
class SecurityConfig:
    """Input-validation, rate-limiting, and session settings."""

    # Maximum accepted lengths for user-supplied values.
    MAX_INPUT_LENGTH: int = int(os.getenv("MAX_INPUT_LENGTH", "10000"))
    MAX_JOB_ID_LENGTH: int = int(os.getenv("MAX_JOB_ID_LENGTH", "100"))

    # Per-user rate limit, in requests per minute.
    RATE_LIMIT_PER_USER: int = int(os.getenv("RATE_LIMIT_PER_USER", "10"))

    # Idle session expiry, in minutes.
    SESSION_TIMEOUT_MINUTES: int = int(os.getenv("SESSION_TIMEOUT_MINUTES", "60"))
# UI Configuration
class UIConfig:
    """Settings for the Streamlit and Gradio front ends."""

    # Streamlit page chrome.
    PAGE_TITLE: str = "Job Application Assistant"
    LAYOUT: str = "wide"

    # Display truncation limits.
    MAX_PREVIEW_LENGTH: int = int(os.getenv("MAX_PREVIEW_LENGTH", "3000"))
    MAX_SUGGESTED_JOBS: int = int(os.getenv("MAX_SUGGESTED_JOBS", "5"))

    # Gradio server binding; PORT follows the common hosting-platform convention.
    GRADIO_PORT: int = int(os.getenv("PORT", "7860"))
    GRADIO_SERVER: str = "0.0.0.0"
# Probability Scoring Weights
class ScoringConfig:
    """Weights used when combining coverage and conciseness into one score."""

    # Resume score: coverage vs. conciseness (defaults sum to 1.0).
    RESUME_COVERAGE_WEIGHT: float = float(os.getenv("RESUME_COVERAGE_WEIGHT", "0.7"))
    RESUME_CONCISENESS_WEIGHT: float = float(os.getenv("RESUME_CONCISENESS_WEIGHT", "0.3"))

    # Cover-letter score: coverage vs. conciseness (defaults sum to 1.0).
    COVER_COVERAGE_WEIGHT: float = float(os.getenv("COVER_COVERAGE_WEIGHT", "0.6"))
    COVER_CONCISENESS_WEIGHT: float = float(os.getenv("COVER_CONCISENESS_WEIGHT", "0.4"))
# Default Values
class Defaults:
    """Fallback values used when real data is unavailable (e.g. mock mode)."""

    # Identifier used when no authenticated user is present.
    USER_ID: str = "default_user"

    # Mock profile returned in place of a real LinkedIn profile.
    MOCK_USER_NAME: str = "Alex Candidate"
    MOCK_USER_HEADLINE: str = "Senior Software Engineer"
    MOCK_USER_EMAIL: str = "alex@example.com"
    MOCK_USER_LOCATION: str = "Remote"
    MOCK_USER_SKILLS = [
        "Python", "AWS", "Docker", "Kubernetes",
        "PostgreSQL", "Data Engineering",
    ]
def get_config() -> Dict[str, Any]:
    """Return a snapshot of commonly used settings, grouped by subsystem.

    Note: only a curated subset of each config class is exposed here;
    values are read at call time from the class attributes (which were
    themselves resolved from the environment at import time).
    """
    agent = {
        "optimization_cycles": AgentConfig.OPTIMIZATION_CYCLES,
        "resume_max_chars": AgentConfig.RESUME_MAX_CHARS,
        "cover_letter_max_chars": AgentConfig.COVER_LETTER_MAX_CHARS,
    }
    llm = {
        "provider": LLMConfig.PROVIDER,
        "model": LLMConfig.MODEL,
        "temperature": LLMConfig.TEMPERATURE,
    }
    api = {
        "mock_mode": APIConfig.MOCK_MODE,
        "http_timeout": APIConfig.HTTP_TIMEOUT,
    }
    security = {
        "max_input_length": SecurityConfig.MAX_INPUT_LENGTH,
        "rate_limit": SecurityConfig.RATE_LIMIT_PER_USER,
    }
    return {"agent": agent, "llm": llm, "api": api, "security": security}
# Export main config classes
__all__ = [
    "AgentConfig", "LLMConfig", "APIConfig", "MemoryConfig",
    "SecurityConfig", "UIConfig", "ScoringConfig", "Defaults",
    "get_config",
]