"""Configuration constants for the application."""
from __future__ import annotations
import os
from typing import Dict, Any
# Agent Configuration
class AgentConfig:
    """Configuration for agent behavior.

    All values are read from environment variables once, at import time,
    falling back to the documented defaults.
    """

    # Number of optimization cycles the agent runs (default 3)
    OPTIMIZATION_CYCLES = int(os.getenv("OPTIMIZATION_CYCLES", "3"))

    # Hard character caps for generated documents
    RESUME_MAX_CHARS = int(os.getenv("RESUME_MAX_CHARS", "8000"))
    COVER_LETTER_MAX_CHARS = int(os.getenv("COVER_LETTER_MAX_CHARS", "4000"))

    # Keyword-extraction counts per text source
    JOB_KEYWORDS_COUNT = int(os.getenv("JOB_KEYWORDS_COUNT", "40"))
    RESUME_KEYWORDS_COUNT = int(os.getenv("RESUME_KEYWORDS_COUNT", "25"))
    COVER_KEYWORDS_COUNT = int(os.getenv("COVER_KEYWORDS_COUNT", "20"))
    MAX_NEW_KEYWORDS = int(os.getenv("MAX_NEW_KEYWORDS", "30"))

    # Upper bound on fixes applied during consistency checking
    MAX_CONTRADICTION_FIXES = int(os.getenv("MAX_CONTRADICTION_FIXES", "8"))

    # Text-processing limits
    SKILL_DISPLAY_LIMIT = int(os.getenv("SKILL_DISPLAY_LIMIT", "8"))
    DISTILL_MAX_POINTS = int(os.getenv("DISTILL_MAX_POINTS", "12"))
# LLM Configuration
class LLMConfig:
    """Configuration for LLM providers.

    PROVIDER selects the backend; MODEL may be None, in which case the
    consumer is expected to fall back to DEFAULT_MODELS[PROVIDER].
    """

    # Provider key, normalized to lowercase (default "openai")
    PROVIDER = os.getenv("LLM_PROVIDER", "openai").lower()
    # Explicit model override; None when LLM_MODEL is unset
    MODEL = os.getenv("LLM_MODEL")

    # Per-provider default model names
    DEFAULT_MODELS = {
        "openai": "gpt-4o-mini",
        "anthropic": "claude-3-5-sonnet-latest",
        "gemini": "gemini-1.5-flash"
    }

    # Per-request token ceilings
    RESUME_MAX_TOKENS = int(os.getenv("RESUME_MAX_TOKENS", "1200"))
    COVER_MAX_TOKENS = int(os.getenv("COVER_MAX_TOKENS", "800"))
    DEFAULT_MAX_TOKENS = int(os.getenv("DEFAULT_MAX_TOKENS", "1200"))

    # Sampling temperature (default 0.4)
    TEMPERATURE = float(os.getenv("LLM_TEMPERATURE", "0.4"))
# API Configuration
class APIConfig:
    """Configuration for external APIs."""

    # LinkedIn OAuth credentials (empty string when unset)
    LINKEDIN_CLIENT_ID = os.getenv("LINKEDIN_CLIENT_ID", "")
    LINKEDIN_CLIENT_SECRET = os.getenv("LINKEDIN_CLIENT_SECRET", "")
    LINKEDIN_REDIRECT_URI = os.getenv("LINKEDIN_REDIRECT_URI", "http://localhost:8501")
    # Mock mode is on by default; only the exact string "true" (any case) enables it
    MOCK_MODE = os.getenv("MOCK_MODE", "true").lower() == "true"

    # Tavily research API; key is None when unset
    TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
    TAVILY_MAX_RESULTS = int(os.getenv("TAVILY_MAX_RESULTS", "5"))

    # HTTP request timeout in seconds
    HTTP_TIMEOUT = float(os.getenv("HTTP_TIMEOUT", "20.0"))

    # Retry policy: attempt count and backoff factor (seconds)
    MAX_RETRIES = int(os.getenv("MAX_RETRIES", "3"))
    RETRY_BACKOFF = float(os.getenv("RETRY_BACKOFF", "1.0"))
# Memory Configuration
class MemoryConfig:
    """Configuration for memory storage."""

    # Root directory for persisted memory data
    BASE_DIR = os.getenv("MEMORY_BASE_DIR", "/workspace/memory/data")

    # Maximum allowed path length (default matches common filesystem limit)
    MAX_PATH_LENGTH = int(os.getenv("MAX_PATH_LENGTH", "255"))

    # Age in days after which stored data is eligible for cleanup
    AUTO_CLEANUP_DAYS = int(os.getenv("AUTO_CLEANUP_DAYS", "30"))
# Security Configuration
class SecurityConfig:
    """Security-related configuration."""

    # Input validation length caps
    MAX_INPUT_LENGTH = int(os.getenv("MAX_INPUT_LENGTH", "10000"))
    MAX_JOB_ID_LENGTH = int(os.getenv("MAX_JOB_ID_LENGTH", "100"))

    # Rate limiting: requests per minute, per user
    RATE_LIMIT_PER_USER = int(os.getenv("RATE_LIMIT_PER_USER", "10"))

    # Session idle timeout in minutes
    SESSION_TIMEOUT_MINUTES = int(os.getenv("SESSION_TIMEOUT_MINUTES", "60"))
# UI Configuration
class UIConfig:
    """User interface configuration."""

    # Streamlit page settings
    PAGE_TITLE = "Job Application Assistant"
    LAYOUT = "wide"

    # Display limits
    MAX_PREVIEW_LENGTH = int(os.getenv("MAX_PREVIEW_LENGTH", "3000"))
    MAX_SUGGESTED_JOBS = int(os.getenv("MAX_SUGGESTED_JOBS", "5"))

    # Gradio server settings; 0.0.0.0 binds all interfaces
    # NOTE(review): presumably intended for containerized deployment — confirm
    GRADIO_PORT = int(os.getenv("PORT", "7860"))
    GRADIO_SERVER = "0.0.0.0"
# Probability Scoring Weights
class ScoringConfig:
    """Configuration for probability scoring.

    Each document score blends keyword coverage with conciseness; the
    per-document weight pairs below default to summing to 1.0.
    """

    # Resume scoring weights
    RESUME_COVERAGE_WEIGHT = float(os.getenv("RESUME_COVERAGE_WEIGHT", "0.7"))
    RESUME_CONCISENESS_WEIGHT = float(os.getenv("RESUME_CONCISENESS_WEIGHT", "0.3"))

    # Cover letter scoring weights
    COVER_COVERAGE_WEIGHT = float(os.getenv("COVER_COVERAGE_WEIGHT", "0.6"))
    COVER_CONCISENESS_WEIGHT = float(os.getenv("COVER_CONCISENESS_WEIGHT", "0.4"))
# Default Values
class Defaults:
    """Default values for various components."""

    # Fallback user identifier when no session user is available
    USER_ID = "default_user"

    # Mock profile used when APIConfig.MOCK_MODE is enabled
    MOCK_USER_NAME = "Alex Candidate"
    MOCK_USER_HEADLINE = "Senior Software Engineer"
    MOCK_USER_EMAIL = "alex@example.com"
    MOCK_USER_LOCATION = "Remote"
    MOCK_USER_SKILLS = [
        "Python", "AWS", "Docker", "Kubernetes",
        "PostgreSQL", "Data Engineering"
    ]
def get_config() -> Dict[str, Any]:
    """Get all configuration as a dictionary.

    Returns:
        A nested dict snapshot of selected settings (agent, llm, api,
        security), suitable for logging or debugging. Values reflect the
        class attributes, which were resolved from the environment at
        import time — not the current environment.
    """
    return {
        "agent": {
            "optimization_cycles": AgentConfig.OPTIMIZATION_CYCLES,
            "resume_max_chars": AgentConfig.RESUME_MAX_CHARS,
            "cover_letter_max_chars": AgentConfig.COVER_LETTER_MAX_CHARS,
        },
        "llm": {
            "provider": LLMConfig.PROVIDER,
            "model": LLMConfig.MODEL,
            "temperature": LLMConfig.TEMPERATURE,
        },
        "api": {
            "mock_mode": APIConfig.MOCK_MODE,
            "http_timeout": APIConfig.HTTP_TIMEOUT,
        },
        "security": {
            "max_input_length": SecurityConfig.MAX_INPUT_LENGTH,
            "rate_limit": SecurityConfig.RATE_LIMIT_PER_USER,
        }
    }
# Export main config classes
__all__ = [
"AgentConfig",
"LLMConfig",
"APIConfig",
"MemoryConfig",
"SecurityConfig",
"UIConfig",
"ScoringConfig",
"Defaults",
"get_config"
] |