"""Application configuration."""
from pydantic_settings import BaseSettings, SettingsConfigDict
class Settings(BaseSettings):
    """Runtime configuration, resolved from the environment / a ``.env`` file.

    Every field below can be overridden by an environment variable of the
    same name (pydantic-settings default behavior); unknown variables are
    ignored via ``extra="ignore"``.
    """

    # --- Upstream endpoint -------------------------------------------------
    # Hugging Face Space that exposes an OpenAI-compatible API.
    hf_space_url: str = "https://jeanbaptdzd-open-finance-llm-8b.hf.space"

    # --- OpenAI-compatible client settings ---------------------------------
    # The Space is unauthenticated, so the key is only a placeholder value
    # required by OpenAI-style clients.
    api_key: str = "not-needed"
    model_name: str = "DragonLLM/qwen3-8b-fin-v1.0"

    # --- HTTP behavior -----------------------------------------------------
    timeout: float = 120.0
    max_retries: int = 3

    # --- Generation budget -------------------------------------------------
    # Qwen3 is a reasoning model: its <think> blocks can consume 40-60% of
    # the generated tokens, so the per-request budget is raised above the
    # usual ~800-1000 default to leave room for the visible answer.
    max_tokens: int = 1500

    # Theoretical per-request generation ceiling (rarely needed in practice).
    # Qwen-3 8B context: 32,768 tokens base, up to 128K with YaRN; the
    # practical limit is context_window - input_tokens - safety_margin, so
    # with a typical ~500-token prompt far more than this is available.
    max_generation_limit: int = 20000

    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        extra="ignore",
    )
# Module-level singleton: importers use `from <this module> import settings`
# rather than constructing their own instance, so environment/.env parsing
# happens exactly once at import time.
settings = Settings()