# OpenAI-compatible LLM (default: local Ollama)
OPENAI_BASE_URL=http://localhost:11434/v1
OPENAI_API_KEY=ollama
OPENAI_MODEL=llama3.1
OPENAI_TEMPERATURE=0

# LangSmith tracing (use with python-dotenv / your shell)
LANGSMITH_TRACING=true
LANGSMITH_ENDPOINT=https://api.smith.langchain.com
LANGSMITH_API_KEY=
LANGSMITH_PROJECT=

# Alternative names LangChain also understands:
# LANGCHAIN_TRACING_V2=true
# LANGCHAIN_API_KEY=
# LANGCHAIN_PROJECT=

# Optional: verbose LangChain stdout (noisy; off by default)
# LANGCHAIN_DEBUG=true

# FMP (Financial Modeling Prep) API key
# NOTE: no spaces around '=' — required for POSIX shell sourcing
FMP_API_KEY=

# HTTP API: uvicorn api:app --host 0.0.0.0 --port 8000