| """ |
| Configuration system for FocusFlow LLM providers. |
| Supports both local (Ollama) and cloud (Hugging Face) deployments. |
| """ |
| import os |
| from enum import Enum |
|
|
class LLMProvider(Enum):
    """Enumeration of the supported LLM backends."""

    OLLAMA = "ollama"            # local inference server
    HUGGINGFACE = "huggingface"  # hosted inference API
|
|
# Provider selection: "ollama" (default, local) or "huggingface" (cloud).
# Lower-cased so LLM_PROVIDER=Ollama etc. still matches.
USE_PROVIDER = os.getenv("LLM_PROVIDER", "ollama").lower()

# Deployment mode: "local" (default) or "cloud".
# Consistency fix: use os.getenv throughout (was os.environ.get here,
# os.getenv elsewhere -- identical semantics, one idiom).
DEPLOYMENT_MODE = os.getenv("DEPLOYMENT_MODE", "local").lower()
IS_CLOUD = DEPLOYMENT_MODE == "cloud"
|
|
| |
CONFIG = {
    # Active provider derived from the LLM_PROVIDER env var; any value
    # other than "ollama" selects Hugging Face (matches original logic).
    "llm_provider": LLMProvider.OLLAMA if USE_PROVIDER == "ollama" else LLMProvider.HUGGINGFACE,

    # Local Ollama settings. Model and URL are now env-overridable, with
    # the previously hard-coded values as defaults (backward compatible).
    "ollama": {
        "model": os.getenv("OLLAMA_MODEL", "llama3.2:1b"),
        "base_url": os.getenv("OLLAMA_BASE_URL", "http://localhost:11434"),
    },

    # Hugging Face Inference API settings.
    "huggingface": {
        "model": os.getenv("HUGGINGFACE_MODEL", "meta-llama/Meta-Llama-3-8B-Instruct"),
        "api_token": os.getenv("HUGGINGFACE_API_TOKEN", ""),
        "max_length": 512,      # max new tokens for generation
        "temperature": 0.7,
    },
}
|
|
def get_llm_provider():
    """Return the LLMProvider enum member currently selected in CONFIG."""
    provider = CONFIG["llm_provider"]
    return provider
|
|
def get_llm_config():
    """Return the settings dict for the active provider."""
    active = get_llm_provider()
    # Provider-settings keys in CONFIG match the enum's string value.
    return CONFIG[active.value]
|
|
def is_local_mode():
    """Return True when the local (offline) Ollama provider is active."""
    # Enum members are singletons, so identity comparison is equivalent.
    return get_llm_provider() is LLMProvider.OLLAMA
|
|
def is_cloud_mode():
    """Return True when the cloud (online demo) Hugging Face provider is active."""
    return get_llm_provider() is LLMProvider.HUGGINGFACE
|
|
def get_llm():
    """
    Build and return an LLM client for the configured provider.

    Returns:
        In local mode, a langchain_community ``Ollama`` instance; in cloud
        mode, a ``ChatHuggingFace`` wrapping a ``HuggingFaceEndpoint``.
    """
    provider = get_llm_provider()
    config = get_llm_config()

    if provider == LLMProvider.OLLAMA:
        # Lazy import so cloud deployments don't need the Ollama stack.
        # Bug fix: `Ollama` was used here without any import anywhere in
        # the module, so local mode raised NameError at call time.
        from langchain_community.llms import Ollama
        return Ollama(
            model=config["model"],
            base_url=config.get("base_url", "http://localhost:11434"),
        )

    # Cloud mode: lazy import keeps local installs free of HF dependencies.
    from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint

    llm = HuggingFaceEndpoint(
        repo_id=config["model"],
        huggingfacehub_api_token=config["api_token"],
        # Honor the config's max_length (was a hard-coded 512; same default).
        max_new_tokens=config.get("max_length", 512),
        temperature=config.get("temperature", 0.7),
    )
    # Wrap the raw endpoint so it speaks the chat-model interface.
    return ChatHuggingFace(llm=llm)
|
|
def get_embeddings():
    """
    Build the embeddings model matching the active provider.

    Local mode uses Ollama's nomic-embed-text; cloud mode uses a CPU-hosted
    sentence-transformers model with normalized embeddings.
    """
    # Guard clause: handle the local case and bail out early.
    if get_llm_provider() is LLMProvider.OLLAMA:
        from langchain_community.embeddings import OllamaEmbeddings
        return OllamaEmbeddings(model="nomic-embed-text")

    from langchain_huggingface import HuggingFaceEmbeddings
    return HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2",
        model_kwargs={"device": "cpu"},
        encode_kwargs={"normalize_embeddings": True},
    )
|
|
# --- Firebase credentials (optional; enables auth in cloud deployments) ---
# Raw service-account JSON string, read straight from the environment.
FIREBASE_SERVICE_ACCOUNT_JSON = os.getenv("FIREBASE_SERVICE_ACCOUNT_JSON", "")
FIREBASE_API_KEY = os.getenv("FIREBASE_API_KEY", "")
FIREBASE_PROJECT_ID = os.getenv("FIREBASE_PROJECT_ID", "")

# --- OAuth client credentials (Google / GitHub sign-in) ---
GOOGLE_CLIENT_ID = os.getenv("GOOGLE_CLIENT_ID", "")
GOOGLE_CLIENT_SECRET = os.getenv("GOOGLE_CLIENT_SECRET", "")
GITHUB_CLIENT_ID = os.getenv("GITHUB_CLIENT_ID", "")
GITHUB_CLIENT_SECRET = os.getenv("GITHUB_CLIENT_SECRET", "")
# Public base URL of the app; presumably the OAuth redirect target -- TODO
# confirm against the auth flow's caller.
APP_URL = os.getenv("APP_URL", "http://localhost:8501")
|
|
def is_firebase_configured():
    """Return True when Firebase credentials are available (cloud mode with auth)."""
    # The getenv default is "", so an empty string means auth is disabled.
    return FIREBASE_SERVICE_ACCOUNT_JSON != ""
|
|
# YouTube Data API key (optional feature).
# NOTE(review): defaults to None, unlike the other env vars above which
# default to "" -- both are falsy for has_youtube_api_key(), but any caller
# comparing against `is None` should verify which sentinel it expects.
YOUTUBE_API_KEY = os.getenv("YOUTUBE_API_KEY", None)
|
|
def has_youtube_api_key():
    """Return True when a YouTube Data API key is configured."""
    # Key is either None (unset) or a string; reject None and empty strings,
    # exactly as bool() did.
    return YOUTUBE_API_KEY is not None and YOUTUBE_API_KEY != ""
|
|