# ChatCal.ai — fallback_llm.py
# Author: Peter Michael Gits
# feat: Add fallback solutions for LlamaIndex and OAuth persistence (commit b69ff02)
"""
Fallback LLM implementation without LlamaIndex dependency.
Direct API clients for maximum compatibility.
"""
import json
import logging
from typing import Dict, List, Optional

# Direct API imports (no LlamaIndex): each provider SDK is optional, so a
# missing package degrades that provider to "unavailable" instead of
# crashing at import time.
try:
    import groq
except ImportError:
    groq = None

try:
    import anthropic
except ImportError:
    anthropic = None

try:
    import google.generativeai as genai
except ImportError:
    genai = None

from .config import config

logger = logging.getLogger(__name__)
class DirectLLMProvider:
    """Direct LLM provider without LlamaIndex dependency.

    Tries each configured backend in a fixed fallback chain
    (Groq -> Anthropic -> Gemini) and falls back to a canned mock
    response when no provider is available or all of them fail.
    """

    def __init__(self) -> None:
        # Coerce to real booleans: config.*_api_key is presumably a string or
        # None, so without bool() this dict would mix key strings with
        # False/None values.
        self.providers_available: Dict[str, bool] = {
            'groq': bool(groq is not None and config.groq_api_key),
            'anthropic': bool(anthropic is not None and config.anthropic_api_key),
            'gemini': bool(genai is not None and config.google_api_key),
        }

    async def chat(self, messages: List[Dict[str, str]], temperature: float = 0.1) -> str:
        """Chat completion with fallback chain: Groq -> Anthropic -> Gemini -> Mock.

        Args:
            messages: OpenAI-style chat messages ({"role": ..., "content": ...}).
            temperature: Sampling temperature forwarded to each provider.

        Returns:
            The assistant's reply text. Never raises: every provider failure
            is logged as a warning and the next provider in the chain is tried.
        """
        # NOTE(review): the SDK calls below are synchronous and will block the
        # event loop despite this being a coroutine; consider wrapping them in
        # asyncio.to_thread() — confirm acceptable for current call sites.

        # Try Groq first (fastest/cheapest tier).
        if self.providers_available['groq']:
            try:
                client = groq.Groq(api_key=config.groq_api_key)
                response = client.chat.completions.create(
                    model="llama-3.1-8b-instant",
                    messages=messages,
                    temperature=temperature,
                    max_tokens=1000,
                )
                return response.choices[0].message.content
            except Exception as e:
                logger.warning(f"Groq failed: {e}")

        # Fallback to Anthropic.
        if self.providers_available['anthropic']:
            try:
                client = anthropic.Anthropic(api_key=config.anthropic_api_key)
                # Anthropic takes the system prompt as a separate argument,
                # not as a message in the list, so split it out here.
                system_msg = ""
                user_messages = []
                for msg in messages:
                    if msg["role"] == "system":
                        system_msg = msg["content"]
                    else:
                        user_messages.append(msg)
                response = client.messages.create(
                    model="claude-3-sonnet-20240229",
                    max_tokens=1000,
                    temperature=temperature,
                    system=system_msg,
                    messages=user_messages,
                )
                return response.content[0].text
            except Exception as e:
                logger.warning(f"Anthropic failed: {e}")

        # Fallback to Gemini.
        if self.providers_available['gemini']:
            try:
                genai.configure(api_key=config.google_api_key)
                model = genai.GenerativeModel('gemini-pro')
                # This Gemini call takes a single prompt string, so flatten
                # the chat transcript into role-labelled lines.
                prompt = ""
                for msg in messages:
                    if msg["role"] == "system":
                        prompt += f"System: {msg['content']}\n\n"
                    elif msg["role"] == "user":
                        prompt += f"User: {msg['content']}\n"
                    elif msg["role"] == "assistant":
                        prompt += f"Assistant: {msg['content']}\n"
                response = model.generate_content(prompt)
                return response.text
            except Exception as e:
                logger.warning(f"Gemini failed: {e}")

        # Final fallback: deterministic canned reply so callers always get text.
        return self._mock_response(messages)

    def _mock_response(self, messages: List[Dict[str, str]]) -> str:
        """Keyword-matched canned reply used when no real provider succeeds."""
        # Empty transcript falls through to the generic greeting branch.
        last_msg = messages[-1]["content"].lower() if messages else "hello"
        if any(word in last_msg for word in ["book", "schedule", "appointment"]):
            return "I'd be happy to help you book an appointment! Please provide your name, preferred date and time."
        elif any(word in last_msg for word in ["cancel", "delete"]):
            return "I can help you cancel an appointment. Which meeting would you like to cancel?"
        elif any(word in last_msg for word in ["available", "availability"]):
            return "Let me check Peter's availability. What dates are you considering?"
        else:
            return "Hello! I'm ChatCal, your voice-enabled scheduling assistant. How can I help you today?"
# Global singleton; constructing it only evaluates the config availability
# flags — no network connections are opened at import time.
direct_llm = DirectLLMProvider()