Spaces:
Paused
Paused
"""
Integration example showing how to use both fallback solutions
"""

# In calendar_service.py - OAuth integration
from oauth_persistence import save_oauth_token_after_auth, load_oauth_token_on_startup
async def handle_oauth_callback(self, user_email: str, auth_code: str):
    """Handle the OAuth callback and persist the refresh token.

    Exchanges the authorization code via the existing OAuth flow, then
    stores the resulting credentials with the persistence fallback so
    they survive process restarts.
    """
    # Existing OAuth flow: exchange the authorization code for credentials.
    # NOTE(review): `authorization_response=` usually expects the full
    # callback URL, not a bare code — confirm against the flow's API.
    credentials = self.flow.fetch_token(authorization_response=auth_code)

    # NEW: Store refresh token persistently
    await save_oauth_token_after_auth(user_email, credentials)

    return credentials
async def startup_restore_tokens(self):
    """Restore persisted OAuth credentials on application startup."""
    owner_email = config.my_email_address
    stored_refresh_token = await load_oauth_token_on_startup(owner_email)

    if stored_refresh_token:
        # Rebuild full credentials from the persisted refresh token.
        self.credentials = self._create_credentials_from_refresh_token(stored_refresh_token)
# In chat_agent.py - LlamaIndex replacement
from fallback_llm import direct_llm


class ChatCalAgent:
    """Chat agent wired to a direct LLM provider (no LlamaIndex layer)."""

    def __init__(self):
        # OLD: self.llm = get_llm()  # LlamaIndex version
        # NEW: Use direct LLM provider
        self.llm_provider = direct_llm
        self.calendar_service = CalendarService()

    async def _handle_general_conversation(self, message: str, session: SessionData) -> str:
        """Handle general conversation with direct LLM"""
        # System prompt first, then the most recent 10 turns of history.
        messages = [{"role": "system", "content": SYSTEM_PROMPT.format(...)}]
        for turn in session.conversation_history[-10:]:
            messages.append({"role": turn["role"], "content": turn["content"]})

        # NEW: Direct LLM call (no LlamaIndex)
        response = await self.llm_provider.chat(messages)
        session.add_message("assistant", response)
        return response
# In requirements.txt - Simplified dependencies
"""
# Remove these LlamaIndex dependencies:
# llama-index==0.11.0
# llama-index-llms-groq==0.2.0
# llama-index-llms-anthropic==0.3.0
# llama-index-tools-google==0.2.0

# Keep only direct API clients:
groq==0.9.0
anthropic==0.34.0
google-generativeai==0.5.2
google-cloud-secret-manager==2.20.0

# Remove problematic pydantic constraint:
# pydantic==2.8.2  # No longer needed!
"""