Spaces:
Runtime error
Runtime error
| # utils/langchain_enhancements.py | |
| from langchain.memory import ConversationSummaryMemory | |
| from langchain.agents import Tool, AgentExecutor, OpenAIFunctions | |
| from langchain.tools import DuckDuckGoSearchRun | |
| from langchain.retrievers import TimeWeightedVectorStoreRetriever | |
| from langchain.embeddings import OpenAIEmbeddings | |
| from langchain.vectorstores import Chroma | |
| from langchain.chat_models import ChatAnthropic | |
class EnhancedLearningSystem:
    """Trading-education assistant built on LangChain.

    Combines an Anthropic chat model with a summary memory, a
    time-weighted vector-store retriever over past chat history, and an
    agent equipped with search / context / progress tools.

    NOTE(review): the file imports ``OpenAIFunctions`` from
    ``langchain.agents`` — in released langchain versions this agent class
    is named ``OpenAIFunctionsAgent``, and an OpenAI-functions agent is not
    compatible with ``ChatAnthropic``; verify against the pinned langchain
    version before shipping.
    """

    def __init__(self, anthropic_api_key):
        """Initialize the LLM, memory, tools, and agent executor.

        Args:
            anthropic_api_key: API key forwarded to ``ChatAnthropic``.
        """
        self.llm = ChatAnthropic(anthropic_api_key=anthropic_api_key)
        self.memory = self._setup_memory()
        self.tools = self._setup_tools()
        self.agent_executor = self._setup_agent()

    def _setup_memory(self):
        """Build the two-part memory system.

        Returns:
            dict with:
              'summary'  -- ConversationSummaryMemory (rolling LLM summary,
                            keyed as 'chat_history', returns message objects)
              'retriever'-- TimeWeightedVectorStoreRetriever over a Chroma
                            collection of past chat turns
        """
        # Vector store backing the historical-context retriever.
        # NOTE(review): OpenAIEmbeddings requires an OpenAI key in addition
        # to the Anthropic one — confirm both are configured.
        embeddings = OpenAIEmbeddings()
        vectorstore = Chroma(
            collection_name="chat_history",
            embedding_function=embeddings
        )
        # Recency-biased retrieval: low decay_rate keeps older turns
        # relevant for longer; k=5 documents per query.
        retriever = TimeWeightedVectorStoreRetriever(
            vectorstore=vectorstore,
            decay_rate=0.01,
            k=5
        )
        # Rolling summary of the conversation, fed back to the agent.
        memory = ConversationSummaryMemory(
            llm=self.llm,
            memory_key="chat_history",
            return_messages=True
        )
        return {
            'summary': memory,
            'retriever': retriever
        }

    def _setup_tools(self):
        """Create the agent's tool belt.

        Returns:
            list[Tool]: web search, historical chat context, and
            learning-progress lookup.
        """
        search = DuckDuckGoSearchRun()
        tools = [
            Tool(
                name="Search",
                func=search.run,
                description="Useful for finding current trading information and examples"
            ),
            Tool(
                name="Historical Context",
                # FIX: _get_historical_context is async — registering it only
                # as the sync `func` made a tool call return an un-awaited
                # coroutine object. Async callables must be passed via
                # `coroutine=` so async agent execution awaits them.
                func=self._get_historical_context,
                coroutine=self._get_historical_context,
                description="Get relevant historical chat context"
            ),
            Tool(
                name="Learning Progress",
                func=self._check_learning_progress,
                description="Check user's learning progress and suggest next topics"
            )
        ]
        return tools

    def _setup_agent(self):
        """Assemble the agent and wrap it in an executor.

        Returns:
            AgentExecutor sharing the summary memory, so each run is
            recorded automatically.
        """
        agent = OpenAIFunctions.from_llm_and_tools(
            llm=self.llm,
            tools=self.tools,
            memory=self.memory['summary']
        )
        return AgentExecutor.from_agent_and_tools(
            agent=agent,
            tools=self.tools,
            memory=self.memory['summary'],
            verbose=True
        )

    async def _get_historical_context(self, topic):
        """Retrieve page contents of past chat turns relevant to *topic*."""
        relevant_docs = await self.memory['retriever'].aget_relevant_documents(topic)
        return [doc.page_content for doc in relevant_docs]

    def _check_learning_progress(self, topic):
        """Report the user's progress in *topic*.

        Returns:
            dict with 'mastered_concepts', 'in_progress', and
            'suggested_next' lists (all empty until tracking is implemented).
        """
        # TODO: implement real progress tracking; placeholder shape only.
        return {
            'mastered_concepts': [],
            'in_progress': [],
            'suggested_next': []
        }

    async def process_question(self, question):
        """Answer a learning question with retrieval and progress context.

        Args:
            question: the user's free-text question.

        Returns:
            dict with 'response' (agent answer), 'context' (retrieved
            history snippets), and 'progress' (progress report).
        """
        history = await self._get_historical_context(question)
        # Compute once — the original called this twice per question.
        progress = self._check_learning_progress(question)
        # FIX: the agent prompt exposes a single 'input' variable; the
        # original passed a nested dict which was stringified raw into the
        # prompt. Flatten to a readable string instead.
        response = await self.agent_executor.arun(
            input=(
                f"Question: {question}\n"
                f"Relevant history: {history}\n"
                f"Learning progress: {progress}"
            )
        )
        # FIX: no manual save_context here — the same summary memory is
        # attached to the AgentExecutor (see _setup_agent), which already
        # saves each run; the original recorded every turn twice.
        return {
            'response': response,
            'context': history,
            'progress': progress
        }