# NOTE(review): removed web-page scrape artifacts ("Spaces:", "Runtime error")
# that were captured along with this file and are not Python code.
# Standard library / third-party imports, then local package imports (PEP 8).
from typing import Any, Dict

import torch

from reasoning.scraper import scrape_social_knowledge
class ReasoningAgent:
    """Multi-source reasoning pipeline.

    Combines three evidence sources into a single textual response:
    long-term-memory retrieval, per-model relevance scores derived from
    tensor outputs, and (as a fallback when evidence is thin) freshly
    scraped social knowledge, which is also persisted back to memory.
    """

    def __init__(self, engine):
        # engine is expected to expose .ltm (with retrieve_text /
        # store_embedding) and .sentence_encoder (with encode) —
        # TODO(review): confirm against the engine implementation.
        self.engine = engine

    def reason(self, query: str, model_outputs: Dict[str, torch.Tensor]) -> str:
        """Build a reasoning trace for *query* and return it as one string.

        Args:
            query: Free-text question / prompt.
            model_outputs: Mapping of model name -> output tensor; entries
                that are not ``torch.Tensor`` are silently skipped.

        Returns:
            Space-joined reasoning steps; empty string when no evidence
            was found anywhere.
        """
        reasoning_steps = []

        # 1. Memory retrieval — presumably returns a list of text snippets.
        memories = self.engine.ltm.retrieve_text(query, k=5)
        if memories:
            reasoning_steps.extend(memories)

        # 2. Model reasoning: summarize each tensor output as a mean score.
        if model_outputs:
            for name, tensor in model_outputs.items():
                if isinstance(tensor, torch.Tensor):
                    score = torch.mean(tensor).item()
                    reasoning_steps.append(
                        f"{name} relevance score {score:.3f}"
                    )

        # 3. If reasoning is weak (fewer than two steps) → fall back to
        #    the social-knowledge scraper and persist what we learn.
        if len(reasoning_steps) < 2:
            # Guard against a scraper returning None instead of a list.
            scraped = scrape_social_knowledge(query) or []
            for item in scraped[:5]:
                # Fix: item["text"] raised KeyError on malformed items and
                # aborted the whole loop; skip items without usable text.
                text = item.get("text")
                if not text:
                    continue
                reasoning_steps.append(text)
                # Store the new knowledge in long-term memory.
                embedding = self.engine.sentence_encoder.encode(text)
                self.engine.ltm.store_embedding(
                    embedding,
                    metadata=item,
                )

        # 4. Synthesize answer from all collected steps.
        return " ".join(reasoning_steps)