# clean init (commit 478dec6)
import logging
from typing import AsyncIterable, Optional

from langchain.callbacks import AsyncIteratorCallbackHandler

from services.llms.LLM import model_gemini
class LLMAgent:
    """Wraps a chat model and streams its response token-by-token.

    The identifier attributes (`agent_id`, `session_id`, `thread_id`,
    `agent_name`) start as ``None`` and are expected to be populated by the
    caller after construction — nothing in this class sets them.
    """

    def __init__(self, model=model_gemini):
        self.agent_id: Optional[str] = None
        self.session_id: Optional[str] = None
        self.thread_id: Optional[str] = None
        self.agent_name: Optional[str] = None
        self.model = model
        # NOTE(review): one AsyncIteratorCallbackHandler is created per agent
        # and shared by every generate() call on this instance — concurrent
        # streams on the same agent would funnel through the same handler.
        # Verify callers use one agent per active stream.
        self.callback = AsyncIteratorCallbackHandler()
        self.callbacks = [self.callback]

    async def generate(self, messages: list) -> AsyncIterable[str]:
        """Stream the model's response to *messages* via ``astream``.

        Args:
            messages (list): Chat messages to send to the model, e.g.::

                from langchain_core.messages import HumanMessage, SystemMessage
                messages = [
                    SystemMessage(content="You are a helpful assistant! Your name is Bob."),
                    HumanMessage(content="What is your name?"),
                ]

        Yields:
            str: The ``content`` of each token chunk produced by the model.

        Raises:
            Exception: Re-raised (after logging) if streaming fails, so the
                consumer can distinguish a failure from a clean end-of-stream.
        """
        try:
            async for token in self.model.astream(input=messages, callbacks=self.callbacks):
                yield token.content
        except Exception:
            # The previous version printed the error and returned, which made
            # a failed stream indistinguishable from a completed one. Log with
            # the traceback and re-raise to honor the documented contract.
            logging.getLogger(__name__).exception(
                "LLMAgent.generate failed while streaming"
            )
            raise