from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_groq import ChatGroq
from langchain.memory import ConversationSummaryBufferMemory
from dotenv import load_dotenv

from services.prompts import ASSISTANT_PROMPT

# Load GROQ_API_KEY (expected in a local .env file); ChatGroq reads it from
# the environment automatically, so no manual os.environ assignment is needed.
load_dotenv()


class ConversationHandler:
    def __init__(self, model_name="llama-3.3-70b-versatile", temperature=0.7):
        self.chat_model = ChatGroq(
            model_name=model_name,
            temperature=temperature,
        )
        # The template needs slots for the running history and the new user
        # message; with only the system prompt, neither would reach the model.
        self.prompt = ChatPromptTemplate.from_messages([
            ("system", ASSISTANT_PROMPT),
            MessagesPlaceholder(variable_name="chat_history"),
            ("human", "{user_query}"),
        ])
        # Summary-buffer memory keeps recent turns verbatim and folds older
        # ones into a rolling summary once max_token_limit is exceeded.
        self.memory = ConversationSummaryBufferMemory(
            llm=self.chat_model,
            max_token_limit=2000,
            return_messages=True,
            memory_key="chat_history",
        )

    def give_response(self, user_input):
        chain = self.prompt | self.chat_model
        memory_variables = self.memory.load_memory_variables({})
        response = chain.invoke(
            {
                "user_query": user_input,
                "chat_history": memory_variables["chat_history"],
            }
        )
        # Persist the exchange so it appears in chat_history on the next turn.
        self.memory.save_context(
            {"input": user_input},
            {"output": response.content},
        )
        return response

    def summarize_conversation(self) -> str:
        # Produce a one-off summary of everything currently held in memory.
        memory_variables = self.memory.load_memory_variables({})
        return self.memory.predict_new_summary(
            messages=memory_variables["chat_history"],
            existing_summary="",
        )

    def clear_memory(self):
        # Drop both the message buffer and the accumulated summary.
        self.memory.clear()
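

# Minimal usage sketch. Assumes GROQ_API_KEY is present in .env and that
# services.prompts defines ASSISTANT_PROMPT; the query below is only an
# illustrative placeholder.
if __name__ == "__main__":
    handler = ConversationHandler()
    reply = handler.give_response("What can you help me with?")
    print(reply.content)
    print("Summary:", handler.summarize_conversation())
    handler.clear_memory()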