import os

# from langchain_community.chat_models import ChatOpenAI
from langchain_groq import ChatGroq
from dotenv import load_dotenv

load_dotenv()


class LLMHandler:
    def __init__(self, model_name="llama-3.3-70b-versatile"):
        """
        Initializes the LLMHandler with the specified Groq model.

        :param model_name: Name of the Groq-hosted model to use.
        :raises ValueError: If the GROQ_API_KEY environment variable is not set.
        """
        self.groq_api_key = os.getenv("GROQ_API_KEY")
        if not self.groq_api_key:
            raise ValueError("GROQ_API_KEY environment variable not set.")

        # Initialize the Groq LLM client.
        self.llm = ChatGroq(groq_api_key=self.groq_api_key, model_name=model_name)

    def generate_response(self, user_prompt, data):
        """
        Generate a concise invite text using the LLM, based on the user prompt
        and the instance data.

        :param user_prompt: Prompt provided by the user.
        :param data: Dictionary containing the instance information
                     (e.g., UID, Name, Job Title, etc.).
        :return: Generated response text.
        """
        # Build the full prompt from the user input and instance data.
        prompt = (
            f"You are a professional AI model tasked with writing personalized invite texts "
            f"that are concise (40 to 50 words), brochure-suitable, and tailored to the user prompt.\n\n"
            f"Consider the user prompt: {user_prompt}\n\n"
            f"Details of the individual:\n"
            f"- Name: {data['Name']}\n"
            f"- Job Title: {data['Job Title']}\n"
            f"- Organisation: {data['Organisation']}\n"
            f"- Area of Interest: {data['Area of Interest']}\n"
            f"- Category: {data['Category']}\n\n"
            f"- Instance info: {data}\n"
            f"- If the user prompt asks for variables not listed above, look them up in the Instance info.\n"
            f"- The response **MUST**:\n"
            f"- Start with 'Hello {data['Name']}'.\n"
            f"- Be concise and professional, and STRICTLY avoid invalid characters or encoding errors (e.g. 'SoraVR’s').\n"
            f"- Use standard English punctuation, such as single quotes (e.g., 'can't', 'it's').\n"
            f"- STRICTLY give only one response, for the Category the sample belongs to.\n"
            f"- Do NOT include preambles or unnecessary text.\n\n"
            f"Return the final response cleanly, without any extraneous symbols or characters."
        )

        # Query the LLM and return the stripped response text.
        response = self.llm.invoke(prompt)
        return response.content.strip()
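

# --- Usage sketch (illustrative only) ---
# A minimal example of how LLMHandler might be driven. The sample record and
# prompt below are hypothetical; a real run needs GROQ_API_KEY set in the
# environment (or in a .env file picked up by load_dotenv above).
if __name__ == "__main__":
    sample = {
        # Hypothetical fields; real records may carry extra keys, which the
        # prompt exposes via the full "Instance info" dump.
        "UID": "001",
        "Name": "Ada Lovelace",
        "Job Title": "Research Engineer",
        "Organisation": "Analytical Engines Ltd",
        "Area of Interest": "Machine Learning",
        "Category": "Speaker",
    }
    handler = LLMHandler()
    invite = handler.generate_response(
        "Write a formal invite to our annual AI summit.", sample
    )
    print(invite)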