# NOTE(review): the following header was non-Python scrape residue from the
# hosting page (status text, file size, commit d66a587, line-number gutter);
# it has been commented out so the module parses.
import os
#from langchain_community.chat_models import ChatOpenAI
from langchain_groq import ChatGroq
from dotenv import load_dotenv
load_dotenv()
class LLMHandler:
    """Thin wrapper around a Groq-hosted chat model for invite-text generation.

    Reads the API key from the ``GROQ_API_KEY`` environment variable and
    exposes a single ``generate_response`` entry point that builds a
    personalized-invite prompt from a user prompt plus a data record.
    """

    # Keys that generate_response requires in the ``data`` mapping.
    REQUIRED_FIELDS = ("Name", "Job Title", "Organisation", "Area of Interest", "Category")

    def __init__(self, model_name: str = "llama-3.3-70b-versatile"):
        """Initialize the handler with the specified Groq model.

        :param model_name: Groq model identifier to use for generation.
        :raises ValueError: if ``GROQ_API_KEY`` is not set in the environment.
        """
        self.groq_api_key = os.getenv("GROQ_API_KEY")
        if not self.groq_api_key:
            raise ValueError("GROQ_API_KEY environment variable not set.")
        # Initialize the Groq LLM client (network client; constructed once per handler).
        self.llm = ChatGroq(groq_api_key=self.groq_api_key, model_name=model_name)

    @staticmethod
    def _build_prompt(user_prompt: str, data: dict) -> str:
        """Assemble the full instruction prompt from the user prompt and record.

        Raises KeyError (listing ALL missing keys at once, instead of the
        opaque single-key error an f-string lookup would give) when any
        required field is absent from ``data``.
        """
        missing = [k for k in LLMHandler.REQUIRED_FIELDS if k not in data]
        if missing:
            raise KeyError(f"data is missing required field(s): {', '.join(missing)}")
        # Prompt text is deliberately kept verbatim — the model's output is
        # sensitive to exact wording, so only structure was refactored here.
        return (
            f"You are a professional AI model tasked with writing personalized invite texts "
            f"that are concise (40 to 50 words), brochure-suitable, and tailored as per the user prompt.\n\n"
            f"Consider the user prompt: {user_prompt}\n\n"
            f"Details of the individual:\n"
            f"- Name: {data['Name']}\n"
            f"- Job Title: {data['Job Title']}\n"
            f"- Organisation: {data['Organisation']}\n"
            f"- Area of Interest: {data['Area of Interest']}\n"
            f"- Category: {data['Category']}\n\n"
            f"- Instance info: {data}\n"
            f"- Go through the Instance info If the {user_prompt} mentions you to use variables that are not mentioned above. "
            f"- The response **MUST**:\n"
            f"- Start with 'Hello {data['Name']}'.\n"
            f"- Be concise, professional, and STRICTLY DO NOT generate invalid characters or encoding errors (e.g. 'SoraVR’s').\n"
            f"- Use standard English punctuation, such as single quotes (e.g., 'can't', 'it's').\n"
            f"- STRICTLY Give only one response for the Category the sample belongs to.\n"
            f"- Do NOT include preambles or unnecessary text.\n\n"
            f"Return the final response cleanly, without any extraneous symbols or characters."
        )

    def generate_response(self, user_prompt: str, data: dict) -> str:
        """Generate a concise invite text using the LLM.

        :param user_prompt: Prompt provided by the user.
        :param data: Mapping with the instance information; must contain the
            keys in :data:`REQUIRED_FIELDS` (extra keys are passed through
            inside the "Instance info" section).
        :return: Generated response text, stripped of surrounding whitespace.
        :raises KeyError: if ``data`` lacks any required field.
        """
        prompt = self._build_prompt(user_prompt, data)
        # Query the LLM and return the cleaned response text.
        response = self.llm.invoke(prompt)
        return response.content.strip()