# InviteAI / llm_merger.py
# Invitation-generator LLM pipeline (primary generator + validator).
# Origin: commit 07bd23e by dhanvanth183 — "Added Local code for Invitation Generator".
import os
from dotenv import load_dotenv
from langchain_groq import ChatGroq
from openai import OpenAI
load_dotenv()
class PrimaryLLMHandler:
    """Generates personalized invite texts via the OpenAI chat completions API."""

    def __init__(self, model_name="gpt-4o-mini"):
        """
        Initialize the primary LLM handler (default model: gpt-4o-mini).

        Args:
            model_name: OpenAI chat model identifier to use for generation.

        Raises:
            ValueError: If the OPENAI_API_KEY environment variable is not set.
        """
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        if not self.openai_api_key:
            raise ValueError("OPENAI_API_KEY environment variable not set.")
        self.client = OpenAI(api_key=self.openai_api_key)
        self.model_name = model_name

    def generate_response(self, user_prompt, data):
        """
        Generate a personalized invite text using the primary LLM.

        Args:
            user_prompt: Free-form instructions describing the desired invite.
            data: Mapping that must contain the keys 'Name', 'Job Title',
                'Organisation', 'Area of Interest', and 'Category'
                (a KeyError propagates if any is missing).

        Returns:
            The model's reply text with surrounding whitespace stripped.
        """
        # Prompt text is part of the model contract — kept verbatim.
        prompt = (
            f"You are a professional AI model tasked with writing personalized invite texts "
            f"that are concise (less than 40 words), brochure-suitable, and tailored as per the category in the given sample."
            f"\n\n"
            f"User prompt: {user_prompt}\n\n"
            f"Details of the individual:\n"
            f"- Name: {data['Name']}\n"
            f"- Job Title: {data['Job Title']}\n"
            f"- Organisation: {data['Organisation']}\n"
            f"- Area of Interest: {data['Area of Interest']}\n"
            f"- Category: {data['Category']}\n\n"
            f"The response should start with 'Hello {data['Name']}'."
            f"Ensure the tone aligns with the instructions. STRICTLY give only one response."
        )
        completion = self.client.chat.completions.create(
            model=self.model_name,
            messages=[
                {"role": "system", "content": "You are a professional assistant AI."},
                {"role": "user", "content": prompt},
            ],
        )
        return completion.choices[0].message.content.strip()
class ValidatorLLMHandler:
    """Validates (and if needed corrects) generated invites via a Groq-hosted LLM."""

    def __init__(self, model_name="gemma2-9b-it"):
        """
        Initialize the validator LLM handler (default model: Gemma 2 9B on Groq).

        Args:
            model_name: Groq model identifier to use for validation.

        Raises:
            ValueError: If the GROQ_API_KEY environment variable is not set.
        """
        self.groq_api_key = os.getenv("GROQ_API_KEY")
        if not self.groq_api_key:
            raise ValueError("GROQ_API_KEY environment variable not set.")
        self.llm = ChatGroq(groq_api_key=self.groq_api_key, model_name=model_name)

    def validate_and_correct_response(self, user_prompt, original_response, data):
        """
        Validate the primary model's output and return a correction if needed.

        Args:
            user_prompt: The original user instructions the invite must satisfy.
            original_response: The primary LLM's generated invite text.
            data: Mapping that must contain the keys 'Name', 'Job Title',
                'Organisation', 'Area of Interest', and 'Category'
                (a KeyError propagates if any is missing).

        Returns:
            The validator's reply, stripped of surrounding whitespace — either
            the literal text 'Valid Response' or a corrected invite.
        """
        # Prompt text is part of the model contract — kept verbatim.
        validation_prompt = (
            f"You are a professional AI model tasked with validating and correcting AI-generated texts. "
            f"The original response must align strictly with the provided user prompt and input details. "
            f"If the response fails to meet the requirements, generate a corrected version."
            f"\n\n"
            f"User prompt: {user_prompt}\n\n"
            f"Details of the individual:\n"
            f"- Name: {data['Name']}\n"
            f"- Job Title: {data['Job Title']}\n"
            f"- Organisation: {data['Organisation']}\n"
            f"- Area of Interest: {data['Area of Interest']}\n"
            f"- Category: {data['Category']}\n\n"
            f"Original response: {original_response}\n\n"
            f"Instructions:\n"
            f"- If the original response aligns with the user prompt and input details, reply with 'Valid Response'.\n"
            f"- Otherwise, provide a corrected version starting with 'Hello {data['Name']}'.\n"
            f"- Keep it concise (less than 40 words) and brochure-suitable.\n"
        )
        response = self.llm.invoke(validation_prompt)
        return response.content.strip()