File size: 3,585 Bytes
07bd23e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
import os
from dotenv import load_dotenv
from langchain_groq import ChatGroq
from openai import OpenAI

# Load API keys (OPENAI_API_KEY, GROQ_API_KEY) from a local .env file into os.environ.
load_dotenv()


class PrimaryLLMHandler:
    """Primary invite-text generator backed by an OpenAI chat model.

    Builds a personalized invite prompt from an individual's details and
    sends it to the configured chat-completion model (default: gpt-4o-mini).
    """

    def __init__(self, model_name="gpt-4o-mini"):
        """
        Initializes the Primary LLM Handler.

        Args:
            model_name: OpenAI chat model identifier (default "gpt-4o-mini").

        Raises:
            ValueError: If the OPENAI_API_KEY environment variable is not set.
        """
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        if not self.openai_api_key:
            raise ValueError("OPENAI_API_KEY environment variable not set.")

        self.client = OpenAI(api_key=self.openai_api_key)
        self.model_name = model_name

    def _build_prompt(self, user_prompt, data):
        """Assemble the full generation prompt from the user prompt and details.

        `data` must provide the keys "Name", "Job Title", "Organisation",
        "Area of Interest", and "Category"; a missing key raises KeyError.
        """
        return (
            f"You are a professional AI model tasked with writing personalized invite texts "
            f"that are concise (less than 40 words), brochure-suitable, and tailored as per the category in the given sample."
            f"\n\n"
            f"User prompt: {user_prompt}\n\n"
            f"Details of the individual:\n"
            f"- Name: {data['Name']}\n"
            f"- Job Title: {data['Job Title']}\n"
            f"- Organisation: {data['Organisation']}\n"
            f"- Area of Interest: {data['Area of Interest']}\n"
            f"- Category: {data['Category']}\n\n"
            f"The response should start with 'Hello {data['Name']}'."
            f"Ensure the tone aligns with the instructions. STRICTLY give only one response."
        )

    def generate_response(self, user_prompt, data):
        """
        Generates a personalized invite text using the primary LLM.

        Args:
            user_prompt: Free-form instructions describing the desired invite.
            data: Mapping with the individual's details (see _build_prompt for
                the required keys).

        Returns:
            The model's reply text with surrounding whitespace stripped.
        """
        prompt = self._build_prompt(user_prompt, data)

        completion = self.client.chat.completions.create(
            model=self.model_name,
            messages=[
                {"role": "system", "content": "You are a professional assistant AI."},
                {"role": "user", "content": prompt},
            ],
        )

        return completion.choices[0].message.content.strip()


class ValidatorLLMHandler:
    """Secondary LLM that validates (and, if needed, corrects) generated invites.

    Uses a Groq-hosted chat model to check a candidate invite against the
    original user prompt and individual details.
    """

    def __init__(self, model_name="gemma2-9b-it"):
        """
        Initializes the Validator LLM Handler (default model: gemma2-9b-it).

        Args:
            model_name: Groq model identifier to use for validation.

        Raises:
            ValueError: If the GROQ_API_KEY environment variable is not set.
        """
        self.groq_api_key = os.getenv("GROQ_API_KEY")
        if not self.groq_api_key:
            raise ValueError("GROQ_API_KEY environment variable not set.")

        self.llm = ChatGroq(groq_api_key=self.groq_api_key, model_name=model_name)

    def validate_and_correct_response(self, user_prompt, original_response, data):
        """
        Validates and corrects the response using the secondary LLM.

        Args:
            user_prompt: The instructions originally given to the primary LLM.
            original_response: The candidate invite text to validate.
            data: Mapping with keys "Name", "Job Title", "Organisation",
                "Area of Interest", and "Category" (KeyError if missing).

        Returns:
            Either the literal string 'Valid Response' (per the instructions
            given to the model) or a corrected invite text, whitespace-stripped.
        """
        validation_prompt = (
            f"You are a professional AI model tasked with validating and correcting AI-generated texts. "
            f"The original response must align strictly with the provided user prompt and input details. "
            f"If the response fails to meet the requirements, generate a corrected version."
            f"\n\n"
            f"User prompt: {user_prompt}\n\n"
            f"Details of the individual:\n"
            f"- Name: {data['Name']}\n"
            f"- Job Title: {data['Job Title']}\n"
            f"- Organisation: {data['Organisation']}\n"
            f"- Area of Interest: {data['Area of Interest']}\n"
            f"- Category: {data['Category']}\n\n"
            f"Original response: {original_response}\n\n"
            f"Instructions:\n"
            f"- If the original response aligns with the user prompt and input details, reply with 'Valid Response'.\n"
            f"- Otherwise, provide a corrected version starting with 'Hello {data['Name']}'.\n"
            f"- Keep it concise (less than 40 words) and brochure-suitable.\n"
        )

        response = self.llm.invoke(validation_prompt)
        return response.content.strip()