# NOTE(review): the lines "Spaces: / Runtime error / Runtime error" were
# residue from a Hugging Face Spaces error-page capture, not Python code;
# converted to this comment so the file parses.
import os

from dotenv import load_dotenv
from langchain_groq import ChatGroq

# Load variables (e.g. GROQ_API_KEY) from a local .env file, if present.
load_dotenv()
class LLMHandler:
    """Drives a Groq-hosted chat model through the invitation-builder flow.

    The three public methods mirror the app's stages:
      1. ``generate_questions``    -- ask clarifying questions about the invite.
      2. ``generate_final_prompt`` -- fold the answers into a reusable prompt.
      3. ``generate_response``     -- produce one personalized invite text.
    """

    def __init__(self, model_name="llama-3.3-70b-versatile"):
        """Initialise the Groq chat client.

        Args:
            model_name: Groq model identifier (defaults to Llama 3.3 70B).

        Raises:
            ValueError: if the ``GROQ_API_KEY`` environment variable is unset.
        """
        self.groq_api_key = os.getenv("GROQ_API_KEY")
        if not self.groq_api_key:
            raise ValueError("GROQ_API_KEY environment variable not set.")
        self.llm = ChatGroq(groq_api_key=self.groq_api_key, model_name=model_name)

    def generate_questions(self, context):
        """Generate questions based on the initial context provided by the user.

        Asks the LLM for 8-12 clarifying questions serialized as a JSON
        array.  If the reply cannot be located, parsed, or validated, a
        hard-coded default questionnaire is returned instead so the UI
        never breaks on a bad model response.

        Args:
            context: free-text description of the invitation to create.

        Returns:
            list[dict]: each item has a ``"question"`` key and, for
            multiple-choice questions, a ``"choices"`` list.
        """
        import json  # needed only here, for parsing the model reply

        prompt = f"""
        Based on this context about an invitation: "{context}"
        Generate questions to gather necessary information for creating a professional invitation prompt.
        Generate 8-12 focused questions. Include multiple choice options where appropriate.
        Questions should cover:
        1. Senders Company/Organization and role details
        2. Product/service specific details
        3. Key specifications or features
        4. Approximate length of the invite [Word count], take a text response from the user instead of multiple choice for this question.
        5. What information from the receivers details do you want to include and influence in the invite
        6. Tone and style preferences
        7. Additional information which you would like to provide [Type N/A if you wish not to]
        8. Call to action [multiple choice] for example [ contact phone number, visit our website, visit our social media etc]
        9. In context to Call to action question, ask a followup question [Textual response] for CTA
        to collect the website link/ phone number/ social media handles etc.
        Return the questions in this exact JSON format:
        [
            {{"question": "Question 1", "choices": ["Choice 1", "Choice 2"]}},
            {{"question": "Question 2"}},
            {{"question": "Question 3", "choices": ["Choice 1", "Choice 2", "Choice 3"]}}
        ]
        For questions without multiple choice options, omit the 'choices' key.
        Make choices relevant but not exhaustive, as users will have option for custom responses.
        """

        # Fallback questionnaire used whenever the LLM reply is missing,
        # malformed, or fails the validation below.
        default_questions = [
            {
                "question": "What is your role in the company?",
                "choices": ["CEO", "CTO", "Director", "Product Manager"],
            },
            {"question": "What is your company name?"},
            {"question": "What is the name of your product/service?"},
            # Fixed user-facing typo: "lenght" -> "length".
            {"question": "What is the suggested Invite length[word count] you prefer?"},
            {"question": "What is the key technical specification or feature?"},
            {"question": "Can you explain in brief about what the invite is about?"},
            {
                "question": "Select the preferred tone for the invitation:",
                "choices": ["Professional", "Innovation-focused", "Casual", "Business & Strategic", "Friendly"],
            },
        ]

        try:
            response = self.llm.invoke(prompt)
            response_text = response.content.strip()
            # The model may wrap the JSON in prose, so slice out the
            # outermost [...] span instead of parsing the whole reply.
            start_idx = response_text.find('[')
            end_idx = response_text.rfind(']') + 1
            if start_idx == -1 or end_idx == 0:
                raise ValueError("Could not find JSON array in response")
            questions = json.loads(response_text[start_idx:end_idx])
            # Validate the minimal schema before handing questions to the UI.
            for question in questions:
                if 'question' not in question:
                    raise ValueError("Question missing 'question' field")
                if 'choices' in question and not isinstance(question['choices'], list):
                    raise ValueError("'choices' must be a list")
            return questions
        except Exception:
            # Deliberately broad: any network, parse, or validation failure
            # must degrade to the default questionnaire, not crash the app.
            print("Using default questions as fallback")
            return default_questions

    def generate_final_prompt(self, context, questions, answers):
        """Draft a reusable invitation-generation prompt from the Q&A round.

        Args:
            context: the user's initial free-text context.
            questions: list of question dicts (as from ``generate_questions``).
            answers: mapping keyed by the question index *as a string*
                (e.g. ``"0"``, ``"1"``) to the user's answer text.

        Returns:
            str: the LLM-drafted prompt, stripped of surrounding whitespace.
        """
        formatted_answers = []
        for i, question in enumerate(questions):
            # Answers arrive keyed by stringified index, hence str(i).
            answer = answers.get(str(i), "")
            formatted_answers.append(f"Q: {question['question']}\nA: {answer}")
        answers_text = "\n".join(formatted_answers)

        prompt = (
            f"Your task is to generate a professional prompt for invitation generation by using the below context and answers: \n"
            f" The initial context provided by user to generate the questions are [Context] :{context} and"
            f" The questions and answers provide detail information on how the prompt has to be designed [Answers]: {answers_text}. \n"
            f" Please follow the below instructions while drafting the prompt: \n"
            f" 1. Use the Complete Information in the context and answers. \n"
            f" 2. You Should draft best suitable prompt that can be used for generating personalized invites based on the information provided by user. \n"
            f" 3. Generate only the prompt and DO NOT include any statements like this in the beginning: \n"
            f" [Here is a professional prompt for invitation generation based on the provided context and answers] \n"
            f" The goal is by using this prompt, the user can obtain personalized invites to wide range of receivers work domain."
        )
        response = self.llm.invoke(prompt)
        return response.content.strip()

    def generate_response(self, user_prompt, data):
        """Generate a concise response using the LLM based on user prompt and data.

        Args:
            user_prompt: the invitation-generation prompt (typically the
                output of ``generate_final_prompt``).
            data: recipient record; assumed dict-like with an optional
                ``"Name"`` key used for the salutation -- TODO confirm
                schema against the caller.

        Returns:
            str: the invite text, stripped of surrounding whitespace.
        """
        prompt = (
            f"You are a professional AI model tasked with writing personalized invite texts that are brochure-suitable "
            f"and tailored to the user's request and recipient details.\n\n"
            f"User Prompt: {user_prompt}\n"
            f"Recipient Details: {data}\n\n"
            f"**Instructions:**\n"
            f"1. Start the response with an appropriate salutation, for example: 'Hello {data.get('Name', '')}' if available.\n"
            f"2. Match the tone specified in the user prompt. If no tone is mentioned, use a formal tone.\n"
            f"3. Write the invite within 90-100 words unless a specific length is provided.\n"
            f"4. Strictly adhere to all instructions and details given in the user prompt.\n\n"
            f"**Additional Guidelines:**\n"
            f"1. Tailor the invite to align with the recipient's context and profession. For example:\n"
            f" - If the recipient's information is unrelated to the context, provide a general formal invite highlighting key features.\n"
            f" - If the recipient is closely related to the context (e.g., a GENAI engineer for an AI product), highlight specific benefits relevant to their needs.\n"
            # Added the missing trailing "\n" so guideline 3 does not run
            # onto the same line as guideline 2.
            f"2. You are free to choose complete or partial recipient-specific details (e.g., Job Title, Industry) mentioned in user prompt to make sure it fits naturally into the invite \n"
            f"3. Do not forcefully match the applications of the user product with the recipients information.\n"
            f"4. Avoid preambles, unnecessary symbols, or extraneous text.\n"
            f"5. Return the final invite text cleanly, in concise with no demeaning language.\n\n"
            f"Validate the invite to make sure it is following all the guidelines. "
        )
        response = self.llm.invoke(prompt)
        return response.content.strip()
# Prompt for instruction generator:
# NOTE(review): the assignments below (prompt1, prompt4, prompt2, prompt3)
# are never referenced by LLMHandler -- they read as earlier drafts of the
# prompts now embedded in generate_final_prompt / generate_response.
# Presumably kept for reference; confirm with the author before deleting.
# Draft of the "instruction generator" prompt (superseded by the inline
# prompt in generate_final_prompt).
prompt1 = (
    f"Your task is to generate a professional prompt for invitation generation by using the below context and answers: \n"
    # f" The initial context provided by user to generate the questions are [Context] :{context} and"
    # f" The questions and answers provide detail information on how the prompt has to be designed [Answers]: {answers_text}. \n"
    f" Please follow the below instructions while drafting the prompt: \n"
    f" 1. Use the Complete Information in the context and answers. \n"
    f" 2. You Should draft best suitable prompt that can be used for generating personalized invites based on the information provided by user. \n"
    f" 3. Generate only the prompt and DO NOT include any statements like this in the beginning: \n"
    f" [Here is a professional prompt for invitation generation based on the provided context and answers] \n"
    # f"In addition, make sure the prompt generated includes the below points: \n"
    # f" 1. If the receivers information is not related to context and answers, generate a professional generic invite.\n "
    # f" for example: If the context is about gpu device, the receiver is a farmer, then provide a generic response highlighting its features. \n"
    # f"but if the receiver is GENAI engineer, provide an invite highlighting on how it is suitable to their needs and ease their work. "
    # f" 2. Aptly fit the receivers information in the invite and make sure it is not forcefully added in the invite"
    f" The goal is by using this prompt, the user can obtain personalized invites to wide range of receivers work domain."
)
# Another draft of the same instruction-generator prompt.  The lines that
# look like commented-out code ("#f\"2. ...") are part of the string
# literal itself, not Python comments -- the model receives them verbatim.
prompt4 = f"""
Based on the initial context: "context" and the provided answers: answers_text,
Generate a professional prompt for invitation generation by USING COMPLETE INFORMATION in the context and answers,
which is most suitable to generate the best invites.
The goal is, you should draft best suitable prompt that can be sent to LLM for generating personalized invites
# based on the information available in context and answers. \n
f" STRICTLY provide NO preamble.\n"
#f"2. If the recipient's field does not match the product domain, generate a professional generic invite instead.\n"
#f"3. If the recipient is not working at any company[for ex: self employed] do consider this case while drafting the prompt
#and think on how to handle this case.
#The response should consist ONLY of the generated prompt as per these instructions.
"""
# prompt for invite generation
# Draft of the invite-generation prompt (superseded by the inline prompt
# in generate_response).
prompt2 = (
    f"You are a professional AI model tasked with writing personalized invite texts that are brochure-suitable "
    f"and tailored to the user's request.\n\n"
    # f"User Prompt: {user_prompt}\n\n"
    # f"Details of the Recipient: {data}\n\n"
    f"Please follow the below instructions while drafting the Invite of the recipient:\n"
    f"1. The response must start with appropriate salutations.\n"
    f"2. Match the tone of the invite specified in the user prompt. If not mentioned, use a formal tone.\n"
    f"3. Incorporate recipient-specific details (e.g., Job Title, Industry, Areas of Interest) as specified in the user prompt. If not mentioned, "
    f"use the provided recipient details.\n"
    f"4. Adjust the technical depth based on the recipient's expertise level.\n"
    f"5. If the recipient's details does not match the product domain, generate a professional generic invite instead.\n"
    f"6. If the user prompt does not specify the invite length, write the invite within 50-60 words.\n\n"
    f"Constraints:\n"
    f"- Strictly adhere to all details mentioned in the user prompt.\n"
    f"- Avoid preambles, extraneous symbols, or unnecessary text.\n"
    f"- Return only the final invite text in clean, concise language."
)
# Later draft of the invite-generation prompt; closest ancestor of the
# version now inlined in generate_response.
prompt3 = (
    f" You are a professional AI model tasked with writing personalized invite texts that are brochure-suitable "
    f" and tailored as per the user prompt and details of the recipient.\n\n"
    # f"User Prompt: {user_prompt}\n\n"
    # f"Details of the Recipient: {data}\n\n"
    f"Please follow the below instructions while drafting the Invite of the recipient:\n"
    f"1. The response must start with appropriate salutations.\n"
    f"2. Match the tone of the invite specified in the user prompt. If not mentioned, use a formal tone.\n"
    f"3. If the user prompt does not specify the invite length, write the invite within 80-90 words.\n"
    f"4. Make sure to **follow all the instructions** given in the user prompt. \n\n"
    f"In addition, the invite generated SHOULD include the below points: \n"
    f" 1. If the recipients information is not related to context of the user prompt, generate a professional formal invite with NO demeaning words.\n "
    f" for example: If the context is about gpu device, the receiver is a farmer, then provide a generic response highlighting its features. \n"
    f"but if the recipient is GENAI engineer, provide an invite highlighting on how it is suitable to their needs and ease their work. "
    f" 2. Aptly fit the recipient-specific details (e.g., Job Title, Industry, Areas of Interest) as specified in the user prompt in the invite "
    f"and make sure it is not forcefully added in the invite. \n"
    f" 3. Avoid preambles, extraneous symbols, or unnecessary text.\n"
    f" 4. Return only the final invite text in clean, concise language.\n\n"
    f"The goal is to generate personalized invites to wide range of receivers in terms of work domain, while matching it with the product/service "
    f"provided by the user, make sure the invites are fulfilling this goal. "
)