# Invite_AI/supplement/openrouter_llms.py
# Author: LiminalVR-AR — "Init Project v2" (commit 89c010a)
import json
import os

from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()
class LLMHandler:
    """Client for generating invitation content via the OpenRouter API.

    Wraps the OpenAI SDK pointed at OpenRouter's endpoint and exposes
    helpers to (1) generate clarifying questions from a user's context,
    (2) build a final invitation-generation prompt from the answers, and
    (3) produce a personalized invite for a specific recipient.
    """

    def __init__(self, model_name="meta-llama/llama-3.3-70b-instruct"):
        """Create the OpenRouter-backed client.

        Args:
            model_name: OpenRouter model identifier used for all API calls.

        Raises:
            ValueError: if the OPENROUTER_API_KEY environment variable is unset.
        """
        self.openrouter_api_key = os.getenv("OPENROUTER_API_KEY")
        if not self.openrouter_api_key:
            raise ValueError("OPENROUTER_API_KEY environment variable not set.")
        # OpenRouter speaks the OpenAI wire protocol; only the base URL and a
        # few attribution/routing headers differ from a stock OpenAI client.
        self.client = OpenAI(
            base_url="https://openrouter.ai/api/v1",
            api_key=self.openrouter_api_key,
            default_headers={
                "HTTP-Referer": "http://localhost:8501",  # Local development URL
                "X-Title": "Invite AI",  # Application name
                # Pin the provider order and disable fallbacks so responses
                # come from a known, fixed set of hosts.
                "x-routing-config": '{"provider": {"order": ["Together", "Avian.io", "DeepInfra", "Lambda"]}, "allow_fallbacks": false}'
            }
        )
        self.model_name = model_name

    def _make_api_call(self, messages):
        """Send *messages* to the configured model and return the raw response."""
        return self.client.chat.completions.create(
            model=self.model_name,
            messages=messages
        )

    @staticmethod
    def _parse_questions(response_text):
        """Extract and validate the JSON question list embedded in *response_text*.

        The model is asked to return a JSON array but may wrap it in prose,
        so this slices from the first '[' to the last ']' and parses that span.

        Args:
            response_text: raw text content of the model's reply.

        Returns:
            A list of question dicts, each with a 'question' key and an
            optional 'choices' list.

        Raises:
            ValueError: if no JSON array is found or a question is malformed.
            json.JSONDecodeError: if the extracted span is not valid JSON.
        """
        start_idx = response_text.find('[')
        end_idx = response_text.rfind(']') + 1
        if start_idx == -1 or end_idx == 0:
            raise ValueError("Could not find JSON array in response")
        questions = json.loads(response_text[start_idx:end_idx])
        for question in questions:
            if 'question' not in question:
                raise ValueError("Question missing 'question' field")
            if 'choices' in question and not isinstance(question['choices'], list):
                raise ValueError("'choices' must be a list")
        return questions

    def generate_questions(self, context):
        """Generate questions based on the initial context provided by the user.

        Asks the model for 8-12 focused questions in a strict JSON format.
        Any API or parsing failure falls back to a static default set, so
        this method never raises.

        Args:
            context: free-text description of the invitation the user wants.

        Returns:
            A list of question dicts ({'question': str, 'choices': [str, ...]?}).
        """
        prompt = f"""
        Based on this context about an invitation: "{context}"
        Generate questions to gather necessary information for creating a professional invitation prompt.
        Generate 8-12 focused questions. Include multiple choice options where appropriate.
        Questions should cover:
        1. Senders Company/Organization and role details
        2. Product/service specific details
        3. Key specifications or features
        4. Approximate length of the invite [Word count]
        5. What information from the receivers details do you want to include and influence in the invite
        6. Tone and style preferences
        7. Additional information which you would like to provide [Type N/A if you wish not to]
        8. Call to action [multiple choice] for example [ contact phone number, visit our website, visit our social media etc]
        9. In context to Call to action question, ask a followup question [Textual response] for CTA
        to collect the website link/ phone number/ social media handles etc.
        Return the questions in this exact JSON format:
        [
        {{"question": "Question 1", "choices": ["Choice 1", "Choice 2"]}},
        {{"question": "Question 2"}},
        {{"question": "Question 3", "choices": ["Choice 1", "Choice 2", "Choice 3"]}}
        ]
        For questions without multiple choice options, omit the 'choices' key.
        Make choices relevant but not exhaustive, as users will have option for custom responses.
        """
        # Default questions to use as fallback when the API call or parsing fails.
        default_questions = [
            {
                "question": "What is your role in the company?",
                "choices": ["CEO", "CTO", "Director", "Product Manager"]
            },
            {
                "question": "What is your company name?",
            },
            {
                "question": "What is the name of your product/service?",
            },
            {
                "question": "What is the suggested Invite length [word count] you prefer?",
            },
            {
                "question": "What is the key technical specification or feature?",
            },
            {
                "question": "Can you explain in brief about what the invite is about?",
            },
            {
                "question": "Select the preferred tone for the invitation:",
                "choices": ["Professional", "Innovation-focused", "Casual", "Business & Strategic", "Friendly"]
            }
        ]
        try:
            response = self._make_api_call([{"role": "user", "content": prompt}])
            response_text = response.choices[0].message.content.strip()
            return self._parse_questions(response_text)
        except Exception as e:
            # Best-effort fallback: keep the app usable, but surface the
            # failure cause instead of swallowing it silently.
            print(f"Question generation failed ({e}); using default questions as fallback")
            return default_questions

    def generate_final_prompt(self, context, questions, answers):
        """Generate the final prompt based on context and question answers.

        Args:
            context: the user's initial context string.
            questions: list of question dicts (each with a 'question' key).
            answers: answers positionally aligned with *questions*; assumed
                to contain at least as many entries as *questions*.

        Returns:
            The model-drafted invitation-generation prompt as a string.
        """
        formatted_answers = []
        for i, question in enumerate(questions):
            answer = answers[i]
            formatted_answers.append(f"Q: {question['question']}\nA: {answer}")
        answers_text = "\n".join(formatted_answers)
        prompt = (
            f"Your task is to generate a professional prompt for invitation generation by using the below context and answers: \n"
            f"The initial context provided by user to generate the questions are [Context] :{context} and"
            f"The questions and answers provide detail information on how the prompt has to be designed [Answers]: {answers_text}. \n"
            f"Please follow the below instructions while drafting the prompt: \n"
            f"1. Use the Complete Information in the context and answers. \n"
            f"2. You Should draft best suitable prompt that can be used for generating personalized invites based on the information provided by user. \n"
            f"3. Generate only the prompt and DO NOT include any statements like this in the beginning: \n"
            f"[Here is a professional prompt for invitation generation based on the provided context and answers] \n"
            f"The goal is by using this prompt, the user can obtain personalized invites to wide range of receivers work domain."
        )
        response = self._make_api_call([{"role": "user", "content": prompt}])
        return response.choices[0].message.content.strip()

    def generate_response(self, user_prompt, data):
        """Generate a concise response using the LLM based on user prompt and data.

        Args:
            user_prompt: the invitation-generation prompt (e.g. from
                generate_final_prompt).
            data: recipient details; a mapping that may contain a 'Name' key
                used for the salutation.

        Returns:
            The personalized invite text as a string.
        """
        prompt = (
            f"You are a professional AI model tasked with writing personalized invite texts that are brochure-suitable "
            f"and tailored to the user's request and recipient details.\n\n"
            f"User Prompt: {user_prompt}\n"
            f"Recipient Details: {data}\n\n"
            f"**Instructions:**\n"
            f"1. Start the response with an appropriate salutation, for example: 'Hello {data.get('Name', '')}' if available.\n"
            f"2. Match the tone specified in the user prompt. If no tone is mentioned, use a formal tone.\n"
            f"3. Write the invite within 90-100 words unless a specific length is provided.\n"
            f"4. Strictly adhere to all instructions and details given in the user prompt.\n\n"
            f"**Additional Guidelines:**\n"
            f"1. Tailor the invite to align with the recipient's context and profession. For example:\n"
            f"   - If the recipient's information is unrelated to the context, provide a general formal invite highlighting key features.\n"
            f"   - If the recipient is closely related to the context (e.g., a GENAI engineer for an AI product), highlight specific benefits relevant to their needs.\n"
            f"2. You are free to choose complete or partial recipient-specific details (e.g., Job Title, Industry) mentioned in user prompt that would fit naturally into the invite "
            f"3. Do not forcefully match the applications of the user product with the recipients information.\n"
            f"4. Avoid preambles, unnecessary symbols, or extraneous text.\n"
            f"5. Return the final invite text cleanly, in concise with no demeaning language.\n\n"
            f"Validate the invite to make sure it is following all the guidelines. "
        )
        response = self._make_api_call([{"role": "user", "content": prompt}])
        return response.choices[0].message.content.strip()