dhanvanth183 commited on
Commit
26ee443
·
verified ·
1 Parent(s): 72ffe0c

Upload 4 files

Browse files

This project focuses on leveraging advanced language models, GPT-4o and GPT-4o-mini, to generate concise, personalized invitation texts tailored for various professional events. The system takes a user-provided prompt and individual details such as Name, Job Title, Organisation, Area of Interest, and Category, and produces brochure-suitable invitation text with a user-defined word count.

Files changed (4) hide show
  1. .env +2 -0
  2. app.py +59 -0
  3. openai_llms.py +63 -0
  4. requirements.txt +12 -0
.env ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ GROQ_API_KEY=<your-groq-api-key>
2
+ OPENAI_API_KEY=<your-openai-api-key>
app.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
import pandas as pd
from openai_llms import LLMHandler
import tempfile
import os

# Load environment variables for OpenAI API Key
from dotenv import load_dotenv

# Reads .env so OPENAI_API_KEY is in the environment before LLMHandler is built.
load_dotenv()

# Initialize LLMHandler: module-level singleton shared by process_csv below.
# NOTE: LLMHandler.__init__ raises ValueError if OPENAI_API_KEY is unset,
# which would abort the Streamlit script at import time.
llm_handler = LLMHandler()
def process_csv(file, user_prompt):
    """Run the LLM over every row of an uploaded CSV and attach the results.

    :param file: File-like object (or path) readable by ``pandas.read_csv``.
    :param user_prompt: Prompt text forwarded to the LLM for each row.
    :return: The DataFrame with a new ``"Generated Text"`` column.
    """

    def _generate(record):
        # Per-row failures become inline "Error: ..." cells so one bad
        # row does not abort the whole batch.
        try:
            return llm_handler.generate_response(user_prompt, record.to_dict())
        except Exception as err:
            return f"Error: {err}"

    frame = pd.read_csv(file)
    frame["Generated Text"] = [_generate(record) for _, record in frame.iterrows()]
    return frame
30
+
31
+
# Streamlit UI
st.title("Personalized Invitation Generator")

# File uploader for CSV and the generation prompt.
uploaded_file = st.file_uploader("Upload CSV File", type=["csv"])
user_prompt = st.text_area("Enter the prompt for generating invitation texts:", "Write professional invitation text...")

if uploaded_file is not None and user_prompt:
    st.write("Processing file...")
    processed_df = process_csv(uploaded_file, user_prompt)

    # Display results
    st.dataframe(processed_df)

    # Serialize the CSV in memory instead of via a NamedTemporaryFile:
    # the original re-opened the temp file with open(..., "rb") without
    # ever closing that handle, then unlinked the file while the download
    # button could still be reading it (fails on Windows, leaks an fd).
    csv_bytes = processed_df.to_csv(index=False).encode("utf-8")

    st.download_button(
        label="Download Results CSV",
        data=csv_bytes,
        file_name="generated_invitations.csv",
        mime="text/csv",
    )
openai_llms.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from openai import OpenAI
2
+ from dotenv import load_dotenv
3
+ import os
4
+
5
+ load_dotenv()
6
+
7
+
class LLMHandler:
    """Wrapper around the OpenAI chat-completions API that turns one CSV row
    into a short, personalized invitation text."""

    # CSV columns the prompt template interpolates. Validated up front so a
    # malformed row fails with one clear error instead of a bare KeyError.
    REQUIRED_FIELDS = ("Name", "Job Title", "Organisation", "Area of Interest", "Category")

    def __init__(self, model_name="gpt-4o-mini"):
        """
        Initializes the LLMHandler with the specified OpenAI model.

        :param model_name: Chat model identifier (default "gpt-4o-mini").
        :raises ValueError: If OPENAI_API_KEY is not set in the environment.
        """
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        if not self.openai_api_key:
            raise ValueError("OPENAI_API_KEY environment variable not set.")

        # Initialize OpenAI client
        self.client = OpenAI(api_key=self.openai_api_key)
        self.model_name = model_name

    def generate_response(self, user_prompt, data):
        """
        Generate a concise response using the LLM based on user prompt and data.

        :param user_prompt: Prompt provided by the user.
        :param data: Dictionary containing the instance information; must hold
            every key in ``REQUIRED_FIELDS``.
        :return: Generated response text, stripped of surrounding whitespace.
        :raises ValueError: If ``data`` is missing any required field.
        """
        # Fail fast with the exact missing column names rather than a bare
        # KeyError from the f-string below.
        missing = [field for field in self.REQUIRED_FIELDS if field not in data]
        if missing:
            raise ValueError(f"Missing required fields: {', '.join(missing)}")

        # Refined prompt to handle encoding and formatting
        prompt = (
            f"You are a professional AI model tasked with writing personalized invite texts "
            f"that are concise (less than 40 words), brochure-suitable, and tailored as per the category in the given sample.\n\n"
            f"Consider the user prompt: {user_prompt}\n\n"
            f"Details of the individual:\n"
            f"- Name: {data['Name']}\n"
            f"- Job Title: {data['Job Title']}\n"
            f"- Organisation: {data['Organisation']}\n"
            f"- Area of Interest: {data['Area of Interest']}\n"
            f"- Category: {data['Category']}\n\n"
            f"The response **MUST**:\n"
            f"- Start with 'Hello {data['Name']}'.\n"
            f"- Be concise, professional, and STRICTLY DO NOT generate invalid characters or encoding errors (e.g. 'SoraVR’s').\n"
            f"- Use standard English punctuation, such as single quotes (e.g., 'can't', 'it's').\n"
            f"- STRICTLY Give only one response for the Category the sample belongs to.\n"
            f"- Do NOT include preambles or unnecessary text.\n\n"
            f"Return the final response cleanly, without any extraneous symbols or characters."
        )

        # Query the OpenAI client and return the cleaned response.
        completion = self.client.chat.completions.create(
            model=self.model_name,
            messages=[
                {"role": "system", "content": "You are a professional assistant."},
                {"role": "user", "content": prompt},
            ]
        )
        return completion.choices[0].message.content.strip()
requirements.txt ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ numpy~=1.26.4
2
+ pandas~=2.2.3
3
+ sentence-transformers~=3.2.0
4
+ python-dotenv~=1.0.1
5
+ langchain-openai
6
+ langchain_groq
7
+ langchain
8
+ # langchain_community removed: duplicate of the pinned langchain-community~=0.3.3 below (pip normalizes _ and - to the same package)
9
+ openai
10
+ langchain-community~=0.3.3
11
+ langchain-core~=0.3.12
12
+ streamlit