LiminalVR-AR committed on
Commit
89c010a
·
1 Parent(s): b78adc9

Init Project v2

Browse files
.idea/.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # Default ignored files
2
+ /shelf/
3
+ /workspace.xml
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ <component name="InspectionProjectProfileManager">
2
+ <settings>
3
+ <option name="USE_PROJECT_PROFILE" value="false" />
4
+ <version value="1.0" />
5
+ </settings>
6
+ </component>
.idea/misc.xml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="Black">
4
+ <option name="sdkName" value="Python 3.11 (pythonProject1)" />
5
+ </component>
6
+ <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.11 (pythonProject1)" project-jdk-type="Python SDK" />
7
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="ProjectModuleManager">
4
+ <modules>
5
+ <module fileurl="file://$PROJECT_DIR$/.idea/pythonProject1.iml" filepath="$PROJECT_DIR$/.idea/pythonProject1.iml" />
6
+ </modules>
7
+ </component>
8
+ </project>
.idea/pythonProject1.iml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <module type="PYTHON_MODULE" version="4">
3
+ <component name="NewModuleRootManager">
4
+ <content url="file://$MODULE_DIR$">
5
+ <excludeFolder url="file://$MODULE_DIR$/.venv" />
6
+ </content>
7
+ <orderEntry type="inheritedJdk" />
8
+ <orderEntry type="sourceFolder" forTests="false" />
9
+ </component>
10
+ </module>
app.py ADDED
@@ -0,0 +1,455 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import pandas as pd
3
+ from groq_llms import LLMHandler
4
+ #from openrouter_llms import LLMHandler
5
+ import tempfile
6
+ import os
7
+ from dotenv import load_dotenv
8
+
9
+ load_dotenv()
10
+
11
+ # Initialize LLMHandler
12
+ llm_handler = LLMHandler()
13
+
14
+
15
def process_csv(file, user_prompt):
    """Read CSV, generate responses using LLMHandler, and return processed DataFrame."""
    df = pd.read_csv(file)

    def _generate(record):
        # Best-effort per row: one failed generation must not abort the batch,
        # so the error text is stored in place of the invite.
        try:
            return llm_handler.generate_response(user_prompt, record)
        except Exception as exc:
            return f"Error: {exc}"

    df["Generated Text"] = [_generate(row.to_dict()) for _, row in df.iterrows()]
    return df
29
+
30
+
31
def initialize_session_state():
    """Initialize session state variables"""
    # One place to see every wizard variable and its starting value.
    defaults = {
        'prompt_creation_method': None,
        'current_step': 'choose_method',
        'context': "",
        'questions': [],
        'answers': {},
        'multiselect_answers': {},
        'custom_options': {},
        'final_prompt': "",
        'direct_prompt': "",
    }
    for state_key, initial_value in defaults.items():
        # Only seed keys that are missing so reruns keep user progress.
        if state_key not in st.session_state:
            st.session_state[state_key] = initial_value
51
+
52
+
53
def display_progress_tracker():
    """Display current progress and previous responses"""

    def _nav_button(label, key, step):
        # Jump back to an earlier wizard step when the user asks to edit it.
        if st.button(label, key=key):
            st.session_state.current_step = step
            st.rerun()

    with st.expander("📋 View Progress", expanded=True):
        state = st.session_state

        if state.prompt_creation_method:
            st.write(f"**Method chosen:** {state.prompt_creation_method.title()}")

        if state.context:
            st.write("**Initial Context:**")
            st.info(state.context)
            _nav_button("Edit Context", "edit_context", 'initial_context')

        if state.answers:
            st.write("**Your Responses:**")
            for idx, q in enumerate(state.questions):
                # Multi-select answers take precedence over the plain answer.
                if idx in state.multiselect_answers:
                    joined = ", ".join(state.multiselect_answers[idx])
                    st.success(f"Q: {q['question']}\nA: {joined}")
                elif idx in state.answers:
                    st.success(f"Q: {q['question']}\nA: {state.answers[idx]}")
            _nav_button("Edit Responses", "edit_responses", 'answer_questions')

        if state.direct_prompt:
            st.write("**Your Direct Prompt:**")
            st.info(state.direct_prompt)
            _nav_button("Edit Prompt", "edit_direct_prompt", 'direct_prompt')

        if state.final_prompt:
            st.write("**Final Generated Prompt:**")
            st.info(state.final_prompt)
            _nav_button("Edit Final Prompt", "edit_final_prompt", 'edit_prompt')
91
+
92
+
93
+ # Streamlit UI
94
+ st.set_page_config(page_title="Invite AI", page_icon="💬", layout="wide")
95
+
96
+ # Header
97
+ st.title("Invite AI")
98
+ st.markdown(
99
+ """
100
+ Welcome to the Invitation Generator! This tool helps you create personalized invitations using the power of AI.
101
+ """
102
+ )
103
+
104
+ # Initialize session state
105
+ initialize_session_state()
106
+
107
+ # Display progress tracker (always visible)
108
+ display_progress_tracker()
109
+
110
+ # Sidebar with instructions
111
+ st.sidebar.title("Instructions")
112
+ st.sidebar.markdown(
113
+ """
114
+ ### Template Download
115
+ [Click here to download the suggested CSV template](http://surl.li/ptvzzv) 📥
116
+ ### Suggested Requirements
117
+ - **Unique Identifier for each receiver**
118
+ - **Name of the receiver**
119
+ - **Designation/Job title of the receiver**
120
+ - **Company/Organisation where the receiver works**
121
+ - **Areas the receiver is interested in / has expertise in**
122
+ - **Categorize receivers into groups**
123
+
124
+ [Note: The above template is for your reference, you are free to submit your own data.]
125
+ """
126
+ )
127
+
128
+ # Main content area with steps
129
+ st.markdown("---") # Separator between progress tracker and current step
130
+
131
+ if st.session_state.current_step == 'choose_method':
132
+ st.subheader("Choose Your Prompt Creation Method")
133
+
134
+ col1, col2 = st.columns(2)
135
+
136
+ with col1:
137
+ st.markdown("""
138
+ ### Guided Prompt Builder
139
+ - Step-by-step assistance
140
+ - AI-generated questions
141
+ - Structured approach
142
+ """)
143
+ if st.button("Use Guided Builder"):
144
+ st.session_state.prompt_creation_method = 'guided'
145
+ st.session_state.current_step = 'initial_context'
146
+ st.rerun()
147
+
148
+ with col2:
149
+ st.markdown("""
150
+ ### Direct Prompt Entry
151
+ - Write your own prompt
152
+ - Complete control
153
+ - Quick setup
154
+ """)
155
+ if st.button("Use Direct Entry"):
156
+ st.session_state.prompt_creation_method = 'direct'
157
+ st.session_state.current_step = 'direct_prompt'
158
+ st.rerun()
159
+
160
+ elif st.session_state.current_step == 'direct_prompt':
161
+ st.subheader("Enter Your Prompt")
162
+ st.markdown(
163
+ "Write your complete prompt for generating invitations. Include all necessary details and requirements.")
164
+
165
+ direct_prompt = st.text_area(
166
+ "Your Prompt:",
167
+ value=st.session_state.direct_prompt,
168
+ placeholder="Example: Generate a professional invitation for a product launch...",
169
+ height=200
170
+ )
171
+
172
+ col1, col2 = st.columns([1, 5])
173
+ with col1:
174
+ if st.button("← Back"):
175
+ st.session_state.current_step = 'choose_method'
176
+ st.rerun()
177
+ with col2:
178
+ if st.button("Continue →"):
179
+ if direct_prompt:
180
+ st.session_state.direct_prompt = direct_prompt
181
+ st.session_state.final_prompt = direct_prompt
182
+ st.session_state.current_step = 'upload_process'
183
+ st.rerun()
184
+ else:
185
+ st.error("Please enter a prompt before continuing.")
186
+
187
+ elif st.session_state.prompt_creation_method == 'guided':
188
+ if st.session_state.current_step == 'initial_context':
189
+ st.subheader("Step 1: Provide Initial Context")
190
+ st.markdown("Briefly describe what your invitation is about (e.g., 'Launching a new GPU product')")
191
+
192
+ context = st.text_area(
193
+ "Context:",
194
+ value=st.session_state.context,
195
+ placeholder="Example: Launching a new GPU product for AI and HPC applications",
196
+ height=100
197
+ )
198
+
199
+ col1, col2 = st.columns([1, 5])
200
+ with col1:
201
+ if st.button("← Back"):
202
+ st.session_state.current_step = 'choose_method'
203
+ st.rerun()
204
+ with col2:
205
+ if st.button("Generate Questions →"):
206
+ if context:
207
+ st.session_state.context = context
208
+ st.session_state.questions = llm_handler.generate_questions(context)
209
+ st.session_state.current_step = 'answer_questions'
210
+ st.rerun()
211
+ else:
212
+ st.error("Please provide context before proceeding.")
213
+
214
+
215
+ # In the answer_questions section of your code, replace the multiselect implementation with this:
216
+
217
+ elif st.session_state.current_step == 'answer_questions':
218
+
219
+ st.subheader("Step 2: Answer Questions")
220
+
221
+ for i, question in enumerate(st.session_state.questions):
222
+
223
+ if 'choices' in question:
224
+
225
+ # Get previously selected options
226
+
227
+ previous_selections = st.session_state.multiselect_answers.get(i, [])
228
+
229
+ # Initialize base choices
230
+
231
+ base_choices = question['choices'].copy()
232
+
233
+ if "Custom" not in base_choices:
234
+ base_choices.append("Custom")
235
+
236
+ # Add any previous custom value to the choices if it exists
237
+
238
+ custom_values = [x for x in previous_selections if x not in question['choices'] and x != "Custom"]
239
+
240
+ all_choices = base_choices + custom_values
241
+
242
+ # Handle word count questions differently
243
+
244
+ if any(word in question['question'].lower() for word in ['word count', 'words', 'length']):
245
+
246
+ selected_options = st.multiselect(
247
+
248
+ question['question'],
249
+
250
+ options=all_choices,
251
+
252
+ default=previous_selections,
253
+
254
+ key=f"multiselect_{i}"
255
+
256
+ )
257
+
258
+ if "Custom" in selected_options:
259
+
260
+ # Pre-fill with previous custom value if exists
261
+
262
+ default_custom = next((x for x in previous_selections if x not in base_choices), "")
263
+
264
+ custom_value = st.text_input(
265
+
266
+ "Enter custom word count:",
267
+
268
+ value=default_custom,
269
+
270
+ key=f"custom_{i}"
271
+
272
+ )
273
+
274
+ if custom_value:
275
+
276
+ try:
277
+
278
+ word_count = int(custom_value)
279
+
280
+ if word_count > 0:
281
+
282
+ selected_options = [opt for opt in selected_options if opt != "Custom"]
283
+
284
+ if str(word_count) not in selected_options:
285
+ selected_options.append(str(word_count))
286
+
287
+ else:
288
+
289
+ st.error("Please enter a positive number")
290
+
291
+ except ValueError:
292
+
293
+ st.error("Please enter a valid number")
294
+
295
+
296
+ else:
297
+
298
+ # Regular non-numeric multiselect handling
299
+
300
+ selected_options = st.multiselect(
301
+
302
+ question['question'],
303
+
304
+ options=all_choices,
305
+
306
+ default=previous_selections,
307
+
308
+ key=f"multiselect_{i}"
309
+
310
+ )
311
+
312
+ if "Custom" in selected_options:
313
+
314
+ # Pre-fill with previous custom value if exists
315
+
316
+ default_custom = next((x for x in previous_selections if x not in base_choices), "")
317
+
318
+ custom_value = st.text_input(
319
+
320
+ "Enter your custom response:",
321
+
322
+ value=default_custom,
323
+
324
+ key=f"custom_{i}"
325
+
326
+ )
327
+
328
+ if custom_value:
329
+
330
+ selected_options = [opt for opt in selected_options if opt != "Custom"]
331
+
332
+ if custom_value not in selected_options:
333
+ selected_options.append(custom_value)
334
+
335
+ # Update session state
336
+
337
+ st.session_state.multiselect_answers[i] = selected_options
338
+
339
+ st.session_state.answers[i] = ", ".join(selected_options) if selected_options else ""
340
+
341
+
342
+ else:
343
+
344
+ # Handle non-choice questions
345
+
346
+ st.session_state.answers[i] = st.text_input(
347
+
348
+ question['question'],
349
+
350
+ value=st.session_state.answers.get(i, ""),
351
+
352
+ key=f"question_{i}"
353
+
354
+ )
355
+
356
+ col1, col2 = st.columns([1, 5])
357
+
358
+ with col1:
359
+
360
+ if st.button("← Back"):
361
+ st.session_state.current_step = 'initial_context'
362
+
363
+ st.rerun()
364
+
365
+ with col2:
366
+
367
+ if st.button("Generate Prompt →"):
368
+
369
+ if all(st.session_state.answers.values()):
370
+
371
+ st.session_state.final_prompt = llm_handler.generate_final_prompt(
372
+
373
+ st.session_state.context,
374
+
375
+ st.session_state.questions,
376
+
377
+ st.session_state.answers
378
+
379
+ )
380
+
381
+ st.session_state.current_step = 'edit_prompt'
382
+
383
+ st.rerun()
384
+
385
+ else:
386
+
387
+ st.error("Please answer all questions before proceeding.")
388
+ elif st.session_state.current_step == 'edit_prompt':
389
+ st.subheader("Step 3: Review and Edit Final Prompt")
390
+ edited_prompt = st.text_area(
391
+ "Edit your prompt if needed:",
392
+ value=st.session_state.final_prompt,
393
+ height=200
394
+ )
395
+
396
+ col1, col2 = st.columns([1, 5])
397
+ with col1:
398
+ if st.button("← Back"):
399
+ st.session_state.current_step = 'answer_questions'
400
+ st.rerun()
401
+ with col2:
402
+ if st.button("Continue to Upload →"):
403
+ st.session_state.final_prompt = edited_prompt
404
+ st.session_state.current_step = 'upload_process'
405
+ st.rerun()
406
+
407
# Common upload and processing section for both paths
if st.session_state.current_step == 'upload_process':
    st.subheader("Upload and Process")
    uploaded_file = st.file_uploader("📂 Upload CSV File", type=["csv"])

    col1, col2 = st.columns([1, 5])
    with col1:
        if st.button("← Back"):
            # Return to whichever editor matches the chosen workflow.
            if st.session_state.prompt_creation_method == 'guided':
                st.session_state.current_step = 'edit_prompt'
            else:
                st.session_state.current_step = 'direct_prompt'
            st.rerun()

    if uploaded_file is not None and st.session_state.final_prompt:
        st.write("⏳ Processing your file... Please wait.")
        processed_df = process_csv(uploaded_file, st.session_state.final_prompt)

        st.write("### Generated Invitations")
        st.dataframe(processed_df, use_container_width=True)

        # Serialize in memory instead of via a temp file: the previous
        # open(temp_file.name, "rb") leaked the file handle, and unlinking a
        # file that is still open fails on Windows. download_button accepts
        # raw bytes directly.
        csv_bytes = processed_df.to_csv(index=False).encode("utf-8")
        st.download_button(
            label="📥 Download Results CSV",
            data=csv_bytes,
            file_name="generated_invitations.csv",
            mime="text/csv",
        )
439
+
440
# Reset button (moved to sidebar)
st.sidebar.markdown("---")
if st.sidebar.button("🔄 Start Over"):
    # Restore every wizard variable to its initial value, then rerun.
    reset_values = {
        'prompt_creation_method': None,
        'current_step': 'choose_method',
        'context': "",
        'questions': [],
        'answers': {},
        'multiselect_answers': {},
        'custom_options': {},
        'final_prompt': "",
        'direct_prompt': "",
    }
    for state_key, fresh_value in reset_values.items():
        st.session_state[state_key] = fresh_value
    st.rerun()

st.markdown("---")
st.markdown("💡 **Tip:** Ensure your data aligns with the provided template for accurate results.")
flask_api.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import io
3
+ import json
4
+ import tempfile
5
+ import pandas as pd
6
+ from flask import Flask, request, jsonify, send_file
7
+ from flask_cors import CORS
8
+ from dotenv import load_dotenv
9
+ from groq_llms import LLMHandler
10
+
11
+ # Load environment variables
12
+ load_dotenv()
13
+
14
+ app = Flask(__name__)
15
+ CORS(app) # Enable CORS for all routes
16
+
17
+ # Initialize LLM Handler
18
+ llm_handler = LLMHandler()
19
+
20
+
21
def process_csv(file, user_prompt):
    """
    Process CSV file and generate responses using LLMHandler

    Args:
        file (werkzeug.datastructures.FileStorage): Uploaded CSV file
        user_prompt (str): Prompt for invitation generation

    Returns:
        pandas.DataFrame: DataFrame with generated invitations

    Raises:
        ValueError: if the CSV cannot be read or annotated; the original
            exception is chained for debuggability.
    """
    try:
        # Read CSV directly from file storage
        df = pd.read_csv(file)
        responses = []

        for _, row in df.iterrows():
            # Best-effort per row: a single failed generation must not
            # abort the whole batch, so the error text is stored instead.
            try:
                responses.append(llm_handler.generate_response(user_prompt, row.to_dict()))
            except Exception as e:
                responses.append(f"Error: {e}")

        df["Generated Text"] = responses
        return df
    except Exception as e:
        # Chain the cause so the traceback shows the underlying failure.
        raise ValueError(f"Error processing CSV: {str(e)}") from e
48
+
49
@app.route('/generate-questions', methods=['POST'])
def generate_questions():
    """
    Generate questions based on initial context

    Request Payload:
        {
            "context": "Initial context for invitation"
        }

    Returns:
        JSON array of questions, or {"error": ...} with HTTP 500 on failure
    """
    payload = request.json
    context = payload.get('context', '')

    try:
        return jsonify(llm_handler.generate_questions(context))
    except Exception as exc:
        return jsonify({"error": str(exc)}), 500
70
+
71
+
72
@app.route('/generate-final-prompt', methods=['POST'])
def generate_final_prompt():
    """
    Generate final prompt based on context, questions, and answers

    Request Payload:
        {
            "context": "Initial context",
            "questions": [...],
            "answers": {...}
        }

    Returns:
        {"prompt": ...} on success, or {"error": ...} with HTTP 500
    """
    payload = request.json
    context = payload.get('context', '')
    questions = payload.get('questions', [])
    answers = payload.get('answers', {})

    try:
        result = llm_handler.generate_final_prompt(context, questions, answers)
    except Exception as exc:
        return jsonify({"error": str(exc)}), 500
    return jsonify({"prompt": result})
97
+
98
+
99
@app.route('/process-invitations', methods=['POST'])
def process_invitations():
    """
    Process CSV file and generate invitations

    Request Parameters:
        - file: CSV file
        - prompt: Invitation generation prompt

    Returns:
        Processed CSV file with generated invitations (attachment), or a
        JSON error payload with HTTP 400/500.
    """
    if 'file' not in request.files:
        return jsonify({"error": "No file uploaded"}), 400

    upload = request.files['file']
    user_prompt = request.form.get('prompt', '')

    if upload.filename == '':
        return jsonify({"error": "No selected file"}), 400

    try:
        # Annotate every row, then serialize entirely in memory.
        result_df = process_csv(upload, user_prompt)

        buffer = io.BytesIO()
        result_df.to_csv(buffer, index=False)
        buffer.seek(0)

        return send_file(
            buffer,
            mimetype='text/csv',
            as_attachment=True,
            download_name='generated_invitations.csv'
        )
    except Exception as exc:
        return jsonify({"error": str(exc)}), 500
138
+
139
+
140
if __name__ == '__main__':
    # Configurable port, defaults to 5000.
    port = int(os.environ.get('PORT', 5000))
    # Debug mode must be opt-in: hard-coding debug=True while binding
    # 0.0.0.0 exposes the Werkzeug debugger (remote code execution) to the
    # network. Set FLASK_DEBUG=1 locally to get the old behavior.
    debug = os.environ.get('FLASK_DEBUG', '').lower() in ('1', 'true', 'yes')
    app.run(host='0.0.0.0', port=port, debug=debug)
groq_llms.py ADDED
@@ -0,0 +1,243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from langchain_groq import ChatGroq
3
+ from dotenv import load_dotenv
4
+
5
+ load_dotenv()
6
+
7
+
8
class LLMHandler:
    """Wrapper around a Groq-hosted chat model for the invitation workflow.

    Exposes three operations:
      * generate_questions    -- ask the LLM for clarifying questions about the event
      * generate_final_prompt -- fold the user's answers into one reusable prompt
      * generate_response     -- write a single personalized invite for one recipient
    """

    def __init__(self, model_name="llama-3.3-70b-versatile"):
        """Create the Groq chat client.

        Raises:
            ValueError: if GROQ_API_KEY is missing from the environment --
                failing fast here beats an opaque auth error on first use.
        """
        self.groq_api_key = os.getenv("GROQ_API_KEY")
        if not self.groq_api_key:
            raise ValueError("GROQ_API_KEY environment variable not set.")
        self.llm = ChatGroq(groq_api_key=self.groq_api_key, model_name=model_name)

    def generate_questions(self, context):
        """Generate questions based on the initial context provided by the user.

        Returns a list of dicts shaped like ``{"question": str}`` with an
        optional ``"choices": [str, ...]`` key. If the LLM reply cannot be
        parsed and validated as that JSON shape, a hard-coded default
        question set is returned so the UI never dead-ends.
        """
        prompt = f"""
        Based on this context about an invitation: "{context}"

        Generate questions to gather necessary information for creating a professional invitation prompt.

        Generate 8-12 focused questions. Include multiple choice options where appropriate.
        Questions should cover:
        1. Senders Company/Organization and role details
        2. Product/service specific details
        3. Key specifications or features
        4. Approximate length of the invite [Word count], take a text response from the user instead of multiple choice for this question.
        5. What information from the receivers details do you want to include and influence in the invite
        6. Tone and style preferences
        7. Additional information which you would like to provide [Type N/A if you wish not to]
        8. Call to action [multiple choice] for example [ contact phone number, visit our website, visit our social media etc]
        9. In context to Call to action question, ask a followup question [Textual response] for CTA
        to collect the website link/ phone number/ social media handles etc.

        Return the questions in this exact JSON format:
        [
            {{"question": "Question 1", "choices": ["Choice 1", "Choice 2"]}},
            {{"question": "Question 2"}},
            {{"question": "Question 3", "choices": ["Choice 1", "Choice 2", "Choice 3"]}}
        ]

        For questions without multiple choice options, omit the 'choices' key.
        Make choices relevant but not exhaustive, as users will have option for custom responses.
        """

        # Default questions to use as fallback when the LLM output is unusable.
        default_questions = [
            {
                "question": "What is your role in the company?",
                "choices": ["CEO", "CTO", "Director", "Product Manager"]
            },
            {
                "question": "What is your company name?",
            },
            {
                "question": "What is the name of your product/service?",
            },
            {
                # Typo fix: "lenght" -> "length" (user-facing text).
                "question": "What is the suggested Invite length[word count] you prefer?",
            },
            {
                "question": "What is the key technical specification or feature?",
            },
            {
                "question": "Can you explain in brief about what the invite is about?",
            },
            {
                "question": "Select the preferred tone for the invitation:",
                "choices": ["Professional", "Innovation-focused", "Casual", "Business & Strategic", "Friendly"]
            }
        ]

        try:
            # Get response from LLM
            response = self.llm.invoke(prompt)
            response_text = response.content.strip()

            # The model may wrap the JSON in prose; keep only the outermost [...].
            start_idx = response_text.find('[')
            end_idx = response_text.rfind(']') + 1
            if start_idx == -1 or end_idx == 0:
                raise ValueError("Could not find JSON array in response")
            json_str = response_text[start_idx:end_idx]

            import json
            questions = json.loads(json_str)

            # Validate the question format before handing it to the UI.
            for question in questions:
                if 'question' not in question:
                    raise ValueError("Question missing 'question' field")
                if 'choices' in question and not isinstance(question['choices'], list):
                    raise ValueError("'choices' must be a list")

            return questions

        except Exception:
            # Any parse/validation failure degrades gracefully to the defaults.
            print("Using default questions as fallback")
            return default_questions

    def generate_final_prompt(self, context, questions, answers):
        """Combine context and Q/A pairs into a single reusable generation prompt.

        ``answers`` may be keyed by int (the Streamlit UI stores answers[i]
        with int keys) or by str (JSON-decoded payloads from the Flask API
        use "i"), so both key shapes are accepted. The previous str-only
        lookup silently dropped every answer coming from the Streamlit path.
        """
        formatted_answers = []
        for i, question in enumerate(questions):
            # Accept both int and str keys -- see docstring.
            answer = answers.get(i, answers.get(str(i), ""))
            formatted_answers.append(f"Q: {question['question']}\nA: {answer}")

        answers_text = "\n".join(formatted_answers)
        prompt = (
            f"Your task is to generate a professional prompt for invitation generation by using the below context and answers: \n"
            f" The initial context provided by user to generate the questions are [Context] :{context} and"
            f" The questions and answers provide detail information on how the prompt has to be designed [Answers]: {answers_text}. \n"
            f" Please follow the below instructions while drafting the prompt: \n"
            f" 1. Use the Complete Information in the context and answers. \n"
            f" 2. You Should draft best suitable prompt that can be used for generating personalized invites based on the information provided by user. \n"
            f" 3. Generate only the prompt and DO NOT include any statements like this in the beginning: \n"
            f" [Here is a professional prompt for invitation generation based on the provided context and answers] \n"
            f" The goal is by using this prompt, the user can obtain personalized invites to wide range of receivers work domain."
        )
        response = self.llm.invoke(prompt)
        return response.content.strip()

    def generate_response(self, user_prompt, data):
        """Generate a concise response using the LLM based on user prompt and data."""
        prompt = (
            f"You are a professional AI model tasked with writing personalized invite texts that are brochure-suitable "
            f"and tailored to the user's request and recipient details.\n\n"
            f"User Prompt: {user_prompt}\n"
            f"Recipient Details: {data}\n\n"
            f"**Instructions:**\n"
            f"1. Start the response with an appropriate salutation, for example: 'Hello {data.get('Name', '')}' if available.\n"
            f"2. Match the tone specified in the user prompt. If no tone is mentioned, use a formal tone.\n"
            f"3. Write the invite within 90-100 words unless a specific length is provided.\n"
            f"4. Strictly adhere to all instructions and details given in the user prompt.\n\n"
            f"**Additional Guidelines:**\n"
            f"1. Tailor the invite to align with the recipient's context and profession. For example:\n"
            f"   - If the recipient's information is unrelated to the context, provide a general formal invite highlighting key features.\n"
            f"   - If the recipient is closely related to the context (e.g., a GENAI engineer for an AI product), highlight specific benefits relevant to their needs.\n"
            f"2. You are free to choose complete or partial recipient-specific details (e.g., Job Title, Industry) mentioned in user prompt to make sure it fits naturally into the invite "
            f"3. Do not forcefully match the applications of the user product with the recipients information.\n"
            f"4. Avoid preambles, unnecessary symbols, or extraneous text.\n"
            f"5. Return the final invite text cleanly, in concise with no demeaning language.\n\n"
            f"Validate the invite to make sure it is following all the guidelines. "
        )

        response = self.llm.invoke(prompt)
        return response.content.strip()
167
+
168
+
169
+ # Prompt for instruction generator:
170
+ prompt1 = (
171
+ f"Your task is to generate a professional prompt for invitation generation by using the below context and answers: \n"
172
+ # f" The initial context provided by user to generate the questions are [Context] :{context} and"
173
+ # f" The questions and answers provide detail information on how the prompt has to be designed [Answers]: {answers_text}. \n"
174
+ f" Please follow the below instructions while drafting the prompt: \n"
175
+ f" 1. Use the Complete Information in the context and answers. \n"
176
+ f" 2. You Should draft best suitable prompt that can be used for generating personalized invites based on the information provided by user. \n"
177
+ f" 3. Generate only the prompt and DO NOT include any statements like this in the beginning: \n"
178
+ f" [Here is a professional prompt for invitation generation based on the provided context and answers] \n"
179
+ # f"In addition, make sure the prompt generated includes the below points: \n"
180
+ # f" 1. If the receivers information is not related to context and answers, generate a professional generic invite.\n "
181
+ # f" for example: If the context is about gpu device, the receiver is a farmer, then provide a generic response highlighting its features. \n"
182
+ # f"but if the receiver is GENAI engineer, provide an invite highlighting on how it is suitable to their needs and ease their work. "
183
+ # f" 2. Aptly fit the receivers information in the invite and make sure it is not forcefully added in the invite"
184
+ f" The goal is by using this prompt, the user can obtain personalized invites to wide range of receivers work domain."
185
+ )
186
+ prompt4 = f"""
187
+ Based on the initial context: "context" and the provided answers: answers_text,
188
+ Generate a professional prompt for invitation generation by USING COMPLETE INFORMATION in the context and answers,
189
+ which is most suitable to generate the best invites.
190
+ The goal is, you should draft best suitable prompt that can be sent to LLM for generating personalized invites
191
+ # based on the information available in context and answers. \n
192
+
193
+ f" STRICTLY provide NO preamble.\n"
194
+ #f"2. If the recipient's field does not match the product domain, generate a professional generic invite instead.\n"
195
+ #f"3. If the recipient is not working at any company[for ex: self employed] do consider this case while drafting the prompt
196
+ #and think on how to handle this case.
197
+
198
+ #The response should consist ONLY of the generated prompt as per these instructions.
199
+ """
200
+
201
+ # prompt for invite generation
202
+
203
+ prompt2 = (
204
+ f"You are a professional AI model tasked with writing personalized invite texts that are brochure-suitable "
205
+ f"and tailored to the user's request.\n\n"
206
+ # f"User Prompt: {user_prompt}\n\n"
207
+ # f"Details of the Recipient: {data}\n\n"
208
+ f"Please follow the below instructions while drafting the Invite of the recipient:\n"
209
+ f"1. The response must start with appropriate salutations.\n"
210
+ f"2. Match the tone of the invite specified in the user prompt. If not mentioned, use a formal tone.\n"
211
+ f"3. Incorporate recipient-specific details (e.g., Job Title, Industry, Areas of Interest) as specified in the user prompt. If not mentioned, "
212
+ f"use the provided recipient details.\n"
213
+ f"4. Adjust the technical depth based on the recipient's expertise level.\n"
214
+ f"5. If the recipient's details does not match the product domain, generate a professional generic invite instead.\n"
215
+ f"6. If the user prompt does not specify the invite length, write the invite within 50-60 words.\n\n"
216
+ f"Constraints:\n"
217
+ f"- Strictly adhere to all details mentioned in the user prompt.\n"
218
+ f"- Avoid preambles, extraneous symbols, or unnecessary text.\n"
219
+ f"- Return only the final invite text in clean, concise language."
220
+ )
221
+ prompt3 = (
222
+ f" You are a professional AI model tasked with writing personalized invite texts that are brochure-suitable "
223
+ f" and tailored as per the user prompt and details of the recipient.\n\n"
224
+ # f"User Prompt: {user_prompt}\n\n"
225
+ # f"Details of the Recipient: {data}\n\n"
226
+ f"Please follow the below instructions while drafting the Invite of the recipient:\n"
227
+ f"1. The response must start with appropriate salutations.\n"
228
+ f"2. Match the tone of the invite specified in the user prompt. If not mentioned, use a formal tone.\n"
229
+ f"3. If the user prompt does not specify the invite length, write the invite within 80-90 words.\n"
230
+ f"4. Make sure to **follow all the instructions** given in the user prompt. \n\n"
231
+ f"In addition, the invite generated SHOULD include the below points: \n"
232
+ f" 1. If the recipients information is not related to context of the user prompt, generate a professional formal invite with NO demeaning words.\n "
233
+ f" for example: If the context is about gpu device, the receiver is a farmer, then provide a generic response highlighting its features. \n"
234
+ f"but if the recipient is GENAI engineer, provide an invite highlighting on how it is suitable to their needs and ease their work. "
235
+ f" 2. Aptly fit the recipient-specific details (e.g., Job Title, Industry, Areas of Interest) as specified in the user prompt in the invite "
236
+ f"and make sure it is not forcefully added in the invite. \n"
237
+ f" 3. Avoid preambles, extraneous symbols, or unnecessary text.\n"
238
+ f" 4. Return only the final invite text in clean, concise language.\n\n"
239
+
240
+ f"The goal is to generate personalized invites to wide range of receivers in terms of work domain, while matching it with the product/service "
241
+ f"provided by the user, make sure the invites are fulfilling this goal. "
242
+
243
+ )
requirements.txt ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ numpy~=1.26.4
2
+ pandas~=2.2.3
3
+ sentence-transformers~=3.2.0
4
+ python-dotenv~=1.0.1
5
+ langchain-openai
6
+ langchain_groq
7
+ langchain
8
9
+ openai
10
+ langchain-community~=0.3.3
11
+ langchain-core~=0.3.12
12
+ streamlit
13
+ openrouter
14
+ flask
15
+ flask-cors
supplement/api.js ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // api.js
2
+ const API_BASE_URL = 'http://localhost:5000/api';
3
+
4
// POST the invitation context to the backend and return the generated
// follow-up questions. Rejects on non-2xx responses — fetch() itself only
// rejects on network failure, so without this check callers would silently
// parse error bodies as data.
export const generateQuestions = async (context) => {
  const response = await fetch(`${API_BASE_URL}/generate-questions`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({ context }),
  });
  if (!response.ok) {
    throw new Error(`generate-questions failed: ${response.status} ${response.statusText}`);
  }
  return response.json();
};
14
+
15
// POST the context plus the question/answer pairs and return the synthesized
// final prompt. Rejects on non-2xx responses (fetch() does not).
export const generateFinalPrompt = async (context, questions, answers) => {
  const response = await fetch(`${API_BASE_URL}/generate-final-prompt`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({ context, questions, answers }),
  });
  if (!response.ok) {
    throw new Error(`generate-final-prompt failed: ${response.status} ${response.statusText}`);
  }
  return response.json();
};
25
+
26
// Upload the recipients file and the generation prompt as multipart form
// data. Content-Type is deliberately omitted so the browser sets the
// multipart boundary. Rejects on non-2xx responses (fetch() does not).
export const processInvitations = async (file, prompt) => {
  const formData = new FormData();
  formData.append('file', file);
  formData.append('prompt', prompt);

  const response = await fetch(`${API_BASE_URL}/process-invitations`, {
    method: 'POST',
    body: formData,
  });
  if (!response.ok) {
    throw new Error(`process-invitations failed: ${response.status} ${response.statusText}`);
  }
  return response.json();
};
37
+
38
// Persist session state on the backend. Rejects on non-2xx responses
// (fetch() does not).
export const updateSession = async (sessionData) => {
  const response = await fetch(`${API_BASE_URL}/session`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
    },
    body: JSON.stringify(sessionData),
  });
  if (!response.ok) {
    throw new Error(`session update failed: ${response.status} ${response.statusText}`);
  }
  return response.json();
};
supplement/llm_merger.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from dotenv import load_dotenv
3
+ from langchain_groq import ChatGroq
4
+ from openai import OpenAI
5
+
6
+ load_dotenv()
7
+
8
+
9
class PrimaryLLMHandler:
    """Generates the first-draft personalized invite via the OpenAI chat API."""

    def __init__(self, model_name="gpt-4o-mini"):
        """
        Initializes the Primary LLM Handler (gpt-4o-mini by default).

        :param model_name: OpenAI chat model identifier.
        :raises ValueError: if OPENAI_API_KEY is not set in the environment.
        """
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        if not self.openai_api_key:
            raise ValueError("OPENAI_API_KEY environment variable not set.")

        self.client = OpenAI(api_key=self.openai_api_key)
        self.model_name = model_name

    def generate_response(self, user_prompt, data):
        """
        Generates a draft invite using the primary LLM.

        :param user_prompt: Instructions describing the invite to write.
        :param data: Mapping with keys 'Name', 'Job Title', 'Organisation',
            'Area of Interest' and 'Category' (a missing key raises KeyError).
        :return: Generated invite text, stripped of surrounding whitespace.
        """
        prompt = (
            f"You are a professional AI model tasked with writing personalized invite texts "
            f"that are concise (less than 40 words), brochure-suitable, and tailored as per the category in the given sample."
            f"\n\n"
            f"User prompt: {user_prompt}\n\n"
            f"Details of the individual:\n"
            f"- Name: {data['Name']}\n"
            f"- Job Title: {data['Job Title']}\n"
            f"- Organisation: {data['Organisation']}\n"
            f"- Area of Interest: {data['Area of Interest']}\n"
            f"- Category: {data['Category']}\n\n"
            f"The response should start with 'Hello {data['Name']}'."
            f"Ensure the tone aligns with the instructions. STRICTLY give only one response."
        )

        # Synchronous call; network/API errors propagate to the caller.
        completion = self.client.chat.completions.create(
            model=self.model_name,
            messages=[
                {"role": "system", "content": "You are a professional assistant AI."},
                {"role": "user", "content": prompt},
            ],
        )

        return completion.choices[0].message.content.strip()
49
+
50
+
51
class ValidatorLLMHandler:
    """Validates (and, if needed, corrects) invites produced by the primary LLM."""

    def __init__(self, model_name="gemma2-9b-it"):
        """
        Initializes the Validator LLM Handler (gemma2-9b-it on Groq by default).

        :param model_name: Groq model identifier.
        :raises ValueError: if GROQ_API_KEY is not set in the environment.
        """
        self.groq_api_key = os.getenv("GROQ_API_KEY")
        if not self.groq_api_key:
            raise ValueError("GROQ_API_KEY environment variable not set.")

        self.llm = ChatGroq(groq_api_key=self.groq_api_key, model_name=model_name)

    def validate_and_correct_response(self, user_prompt, original_response, data):
        """
        Validates and corrects the response using the secondary LLM.

        :param user_prompt: The original user instructions.
        :param original_response: Invite text produced by the primary LLM.
        :param data: Mapping with keys 'Name', 'Job Title', 'Organisation',
            'Area of Interest' and 'Category' (a missing key raises KeyError).
        :return: Either the literal text 'Valid Response' or a corrected invite,
            stripped of surrounding whitespace. Callers must handle both forms.
        """
        validation_prompt = (
            f"You are a professional AI model tasked with validating and correcting AI-generated texts. "
            f"The original response must align strictly with the provided user prompt and input details. "
            f"If the response fails to meet the requirements, generate a corrected version."
            f"\n\n"
            f"User prompt: {user_prompt}\n\n"
            f"Details of the individual:\n"
            f"- Name: {data['Name']}\n"
            f"- Job Title: {data['Job Title']}\n"
            f"- Organisation: {data['Organisation']}\n"
            f"- Area of Interest: {data['Area of Interest']}\n"
            f"- Category: {data['Category']}\n\n"
            f"Original response: {original_response}\n\n"
            f"Instructions:\n"
            f"- If the original response aligns with the user prompt and input details, reply with 'Valid Response'.\n"
            f"- Otherwise, provide a corrected version starting with 'Hello {data['Name']}'.\n"
            f"- Keep it concise (less than 40 words) and brochure-suitable.\n"
        )

        # Synchronous LangChain call; API errors propagate to the caller.
        response = self.llm.invoke(validation_prompt)
        return response.content.strip()
supplement/main.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import os
3
+ #from query_handler import LLMHandler
4
+ from openai_llms import LLMHandler
5
+
6
def main():
    """
    Read recipients from the input CSV, generate an invite for each row via
    the LLM handler, and write all rows (plus a "Generated Text" column) to
    the output CSV.
    """
    # Hard-coded development paths. Raw strings avoid the invalid escape
    # sequences ("\P", "\T", ...) that plain literals produce on Windows paths;
    # the runtime values are unchanged.
    input_csv = r"D:\Projects\Liminal\InviteAI\Test_sample.csv"
    output_csv = r"D:\Projects\Liminal\InviteAI\Response_sample.csv"

    # Check once that the input file exists (the original checked twice).
    if not os.path.exists(input_csv):
        print(f"Error: File '{input_csv}' not found.")
        return

    user_prompt = input("Enter your user prompt: ").strip()

    # Initialize the LLM handler
    llm_handler = LLMHandler()

    # Read the input CSV and generate a response for each row.
    with open(input_csv, mode="r", newline="", encoding="utf-8") as infile:
        reader = csv.DictReader(infile)
        # Guard against an empty file, where fieldnames is None.
        fieldnames = (reader.fieldnames or []) + ["Generated Text"]

        rows = []
        for row in reader:
            try:
                row["Generated Text"] = llm_handler.generate_response(user_prompt, row)
            except Exception as e:
                # Best-effort: record the failure on the row and keep going.
                print(f"Error generating response for UID {row.get('UID')}: {e}")
                row["Generated Text"] = "Error generating response"
            rows.append(row)

    # Save the updated rows to the output CSV.
    with open(output_csv, mode="w", newline="", encoding="utf-8") as outfile:
        writer = csv.DictWriter(outfile, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(rows)

    print(f"Responses saved to '{output_csv}'.")


if __name__ == "__main__":
    main()
supplement/openai_llms.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from openai import OpenAI
2
+ from dotenv import load_dotenv
3
+ import os
4
+
5
+ load_dotenv()
6
+
7
+
8
class LLMHandler:
    """Generates short personalized invite texts with the OpenAI chat API."""

    def __init__(self, model_name="gpt-4o-mini"):
        """
        Initializes the LLMHandler with the specified OpenAI model.

        :param model_name: OpenAI chat model identifier.
        :raises ValueError: if OPENAI_API_KEY is not set in the environment.
        """
        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        if not self.openai_api_key:
            raise ValueError("OPENAI_API_KEY environment variable not set.")

        # Initialize OpenAI client
        self.client = OpenAI(api_key=self.openai_api_key)
        self.model_name = model_name

    def generate_response(self, user_prompt, data):
        """
        Generate a concise response using the LLM based on user prompt and data.

        :param user_prompt: Prompt provided by the user.
        :param data: Mapping containing the instance information; must provide
            the keys 'Name', 'Job Title', 'Organisation', 'Area of Interest'
            and 'Category' (a missing key raises KeyError).
        :return: Generated response text, stripped of surrounding whitespace.
        """
        # Refined prompt to handle encoding and formatting
        prompt = (
            f"You are a professional AI model tasked with writing personalized invite texts "
            f"that are concise (less than 40 words), brochure-suitable, and tailored as per the user prompt.\n\n"
            f"Consider the user prompt: {user_prompt}\n\n"
            f"Details of the individual:\n"
            f"- Name: {data['Name']}\n"
            f"- Job Title: {data['Job Title']}\n"
            f"- Organisation: {data['Organisation']}\n"
            f"- Area of Interest: {data['Area of Interest']}\n"
            f"- Category: {data['Category']}\n\n"
            f"The response **MUST**:\n"
            f"- Start with 'Hello {data['Name']}'.\n"
            f"- Be concise, professional, and STRICTLY DO NOT generate invalid characters or encoding errors (e.g. 'SoraVR’s').\n"
            f"- Use standard English punctuation, such as single quotes (e.g., 'can't', 'it's').\n"
            f"- STRICTLY Give only one response for the Category the sample belongs to.\n"
            f"- Do NOT include preambles or unnecessary text.\n\n"
            f"Return the final response cleanly, without any extraneous symbols or characters."
        )

        # Query the OpenAI client and return the response
        completion = self.client.chat.completions.create(
            model=self.model_name,
            messages=[
                {"role": "system", "content": "You are a professional assistant."},
                {"role": "user", "content": prompt},
            ]
        )

        # Extract and clean the generated response
        response = completion.choices[0].message.content.strip()

        # Optional: Post-process to clean invalid characters
        #response_cleaned = response.encode('utf-8').decode('utf-8', errors='ignore')

        return response
supplement/openrouter_llms.py ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from openai import OpenAI
3
+ from dotenv import load_dotenv
4
+
5
+ load_dotenv()
6
+
7
+
8
class LLMHandler:
    """OpenRouter-backed handler for the three-step invite workflow.

    Wraps the OpenAI client pointed at OpenRouter and exposes: follow-up
    question generation, final-prompt synthesis, and invite generation.
    """

    def __init__(self, model_name="meta-llama/llama-3.3-70b-instruct"):
        """
        :param model_name: OpenRouter model identifier.
        :raises ValueError: if OPENROUTER_API_KEY is not set in the environment.
        """
        self.openrouter_api_key = os.getenv("OPENROUTER_API_KEY")
        if not self.openrouter_api_key:
            raise ValueError("OPENROUTER_API_KEY environment variable not set.")

        # Initialize OpenAI client with OpenRouter base URL and default headers
        self.client = OpenAI(
            base_url="https://openrouter.ai/api/v1",
            api_key=self.openrouter_api_key,
            default_headers={
                "HTTP-Referer": "http://localhost:8501",  # Local development URL
                "X-Title": "Invite AI",  # Application name
                # Pins the provider order and disables fallbacks on OpenRouter.
                "x-routing-config": '{"provider": {"order": ["Together", "Avian.io", "DeepInfra", "Lambda"]}, "allow_fallbacks": false}'
            }
        )

        self.model_name = model_name

    def _make_api_call(self, messages):
        """Helper method to make chat-completion API calls."""
        return self.client.chat.completions.create(
            model=self.model_name,
            messages=messages
        )

    def generate_questions(self, context):
        """Generate questions based on the initial context provided by the user.

        :param context: Free-text description of the invitation to create.
        :return: List of ``{"question": str}`` dicts, optionally carrying a
            ``"choices": list[str]`` key. Falls back to a static default set
            whenever the model output cannot be parsed/validated as the
            expected JSON array.
        """
        import json  # local import keeps module import side-effect free

        prompt = f"""
        Based on this context about an invitation: "{context}"

        Generate questions to gather necessary information for creating a professional invitation prompt.

        Generate 8-12 focused questions. Include multiple choice options where appropriate.
        Questions should cover:
        1. Senders Company/Organization and role details
        2. Product/service specific details
        3. Key specifications or features
        4. Approximate length of the invite [Word count]
        5. What information from the receivers details do you want to include and influence in the invite
        6. Tone and style preferences
        7. Additional information which you would like to provide [Type N/A if you wish not to]
        8. Call to action [multiple choice] for example [ contact phone number, visit our website, visit our social media etc]
        9. In context to Call to action question, ask a followup question [Textual response] for CTA
        to collect the website link/ phone number/ social media handles etc.

        Return the questions in this exact JSON format:
        [
            {{"question": "Question 1", "choices": ["Choice 1", "Choice 2"]}},
            {{"question": "Question 2"}},
            {{"question": "Question 3", "choices": ["Choice 1", "Choice 2", "Choice 3"]}}
        ]

        For questions without multiple choice options, omit the 'choices' key.
        Make choices relevant but not exhaustive, as users will have option for custom responses.
        """

        # Default questions to use as fallback when parsing fails.
        default_questions = [
            {
                "question": "What is your role in the company?",
                "choices": ["CEO", "CTO", "Director", "Product Manager"]
            },
            {
                "question": "What is your company name?",
            },
            {
                "question": "What is the name of your product/service?",
            },
            {
                # Typo fix: "lenght" -> "length".
                "question": "What is the suggested Invite length [word count] you prefer?",
            },
            {
                "question": "What is the key technical specification or feature?",
            },
            {
                "question": "Can you explain in brief about what the invite is about?",
            },
            {
                "question": "Select the preferred tone for the invitation:",
                "choices": ["Professional", "Innovation-focused", "Casual", "Business & Strategic", "Friendly"]
            }
        ]

        try:
            response = self._make_api_call([{"role": "user", "content": prompt}])
            response_text = response.choices[0].message.content.strip()

            # Extract the JSON array from any surrounding commentary the
            # model may have produced.
            start_idx = response_text.find('[')
            end_idx = response_text.rfind(']') + 1

            if start_idx == -1 or end_idx == 0:
                raise ValueError("Could not find JSON array in response")

            questions = json.loads(response_text[start_idx:end_idx])

            # Validate the question format before returning it to callers.
            for question in questions:
                if 'question' not in question:
                    raise ValueError("Question missing 'question' field")
                if 'choices' in question and not isinstance(question['choices'], list):
                    raise ValueError("'choices' must be a list")

            return questions

        except Exception as e:
            # Deliberate best-effort: log why parsing failed instead of
            # swallowing the error silently, then fall back.
            print(f"Error generating questions ({e}); using default questions as fallback")
            return default_questions

    def generate_final_prompt(self, context, questions, answers):
        """Generate the final prompt based on context and question answers.

        :param context: The user's initial context string.
        :param questions: Question dicts as returned by generate_questions.
        :param answers: One answer per question, index-aligned (IndexError if
            fewer answers than questions are supplied).
        :return: The synthesized prompt text.
        """
        formatted_answers = []
        for i, question in enumerate(questions):
            answer = answers[i]
            formatted_answers.append(f"Q: {question['question']}\nA: {answer}")

        answers_text = "\n".join(formatted_answers)
        prompt = (
            f"Your task is to generate a professional prompt for invitation generation by using the below context and answers: \n"
            f"The initial context provided by user to generate the questions are [Context] :{context} and"
            f"The questions and answers provide detail information on how the prompt has to be designed [Answers]: {answers_text}. \n"
            f"Please follow the below instructions while drafting the prompt: \n"
            f"1. Use the Complete Information in the context and answers. \n"
            f"2. You Should draft best suitable prompt that can be used for generating personalized invites based on the information provided by user. \n"
            f"3. Generate only the prompt and DO NOT include any statements like this in the beginning: \n"
            f"[Here is a professional prompt for invitation generation based on the provided context and answers] \n"
            f"The goal is by using this prompt, the user can obtain personalized invites to wide range of receivers work domain."
        )

        response = self._make_api_call([{"role": "user", "content": prompt}])
        return response.choices[0].message.content.strip()

    def generate_response(self, user_prompt, data):
        """Generate a concise personalized invite using the LLM.

        :param user_prompt: The (possibly synthesized) generation prompt.
        :param data: Mapping of recipient details; 'Name' is used for the
            salutation when present.
        :return: The generated invite text.
        """
        prompt = (
            f"You are a professional AI model tasked with writing personalized invite texts that are brochure-suitable "
            f"and tailored to the user's request and recipient details.\n\n"
            f"User Prompt: {user_prompt}\n"
            f"Recipient Details: {data}\n\n"
            f"**Instructions:**\n"
            f"1. Start the response with an appropriate salutation, for example: 'Hello {data.get('Name', '')}' if available.\n"
            f"2. Match the tone specified in the user prompt. If no tone is mentioned, use a formal tone.\n"
            f"3. Write the invite within 90-100 words unless a specific length is provided.\n"
            f"4. Strictly adhere to all instructions and details given in the user prompt.\n\n"
            f"**Additional Guidelines:**\n"
            f"1. Tailor the invite to align with the recipient's context and profession. For example:\n"
            f"   - If the recipient's information is unrelated to the context, provide a general formal invite highlighting key features.\n"
            f"   - If the recipient is closely related to the context (e.g., a GENAI engineer for an AI product), highlight specific benefits relevant to their needs.\n"
            f"2. You are free to choose complete or partial recipient-specific details (e.g., Job Title, Industry) mentioned in user prompt that would fit naturally into the invite "
            f"3. Do not forcefully match the applications of the user product with the recipients information.\n"
            f"4. Avoid preambles, unnecessary symbols, or extraneous text.\n"
            f"5. Return the final invite text cleanly, in concise with no demeaning language.\n\n"
            f"Validate the invite to make sure it is following all the guidelines. "
        )

        response = self._make_api_call([{"role": "user", "content": prompt}])
        return response.choices[0].message.content.strip()