elaineaishophouse committed on
Commit
97b46ba
·
verified ·
1 Parent(s): 9afdde3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +396 -60
app.py CHANGED
@@ -1,64 +1,400 @@
 
 
 
 
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
-
9
-
10
- def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
- ):
18
- messages = [{"role": "system", "content": system_message}]
19
-
20
- for val in history:
21
- if val[0]:
22
- messages.append({"role": "user", "content": val[0]})
23
- if val[1]:
24
- messages.append({"role": "assistant", "content": val[1]})
25
-
26
- messages.append({"role": "user", "content": message})
27
-
28
- response = ""
29
-
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
-
39
- response += token
40
- yield response
41
-
42
-
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- demo = gr.ChatInterface(
47
- respond,
48
- additional_inputs=[
49
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
- gr.Slider(
53
- minimum=0.1,
54
- maximum=1.0,
55
- value=0.95,
56
- step=0.05,
57
- label="Top-p (nucleus sampling)",
58
- ),
59
- ],
60
- )
61
 
 
 
62
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63
  if __name__ == "__main__":
64
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import sys
import traceback
import json
import logging
import re
import gradio as gr
from crewai import Crew, Task, Process
from common.RespondentAgent import *
from langchain_openai import ChatOpenAI
from langchain_groq import ChatGroq
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
# Configure logging: INFO level with timestamped entries, used throughout the
# question-parsing and agent-response flow below.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Global tracker for the last active agent.
# NOTE(review): despite the singular name, ask_interview_question stores a
# *list* of respondent names here; it stays None until the first routed
# question — confirm no caller assumes a single string.
last_active_agent = None  # Initially, no agent is selected
16
+
17
def parse_question_with_llm(question, respondent_names, openai_llm):
    """
    Use an LLM to extract the specific respondent(s) being addressed and the
    exact question posed to each. Supports compound requests.

    Args:
        question: Raw user input, possibly addressing one or more respondents.
        respondent_names: String listing the available respondent names; it is
            embedded verbatim in the prompt so the LLM can correct typos.
        openai_llm: LangChain chat model whose ``invoke`` returns an object
            with a ``.content`` string attribute.

    Returns:
        dict mapping respondent name (capitalised) to the extracted question.
        Per the prompt contract, the name is "General" when nobody is
        explicitly addressed and "All" when everyone is addressed; an empty
        dict means the LLM output could not be parsed.
    """
    logging.info(f"Parsing question with LLM: {question}")

    prompt = f"""
    You are an expert in market research interview analysis.
    Your task is to **identify respondents** mentioned in the question and **extract the exact question** posed to them.

    ### User Input:
    {question}

    ### Instructions:
    1. Identify **each respondent being addressed**.
    The respondents available are {respondent_names}. If these names are mistyped, then ensure that you match the names to the ones available.
    2. Extract the **exact question** posed to each respondent.
    3. Ensure extracted questions **match the original structure**.
    4. If no respondent is explicitly addressed, return "General" as the respondent name.
    5. If the question is posed to all respondents, return "All" as the respondent name.
    6. Ensure that you follow the formatting rules exactly. THIS IS EXTREMELY IMPORTANT.

    ### Formatting Rules:
    Provide the output in the following structured format:
    - Respondent: <Respondent Name>
      Question: <Extracted Question>

    Only return the formatted output without explanations.
    """

    # Invoke LangChain LLM
    response = openai_llm.invoke(prompt)
    chatgpt_output = response.content.strip()
    logging.info(f"LLM Parsed Output: {chatgpt_output}")

    parsed_questions = {}
    respondent_name = "General"
    question_text = None

    for line in chatgpt_output.split("\n"):
        # Accept "Respondent:" with or without the requested "- " bullet —
        # LLMs do not always reproduce bullet formatting exactly, and the
        # previous literal "- Respondent:" match silently dropped such lines.
        if "Respondent:" in line:
            # NOTE(review): .capitalize() lowercases everything after the
            # first character, so multi-word or camel-case names ("Anna Smith")
            # would not match respondent_agents_dict keys — confirm names are
            # single capitalised words.
            respondent_name = re.sub(r"^.*Respondent:\s*", "", line).strip().capitalize()
        elif "Question:" in line:
            question_text = re.sub(r"^.*Question:\s*", "", line).strip()
            if respondent_name and question_text:
                parsed_questions[respondent_name] = question_text
                # Reset for the next respondent/question pair.
                respondent_name = "General"
                question_text = None

    return parsed_questions
68
+
69
def ask_interview_question(respondent_agents_dict, question, openai_llm):
    """
    Handle both individual and group interview questions while tracking
    conversation flow.

    OpenAI's LLM (``openai_llm``) extracts the intended respondent(s) and
    their specific question(s); each respondent's agent (built on Groq's LLM)
    then answers via a dedicated single-task CrewAI crew.

    Args:
        respondent_agents_dict: Maps respondent name -> wrapper exposing
            ``get_agent()`` and ``get_user_profile()``.
        question: Raw question from the moderator/user.
        openai_llm: LangChain chat model used only for parsing the question.

    Returns:
        A list of "**Name**: answer" strings on success, or a single
        moderator message string when the question cannot be routed (no
        respondent detected, or several respondents addressed at once).
    """
    global last_active_agent  # Track last responding agent for follow-ups

    logging.info(f"Received question: {question}")

    agent_names = list(respondent_agents_dict.keys())
    logging.info(f"Available respondents: {agent_names}")
    print(f"Available respondents: {agent_names}")

    # Use OpenAI LLM to parse question into individual respondent-specific sub-questions
    parsed_questions = parse_question_with_llm(question, str(agent_names), openai_llm)

    if not parsed_questions:
        logging.warning("No parsed questions returned. Exiting function.")
        return "**PreData Moderator**:⚠️ No valid respondents were detected for this question."
    elif len(parsed_questions) > 1:
        logging.warning("More than one respondent specified. Exiting function.")
        return "**PreData Moderator**: Please ask each respondent one question at a time."
    else:
        print(f"Parsed questions are: {parsed_questions}")

    if "General" in parsed_questions:
        if last_active_agent:
            # A follow-up with no explicit addressee goes to whoever spoke last.
            logging.info(f"General case detected. Continuing with last active agent: {last_active_agent}")
            parsed_questions = {name: question for name in last_active_agent}
        else:
            logging.info("General case detected without a previous active agent. Assigning question to all respondents.")
            parsed_questions = {name: question for name in agent_names}
    elif "All" in parsed_questions:
        logging.info("All case detected. Assigning question to all respondents.")
        parsed_questions = {name: question for name in agent_names}

    last_active_agent = list(parsed_questions.keys())
    logging.info(f"Final parsed questions: {parsed_questions}")

    # Construct one crew and task for each agent and question
    responses = []

    for agent_name, agent_question in parsed_questions.items():
        if agent_name not in respondent_agents_dict:
            logging.warning(f"No valid respondent found for {agent_name}. Skipping.")
            responses.append(f"**PreData Moderator**: {agent_name} is not a valid respondent.")
            continue

        respondent_agent = respondent_agents_dict[agent_name].get_agent()
        user_profile = respondent_agents_dict[agent_name].get_user_profile()

        communication_style = user_profile.get_field("Communication", "Style")

        # BUGFIX: the task prompts previously embedded the *raw* user input
        # (`question`) rather than the per-respondent extracted question
        # (`agent_question`), defeating the parsing step above. They now use
        # `agent_question`, which equals `question` in the General/All cases.
        question_task_description = f"""
        You are responding to a market research interview question. Your response must strictly follow the **style and tone** outlined below.
        ---
        ### **Style and Tone Reference:**
        {communication_style}
        ---
        ### **How to Answer:**
        - Your response should be **natural, authentic, and fully aligned** with the specified style and tone.
        - Ensure the answer is **clear, engaging, and directly relevant** to the question.
        - Adapt your **sentence structure, phrasing, and word choices** to match the intended communication style.
        - If applicable, incorporate **culturally relevant expressions, regional nuances, or industry-specific terminology** that fit the given tone.
        - **Adjust response length** based on the tone—**concise and direct** for casual styles, **structured and detailed** for professional styles.
        ---
        ### **Guidelines for Ensuring Authenticity & Alignment:**
        - **Consistency**: Maintain the same tone throughout the response.
        - **Authenticity**: The response should feel natural and match the speaker’s persona.
        - **Avoid Overgeneralisation**: Ensure responses are specific and not overly generic or robotic.
        - **Cultural & Linguistic Relevance**: Adapt language and references to match the speaker’s background, industry, or region where appropriate.
        - **British Spelling & Grammar**: Always use British spelling conventions.
        - **Correct:** organised, prioritise, realise, behaviour, centre
        - **Incorrect:** organized, prioritize, realize, behavior, center
        - **Formatting**:
        - If the tone is informal, allow a conversational flow that mirrors natural speech.
        - If the tone is formal, use a structured and professional format.
        - **Do not include emojis or hashtags in the response.**
        ---
        ### **Example Responses (for Different Styles & Tones)**

        #### **Casual & Conversational Tone**
        **Question:** "How do you stay updated on the latest fashion and tech trends?"
        **Correct Response:**
        "I keep up with trends by following influencers on Instagram and watching product reviews on YouTube. Brands like Noise and Boat always drop stylish, affordable options, so I make sure to stay ahead of the curve."

        #### **Formal & Professional Tone**
        **Question:** "How do you stay updated on the latest fashion and tech trends?"
        **Correct Response:**
        "I actively follow industry trends by reading reports, attending webinars, and engaging with thought leaders on LinkedIn. I also keep up with global fashion and technology updates through leading publications such as *The Business of Fashion* and *TechCrunch*."
        ---
        Your final answer should be **a well-structured response that directly answers the question while maintaining the specified style and tone**:
        **"{agent_question}"**
        """

        question_task_expected_output = f"""
        A culturally authentic and conversational response to the question: '{agent_question}'.
        - The response must reflect the respondent's **local cultural background and geographic influences**, ensuring it aligns with their **speech patterns, preferences, and linguistic style**.
        - The language must follow **British spelling conventions**, ensuring it is **natural, personal, and free-flowing**, avoiding American spelling and overly Westernised phrasing.
        - The response **must not introduce the respondent**, nor include placeholders like "[Your Name]" or "[Brand Name]".
        - The final output should be a **single, well-structured paragraph that directly answers the question** while staying fully aligned with the specified communication style.
        """

        question_task = Task(
            description=question_task_description,
            expected_output=question_task_expected_output,
            agent=respondent_agent
        )

        logging.info(f"Executing task for {agent_name}")

        # Create a new crew for each agent-question pair
        crew = Crew(
            agents=[respondent_agent],
            tasks=[question_task],
            process=Process.sequential
        )

        try:
            # The crew result is read back from the task's output below, so
            # the return value of kickoff() itself is not needed.
            crew.kickoff()
            task_output = question_task.output

            # Collect and format the response
            if task_output.raw:
                responses.append(f"**{agent_name}**: {task_output.raw}")
            else:
                responses.append(f"**{agent_name}**: I wasn't able to answer right now - can you try again?")

        except Exception:
            logging.error(f"Error during execution for {agent_name}:", exc_info=True)
            responses.append(f"**PreData Moderator**: An error occurred while processing {agent_name}'s response. Please try again.")

    return responses
203
+
204
+
205
# MAIN
if __name__ == "__main__":
    # NOTE(review): Config, DataDictionary, UserProfile and RespondentAgent
    # are assumed to come from the wildcard import of common.RespondentAgent
    # at the top of the file — TODO confirm.
    Config.load_environment(".", "urban-male-panel")
    Config.print_environment()

    # Initialize OpenAI LLM for parsing user questions into
    # respondent-specific sub-questions (see parse_question_with_llm).
    openai_llm = ChatOpenAI(
        temperature=0,
        api_key=Config.openai_api_key,
        model=Config.model,
        max_tokens=3000,
        top_p=0.1,
        frequency_penalty=0,
        presence_penalty=-0.5
    )

    # Set up Groq LLM for response generation
    fact_based_llm = ChatGroq(
        groq_api_key=Config.groq_api_key,
        model_name=Config.agent_model,
        temperature=0.1, # Low temperature for deterministic output
    )

    # Load all user profiles from the Excel file
    data_dictionary = DataDictionary.generate_dictionary(Config.data_dictionary_file)
    print(f"Generated data dictionary: {data_dictionary}")
    respondent_agent_user_profiles = UserProfile.read_user_profiles_from_excel(Config.respondent_details_file, data_dictionary)

    # Create respondent agents for all profiles.
    # Only the first five profiles are used — the demo panel size.
    respondent_agents_dict = {
        profile.get_field("Demographics", "Name"): RespondentAgent.create(
            profile, f"{Config.config_dir}/fastfacts/{profile.ID}_fast_facts.xlsx", fact_based_llm
        )
        for profile in respondent_agent_user_profiles[:5]
    }

    def chatbot_interface(message, history=None):
        """
        Handles chatbot interaction. Can be used both in Gradio and from MAIN.

        Args:
            message: The user's question to the panel.
            history: Existing chat history in Gradio "messages" format
                (list of {"role", "content"} dicts), or None.

        Returns:
            A (history, "") tuple: the updated history, plus an empty string
            that clears the input textbox bound to this callback.
        """
        if history is None:
            history = [] # Ensure history is initialized

        responses = ask_interview_question(respondent_agents_dict, message, openai_llm)
        logging.info(f"Interview response is {responses}")

        # Ensure responses is always a list — ask_interview_question returns
        # a bare moderator string when the question cannot be routed.
        if not isinstance(responses, list):
            responses = [responses] # Wrap single response in a list

        # Format each response as a Gradio "messages"-style assistant turn.
        formatted_responses = []
        for r in responses:
            formatted_responses.append({"role": "assistant", "content": str(r)})

        # Append user message and formatted responses to history
        history.append({"role": "user", "content": message})
        history.extend(formatted_responses) # Add each response separately

        logging.info(f"Return history: {history}")
        return history, ""

    # Page-wide CSS: grey page background, centred welcome/bio text, and a
    # left-aligned logo row.
    # NOTE(review): "background-color: A9A9A9" and "background: A9A9A9" are
    # missing the leading "#", so browsers will ignore them — confirm whether
    # "#A9A9A9" was intended.
    custom_css = """
    body {
        background-color: A9A9A9;
        font-family: 'Arial', sans-serif;
        color: #ffffff;
    }

    .gradio-container {
        background: A9A9A9;
        border-radius: 10px;
        padding: 20px;
        color: #ffffff;
    }

    /* Center-justify welcome and bio sections */
    .welcome-section, .bio-section {
        color: #ffffff;
        background-color: transparent;
        font-size: 16px;
        text-align: center; /* Center the text */
        display: block !important; /* Use flexbox for centering within the row */
        justify-content: center; /* Center horizontally */
        align-items: center; /* Center vertically (if needed) */
        flex-direction: column; /* Arrange items vertically */
    }

    .welcome-section > *, .bio-section > * { /* Center markdown elements within flex container */
        text-align: center;
    }

    #logo {
        background-color: #000000;
    }

    .logo-row {
        display: flex;
        justify-content: flex-start;
        align-items: flex-start;
        margin: 0;
        padding: 0;
    }

    .logo-row > * { /* Remove margins from the image itself */
        margin: 0;
    }

    """

    with gr.Blocks(css=custom_css) as demo:
        # Top section with the logo and welcome message
        with gr.Row(elem_classes="logo-row"): # Logo at the top
            gr.Image("predata-logo.png", height=200, show_label=False, elem_id="logo")

        with gr.Row(elem_classes="welcome-section"): # Welcome section below the logo
            gr.Markdown("""
            # Welcome to PreData.AI's Market Research Panel [Demo]

            ## Introducing our AI-powered panel:
            - This panel consists of **five AI-powered agents** who will answer your questions.
            - Ask them a question by **typing into the input box below**.
            - Feel free to address a question to a **specific panellist**.
            - If no panellist is specified, all of the panellists will respond.
            """)

        # Main row with two columns: bio-section and chatbot
        with gr.Row():
            # Left column for the bio-section.
            # NOTE(review): panellist names/descriptions/bios are placeholder
            # text ("<description one>", "<short bio one>") — fill in before
            # shipping. The inline <script> modal may also be stripped by
            # Gradio's HTML sanitisation — verify it runs in the target
            # Gradio version.
            with gr.Column(scale=1, elem_classes="bio-section"):
                gr.HTML("""
                <div style="text-align: left;">
                    <h2>Our Panellists</h2>
                    <i>Click on our panellist's name so they can introduce themselves</i>
                    <p><a href="#" onclick="showModal(event, 'PanellistOne')"><b>Name1</b></a> – <description one></p>
                    <p><a href="#" onclick="showModal(event, 'PanellistTwo')"><b>Name2</b></a> – <description two></p>
                    <p><a href="#" onclick="showModal(event, 'PanellistThree')"><b>Name3</b></a> – <description three></p>
                </div>

                <!-- Modal HTML -->
                <div id="bioModal" style="display: none; position: fixed; top: 50%; left: 50%; transform: translate(-50%, -50%);
                    background-color: white; border: 1px solid #ccc; box-shadow: 0px 4px 6px rgba(0, 0, 0, 0.2); width: 400px; max-height: 300px; padding: 20px; text-align: left; z-index: 1000; overflow-y: auto;">
                    <p id="bioContent"></p>
                    <button onclick="closeModal()" style="margin-top: 10px; padding: 5px 10px; background-color: #007bff; color: white; border: none; border-radius: 5px; cursor: pointer;">Close</button>
                </div>
                <div id="modalBackdrop" style="display: none; position: fixed; top: 0; left: 0; width: 100%; height: 100%; background-color: rgba(0, 0, 0, 0.5); z-index: 999;" onclick="closeModal()"></div>

                <script>
                function showModal(event, name) {
                    event.preventDefault(); // Prevent default link behavior

                    let bios = {
                        "PanellistOne": "<short bio one>",

                        "PanellistTwo": "<short bio two>",

                        "PanellistThree": "<short bio three>",
                    };

                    // Ensure the bio exists before displaying it
                    if (bios[name]) {
                        document.getElementById('bioContent').innerText = bios[name];
                    } else {
                        document.getElementById('bioContent').innerText = "Bio not found.";
                    }

                    // Show the modal and backdrop
                    document.getElementById('bioModal').style.display = 'block';
                    document.getElementById('modalBackdrop').style.display = 'block';
                }

                function closeModal() {
                    document.getElementById('bioModal').style.display = 'none';
                    document.getElementById('modalBackdrop').style.display = 'none';
                }
                </script>

                """)

            # Right column for chatbot and textbox
            with gr.Column(scale=3): # Wider column for the chatbot
                chatbot = gr.Chatbot(label="Panel Discussion", height=400, type="messages")

                msg = gr.Textbox(placeholder="Ask your question to the panel here...")
                # Submitting the textbox updates the chat history and clears
                # the textbox (chatbot_interface returns (history, "")).
                msg.submit(chatbot_interface, [msg, chatbot], [chatbot, msg])

        with gr.Row(elem_classes="footer-row"):
            gr.Markdown("""
            <div style="text-align: center; margin-top: 20px; font-size: 14px;">
                © 2025 PreData.AI - All rights reserved. | Contact us at
                <a href="mailto:hello@predata.ai" onclick="event.stopPropagation();"
                style="text-decoration: none; color: #007bff;">hello@predata.ai</a>
            </div>
            """)

    # debug=True surfaces tracebacks in the UI — intended for the demo.
    demo.launch(debug=True)