elaineaishophouse committed on
Commit
3c9d57d
·
verified ·
1 Parent(s): c9e272a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -26
app.py CHANGED
@@ -9,16 +9,15 @@ sys.path.append(os.path.abspath('common'))
9
 
10
  from crewai import Crew, Task, Process
11
  from RespondentAgent import *
12
- from langchain_openai import ChatOpenAI
13
- from langchain_groq import ChatGroq
14
-
15
  # Configure logging
16
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
17
 
18
  # Global tracker for the last active agent
19
  last_active_agent = None # Initially, no agent is selected
20
 
21
- def parse_question_with_llm(question, respondent_names, openai_llm):
22
  """
23
  Uses OpenAI's LLM to extract the specific agents being addressed and their respective questions.
24
  Supports compound requests.
@@ -50,7 +49,7 @@ def parse_question_with_llm(question, respondent_names, openai_llm):
50
  """
51
 
52
  # Invoke LangChain LLM
53
- response = openai_llm.invoke(prompt)
54
  chatgpt_output = response.content.strip()
55
  logging.info(f"LLM Parsed Output: {chatgpt_output}")
56
 
@@ -70,7 +69,7 @@ def parse_question_with_llm(question, respondent_names, openai_llm):
70
 
71
  return parsed_questions
72
 
73
- def ask_interview_question(respondent_agents_dict, question, openai_llm):
74
  """
75
  Handles both individual and group interview questions while tracking conversation flow.
76
  Uses OpenAI's LLM to extract the intended respondent(s) and their specific question(s).
@@ -85,7 +84,7 @@ def ask_interview_question(respondent_agents_dict, question, openai_llm):
85
  print(f"Available respondents: {agent_names}")
86
 
87
  # Use OpenAI LLM to parse question into individual respondent-specific sub-questions
88
- parsed_questions = parse_question_with_llm(question, str(agent_names), openai_llm)
89
 
90
  if not parsed_questions:
91
  logging.warning("No parsed questions returned. Exiting function.")
@@ -211,23 +210,8 @@ if __name__ == "__main__":
211
  Config.load_environment(".", "genz.dev1")
212
  Config.print_environment()
213
 
214
- # Initialize OpenAI LLM for parsing
215
- openai_llm = ChatOpenAI(
216
- temperature=0,
217
- api_key=Config.openai_api_key,
218
- model=Config.model,
219
- max_tokens=3000,
220
- top_p=0.1,
221
- frequency_penalty=0,
222
- presence_penalty=-0.5
223
- )
224
-
225
- # Set up Groq LLM for response generation
226
- fact_based_llm = ChatGroq(
227
- groq_api_key=Config.groq_api_key,
228
- model_name=Config.agent_model,
229
- temperature=0.1, # Low temperature for deterministic output
230
- )
231
 
232
  # Load all user profiles from the Excel file
233
  data_dictionary = DataDictionary.generate_dictionary(Config.data_dictionary_file)
@@ -237,7 +221,7 @@ if __name__ == "__main__":
237
  # Create respondent agents for all profiles
238
  respondent_agents_dict = {
239
  profile.get_field("Demographics", "Name"): RespondentAgent.create(
240
- profile, f"{Config.config_dir}/fastfacts/{profile.ID}_fast_facts.xlsx", fact_based_llm
241
  )
242
  for profile in respondent_agent_user_profiles[:5]
243
  }
@@ -249,7 +233,7 @@ if __name__ == "__main__":
249
  if history is None:
250
  history = [] # Ensure history is initialized
251
 
252
- responses = ask_interview_question(respondent_agents_dict, message, openai_llm)
253
  logging.info(f"Interview response is {responses}")
254
 
255
  # Ensure responses is always a list
 
9
 
10
  from crewai import Crew, Task, Process
11
  from RespondentAgent import *
12
+ from LLMConfig import *
13
+
 
14
  # Configure logging
15
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
16
 
17
  # Global tracker for the last active agent
18
  last_active_agent = None # Initially, no agent is selected
19
 
20
+ def parse_question_with_llm(question, respondent_names, processor_llm):
21
  """
22
  Uses OpenAI's LLM to extract the specific agents being addressed and their respective questions.
23
  Supports compound requests.
 
49
  """
50
 
51
  # Invoke LangChain LLM
52
+ response = processor_llm.invoke(prompt)
53
  chatgpt_output = response.content.strip()
54
  logging.info(f"LLM Parsed Output: {chatgpt_output}")
55
 
 
69
 
70
  return parsed_questions
71
 
72
+ def ask_interview_question(respondent_agents_dict, question, processor_llm):
73
  """
74
  Handles both individual and group interview questions while tracking conversation flow.
75
  Uses OpenAI's LLM to extract the intended respondent(s) and their specific question(s).
 
84
  print(f"Available respondents: {agent_names}")
85
 
86
  # Use OpenAI LLM to parse question into individual respondent-specific sub-questions
87
+ parsed_questions = parse_question_with_llm(question, str(agent_names), processor_llm)
88
 
89
  if not parsed_questions:
90
  logging.warning("No parsed questions returned. Exiting function.")
 
210
  Config.load_environment(".", "genz.dev1")
211
  Config.print_environment()
212
 
213
+ processor_llm = get_processor_llm_instance()
214
+ respondent_agent_llm = get_respondent_agent_llm_instance()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
215
 
216
  # Load all user profiles from the Excel file
217
  data_dictionary = DataDictionary.generate_dictionary(Config.data_dictionary_file)
 
221
  # Create respondent agents for all profiles
222
  respondent_agents_dict = {
223
  profile.get_field("Demographics", "Name"): RespondentAgent.create(
224
+ profile, f"{Config.config_dir}/fastfacts/{profile.ID}_fast_facts.xlsx", respondent_agent_llm
225
  )
226
  for profile in respondent_agent_user_profiles[:5]
227
  }
 
233
  if history is None:
234
  history = [] # Ensure history is initialized
235
 
236
+ responses = ask_interview_question(respondent_agents_dict, message, processor_llm)
237
  logging.info(f"Interview response is {responses}")
238
 
239
  # Ensure responses is always a list