fudii0921 committed on
Commit
478a7a9
·
verified ·
1 Parent(s): f00d449

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -15
app.py CHANGED
@@ -1,8 +1,7 @@
1
  import gradio as gr
2
  from gradio import ChatMessage
3
- from crewai.tools import tool, BaseTool
4
- from crewai_tools import CodeInterpreterTool
5
- from groq import Groq
6
  import requests
7
  import uvicorn
8
  #from fastapi.middleware.cors import CORSMiddleware
@@ -14,8 +13,8 @@ import mysql.connector
14
 
15
  from autogen import AssistantAgent, UserProxyAgent, GroupChat, GroupChatManager
16
  from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
17
- from pathlib import Path
18
- from typing import Annotated
19
 
20
  # MySQLに接続
21
  conn = mysql.connector.connect(
@@ -78,8 +77,8 @@ function createGradioAnimation() {
78
  async def autogen(qry: str):
79
  # Prompts
80
  userPrompt=qry
81
- basicAssistantAgentPrompt="あなたはヘッドハンティング会社のアナリストです。"
82
- professionalAssistantAgentPrompt="あなたは高度な知識を持つヘッドハンティング会社の上席コンサルタントです。Basic_Assistant_Agent による応答を分析し、詳細情報を提供します。"
83
 
84
  # Cohere config
85
  config_list = [
@@ -130,7 +129,6 @@ async def autogen(qry: str):
130
 
131
  # Create a group chat between all agents
132
  groupChat = GroupChat(
133
- #agents=[ragproxyagent, atlas_assistant_agent, basic_assistant_agent, professional_assistant_agent],
134
  agents=[assistant, ragproxyagent],
135
  messages=[],
136
  max_round=3,
@@ -146,17 +144,26 @@ async def autogen(qry: str):
146
  #groupChatManager,
147
  #message=userPrompt
148
  #)
149
- chat_result = ragproxyagent.initiate_chat(assistant, message=userPrompt)
 
 
 
 
150
 
151
  chat_result_dict = vars(chat_result)
152
  user_contents = [entry['content'] for entry in chat_result_dict['chat_history'] if entry['role'] == 'user']
153
- #print(chat_result_dict)
154
- print(user_contents[0])
155
- return user_contents[0]
 
 
 
156
 
157
- #basic_content = [entry["content"] for entry in chat_result_dict["chat_history"] if entry["name"] == "assistant"]
 
158
  #print(basic_content)
159
  #return basic_content
 
160
 
161
 
162
  #user_contents = [entry['content'] for entry in chat_result['chat_history'] if entry['role'] == 'user']
@@ -272,5 +279,5 @@ with gr.Blocks(js=js,title="人材スカウトのガイドライン") as llm:
272
  outputs=[auto_output]
273
  )
274
 
275
- if __name__ == "__main__":
276
- llm.launch()
 
1
  import gradio as gr
2
  from gradio import ChatMessage
3
+ #from crewai.tools import tool, BaseTool
4
+ #from groq import Groq
 
5
  import requests
6
  import uvicorn
7
  #from fastapi.middleware.cors import CORSMiddleware
 
13
 
14
  from autogen import AssistantAgent, UserProxyAgent, GroupChat, GroupChatManager
15
  from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
16
+ #from pathlib import Path
17
+ #from typing import Annotated
18
 
19
  # MySQLに接続
20
  conn = mysql.connector.connect(
 
77
  async def autogen(qry: str):
78
  # Prompts
79
  userPrompt=qry
80
+ assistant="あなたはヘッドハンティング会社のアナリストです。"
81
+ ragproxyagent="あなたは高度な知識を持つヘッドハンティング会社の上席コンサルタントです。assistantによる応答を分析し、詳細情報を提供します。"
82
 
83
  # Cohere config
84
  config_list = [
 
129
 
130
  # Create a group chat between all agents
131
  groupChat = GroupChat(
 
132
  agents=[assistant, ragproxyagent],
133
  messages=[],
134
  max_round=3,
 
144
  #groupChatManager,
145
  #message=userPrompt
146
  #)
147
+ chat_result = ragproxyagent.initiate_chat(assistant, message=ragproxyagent.message_generator, problem=userPrompt)
148
+ #user_contents = [entry['content'] for entry in chat_result['chat_history'] if entry['role'] == 'user']
149
+
150
+ #chat_result = ragproxyagent.initiate_chat(assistant, message=userPrompt)
151
+ #print(chat_result)
152
 
153
  chat_result_dict = vars(chat_result)
154
  user_contents = [entry['content'] for entry in chat_result_dict['chat_history'] if entry['role'] == 'user']
155
+ #print("user_contents")
156
+ #print(user_contents)
157
+ #print("\n\n")
158
+
159
+ #print(user_contents[0])
160
+ #return user_contents[0]
161
 
162
+ basic_content = [entry["content"] for entry in chat_result_dict["chat_history"] if entry["role"] == "assistant"]
163
+ #print("basic_content")
164
  #print(basic_content)
165
  #return basic_content
166
+ return user_contents[0]+"\n\n"+basic_content[0]
167
 
168
 
169
  #user_contents = [entry['content'] for entry in chat_result['chat_history'] if entry['role'] == 'user']
 
279
  outputs=[auto_output]
280
  )
281
 
282
+ if __name__ == "__main__":
283
+ llm.launch()