# NOTE: Hugging Face Spaces status banner ("Spaces: Sleeping") removed —
# it was page-extraction residue, not part of the program.
import gradio as gr
from gradio import ChatMessage
from groq import Groq
import requests
import uvicorn
import os
import mysql.connector
from qdrant_client import QdrantClient
from autogen import AssistantAgent, UserProxyAgent, GroupChat, GroupChatManager
from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent

# Load provider API keys from the `agentic_apis` table and export them as
# environment variables for the Groq / Cohere clients used below.
# SECURITY(review): DB host/user/password are hard-coded in source — move
# them to environment variables or a secrets store.
conn = mysql.connector.connect(
    host="www.ryhintl.com",
    user="smairuser",
    password="smairuser",
    port=36000,
    database="smair"
)
try:
    # dictionary=True makes each row a dict keyed by column name.
    cursor = conn.cursor(dictionary=True)
    cursor.execute("SELECT * FROM agentic_apis")
    result = cursor.fetchall()
    cursor.close()
finally:
    # The keys are needed only once at startup; do not hold the DB
    # connection open for the lifetime of the web app (original leaked it).
    conn.close()

# NOTE(review): keys[2]/keys[3] rely on a fixed row order in the table —
# selecting by provider name would be more robust. TODO confirm schema.
keys = [item['key'] for item in result]
os.environ["GROQ_API_KEY"] = keys[2]
os.environ["COHERE_API_KEY"] = keys[3]
# JavaScript passed to gr.Blocks(js=...): on page load it prepends a banner
# div to the Gradio container and reveals the title text one letter at a
# time (every 250 ms), each letter in a random colour with a 0.5 s fade-in.
js = """
function createGradioAnimation() {
var container = document.createElement('div');
container.id = 'gradio-animation';
container.style.fontSize = '2em';
container.style.fontWeight = 'bold';
container.style.textAlign = 'center';
container.style.marginBottom = '20px';
var text = '人工知能(AI)の現状と未来';
for (var i = 0; i < text.length; i++) {
(function(i){
setTimeout(function(){
var letter = document.createElement('span');
var randomColor = "#" + Math.floor(Math.random() * 16777215).toString(16);
letter.style.color = randomColor;
letter.style.opacity = '0';
letter.style.transition = 'opacity 0.5s';
letter.innerText = text[i];
container.appendChild(letter);
setTimeout(function() {
letter.style.opacity = '1';
}, 50);
}, i * 250);
})(i);
}
var gradioContainer = document.querySelector('.gradio-container');
gradioContainer.insertBefore(container, gradioContainer.firstChild);
return 'Animation created';
}
"""
# Currently selected LLM backend for the AutoGen tab ("COHERE" or "GROQ").
sel_method = "COHERE"

def on_change(selected_option):
    """Remember the dropdown choice in module-level ``sel_method`` and
    return a short confirmation string for display."""
    global sel_method
    sel_method = selected_option
    return "選択されたオプション: {}".format(selected_option)
async def autogen(qry: str):
    """Run one agentic-RAG round trip with AutoGen.

    Builds an ``AssistantAgent`` and a ``RetrieveUserProxyAgent`` backed by
    a Qdrant Cloud collection, sends *qry* through the retrieval chat, and
    returns the user-side content and the assistant's reply joined by a
    blank line.  The LLM backend (Cohere or Groq) is chosen via the
    module-level ``sel_method`` set by the dropdown.

    :param qry: user question, forwarded as the retrieval "problem".
    :return: ``user_contents[0] + "\\n\\n" + basic_content[0]``.
    """
    # Prompts
    userPrompt=qry
    # NOTE(review): the two role-description strings below are dead values —
    # both names are immediately rebound to agent objects further down.
    assistant="あなたはAI会社のアナリストです。"
    ragproxyagent="あなたは高度な知識を持つAI会社の上席コンサルタントです。assistantによる応答を分析し、詳細情報を提供します。"
    global sel_method
    if sel_method == "COHERE":
        # Cohere config
        config_list = [
            {
                "model": "command-r-plus-08-2024",
                "api_key": os.environ["COHERE_API_KEY"],
                "api_type": "cohere",
            }
        ]
    else:
        # GROQ config
        config_list = [
            {
                "model": "llama-3.3-70b-versatile",
                "api_key": os.environ["GROQ_API_KEY"],
                "api_type": "groq",
            }
        ]
    assistant = AssistantAgent(
        name="assistant",
        system_message="You are a helpful assistant.",
        llm_config={
            "timeout": 600,
            "cache_seed": 42,
            "config_list": config_list,
        },
    )
    # SECURITY(review): Qdrant Cloud URL and API key are hard-coded —
    # move them to environment variables / secret storage.
    client = QdrantClient(url="https://02cbe366-829e-43a6-adf5-3b712a886c21.us-west-1-0.aws.cloud.qdrant.io", api_key="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhY2Nlc3MiOiJtIn0.hWu5_qKaYHUhuMAjUScqw1R_1kkXiidv337wuGKcu9o")
    # Create the retrieval user-proxy agent over the hosted markdown doc.
    ragproxyagent = RetrieveUserProxyAgent(
        name="ragproxyagent",
        human_input_mode="NEVER",
        max_consecutive_auto_reply=3,
        retrieve_config = {
            "task": "qa",
            "docs_path": [
                "https://www.ryhintl.com/ai-predict.md"
            ],
            "chunk_token_size": 2000,
            "model": config_list[0]["model"],
            "vector_db": "qdrant",  # Qdrant Cloud database
            "collection_name": "ai_predict",
            "db_config": {"client": client},
            "get_or_create": True,  # set to False if you don't want to reuse an existing collection
            "overwrite": True,  # forces index creation and re-upload of documents on every call
        },
        code_execution_config=False,  # the proxy agent never executes code
    )
    # NOTE(review): groupChat / groupChatManager are constructed but never
    # used — the chat below goes directly ragproxyagent -> assistant.
    groupChat = GroupChat(
        agents=[assistant, ragproxyagent],
        messages=[],
        max_round=3,
    )
    groupChatManager = GroupChatManager(
        groupchat=groupChat,
        llm_config={"config_list": config_list}
    )
    # Initiate the retrieval-augmented chat; message_generator wraps the
    # user problem with the retrieved context.
    chat_result = ragproxyagent.initiate_chat(assistant, message=ragproxyagent.message_generator, problem=userPrompt)
    chat_result_dict = vars(chat_result)
    user_contents = [entry['content'] for entry in chat_result_dict['chat_history'] if entry['role'] == 'user']
    basic_content = [entry["content"] for entry in chat_result_dict["chat_history"] if entry["role"] == "assistant"]
    # presumably user_contents[0] is the context-expanded prompt and
    # basic_content[0] the assistant's answer — TODO confirm ordering;
    # also note this raises IndexError if either list is empty.
    return user_contents[0]+"\n\n"+basic_content[0]
def process_rag(prompt):
    """Query the remote RAG backend and return its answer as text.

    :param prompt: user prompt from the Gradio textbox.
    :return: the backend's UTF-8 response body, or a validation message
        when the prompt is empty.
    """
    if prompt == "":
        # Single string: this handler is wired to exactly one output
        # component, so the original 2-tuple return would make Gradio
        # report a mismatched number of return values.
        return "プロンプトを入力してください。"
    # Let requests build the query string so prompts containing spaces or
    # special characters are URL-encoded (raw concatenation was not).
    res = requests.get(
        'http://www.ryhintl.com/eprag-be/llm',
        params={'query': prompt},
        timeout=120,
    )
    return res.content.decode('utf-8')
def process_eprag(prompt):
    """Send *prompt* to the Groq chat-completions API and return the reply.

    :param prompt: user prompt from the Gradio textbox.
    :return: the assistant's answer, or a validation message when the
        prompt is empty.
    """
    if prompt == "":
        # Single string: this handler has one output component, so the
        # original 2-tuple return would break the Gradio callback.
        return "プロンプトを入力してください。"
    client = Groq(api_key=os.environ["GROQ_API_KEY"])
    # System prompt pinning the assistant persona (answer concisely).
    system_prompt = {
        "role": "system",
        "content": "あなたは便利なアシスタントです。質問には簡潔に答えてください。"
    }
    user_prompt = {
        "role": "user", "content": prompt
    }
    response = client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=[system_prompt, user_prompt],
        max_tokens=1024,
        temperature=0)  # deterministic output
    return response.choices[0].message.content
# Gradio UI: three tabs (plain LLM, server-side RAG, AutoGen agentic RAG)
# sharing the animated-title JavaScript. NOTE(review): the original
# indentation was lost in extraction; the nesting below is a best-effort
# reconstruction — all components stay inside the Blocks context, so the
# wiring is unaffected either way.
with gr.Blocks(js=js,title="人材スカウトのガイドライン") as llm:
    with gr.Tab("LLM"):
        gr.Markdown("# 🗞️ LLM")
        # Static sample prompt shown above the editable input box.
        gr.Text(label="人工知能(AI)の現状と未来",value="AIが未来にどうなるか心配しています。人工知能(AI)を中心とする ICT の進化はどうなるかについて教えてください。",lines=3)
        with gr.Row():
            eprag_input = gr.Textbox(value="AIが未来にどうなるか心配しています。人工知能(AI)を中心とする ICT の進化はどうなるかについて教えてください。",lines=5,label="プロンプト", type="text")
        with gr.Row():
            eprag_output = gr.Textbox(label="AIアシスタントの応答")
        submit_button = gr.Button("LLM プロセス", variant="primary")
        submit_button.click(
            process_eprag,
            inputs=[eprag_input],
            outputs=[eprag_output]
        )
    with gr.Tab("RAG"):
        gr.Markdown("# 🗞️ RAG")
        gr.Text(label="人工知能(AI)の現状と未来",value="AIが未来にどうなるか心配しています。人工知能(AI)を中心とする ICT の進化はどうなるかについて教えてください。",lines=3)
        with gr.Row():
            rag_input = gr.Textbox(value="AIが未来にどうなるか心配しています。人工知能(AI)を中心とする ICT の進化はどうなるかについて教えてください。",lines=5,label="プロンプト", type="text")
        with gr.Row():
            rag_output = gr.Textbox(label="AIアシスタントの応答")
        submit_button = gr.Button("RAG プロセス", variant="primary")
        submit_button.click(
            process_rag,
            inputs=[rag_input],
            outputs=[rag_output]
        )
    with gr.Tab("AGENT AUTOGEN"):
        gr.Markdown("# 🗞️ AGENTIC RAG AUTOGEN")
        gr.Text(label="人工知能(AI)の現状と未来",value="AIが未来にどうなるか心配しています。人工知能(AI)を中心とする ICT の進化はどうなるかについて教えてください。",lines=3)
        with gr.Row():
            auto_input = gr.Textbox(value="AIが未来にどうなるか心配しています。人工知能(AI)を中心とする ICT の進化はどうなるかについて教えてください。",lines=5,label="プロンプト", type="text")
        with gr.Row():
            # Backend selector; the hidden textbox just receives the
            # confirmation string from on_change.
            method = gr.Dropdown(
                ["COHERE", "GROQ"], label="LLM", info="LLMを選んでください"
            )
            selected_method = gr.Text(label="Method", visible=False)
            method.change(on_change, inputs=method, outputs=selected_method)
        with gr.Row():
            auto_output = gr.Textbox(label="AIアシスタントの応答")
        submit_button = gr.Button("AUTOGEN プロセス", variant="primary")
        # `autogen` is async — Gradio awaits coroutine handlers natively.
        submit_button.click(
            autogen,
            inputs=[auto_input],
            outputs=[auto_output]
        )
if __name__ == "__main__":
    llm.launch()