File size: 2,146 Bytes
408c946
 
 
 
 
 
29dc222
4359d28
 
 
 
ceb70c7
 
4359d28
408c946
4359d28
408c946
 
 
 
 
 
 
4359d28
408c946
 
 
 
 
4359d28
408c946
ceb70c7
4359d28
408c946
 
 
4359d28
 
408c946
 
 
 
 
 
 
 
 
 
 
 
 
 
bc87248
408c946
 
cedce85
408c946
cedce85
408c946
cedce85
408c946
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
#
# SPDX-FileCopyrightText: Hadad <hadad@linuxmail.org>
# SPDX-License-Identifier: Apache-2.0
#

import traceback
from config import MODEL
from ..core.web_configuration import WebConfiguration
from ..engine.browser_engine import BrowserEngine
from ..tools.tool_manager import local_tools
from ..client.openai_client import setup_client
from .response.setup import setup_response
from .response.generator import generate_response
from .tools.interaction import tools_setup

def searchgpt_playground(user_message, chat_history):
    """Stream a playground response for *user_message*.

    First streams intermediate tool-phase updates as they arrive, then the
    final model response (prefixed with the last tool output when one was
    produced). If anything fails, a single string containing the error and
    its traceback is yielded instead.

    Args:
        user_message: The user's input; must be a non-blank string.
        chat_history: Prior conversation turns passed through to
            ``setup_response``.

    Yields:
        ``[]`` once for invalid input, otherwise response strings.
    """
    # Guard clause: reject non-string or blank input before doing any work.
    if not isinstance(user_message, str) or not user_message.strip():
        yield []
        return

    accumulated_output = ""

    try:
        llm_server, init_error = setup_client()
        if init_error:
            accumulated_output = init_error
            yield accumulated_output
            return

        messages = setup_response(chat_history, user_message)
        tool_output = ""

        # Tool phase: plain-string updates are streamed straight through;
        # anything else is a pair of (updated messages, tool output).
        for update in tools_setup(
            server=llm_server,
            model_name=MODEL,
            conversation_messages=messages,
            tool_definitions=local_tools(),
            search_engine=BrowserEngine(WebConfiguration())
        ):
            if isinstance(update, str):
                tool_output = update
                yield tool_output
            else:
                messages, tool_output = update[0], update[1]

        # Keep the tool output visible above the final answer as it streams.
        prefix = tool_output + "\n\n" if tool_output else ""
        if prefix:
            yield prefix

        for chunk in generate_response(
            server=llm_server,
            model_name=MODEL,
            conversation_messages=messages
        ):
            yield prefix + chunk

    except Exception as failure:
        # Top-level boundary: surface the error and full traceback to the UI.
        accumulated_output += f"\nError: {str(failure)}\n"
        accumulated_output += traceback.format_exc()
        yield accumulated_output