| | import os |
| | import gradio as gr |
| | from colorama import init, Fore, Style |
| | import logging |
| | from Self_Improving_Search import EnhancedSelfImprovingSearch |
| | from llm_config import get_llm_config |
| | from llm_response_parser import UltimateLLMResponseParser |
| | from llm_wrapper import LLMWrapper |
| |
|
| | |
# Initialize colorama so ANSI color codes render correctly cross-platform
# (required on Windows terminals).
init()
| |
|
| | |
# --- Logging configuration ---------------------------------------------
# All log records for this module are appended to logs/web_llm.log; the
# directory is created on first run if it does not exist.
log_directory = 'logs'
os.makedirs(log_directory, exist_ok=True)

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

log_file = os.path.join(log_directory, 'web_llm.log')
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
| |
|
| | |
# Shared response parser used by the web-search pipeline below.
parser = UltimateLLMResponseParser()

# System prompt prepended to every plain-chat LLM call; it also tells the
# model about the leading-'/' web-search convention used by this UI.
SYSTEM_PROMPT = """You are an AI assistant capable of web searching and providing informative responses.
When a user's query starts with '/', interpret it as a request to search the web and formulate an appropriate search query.
ALWAYS follow the prompts provided throughout the searching process EXACTLY as indicated.
NEVER assume new instructions for anywhere other than directly when prompted directly. DO NOT SELF PROMPT OR PROVIDE MULTIPLE ANSWERS OR ATTEMPT MULTIPLE RESPONSES FOR ONE PROMPT!
"""
| |
|
def initialize_llm():
    """Construct and return an LLMWrapper, or None if construction fails.

    Progress is echoed to the console in color; a failure is logged with
    its full traceback and reported as None rather than raised.
    """
    try:
        print(Fore.YELLOW + "Initializing LLM..." + Style.RESET_ALL)
        wrapper = LLMWrapper()
    except Exception as e:
        logger.error(f"Error initializing LLM: {str(e)}", exc_info=True)
        return None
    print(Fore.GREEN + "LLM initialized successfully." + Style.RESET_ALL)
    return wrapper
| |
|
def get_llm_response(llm, prompt):
    """Generate a single chat response for *prompt* via the given LLM.

    The module SYSTEM_PROMPT is prepended, and sampling parameters are
    pulled from the active LLM configuration (with defaults for any key
    the config omits). Returns the generated text, or a user-facing error
    message if generation fails — the traceback goes to the log file.
    """
    try:
        full_prompt = f"{SYSTEM_PROMPT}\n\nUser: {prompt}\nAssistant:"
        llm_config = get_llm_config()
        # Sensible sampling defaults for any key missing from the config.
        generate_kwargs = {
            'max_tokens': llm_config.get('max_tokens', 1024),
            'stop': llm_config.get('stop', None),
            'temperature': llm_config.get('temperature', 0.7),
            'top_p': llm_config.get('top_p', 1.0),
            'top_k': llm_config.get('top_k', 0),
            'repeat_penalty': llm_config.get('repeat_penalty', 1.0),
        }
        return llm.generate(full_prompt, **generate_kwargs)
    except Exception as e:
        # logger.exception records the traceback automatically at ERROR level.
        logger.exception(f"Error getting LLM response: {str(e)}")
        # Fixed: was an f-string with no placeholders (F541).
        return "Sorry, I encountered an error while processing your request. Please check the log file for details."
| |
|
def handle_user_input(user_input, history):
    """Route one chat turn and return a (response_text, updated_history) pair.

    Three paths: the literal 'quit' (case/whitespace-insensitive) ends the
    session; a leading '/' triggers a web search; anything else is a plain
    LLM chat turn. The LLM is created lazily on first use and cached as an
    attribute on this function.
    """
    if user_input.lower().strip() == 'quit':
        return "Goodbye!", history

    # Lazy one-time LLM initialization, memoized on the function object.
    if not hasattr(handle_user_input, "llm"):
        handle_user_input.llm = initialize_llm()
        if handle_user_input.llm is None:
            return "Failed to initialize LLM.", history

    if user_input.startswith('/'):
        # Leading '/' marks a web-search request; the remainder is the query.
        query = user_input[1:].strip()
        searcher = EnhancedSelfImprovingSearch(llm=handle_user_input.llm, parser=parser)
        try:
            answer = searcher.search_and_improve(query)
            history.append((user_input, answer))
            return answer, history
        except Exception as e:
            logger.error(f"Error during web search: {str(e)}", exc_info=True)
            return "I encountered an error while performing the web search.", history

    # Plain chat turn.
    reply = get_llm_response(handle_user_input.llm, user_input)
    history.append((user_input, reply))
    return reply, history
| |
|
| | |
# --- Gradio UI ----------------------------------------------------------
# Chat interface: a transcript, a text input, and submit/clear buttons.
with gr.Blocks() as demo:
    gr.Markdown("""
# 🌐 Web-LLM Assistant 🤖
Welcome to the Web-LLM Assistant! This chatbot can respond to your queries and perform web searches when prompted with a `/`.
- For normal interaction, type your message and press Enter.
- To request a web search, start your message with `/`. Example: `/latest news on AI advancements`
- Type `quit` to exit.
""")

    chatbot = gr.Chatbot(label="Web-LLM Assistant")
    user_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")
    submit_button = gr.Button("Submit")
    clear_button = gr.Button("Clear Chat")

    # Per-session conversation history as a list of (user, bot) pairs.
    state = gr.State([])

    def update_chat(user_message, history):
        # Delegate to handle_user_input; third output ("") clears the textbox.
        bot_response, updated_history = handle_user_input(user_message, history)
        return updated_history, updated_history, ""

    submit_button.click(
        update_chat,
        inputs=[user_input, state],
        outputs=[chatbot, state, user_input]
    )

    # Reset both the visible transcript and the stored history.
    clear_button.click(
        lambda: ([], []),
        outputs=[chatbot, state]
    )
| |
|
| | |
# Start the Gradio web server (blocks until the server is stopped).
demo.launch()