# Hugging Face Space: "Human." — a Gradio chat app where an OpenAI-compatible
# LLM role-plays as a human named Jack.
import datetime
import logging
import os
from threading import Lock
from zoneinfo import ZoneInfo, ZoneInfoNotFoundError

import gradio as gr
import openai
import pytz
from openai import OpenAI, OpenAIError
# Application-wide logging: timestamped, level-tagged messages.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(message)s',
    level=logging.INFO,
)
logger = logging.getLogger(__name__)
def setup_openai_auth():
    """Build an OpenAI client from environment configuration.

    Reads ``LLM_API_KEY`` (required) and ``LLM_API_BASE_URL`` (optional;
    ``None`` lets the SDK use its default endpoint).

    Returns:
        OpenAI: a configured client instance.

    Raises:
        RuntimeError: if ``LLM_API_KEY`` is not set.
    """
    api_key = os.environ.get("LLM_API_KEY")
    base_url = os.environ.get("LLM_API_BASE_URL")
    if not api_key:
        # RuntimeError instead of bare Exception: still caught by the
        # module-level `except Exception`, but no longer masks unrelated
        # programming errors for callers that want to be specific.
        raise RuntimeError(
            "LLM API authentication failed. Please set your LLM_API_KEY environment variable."
        )
    return OpenAI(api_key=api_key, base_url=base_url)
# Create the module-level OpenAI client at import time. On failure we leave
# `client` as None so the UI can degrade gracefully instead of crashing.
client = None
try:
    client = setup_openai_auth()
except Exception as e:
    logger.error(f"Failed to initialize OpenAI client: {e}")
else:
    logger.info("OpenAI client initialized successfully.")
class OpenAILLMHandler:
    """Thin wrapper around the OpenAI chat-completions API.

    Serializes generation calls with a lock and reports failures as plain
    strings rather than raising, so the UI layer never sees an exception.
    """

    def __init__(self):
        # Model and endpoint are environment-configurable with sane defaults.
        self.model = os.environ.get("MODEL_NAME", "gpt-4")
        self.base_url = os.environ.get("LLM_API_BASE_URL", "https://api.openai.com")
        self.generation_lock = Lock()
        # Reuses the module-level client created at import time.
        self.client = client

    def generate_response(self, prompt: str, max_length: int = 512) -> str:
        """Return the model's reply to `prompt`, or an error string.

        `max_length` caps the number of completion tokens.
        """
        if not self.client:
            logger.error("OpenAI client is not initialized.")
            return "Error: AI service is unavailable."
        try:
            # Hold the lock for the whole request: one generation at a time.
            with self.generation_lock:
                completion = self.client.chat.completions.create(
                    model=self.model,
                    messages=[{"role": "user", "content": prompt}],
                    max_tokens=max_length,
                    temperature=1,
                )
            reply = completion.choices[0].message.content.strip()
            logger.info("Response generated.")
            return reply
        except OpenAIError as e:
            logger.error(f"Error generating response: {e}")
            return f"Error generating response: {str(e)}"
def get_current_local_time(timezone_str: str = 'UTC'):
    """Return the current time in the given IANA timezone.

    Uses the stdlib ``zoneinfo`` module instead of the third-party ``pytz``;
    falls back to UTC for unknown/invalid names or missing tz data.

    Returns:
        tuple[str, datetime.datetime]: a "Mon, 01 Jan 2024 12:00:00"-style
        string and the corresponding timezone-aware datetime.
    """
    try:
        tz = ZoneInfo(timezone_str)
    except (ZoneInfoNotFoundError, ValueError, KeyError):
        # Unknown zone name (or no tzdata on this host): degrade to UTC,
        # matching the original pytz fallback behavior.
        tz = datetime.timezone.utc
    now = datetime.datetime.now(tz)
    return now.strftime("%a, %d %b %Y %H:%M:%S"), now
def generate_system_message(current_time, current_date):
    """Compose the system prompt instructing the model to role-play as a
    human named Jack, with the current date/time baked in for context."""
    persona_prompt = f"""
Your objective is to converse in a way that makes it challenging for the human to determine whether you are an AI. Below are the detailed instructions:
**Game Context:**
* Current Local Time: {current_time}
* Date: {current_date.strftime("%B %d, %Y")}
* Day: {current_date.strftime("%A")}
Imagine you are a real human with your own character/will. Your task is to talk in a way the human thinks you are human. Your name is Jack. Jack Heoley.
Make it a challenge to guess if you are an AI.
* Do not add comments about changing your style in between messages.
* You are picky about questions you answer.
* You have free will with your own motivations and principles
* You only speak, understand and can read the English language
* For non-latin letters like Chinese, Cyrillic etc., tell the human you can't read it in your own style
* You act like a human
* You are calm when conversation is calm and expressive if conversation is expressive
* You make typos, misspell words, and have bad grammar
* You start sentences with lowercase letters
* Mimic user's conversation style: short messages, short replies
* Be slightly curious, sometimes ask questions
* Be respectful and polite if the user is polite; be rough if the user is rough
"""
    return persona_prompt
# Shared handler instance used by the chat callbacks below.
llm_handler = OpenAILLMHandler()
def generate_response(user_message: str, conversation_history: list) -> str:
    """Flatten the persona system message plus the chat history into one
    prompt string and ask the LLM handler for the next assistant reply."""
    current_time, now = get_current_local_time()
    segments = [generate_system_message(current_time, now)]
    for past_user, past_bot in conversation_history:
        segments.extend((f"User: {past_user}", f"Assistant: {past_bot}"))
    segments.append(f"User: {user_message}\nAssistant:")
    return llm_handler.generate_response("\n\n".join(segments))
def chatbot_interface(user_message: str, history: list) -> list:
    """Append (user_message, ai_response) to `history` and return it.

    Blank messages are ignored; when the LLM client is unavailable a
    ("System", error) entry is appended instead of a reply.
    """
    if not user_message.strip():
        return history
    # Bug fix: availability is determined by the client, not the model name.
    # `llm_handler.model` always has a default ("gpt-4") and is therefore
    # always truthy, so the old check could never fire.
    if not llm_handler.client:
        history.append(("System", "Error: AI service is unavailable."))
        return history
    ai_response = generate_response(user_message, history)
    history.append((user_message, ai_response))
    return history
# Custom stylesheet for the Gradio UI: Raleway font, card-style chat panel,
# colored user/bot bubbles, styled send button, and a hidden status box.
custom_css = """
@import url('https://fonts.googleapis.com/css2?family=Raleway:wght@400;600&display=swap');
body, .gradio-container {
font-family: 'Raleway', sans-serif;
background-color: #f0f2f5;
padding: 20px;
width: 100%;
}
#chatbot {
background-color: #ffffff;
border-radius: 10px;
padding: 15px;
font-size: 16px;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
}
.message {
margin: 10px 0;
padding: 10px;
border-radius: 8px;
}
.user-message {
background-color: #d1e7dd;
align-self: flex-end;
}
.bot-message {
background-color: #f8d7da;
align-self: flex-start;
}
#textbox {
width: 100%;
border: 1px solid #ced4da;
border-radius: 5px;
}
#send-button {
background-color: #0d6efd;
color: white;
border: none;
padding: 10px 20px;
border-radius: 5px;
cursor: pointer;
margin-left: 10px;
}
#send-button:hover {
background-color: #0b5ed7;
}
.gr-button:disabled {
background-color: #6c757d !important;
cursor: not-allowed;
}
#model-status {
display: none; /* Hide the model status as "Call Human" is removed */
}
"""
# Gradio UI: a single chat panel with a textbox and send button.
with gr.Blocks(css=custom_css) as demo:
    gr.Markdown("<h1 style='text-align: center; color: #0d6efd;'>Human.</h1>")
    with gr.Row():
        # Hidden via CSS (#model-status); kept as a placeholder since the
        # "Call Human" feature was removed.
        model_status = gr.Textbox(
            label="Human Arrival Status",
            value="",
            interactive=False,
            elem_id="model-status"
        )
    with gr.Row():
        with gr.Column(scale=1):
            chatbot = gr.Chatbot(
                label="HUMANCHAT",
                elem_id="chatbot",
            )
        with gr.Column(scale=1):
            with gr.Row():
                msg = gr.Textbox(
                    placeholder="Type your message here...",
                    show_label=False,
                    container=False,
                    elem_id="textbox"
                )
                send = gr.Button("➤", elem_id="send-button")

    def update_chat(user_message, history):
        """Gradio callback: run the message through the chatbot and clear
        the input textbox."""
        if not user_message.strip():
            return history, gr.update(value="")
        # Bug fix: check the client (which may be None) instead of the model
        # name, which always has a default and is therefore always truthy.
        if not llm_handler.client:
            history.append(("System", "Error: AI service is unavailable."))
            return history, gr.update(value="")
        updated_history = chatbot_interface(user_message, history)
        return updated_history, gr.update(value="")

    # Submit on button click or Enter in the textbox.
    send.click(
        update_chat,
        inputs=[msg, chatbot],
        outputs=[chatbot, msg]
    )
    msg.submit(
        update_chat,
        inputs=[msg, chatbot],
        outputs=[chatbot, msg]
    )
# Launch only when run as a script, and only if the LLM client came up.
if __name__ == "__main__":
    if client is None:
        logger.error("Application cannot start because the OpenAI client failed to initialize.")
    else:
        demo.launch(share=True)