# Hugging Face Space — Gradio app (the lines below were scraped from the Space page).
# app.py
# Standard library
import os
import uuid

# Third-party
import gradio as gr
from dotenv import load_dotenv
from huggingface_hub import login
from openai import OpenAI
# Load environment variables from a .env file (no-op if the file is absent).
load_dotenv()

# -----------------------------
# Configuration
# -----------------------------
# Secrets come from the environment; os.getenv returns None when a key is unset.
OPENAI_API_KEY = os.getenv("OPENAI_MOCEAN_KEY")
PROMPT_ID = os.getenv("PROMPT_ID")
HF_TOKEN = os.getenv("HF_TOKEN")

# Log in to the Hugging Face Hub only when a non-empty token is available.
# Reuses HF_TOKEN read above instead of re-reading os.environ, and skips the
# login entirely for an empty-string token (the original would pass "" through).
if HF_TOKEN:
    login(HF_TOKEN)

client = OpenAI(api_key=OPENAI_API_KEY)

# Server-side memory: chat_id -> list of {"role": ..., "content": ...} dicts.
conversation_state = {}
def generate_response(chat_id, message):
    """Stream an assistant reply for *chat_id*, yielding the text so far.

    Records the user message in the server-side history, sends the whole
    history to the OpenAI Responses API, and yields the accumulated reply
    after each text delta so the caller can render progressive output.
    The finished reply is appended to the history before the generator ends.
    """
    history = conversation_state.setdefault(chat_id, [])
    history.append({"role": "user", "content": message})

    reply = ""
    with client.responses.stream(
        model="gpt-5",  # model that supports tools
        input=list(history),  # snapshot of the conversation so far
        prompt={"id": PROMPT_ID},
    ) as events:
        for event in events:
            # Only text-delta events contribute to the visible reply.
            if getattr(event, "type", None) == "response.output_text.delta":
                reply += event.delta
                yield reply

    history.append({"role": "assistant", "content": reply})
def start_chat():
    """Return a fresh UUID4 string used as a unique chat-session ID.

    The ``uuid`` import is hoisted to module level (PEP 8) instead of being
    re-executed inside the function on every call.
    """
    return str(uuid.uuid4())
# ---------- GRADIO UI --------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Branded header: company logo next to the assistant title.
    gr.HTML("""
    <div style='display:flex; align-items:center; gap:15px;'>
        <img src='https://mocean-digital.com/wp-content/uploads/2024/04/mocean_Logo_full_white_ring-gold.png' style='height:60px;'>
        <h2 style='margin:0;'>Mocean Digital Inquiry Assistant</h2>
    </div>
    """)

    chat_id = gr.State()  # per-browser-session ID, created lazily on first send
    chatbot = gr.Chatbot(height=450)
    msg = gr.Textbox(
        # Typo fixed in the user-facing placeholder: "fullfil" -> "fulfil".
        placeholder="I am here to help you find the right compilation of mocean crews to help fulfil your true digital transformation",
        label="Please enter your Role, Industry and Question"
    )

    def user_send(message, chat_history, cid):
        """Record the user's message and clear the textbox.

        Creates a session ID on first use and appends a ``(message, None)``
        placeholder pair that ``bot_reply`` fills in while streaming.
        """
        if not cid:
            cid = start_chat()
        chat_history.append((message, None))  # temporary blank reply
        return "", chat_history, cid

    def bot_reply(chat_history, cid):
        """Stream the assistant's answer into the last chat pair."""
        user_msg = chat_history[-1][0]
        for partial in generate_response(cid, user_msg):
            chat_history[-1] = (user_msg, partial)
            yield chat_history

    # Submit chain: store the user turn, then stream the bot turn.
    msg.submit(
        user_send,
        inputs=[msg, chatbot, chat_id],
        outputs=[msg, chatbot, chat_id]
    ).then(
        bot_reply,
        inputs=[chatbot, chat_id],
        outputs=chatbot
    )

demo.launch()