import os

import gradio as gr
from together import Together

# ----------------------------------------------------------------------------
# Configuration & Constants
# ----------------------------------------------------------------------------
MODEL_NAME = "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
SYSTEM_PROMPT_BASE = (
    "You are CyberGuard, a senior-level cybersecurity expert assistant. "
    "You autonomously enforce security best practices, making informed decisions when rule-based policies fail."
)

# ----------------------------------------------------------------------------
# Together Client Initialization with API Key
# ----------------------------------------------------------------------------
# Fail fast at import time if the key is missing, rather than on the first call.
api_key = os.environ.get("TOGETHER_API_KEY")
if not api_key:
    raise ValueError("Missing TOGETHER_API_KEY environment variable")

together_client = Together(api_key=api_key)

# ----------------------------------------------------------------------------
# In-Memory Context Tracking
# ----------------------------------------------------------------------------
conversation_history = []  # List of (user_msg, assistant_msg) pairs (currently unused here)
context_summary = ""       # Running summary of conversation, refreshed after each reply


# ----------------------------------------------------------------------------
# Helper: Update Context Summary
# ----------------------------------------------------------------------------
def update_summary(history):
    """Ask the model to condense the conversation into a short running summary.

    Parameters:
        history: list of (user_msg, assistant_msg) tuples representing the
            full conversation so far.

    Returns:
        A 3-5 bullet-point summary string, or "" if the model returned
        no content.
    """
    transcript = "\n".join(f"user: {u}\nassistant: {a}" for u, a in history)
    messages = [
        {"role": "system", "content": SYSTEM_PROMPT_BASE},
        {
            "role": "user",
            "content": (
                "Summarize the following cybersecurity conversation in 3-5 bullet points, "
                "focusing on key decisions and context:\n" + transcript
            ),
        },
    ]
    resp = together_client.chat.completions.create(
        model=MODEL_NAME,
        messages=messages,
        stream=False,
    )
    # BUG FIX: message.content may be None on some responses; calling
    # .strip() on None would raise TypeError. Coerce to "" first.
    content = resp.choices[0].message.content
    return (content or "").strip()


# ----------------------------------------------------------------------------
# Core Chat Functions for Gradio
# ----------------------------------------------------------------------------
def user_submit(user_input, history):
    """Frame the user's message as a cybersecurity query and queue it.

    Parameters:
        user_input: raw text from the textbox.
        history: list of (user_msg, assistant_msg) tuples, or None.

    Returns:
        ("", history) — the empty string clears the textbox, and history
        now ends with (framed_input, "") as a placeholder for the
        streaming assistant reply. Empty input is a no-op.
    """
    history = history or []
    if not user_input:
        return "", history
    # Frame as cybersecurity expert unless the user already did so.
    if not user_input.lower().startswith("as a cybersecurity expert"):
        user_input = f"(As a cybersecurity expert) {user_input}"
    # Append placeholder for the assistant's streamed reply.
    history.append((user_input, ""))
    return "", history


def assistant_stream(history):
    """Stream the model's reply into the last history slot, yielding updates.

    Yields the mutated history after each received token so Gradio can
    re-render the chatbot incrementally. After the reply completes, the
    module-level context_summary is refreshed via update_summary().
    """
    global context_summary
    if not history:
        return
    # Build messages for the model, prepending the running summary (if any)
    # to the system prompt so long conversations keep their context.
    model_msgs = [{"role": "system", "content": SYSTEM_PROMPT_BASE}]
    if context_summary:
        model_msgs[0]["content"] += f"\n\nPrevious summary:\n{context_summary}"
    for user_msg, assistant_msg in history[:-1]:
        model_msgs.append({"role": "user", "content": user_msg})
        model_msgs.append({"role": "assistant", "content": assistant_msg})
    # Current user turn (its assistant slot is still the "" placeholder).
    user_msg, _ = history[-1]
    model_msgs.append({"role": "user", "content": user_msg})

    # Stream tokens from Together.
    stream = together_client.chat.completions.create(
        model=MODEL_NAME,
        messages=model_msgs,
        stream=True,
    )
    for token in stream:
        # BUG FIX: chunks may arrive with an empty choices list, and
        # delta.content can be None (e.g. role-only chunks); the original
        # code concatenated None onto a str, raising TypeError mid-stream.
        if getattr(token, "choices", None):
            delta = token.choices[0].delta.content or ""
            history[-1] = (history[-1][0], history[-1][1] + delta)
            yield history

    # After the full response, update the running summary.
    context_summary = update_summary(history)


# ----------------------------------------------------------------------------
# Launch Gradio Interface
# ----------------------------------------------------------------------------
def launch_interface():
    """Build and launch the CyberGuard Gradio chat UI on port 7860."""
    with gr.Blocks() as demo:
        gr.Markdown("## CyberGuard – Autonomous Cybersecurity Chat")
        chatbot = gr.Chatbot()
        txt = gr.Textbox(show_label=False, placeholder="Enter your security query...")
        # BUG FIX: gr.Chatbot has no .stream() method — the streaming
        # generator must be chained with .then() after the submit handler.
        # Also drop queue=False (streaming output requires the queue) and
        # the redundant second submit handler that set the textbox to None
        # (user_submit already clears it by returning "").
        txt.submit(
            user_submit, [txt, chatbot], [txt, chatbot]
        ).then(assistant_stream, chatbot, chatbot)
    demo.launch(share=True, server_name="0.0.0.0", server_port=7860)


if __name__ == "__main__":
    launch_interface()