iarfmoose3 commited on
Commit
7a8ac18
·
verified ·
1 Parent(s): 4667719

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +63 -61
app.py CHANGED
@@ -1,77 +1,85 @@
 
1
  import gradio as gr
2
- import logging
3
- import json
4
  from together import Together
5
 
6
  # ----------------------------------------------------------------------------
7
  # Configuration & Constants
8
  # ----------------------------------------------------------------------------
9
  MODEL_NAME = "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
10
- SYSTEM_PROMPT = (
11
  "You are CyberGuard, a senior-level cybersecurity expert assistant. "
12
  "You autonomously enforce security best practices, making informed decisions when rule-based policies fail."
13
  )
14
- HISTORY_FILE = "conversation_history.json"
15
 
16
  # ----------------------------------------------------------------------------
17
- # Setup Logging
18
  # ----------------------------------------------------------------------------
19
- logging.basicConfig(
20
- level=logging.INFO,
21
- format='%(asctime)s - %(levelname)s - %(message)s'
22
- )
23
- logger = logging.getLogger(__name__)
24
 
25
  # ----------------------------------------------------------------------------
26
- # Conversation Persistence Utilities
27
  # ----------------------------------------------------------------------------
28
- def load_history(filepath: str) -> list:
29
- """Load conversation history from a JSON file."""
30
- try:
31
- with open(filepath, 'r') as f:
32
- history = json.load(f)
33
- logger.info("Loaded existing conversation history.")
34
- return history
35
- except FileNotFoundError:
36
- logger.info("No existing history found, starting fresh.")
37
- return []
38
-
39
-
40
- def save_history(history: list, filepath: str) -> None:
41
- """Persist conversation history to disk."""
42
- with open(filepath, 'w') as f:
43
- json.dump(history, f, indent=2)
44
- logger.info("Conversation history saved.")
45
 
46
  # ----------------------------------------------------------------------------
47
- # Together Client Initialization
48
  # ----------------------------------------------------------------------------
49
- together_client = Together()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
 
51
  # ----------------------------------------------------------------------------
52
  # Core Chat Functionality
53
  # ----------------------------------------------------------------------------
54
- def append_and_stream(user_input: str, history: list):
55
- """
56
- Append user input to history, call the LLM streaming API, and yield token-by-token.
57
- """
58
  # Frame as cybersecurity expert if not already
59
  if not user_input.lower().startswith("as a cybersecurity expert"):
60
  user_input = f"(As a cybersecurity expert) {user_input}"
61
 
62
- # Append system prompt at start if missing
63
- if not history or history[0]['role'] != 'system':
64
- history.insert(0, {'role': 'system', 'content': SYSTEM_PROMPT})
 
 
 
 
65
 
66
- # Append user message and prepare assistant placeholder
67
- history.append({'role': 'user', 'content': user_input})
68
- history.append({'role': 'assistant', 'content': ''})
69
- save_history(history, HISTORY_FILE)
 
70
 
71
  # Stream tokens from the model
72
  stream = together_client.chat.completions.create(
73
  model=MODEL_NAME,
74
- messages=history,
75
  stream=True
76
  )
77
 
@@ -79,38 +87,32 @@ def append_and_stream(user_input: str, history: list):
79
  for token in stream:
80
  if hasattr(token, 'choices'):
81
  delta = token.choices[0].delta.content
82
- history[-1]['content'] += delta
83
- save_history(history, HISTORY_FILE)
84
- yield history
 
 
85
 
86
  # ----------------------------------------------------------------------------
87
  # Gradio Interface Definition
88
  # ----------------------------------------------------------------------------
89
  def launch_interface():
90
- # Load previous history or start a new one
91
- history = load_history(HISTORY_FILE)
92
-
93
  with gr.Blocks() as demo:
94
  gr.Markdown("## CyberGuard – Autonomous Cybersecurity Chat")
95
- chatbot = gr.Chatbot(value=[(msg['role'], msg['content']) for msg in history if msg['role'] != 'system'])
96
- state = gr.State(history)
97
 
98
  txt = gr.Textbox(show_label=False, placeholder="Enter your security query...")
99
 
100
- # Handle user submission
101
- def on_submit(user_msg, hist):
102
- return hist, hist + [] # trigger state change
103
 
104
- txt.submit(lambda *_: None, None, txt) # Clear input box
105
  txt.submit(on_submit, [txt, state], [state, chatbot], queue=False)
106
-
107
- # Stream assistant response when history updates
108
- state.change(fn=append_and_stream, inputs=state, outputs=chatbot)
109
 
110
  demo.launch(share=True, server_name='0.0.0.0', server_port=7860)
111
 
112
  if __name__ == "__main__":
113
- try:
114
- launch_interface()
115
- except Exception as e:
116
- logger.exception("Failed to launch CyberGuard chat interface.")
 
1
+ import os
2
  import gradio as gr
 
 
3
  from together import Together
4
 
5
# ----------------------------------------------------------------------------
# Configuration & Constants
# ----------------------------------------------------------------------------
# Together-hosted model used for both chat streaming and summarization calls.
MODEL_NAME = "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
# Base system prompt; a running context summary may be appended at runtime.
SYSTEM_PROMPT_BASE = (
    "You are CyberGuard, a senior-level cybersecurity expert assistant. "
    "You autonomously enforce security best practices, making informed decisions when rule-based policies fail."
)
 
13
 
14
# ----------------------------------------------------------------------------
# Together Client Initialization with API Key
# ----------------------------------------------------------------------------
# The key is expected in the environment (e.g. supplied via Gradio Secrets);
# fail fast at import time rather than on the first API call.
if not (api_key := os.environ.get("TOGETHER_API_KEY")):
    raise ValueError("Missing TOGETHER_API_KEY environment variable")
together_client = Together(api_key=api_key)
22
 
23
# ----------------------------------------------------------------------------
# In-Memory Context Tracking
# ----------------------------------------------------------------------------
# Full chat transcript, oldest first; each entry is {'role': str, 'content': str}.
conversation_history = []
# Rolling model-generated summary of the conversation so far ("" until first reply).
context_summary = ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
 
29
# ----------------------------------------------------------------------------
# Helper: Update Context Summary
# ----------------------------------------------------------------------------
def update_summary(history: list) -> str:
    """Ask the model for a concise summary of the conversation so far.

    Args:
        history: List of {'role': str, 'content': str} message dicts.

    Returns:
        str: Stripped bullet-point summary text; "" if the completion
        carried no content.
    """
    # Build summarization prompt from the full transcript.
    messages = [
        {"role": "system", "content": SYSTEM_PROMPT_BASE},
        {"role": "user", "content": (
            "Summarize the following cybersecurity conversation in 3-5 bullet points, "
            "focusing on key decisions and context:\n" +
            "\n".join(f"{msg['role']}: {msg['content']}" for msg in history)
        )}
    ]

    # Non-streaming call: we want the whole summary in one response.
    response = together_client.chat.completions.create(
        model=MODEL_NAME,
        messages=messages,
        stream=False
    )
    # message.content may be None on an empty completion; guard before strip().
    summary_text = response.choices[0].message.content or ""
    return summary_text.strip()
54
 
55
# ----------------------------------------------------------------------------
# Core Chat Functionality
# ----------------------------------------------------------------------------
def append_and_stream(user_input: str):
    """Append the user's message to the in-memory history, stream the model's
    reply token by token, then refresh the running context summary.

    Args:
        user_input: Raw text submitted by the user.

    Yields:
        list[tuple[str, str]]: (role, content) pairs for every non-system
        message, re-emitted as each streamed token arrives.
    """
    global context_summary

    # Frame as cybersecurity expert if not already
    if not user_input.lower().startswith("as a cybersecurity expert"):
        user_input = f"(As a cybersecurity expert) {user_input}"

    # Append user message
    conversation_history.append({'role': 'user', 'content': user_input})

    # Build system prompt including updated context summary
    system_prompt = SYSTEM_PROMPT_BASE
    if context_summary:
        system_prompt += "\n\nPrevious context summary:\n" + context_summary

    # Assemble messages for streaming BEFORE appending the placeholder, so
    # the empty assistant turn is never sent to the model.
    messages = ([{'role': 'system', 'content': system_prompt}] +
                conversation_history)
    # Add placeholder for assistant; filled in as tokens stream back.
    conversation_history.append({'role': 'assistant', 'content': ''})

    # Stream tokens from the model
    stream = together_client.chat.completions.create(
        model=MODEL_NAME,
        messages=messages,
        stream=True
    )

    for token in stream:
        # Skip events with no/empty choices (hasattr alone misses empty lists).
        if not getattr(token, 'choices', None):
            continue
        # The final chunk's delta.content is typically None; `or ""` avoids
        # a TypeError on the += below.
        delta = token.choices[0].delta.content or ""
        conversation_history[-1]['content'] += delta
        yield [(msg['role'], msg['content']) for msg in conversation_history if msg['role'] != 'system']

    # After full reply, update the context summary
    context_summary = update_summary(conversation_history)
95
 
96
  # ----------------------------------------------------------------------------
97
  # Gradio Interface Definition
98
  # ----------------------------------------------------------------------------
99
  def launch_interface():
 
 
 
100
  with gr.Blocks() as demo:
101
  gr.Markdown("## CyberGuard – Autonomous Cybersecurity Chat")
102
+ chatbot = gr.Chatbot()
103
+ state = gr.State([]) # placeholder to trigger streaming
104
 
105
  txt = gr.Textbox(show_label=False, placeholder="Enter your security query...")
106
 
107
+ # On submit: trigger streaming
108
+ def on_submit(user_msg, _state):
109
+ return [[('user', user_msg)]], None # just triggers state change
110
 
111
+ txt.submit(lambda *_: None, None, txt)
112
  txt.submit(on_submit, [txt, state], [state, chatbot], queue=False)
113
+ state.change(fn=lambda s: append_and_stream(txt.value), inputs=state, outputs=chatbot)
 
 
114
 
115
  demo.launch(share=True, server_name='0.0.0.0', server_port=7860)
116
 
117
  if __name__ == "__main__":
118
+ launch_interface()