ss900371tw committed on
Commit ac08560 · verified · 1 Parent(s): a617be0

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +120 -114
src/streamlit_app.py CHANGED
@@ -23,7 +23,7 @@ from langchain_community.vectorstores import FAISS
 from langchain_community.vectorstores.utils import DistanceStrategy
 from langchain_community.docstore.in_memory import InMemoryDocstore
 
-# Try to import pypdf
+# Try to import pypdf
 try:
     import pypdf
 except ImportError:
@@ -31,7 +31,7 @@ except ImportError:
 
 # --- Page setup ---
 st.set_page_config(page_title="Cybersecurity AI Assistant (Hugging Face RAG & IP Correlated Analysis)", page_icon="🛡️", layout="wide")
-st.title("🛡️ fdtn-ai/Foundation-Sec-8B-Instruct with FAISS RAG & IP Correlated Analysis (Inference Client)")
+st.title("🛡️ LLM with FAISS RAG & IP Correlated Analysis (Inference Client)")
 st.markdown("Enabled: **IndexFlatIP** + **L2 normalization** + **Hugging Face Inference Client (API)**. Supports JSON/CSV/TXT/**W3C logs** for **IP-correlated batch analysis**.")
 
 # --- Streamlit session-state initialization (hardened so every variable has an initial value) ---
@@ -48,9 +48,13 @@ if 'vector_store' not in st.session_state:
 if 'json_data_for_batch' not in st.session_state:
     st.session_state.json_data_for_batch = None  # stays None, since a file may not have been uploaded
 
-# Set the model ID
-# MODEL_ID = "meta-llama/Meta-Llama-3-8B-Instruct"
-MODEL_ID = "meta-llama/Meta-Llama-3-70B-Instruct"
+# --- Model list ---
+MODEL_OPTIONS = {
+    "OpenAI GPT-OSS 20B (Hugging Face)": "openai/gpt-oss-20b",
+    "Meta Llama 3 8B Instruct (Hugging Face)": "meta-llama/Meta-Llama-3-8B-Instruct",
+    "fdtn-ai Foundation-Sec 8B Instruct (Hugging Face)": "fdtn-ai/Foundation-Sec-8B-Instruct"
+}
+
 WINDOW_SIZE = 20  # maximum number of correlated logs (including the current one)
 
 # === Dedicated W3C log parser (new) ===
@@ -65,46 +69,46 @@ def parse_w3c_log(log_content: str) -> List[Dict[str, Any]]:
     lines = log_content.splitlines()
     field_names = None
     data_lines = []
-
+
     for line in lines:
         line = line.strip()
         if not line:
             continue
-
+
         if line.startswith("#Fields:"):
             # Field definition found, e.g. "#Fields: date time s-ip cs-method ..."
             field_names = line.split()[1:]  # skip "#Fields:" itself
         elif not line.startswith("#"):
             # This is an actual data row
             data_lines.append(line)
-
+
     if not field_names:
         # No #Fields: header was found; fall back to raw log-entry mode
         # st.warning("No W3C #Fields: header detected; falling back to raw log-entry mode.")
         return [{"raw_log_entry": line} for line in lines if line.strip() and not line.startswith("#")]
 
     json_data = []
-
+
     # Field names that should be converted to numbers (extend as needed; underscore variants)
     numeric_fields = ['sc_status', 'time_taken', 'bytes', 'resp_len', 'req_size']
-
+
     for data_line in data_lines:
         # W3C logs are space-separated by default; split() is used here
         values = data_line.split(' ')
-
+
         # Simple field-count check
         if len(values) != len(field_names):
            # On a field-count mismatch, treat the whole row as a raw log entry
            json_data.append({"raw_log_entry": data_line})
            continue
-
+
         record = {}
         for key, value in zip(field_names, values):
             # Replace '-' in W3C field names with a Python-friendly '_'
             key = key.strip().replace('-', '_')
-
+
             value = value.strip() if value else ""
-
+
             # Numeric conversion
             if key in numeric_fields:
                 try:
@@ -116,10 +120,10 @@ def parse_w3c_log(log_content: str) -> List[Dict[str, Any]]:
                     record[key] = value
             else:
                 record[key] = value
-
+
         if record:
             json_data.append(record)
-
+
     return json_data
 
 # === Core file-conversion helper (CSV/TXT -> JSON list) (kept, lightly adjusted) ===
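For reference, a minimal, self-contained sketch of how parse_w3c_log behaves on a fabricated W3C fragment (the sample lines and expected output below are invented for illustration; the int() coercion of sc_status/time_taken sits in the try branch elided between these two hunks):

sample = (
    "#Software: Microsoft Internet Information Services 10.0\n"
    "#Fields: date time c-ip cs-method cs-uri-stem sc-status time-taken\n"
    "2024-01-01 08:00:01 203.0.113.7 GET /login 401 15\n"
    "2024-01-01 08:00:02 203.0.113.7 POST /login 200 12\n"
    "malformed line without enough fields\n"
)

records = parse_w3c_log(sample)
# records[0] -> {'date': '2024-01-01', 'time': '08:00:01', 'c_ip': '203.0.113.7',
#                'cs_method': 'GET', 'cs_uri_stem': '/login',
#                'sc_status': 401, 'time_taken': 15}
# records[2] -> {'raw_log_entry': 'malformed line without enough fields'}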
@@ -130,29 +134,29 @@ def convert_csv_txt_to_json_list(file_content: bytes, file_type: str) -> List[Dict[str, Any]]:
     log_content = file_content.decode("utf-8").strip()
     if not log_content:
         return []
-
+
     string_io = io.StringIO(log_content)
-
+
     # Try csv.DictReader, which automatically treats the first row as the keys
     try:
         reader = csv.DictReader(string_io)
     except Exception:
         # On failure, fall back to one raw log entry per line
         return [{"raw_log_entry": line.strip()} for line in log_content.splitlines() if line.strip()]
-
+
     json_data = []
     if reader and reader.fieldnames:
         # Numeric field names users are likely to use
         numeric_fields = ['sc-status', 'time-taken', 'bytes', 'resp-len', 'req-size', 'status_code', 'size', 'duration']
-
+
         for row in reader:
             record = {}
             for key, value in row.items():
                 if key is None: continue
-
+
                 key = key.strip()
                 value = value.strip() if value else ""
-
+
                 # Numeric conversion
                 if key in numeric_fields:
                     try:
@@ -164,16 +168,16 @@ def convert_csv_txt_to_json_list(file_content: bytes, file_type: str) -> List[Dict[str, Any]]:
                         record[key] = value
                 else:
                     record[key] = value
-
+
             if record:
                 json_data.append(record)
-
+
     # Check again whether the result is empty; if so, the input may not be standard CSV/JSON
     if not json_data:
         string_io.seek(0)
         lines = string_io.readlines()
         return [{"raw_log_entry": line.strip()} for line in lines if line.strip()]
-
+
     return json_data
 
 # === File-type dispatcher (modified) ===
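The CSV/TXT path leans entirely on csv.DictReader, which maps the header row to dict keys; everything else is trimming plus the numeric coercion above. A minimal sketch of that core behavior (sample data invented for illustration):

import csv
import io

csv_text = "c-ip,cs-method,sc-status\n198.51.100.9,GET,404\n"
rows = list(csv.DictReader(io.StringIO(csv_text)))
# rows[0] -> {'c-ip': '198.51.100.9', 'cs-method': 'GET', 'sc-status': '404'}
# convert_csv_txt_to_json_list then strips keys/values and, for names listed in
# numeric_fields (e.g. 'sc-status'), converts '404' -> 404 in the elided try branch.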
@@ -181,12 +185,12 @@ def convert_uploaded_file_to_json_list(uploaded_file) -> List[Dict[str, Any]]:
     """Convert the uploaded file's content into a list of log JSON records, based on file type."""
     file_bytes = uploaded_file.getvalue()
     file_name_lower = uploaded_file.name.lower()
-
+
     # --- Case 1: JSON ---
     if file_name_lower.endswith('.json'):
         stringio = io.StringIO(file_bytes.decode("utf-8"))
         parsed_data = json.load(stringio)
-
+
         if isinstance(parsed_data, dict):
             # Handle lists wrapped in an 'alerts' or 'logs' key
             if 'alerts' in parsed_data and isinstance(parsed_data['alerts'], list):
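The JSON branch accepts either a bare array or a dict that wraps the array under an 'alerts' (or 'logs') key. A compact sketch of that normalization with invented payloads (the helper name unwrap_logs is hypothetical; how the dispatcher treats dicts with other keys falls outside this hunk):

import json

def unwrap_logs(payload):
    # Dicts like {"alerts": [...]} or {"logs": [...]} are unwrapped; bare lists pass through.
    if isinstance(payload, dict):
        for key in ("alerts", "logs"):
            if isinstance(payload.get(key), list):
                return payload[key]
    if isinstance(payload, list):
        return payload
    raise ValueError("Unsupported JSON file format (not a list or dict).")

assert unwrap_logs(json.loads('{"alerts": [{"c_ip": "192.0.2.1"}]}')) == [{"c_ip": "192.0.2.1"}]
assert unwrap_logs(json.loads('[{"c_ip": "192.0.2.2"}]')) == [{"c_ip": "192.0.2.2"}]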
@@ -199,36 +203,44 @@ def convert_uploaded_file_to_json_list(uploaded_file) -> List[Dict[str, Any]]:
             return parsed_data  # lists are returned as-is
         else:
             raise ValueError("Unsupported JSON file format (not a list or dict).")
-
+
     # --- Case 2, 3, & 4: CSV/TXT/LOG ---
     elif file_name_lower.endswith(('.csv', '.txt', '.log')):
         file_type = 'csv' if file_name_lower.endswith('.csv') else ('log' if file_name_lower.endswith('.log') else 'txt')
-
+
         if file_type == 'log':
             # For .log files, try the W3C parser
             log_content = file_bytes.decode("utf-8").strip()
             if not log_content: return []
             return parse_w3c_log(log_content)
-
+
         else:
             # CSV and TXT keep the original csv.DictReader logic
             return convert_csv_txt_to_json_list(file_bytes, file_type)
-
+
     else:
         raise ValueError("Unsupported file type.")
 
 # --- Sidebar settings (the 'type' parameter has been updated) ---
 with st.sidebar:
     st.header("⚙️ Settings")
+
+    # --- New: model selector ---
+    selected_model_name = st.selectbox(
+        "Select an LLM model",
+        list(MODEL_OPTIONS.keys()),
+        index=0  # default to the first option
+    )
+    MODEL_ID = MODEL_OPTIONS[selected_model_name]  # update MODEL_ID
 
     if not os.environ.get("HF_TOKEN"):
         st.error("The **HF_TOKEN** environment variable is not set. Set it and restart the application.")
     st.info(f"LLM model: **{MODEL_ID}** (Hugging Face Inference API)")
     st.warning("⚠️ **Note**: this model is called through the Inference API; make sure your HF token has access.")
-
+
     st.divider()
     st.subheader("📂 File upload")
-
+
     # === 1. Batch-analysis file (multiple formats supported) ===
     batch_uploaded_file = st.file_uploader(
         "1️⃣ Upload a **log/alert file** (for batch analysis)",
@@ -236,7 +248,7 @@ with st.sidebar:
         key="batch_uploader",
         help="Supports JSON (array), CSV (with header), TXT/LOG (treated as W3C or plain logs)"
     )
-
+
     # === 2. RAG knowledge-base file ===
     rag_uploaded_file = st.file_uploader(
         "2️⃣ Upload a **RAG reference knowledge base** (logs/PDF/code, etc.)",
@@ -244,15 +256,15 @@ with st.sidebar:
         key="rag_uploader"
     )
     st.divider()
-
+
     st.subheader("💡 Batch-analysis instruction")
     analysis_prompt = st.text_area(
         "Instruction to run for each log/alert",
-        value="You are a security expert responsible for analyzing alerts related to Web Application Attacks and Brute Force & Reconnaissance. Your analysis must adhere strictly to the information provided in the Log and its correlated sequence. Please respond with a clear, structured analysis using the following mandatory sections: \n\n- Priority: Provide the overall priority level. (Answer only High-risk detected!, Medium-risk detected!, or Low-risk detected!) \n- Observation: Briefly detail all key facts from the log sequence (IP, time, repeating URI, HTTP method, status code, time-taken) and highlight any anomalous or suspicious patterns. \n- Potential Risk & Hypothesis: If this alert is highly related to Web Application Attacks, Brute Force, or Reconnaissance, you must propose the most likely risk hypothesis based on the Observation, and explain the potential impact. If not highly related, omit this section. \n- Action Plan: If this alert is highly related to Web Application Attacks, Brute Force, or Reconnaissance, list the immediate validation and mitigation steps that should be taken to address this specific alert. If not highly related, omit this section. \n\nStrictly use the information in the provided Log and its correlated sequence. Avoid arbitrary conclusions. For example, use 'Potential Duplicate Submission' or 'Anomalous Session Keep-Alive Traffic' instead of flatly stating 'Brute Force Attack'. Prioritize non-blocking validation steps (e.g., monitoring, internal confirmation) over immediate IP blocking.",
+        value="You are a security expert in charge of analyzing alerts related to Initial Access, Establish Foothold & Reconnaissance, Lateral Movement, Targeting & Data Exfiltration, Malware Deployment & Execution and Ransom & Negotiation. Respond with a clear, structured analysis using the following mandatory sections: \n\n- Priority: Provide the overall priority level. (Answer High-risk detected!, Medium-risk detected!, or Low-risk detected! only) \n- Explanation: If this alert is highly related to Initial Access, Establish Foothold & Reconnaissance, Lateral Movement, Targeting & Data Exfiltration, Malware Deployment & Execution and Ransom & Negotiation, explain the potential impact and why this specific alert requires attention. If not, **omit the explanation section**. \n- Action Plan: If this alert is highly related to Initial Access, Establish Foothold & Reconnaissance, Lateral Movement, Targeting & Data Exfiltration, Malware Deployment & Execution and Ransom & Negotiation, What should be the immediate steps to address this specific alert? If not, **omit the action plan section**. \n\nStrictly use the information in the provided Log and its correlated sequence.",
         height=200
     )
     st.markdown("This instruction runs one independent analysis for **every log entry** in the file (using the **IP correlation window**).")
-
+
     if batch_uploaded_file:
         if st.button("🚀 Run batch analysis"):
             if not os.environ.get("HF_TOKEN"):
@@ -265,18 +277,18 @@ with st.sidebar:
                 st.error("Please wait for the log file to finish parsing first.")
     else:
         st.info("Upload a log file to enable the batch-analysis button.")
-
+
     st.divider()
     st.subheader("🔍 RAG retrieval settings")
     similarity_threshold = st.slider("📐 Cosine similarity threshold", 0.0, 1.0, 0.4, 0.01)
-
+
     st.divider()
     st.subheader("Model parameters")
     system_prompt = st.text_area("System Prompt", value="You are a Senior Security Analyst, named Ernest. You provide expert, authoritative, and concise advice on Information Security. Your analysis must be based strictly on the provided context.", height=100)
     max_output_tokens = st.slider("Max Output Tokens", 128, 4096, 2048, 128)
     temperature = st.slider("Temperature", 0.0, 1.0, 0.1, 0.1)
     top_p = st.slider("Top P", 0.1, 1.0, 0.95, 0.05)
-
+
     st.divider()
     if st.button("🗑️ Clear all records"):
         # Clear only dynamic state; keep HF_TOKEN
@@ -285,8 +297,9 @@ with st.sidebar:
             del st.session_state[key]
         st.rerun()
 
-# --- Initialize the Hugging Face LLM client (unchanged) ---
-@st.cache_resource
+# --- Initialize the Hugging Face LLM client (updated: MODEL_ID is now a parameter) ---
+# Make sure load_inference_client takes model_id as a parameter, so Streamlit's caching keys on it.
+@st.cache_resource(experimental_allow_widgets=True)  # the cached function must be allowed to use Streamlit widgets
 def load_inference_client(model_id):
     if not os.environ.get("HF_TOKEN"): return None
     try:
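For orientation, a minimal version of a cached Inference API loader (a sketch under stated assumptions: huggingface_hub is installed, and the function name carries a _sketch suffix to mark it as illustrative; note that recent Streamlit releases deprecate experimental_allow_widgets, and keying the cache on model_id alone is usually enough):

import os

import streamlit as st
from huggingface_hub import InferenceClient

@st.cache_resource  # one client per model_id, reused across Streamlit reruns
def load_inference_client_sketch(model_id: str):
    token = os.environ.get("HF_TOKEN")
    if not token:
        return None
    return InferenceClient(model=model_id, token=token)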
@@ -300,10 +313,11 @@ def load_inference_client(model_id):
 inference_client = None
 if os.environ.get("HF_TOKEN"):
     with st.spinner(f"Connecting to the Inference Client: {MODEL_ID}..."):
-        inference_client = load_inference_client(MODEL_ID)
+        # Pass MODEL_ID through
+        inference_client = load_inference_client(MODEL_ID)
 
 if inference_client is None and os.environ.get("HF_TOKEN"):
-    st.warning("Could not connect to the Hugging Face Inference Client.")
+    st.warning(f"Could not connect to the Hugging Face Inference Client **{MODEL_ID}**.")
 elif not os.environ.get("HF_TOKEN"):
     st.error("Please set HF_TOKEN in the environment variables.")
 
@@ -330,27 +344,27 @@ def process_file_to_faiss(uploaded_file):
     else:
         stringio = io.StringIO(uploaded_file.getvalue().decode("utf-8"))
         text_content = stringio.read()
-
+
     if not text_content.strip(): return None, "File is empty"
-
+
     # Split the document content into one Document per line
     events = [line for line in text_content.splitlines() if line.strip()]
     docs = [Document(page_content=e) for e in events]
     if not docs: return None, "No documents created"
-
+
     # Embed and initialize FAISS (IndexFlatIP + L2 normalization)
     embeddings = embedding_model.embed_documents([d.page_content for d in docs])
     embeddings_np = np.array(embeddings).astype("float32")
     faiss.normalize_L2(embeddings_np)
-
+
     dimension = embeddings_np.shape[1]
     index = faiss.IndexFlatIP(dimension)  # use the inner product
     index.add(embeddings_np)
-
+
     doc_ids = [str(uuid.uuid4()) for _ in range(len(docs))]
     docstore = InMemoryDocstore({_id: doc for _id, doc in zip(doc_ids, docs)})
     index_to_docstore_id = {i: _id for i, _id in enumerate(doc_ids)}
-
+
     # Use the cosine distance strategy; IndexFlatIP plus L2 normalization yields cosine similarity
     vector_store = FAISS(embedding_function=embedding_model, index=index, docstore=docstore, index_to_docstore_id=index_to_docstore_id, distance_strategy=DistanceStrategy.COSINE)
     return vector_store, f"{len(docs)} chunks created."
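The key trick in process_file_to_faiss is that an inner-product index over L2-normalized vectors computes cosine similarity: for unit vectors the dot product equals the cosine of the angle between them. A self-contained sketch with toy vectors (dimensions and counts are arbitrary):

import faiss
import numpy as np

vecs = np.random.rand(8, 384).astype("float32")   # toy embeddings (8 docs, dim 384)
faiss.normalize_L2(vecs)                          # in-place row-wise L2 normalization
index = faiss.IndexFlatIP(vecs.shape[1])          # inner product over unit vectors = cosine
index.add(vecs)

query = np.random.rand(1, 384).astype("float32")
faiss.normalize_L2(query)
D, I = index.search(query, k=3)                   # D: cosine similarities, I: row ids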
@@ -364,7 +378,7 @@ def faiss_cosine_search_all(vector_store, query, threshold):
     index = vector_store.index
     D, I = index.search(q_emb, k=index.ntotal)
     selected = []
-
+
     # Cosine similarity = D (IndexFlatIP + L2 normalization)
     for score, idx in zip(D[0], I[0]):
         if idx == -1: continue
@@ -373,7 +387,7 @@ def faiss_cosine_search_all(vector_store, query, threshold):
             doc_id = vector_store.index_to_docstore_id[idx]
             doc = vector_store.docstore.search(doc_id)
             selected.append((doc, score))
-
+
     selected.sort(key=lambda x: x[1], reverse=True)
     return selected

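faiss_cosine_search_all searches with k=index.ntotal, i.e. it scores every stored vector, then keeps hits at or above the sidebar threshold and sorts them by similarity. A compact standalone version of that filter, continuing the toy index and query from the previous sketch (the 0.4 threshold mirrors the slider default):

threshold = 0.4
D, I = index.search(query, k=index.ntotal)        # rank the entire index
hits = [(int(idx), float(score))
        for score, idx in zip(D[0], I[0])
        if idx != -1 and score >= threshold]
hits.sort(key=lambda pair: pair[1], reverse=True) # best match first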
@@ -381,7 +395,7 @@ def faiss_cosine_search_all(vector_store, query, threshold):
 def generate_rag_response_hf_for_log(client, model_id, log_sequence_text, user_prompt, sys_prompt, vector_store, threshold, max_output_tokens, temperature, top_p):
     if client is None: return "ERROR: Client Error", ""
     context_text = ""
-
+
     # RAG retrieval logic
     if vector_store:
         selected = faiss_cosine_search_all(vector_store, log_sequence_text, threshold)
@@ -389,22 +403,16 @@
         # Keep only the 5 most relevant chunks
         retrieved_contents = [f"--- Reference Chunk (sim={score:.3f}) ---\n{doc.page_content}" for i, (doc, score) in enumerate(selected[:5])]
         context_text = "\n".join(retrieved_contents)
-
-    rag_instruction = f"""=== RETRIEVED REFERENCE CONTEXT (Cosine ≥ {threshold}) ===
-{context_text if context_text else 'No relevant reference context found.'}
-=== END REFERENCE CONTEXT ===
-ANALYSIS INSTRUCTION: {user_prompt}
-Based on the provided LOG SEQUENCE and REFERENCE CONTEXT, you must analyze the **entire sequence** to detect any continuous attack chains or evolving threats."""
+
+    rag_instruction = f"""=== RETRIEVED REFERENCE CONTEXT (Cosine ≥ {threshold}) ==={context_text if context_text else 'No relevant reference context found.'}=== END REFERENCE CONTEXT ===ANALYSIS INSTRUCTION: {user_prompt}Based on the provided LOG SEQUENCE and REFERENCE CONTEXT, you must analyze the **entire sequence** to detect any continuous attack chains or evolving threats."""
 
-    log_content_section = f"""=== CURRENT LOG SEQUENCE TO ANALYZE (Window Size: Max {WINDOW_SIZE} logs associated by IP) ===
-{log_sequence_text}
-=== END LOG SEQUENCE ==="""
-
+    log_content_section = f"""=== CURRENT LOG SEQUENCE TO ANALYZE (Window Size: Max {WINDOW_SIZE} logs associated by IP) ==={log_sequence_text}=== END LOG SEQUENCE ==="""
+
     messages = [
         {"role": "system", "content": sys_prompt},
         {"role": "user", "content": f"{rag_instruction}\n\n{log_content_section}"}
     ]
-
+
     try:
         # Call the model via chat_completion
         response_stream = client.chat_completion(
@@ -418,7 +426,7 @@ Based on the provided LOG SEQUENCE and REFERENCE CONTEXT, you must analyze the *
             return response_stream.choices[0].message.content.strip(), context_text
         else: return "Format Error: Model returned empty response or invalid format.", context_text
     except Exception as e:
-        return f"Model Error: {str(e)}", context_text
+        return f"Model Error: {str(e)}", context_text
 
 # =======================================================================
 # === File-handling block (RAG file) - unchanged ===
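The call above uses huggingface_hub's InferenceClient.chat_completion with OpenAI-style messages; despite the response_stream variable name, no stream=True is passed, so a complete chat-completion object comes back and choices[0].message.content holds the text. A minimal sketch (model id, token, and prompt content are placeholders):

from huggingface_hub import InferenceClient

client = InferenceClient(model="meta-llama/Meta-Llama-3-8B-Instruct", token="hf_...")
resp = client.chat_completion(
    messages=[
        {"role": "system", "content": "You are a Senior Security Analyst."},
        {"role": "user", "content": "Summarize this log entry: ..."},
    ],
    max_tokens=2048,
    temperature=0.1,
    top_p=0.95,
)
print(resp.choices[0].message.content.strip())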
@@ -428,7 +436,7 @@ if rag_uploaded_file:
         # Clear the old vector store to save memory
         if 'vector_store' in st.session_state:
             del st.session_state.vector_store
-
+
         with st.spinner(f"Building the RAG reference knowledge base ({rag_uploaded_file.name})..."):
             vs, msg = process_file_to_faiss(rag_uploaded_file)
             if vs:
@@ -446,7 +454,7 @@ elif 'vector_store' in st.session_state:
 # === File-handling block (batch-analysis file - **updated**) ===
 if batch_uploaded_file:
     batch_file_key = f"batch_{batch_uploaded_file.name}_{batch_uploaded_file.size}"
-
+
     if st.session_state.batch_current_file_key != batch_file_key or 'json_data_for_batch' not in st.session_state:
         try:
             # Clear out old data
@@ -454,24 +462,22 @@
                 del st.session_state.json_data_for_batch
             if 'batch_results' in st.session_state:
                 del st.session_state.batch_results
-
             # Use the new unified parsing function
             parsed_data = convert_uploaded_file_to_json_list(batch_uploaded_file)
-
+
             if not parsed_data:
                 raise ValueError(f"{batch_uploaded_file.name} failed to load or is empty.")
-
+
             # Store the processed data
             st.session_state.json_data_for_batch = parsed_data
             st.session_state.batch_current_file_key = batch_file_key
             st.toast(f"File parsed and converted into {len(parsed_data)} log entries.", icon="✅")
-
+
         except Exception as e:
             st.error(f"File parsing error: {e}")
             if 'json_data_for_batch' in st.session_state:
                 del st.session_state.json_data_for_batch
             st.session_state.batch_current_file_key = None  # set to None to avoid a stale key
-
 elif 'json_data_for_batch' in st.session_state:
     # The file was removed; clear the related data
     del st.session_state.json_data_for_batch
@@ -485,67 +491,67 @@ elif 'json_data_for_batch' in st.session_state:
 if st.session_state.execute_batch_analysis and 'json_data_for_batch' in st.session_state and st.session_state.json_data_for_batch is not None:
     st.session_state.execute_batch_analysis = False
     start_time = time.time()
-
+
     # Make sure st.session_state.batch_results is a list here, not None
     if 'batch_results' not in st.session_state or st.session_state.batch_results is None:
         st.session_state.batch_results = []
-
+
     st.session_state.batch_results = []
-
+
     if inference_client is None:
         st.error("The client is not connected; cannot run.")
     else:
         logs_list = st.session_state.json_data_for_batch
-
+
         if logs_list:
             vs = st.session_state.get("vector_store", None)
-
+
             # Convert the log entries into JSON strings for LLM input
             formatted_logs = [json.dumps(log, indent=2, ensure_ascii=False) for log in logs_list]
-
+
             analysis_sequences = []
-
+
             # --- Core change: build log sequences correlated by IP ---
             for i in range(len(formatted_logs)):
                 current_log_entry = logs_list[i]
                 current_log_str = formatted_logs[i]
-
+
                 # Try to extract an IP address from the current log entry (W3C fields first, then common log formats)
                 # Adjust these keys to match your own log format
                 target_ip = current_log_entry.get('c_ip') or current_log_entry.get('c-ip') or current_log_entry.get('remote_addr') or current_log_entry.get('source_ip')
-
+
                 sequence_text = []
                 correlated_logs = []
-
+
                 if target_ip and target_ip != "-":  # assume '-' is the W3C empty value
-
+
                     # Collect past logs with a matching IP, at most WINDOW_SIZE - 1 of them,
                     # checking backwards from i-1 down to 0
                     for j in range(i - 1, -1, -1):
                         prior_log_entry = logs_list[j]
                         prior_ip = prior_log_entry.get('c_ip') or prior_log_entry.get('c-ip') or prior_log_entry.get('remote_addr') or prior_log_entry.get('source_ip')
-
+
                         # Check whether the IP matches
                         if prior_ip == target_ip:
                             # Insert at the front to keep chronological order
                             correlated_logs.insert(0, formatted_logs[j])
-
+
                             # Cap the number of accumulated logs (excluding the current one)
                             if len(correlated_logs) >= WINDOW_SIZE - 1:
                                 break
-
+
                     # 1. Add the correlated (earlier) logs
                     for j, log_str in enumerate(correlated_logs):
                         # log_idx is the original index within logs_list (not exact, but a useful reference)
                         sequence_text.append(f"--- Correlated Log Index (IP:{target_ip}) ---\n{log_str}")
-
+
                 else:
                     # If no IP was found, analyze only the current log (keeps sequence_text non-empty)
                     st.warning(f"Log #{i+1} has no IP field ({target_ip}); analyzing only the current log entry.")
 
                 # 2. Add the current target log
                 sequence_text.append(f"--- TARGET LOG TO ANALYZE (Index {i+1}) ---\n{current_log_str}")
-
+
                 analysis_sequences.append({
                     "sequence_text": "\n\n".join(sequence_text),
                     "target_log_id": i + 1,
@@ -557,11 +563,11 @@ if st.session_state.execute_batch_analysis and 'json_data_for_batch' in st.session_state and st.session_state.json_data_for_batch is not None:
             st.header(f"⚡ Batch analysis running (IP correlation window $N={WINDOW_SIZE}$)...")
             progress_bar = st.progress(0, text=f"Preparing to process {total_sequences} sequences...")
             results_container = st.container()
-
+
             for i, seq_data in enumerate(analysis_sequences):
                 log_id = seq_data["target_log_id"]
                 progress_bar.progress((i + 1) / total_sequences, text=f"Processing {i + 1}/{total_sequences} (Log #{log_id})...")
-
+
                 try:
                     response, retrieved_ctx = generate_rag_response_hf_for_log(
                         client=inference_client,
@@ -575,7 +581,7 @@ if st.session_state.execute_batch_analysis and 'json_data_for_batch' in st.session_state and st.session_state.json_data_for_batch is not None:
                         temperature=temperature,
                         top_p=top_p
                     )
-
+
                     item = {
                         "log_id": log_id,
                         "log_content": seq_data["original_log_entry"],
@@ -583,19 +589,19 @@ if st.session_state.execute_batch_analysis and 'json_data_for_batch' in st.session_state and st.session_state.json_data_for_batch is not None:
                         "analysis_result": response,
                         "context": retrieved_ctx
                     }
-
+
                     st.session_state.batch_results.append(item)
-
+
                     with results_container:
                         st.subheader(f"Log/Alert #{item['log_id']} (IP Correlated Analysis)")
-
+
                         with st.expander("Sequence content (JSON format)"):
                             st.code(item["sequence_analyzed"], language='json')
-
+
                         # Render the LLM analysis result
                         is_high = any(x in response.lower() for x in ['high-risk detected'])
                         if is_high:
-                            st.error(item['analysis_result'])
+                            st.error(item['analysis_result'])
                         else:
                             # Add a Medium check
                             is_medium = any(x in response.lower() for x in ['medium-risk detected'])
@@ -603,14 +609,14 @@ if st.session_state.execute_batch_analysis and 'json_data_for_batch' in st.session_state and st.session_state.json_data_for_batch is not None:
                                 st.warning(item['analysis_result'])
                             else:
                                 st.info(item['analysis_result'])
-
+
                         if item['context']:
                             with st.expander("Referenced RAG chunks"): st.code(item['context'])
                         st.markdown("---")
-
+
                 except Exception as e:
                     st.error(f"Error Log {log_id}: {e}")
-
+
             end_time = time.time()
             progress_bar.empty()
             st.success(f"Done! Took {end_time - start_time:.2f} seconds.")
@@ -620,40 +626,40 @@ if st.session_state.execute_batch_analysis and 'json_data_for_batch' in st.session_state and st.session_state.json_data_for_batch is not None:
 # === Display results (history) - unchanged, but with hardened session-state checks ===
 if st.session_state.get("batch_results") and isinstance(st.session_state.batch_results, list) and st.session_state.batch_results and not st.session_state.execute_batch_analysis:
     st.header("⚡ Historical analysis results")
-
+
     high_risk_data = []
     high_risk_items = []
-
+
     for item in st.session_state.batch_results:
         # Check whether analysis_result contains 'High-risk detected' (case-insensitive)
         is_high_risk = 'high-risk detected!' in item['analysis_result'].lower()
-
+
         if is_high_risk:
             high_risk_items.append(item)
-
+
             # --- Prepare the data for the CSV report ---
             log_content_str = json.dumps(item["log_content"], ensure_ascii=False)
             analysis_result_clean = item['analysis_result'].replace('\n', ' | ')
-
+
             high_risk_data.append({
                 "Log_ID": item['log_id'],
                 "Risk_Level": "HIGH_RISK",
                 "Log_Content": log_content_str,
                 "AI_Analysis_Result": analysis_result_clean
             })
-
+
     # Show the download button for the high-risk report (switched to CSV logic)
     if high_risk_items:
         st.success(f"✅ Detected {len(high_risk_items)} high-risk logs/alerts.")
-
+
         # --- Build the CSV content ---
         csv_output = io.StringIO()
         csv_output.write("Log_ID,Risk_Level,Log_Content,AI_Analysis_Result\n")
-
+
         def escape_csv(value):
             # Double every embedded double quote, then wrap the value in double quotes
             return f'"{str(value).replace('"', '""')}"'
-
+
         for row in high_risk_data:
             line = ",".join([
                 str(row["Log_ID"]),
@@ -662,15 +668,15 @@ if st.session_state.get("batch_results") and isinstance(st.session_state.batch_r
                 escape_csv(row["AI_Analysis_Result"])
             ]) + "\n"
             csv_output.write(line)
-
+
         csv_content = csv_output.getvalue()
-
+
         # Show the CSV report download button
         st.download_button(
-            "📥 Download the **high-risk** analysis report (.csv)",
-            csv_content,
-            "high_risk_report.csv",
-            "text/csv"
+            "📥 Download the **high-risk** analysis report (.csv)",
+            csv_content,
+            "high_risk_report.csv",
+            "text/csv"
         )
     else:
         st.info("👍 No logs/alerts flagged as High-risk detected were found.")