# watchtowerai-log-analyzer / streamlit_app.py
# Tuathe's picture
# Update streamlit_app.py
# 4ce11fa verified
import streamlit as st
import os
import hashlib
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch
from upload_to_s3 import upload_file_to_s3
from notify_slack import send_slack_alert
# Load model
# Fine-tuned FLAN seq2seq checkpoint pulled from the Hugging Face Hub at
# import time. NOTE(review): this loads on every cold start of the Space —
# confirm startup latency is acceptable.
model_path = "Tuathe/codementor-flan-watchtower"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
# Secrets from Hugging Face Spaces
# Read eagerly via st.secrets; a missing key will raise here and stop the app.
aws_key = st.secrets["AWS_ACCESS_KEY_ID"]
aws_secret = st.secrets["AWS_SECRET_ACCESS_KEY"]
aws_region = st.secrets["AWS_REGION"]
bucket_name = st.secrets["S3_BUCKET_NAME"]
# NOTE(review): slack_url is never used in this file — presumably
# notify_slack reads the webhook itself; verify before removing.
slack_url = st.secrets["SLACK_WEBHOOK_URL"]
# Fallback classifier
def fallback_label(log):
    """Keyword-based severity heuristic used when the model output is invalid.

    Matches the log text case-insensitively against an ordered rule table
    and returns the first matching label; defaults to "INFO".
    """
    text = log.lower()
    # First match wins: "login ... failed" is SECURITY, not ERROR, even
    # though it also contains "failed".
    rules = (
        (lambda t: "login" in t and "failed" in t, "SECURITY"),
        (lambda t: "error" in t or "failed" in t, "ERROR"),
        (lambda t: "timeout" in t or "not responding" in t, "CRITICAL"),
        (lambda t: "cpu" in t or "memory" in t, "WARNING"),
    )
    for predicate, label in rules:
        if predicate(text):
            return label
    return "INFO"
# Classifier
def classify_log(log):
    """Classify a log line with the fine-tuned model, falling back to keywords.

    Returns a ``(label, source)`` tuple where ``source`` is ``"Model"`` when
    the generated text is one of the known severity labels, otherwise
    ``"Fallback"`` with the label from :func:`fallback_label`.
    """
    prompt = f"""Classify this log message as one of: INFO, WARNING, ERROR, CRITICAL, SECURITY.
Log: {log}
Label:"""
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True)
    with torch.no_grad():  # inference only — no gradients needed
        # max_new_tokens=3 assumes each label fits in <=3 tokens for this
        # tokenizer — TODO confirm (e.g. "SECURITY" tokenization).
        outputs = model.generate(**inputs, max_new_tokens=3)
    prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).strip().upper()
    valid_labels = {"INFO", "WARNING", "ERROR", "CRITICAL", "SECURITY"}
    # Fix: original evaluated `prediction in valid_labels` twice inside one
    # return expression; compute validity once and branch.
    if prediction in valid_labels:
        return prediction, "Model"
    return fallback_label(log), "Fallback"
# Runbook generator
def generate_runbook(log_text, label):
    """Generate a markdown runbook for a log line and write it under runbooks/.

    Returns ``(path, markdown_text)``. The file name embeds a short MD5
    digest of the log, so identical logs map to the same runbook file.
    """
    prompt = f"""
You are an expert SRE. Create a step-by-step runbook in markdown format for the following {label} log.
Log message: "{log_text}"
Include:
1. Summary
2. Possible causes
3. Troubleshooting steps
4. Mitigation actions
5. Responsible team or escalation
Only output valid markdown text.
"""
    encoded = tokenizer(prompt, return_tensors="pt", truncation=True)
    with torch.no_grad():  # inference only
        generated = model.generate(**encoded, max_new_tokens=300)
    markdown = tokenizer.decode(generated[0], skip_special_tokens=True).strip()
    # MD5 here is a content fingerprint for the file name, not a security use.
    digest = hashlib.md5(log_text.encode()).hexdigest()[:8]
    os.makedirs("runbooks", exist_ok=True)
    out_path = f"runbooks/runbook_{digest}.md"
    with open(out_path, "w", encoding="utf-8") as handle:
        handle.write(markdown)
    return out_path, markdown
# Streamlit UI
# Page shell: one text input and/or one uploaded .log/.txt file feed the
# classify + runbook pipeline when the button is pressed.
st.set_page_config(page_title="WatchTowerAI", layout="centered")
st.title(" WatchTowerAI - Log Classification + Runbook Generator")
log_input = st.text_input(" Enter a log message")
uploaded_file = st.file_uploader(" Or upload a .log file", type=["txt", "log"])
if st.button(" Classify + Generate Runbook"):
    # Collect candidate log lines from both inputs; blank lines are dropped.
    logs = []
    if log_input:
        logs.append(log_input.strip())
    if uploaded_file:
        # Assumes the uploaded file is UTF-8 text — TODO confirm; a different
        # encoding would raise UnicodeDecodeError here.
        content = uploaded_file.read().decode("utf-8")
        logs.extend([line.strip() for line in content.splitlines() if line.strip()])
    if not logs:
        st.warning("Please enter a log or upload a file.")
    else:
        for log in logs:
            with st.spinner(f"Processing: {log}"):
                label, source = classify_log(log)
                st.markdown(f"**Classification:** `{label}` via `{source}`")
                st.markdown(f"**Log:** {log}")
                # Runbooks, S3 upload, and Slack alerts only for
                # high-severity labels.
                if label in {"CRITICAL", "SECURITY"}:
                    runbook_path, runbook_md = generate_runbook(log, label)
                    # Normalize Windows path separators so the S3 key uses "/".
                    s3_path = runbook_path.replace("\\", "/")
                    success = upload_file_to_s3(runbook_path, bucket_name, s3_path, aws_key, aws_secret, aws_region)
                    if success:
                        # Virtual-hosted-style S3 URL; assumes the object is
                        # publicly readable — TODO confirm bucket policy.
                        s3_url = f"https://{bucket_name}.s3.{aws_region}.amazonaws.com/{s3_path}"
                        send_slack_alert(log, s3_url)
                        st.success(" Slack alert sent.")
                        st.markdown(f"[ View Runbook on S3]({s3_url})")
                        st.download_button(" Download Runbook", runbook_md, file_name=os.path.basename(runbook_path))
                    else:
                        st.error(" Failed to upload runbook to S3.")
                else:
                    st.info(" No runbook generated for this log.")