import streamlit as st from transformers import pipeline, AutoTokenizer, AutoModelForTokenClassification import torch import pypdf import pandas as pd import torch._dynamo import re # --- VIZUALIZACE --- from streamlit_agraph import agraph, Node, Edge, Config # --- GLIREL IMPORT --- from glirel import GLiREL # Potlačení chyb pro Windows torch._dynamo.config.suppress_errors = True st.set_page_config(page_title="CTI Intelligence Suite", page_icon="🛡️", layout="wide") # ========================================== # 1. NAČÍTÁNÍ MODELŮ # ========================================== @st.cache_resource def load_ner_model(): """ Načte SecureModernBERT pro entity. """ device = 0 if torch.cuda.is_available() else -1 model_name = "attack-vector/SecureModernBERT-NER" tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForTokenClassification.from_pretrained(model_name) pipe = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple", device=device) return pipe @st.cache_resource def load_glirel_model(): """ Načte GLiREL pro vztahy. """ device = "cuda" if torch.cuda.is_available() else "cpu" try: model = GLiREL.from_pretrained("jackboyla/glirel-large-v0") model.to(device) model.eval() return model except Exception as e: st.error(f"Nepodařilo se stáhnout model GLiREL: {e}") return None # ========================================== # 2. LOGIKA ZPRACOVÁNÍ TEXTU # ========================================== def extract_text_from_pdf(uploaded_file): try: pdf_reader = pypdf.PdfReader(uploaded_file) text = "" for page in pdf_reader.pages: t = page.extract_text() if t: text += t + "\n\n" return text except Exception as e: st.error(f"Error reading PDF: {e}") return "" def analyze_ner_batched(pipeline, text, batch_size=8): """ Projde CELÝ text po kusech (nezkracuje ho), aby našel všechny entity. """ chunk_size = 4000 results = [] for i in range(0, len(text), chunk_size): chunk = text[i : i + chunk_size] if not chunk.strip(): continue # Analýza kusu chunk_results = pipeline(chunk) # Posun indexů for entity in chunk_results: entity['start'] += i entity['end'] += i results.append(entity) return results def merge_close_entities(results, original_text, max_char_distance=2): if not results: return [] merged = [] current = results[0].copy() for next_entity in results[1:]: gap_start = current['end'] gap_end = next_entity['start'] if gap_start > gap_end: gap_start = gap_end gap_text = original_text[gap_start:gap_end] if (current['entity_group'] == next_entity['entity_group'] and len(gap_text) <= max_char_distance and "." not in gap_text and "," not in gap_text): current['end'] = next_entity['end'] current['score'] = max(current['score'], next_entity['score']) else: merged.append(current) current = next_entity.copy() merged.append(current) return merged # ========================================== # 3. FILTRACE VĚT (SMART FILTER) # ========================================== def filter_text_smartly(text, ner_results): """ Vezme celý text a výsledky NERu. Vrátí pouze ty věty, které obsahují alespoň jednu entitu. """ sentence_spans = [] for match in re.finditer(r'[^.!?]+[.!?]', text): sentence_spans.append((match.start(), match.end(), match.group())) if not sentence_spans and text: sentence_spans.append((0, len(text), text)) relevant_sentences = [] for s_start, s_end, s_text in sentence_spans: has_entity = False for ent in ner_results: if ent['start'] >= s_start and ent['end'] <= s_end: has_entity = True break if has_entity: relevant_sentences.append(s_text.strip()) clean_text = " ".join(relevant_sentences) # Pojistka: Limit pro GLiREL return clean_text[:3000] # ========================================== # 4. GLIREL LOGIKA (CLEAN & ROBUST) # ========================================== def align_and_predict_relations(glirel_model, text, ner_results, threshold=0.4): if not glirel_model: return [] tokens = text.split() token_spans = [] curr = 0 for t in tokens: start = text.find(t, curr) if start == -1: start = curr end = start + len(t) token_spans.append((start, end)) curr = end glirel_ner = [] token_to_full_entity = {} for ent in ner_results: c_start, c_end = ent['start'], ent['end'] full_name = text[c_start:c_end].strip() t_start, t_end = -1, -1 for i, (ts, te) in enumerate(token_spans): if ts >= c_start and t_start == -1: t_start = i if te <= c_end: t_end = i if t_start != -1 and t_end != -1: glirel_ner.append([t_start, t_end, ent['entity_group'], full_name]) for t_idx in range(t_start, t_end + 1): token_to_full_entity[t_idx] = full_name if not glirel_ner: return [] labels = [ "uses", "targets", "communicates_with", "drops", "located_at", "attributed_to", "exploits", "compromises", "downloads", "resolves_to", "variant_of" ] try: relations = glirel_model.predict_relations( tokens, labels, threshold=threshold, ner=glirel_ner, top_k=3 ) except Exception as e: st.error(f"GLiREL Error: {e}") return [] # Pomocná funkce na čištění textu (odstraní [], Unknown, prázdné stringy) def clean_entity_text(raw_val): if raw_val is None: return None if isinstance(raw_val, list): if not raw_val: return None raw_val = " ".join([str(x) for x in raw_val]) text_val = str(raw_val).strip() if text_val in ["", "[]", "['']", "Unknown", "None"]: return None return text_val best_relations = {} for rel in relations: head_idx = rel['head_pos'][0] tail_idx = rel['tail_pos'][0] raw_head = token_to_full_entity.get(head_idx, rel.get('head_text')) raw_tail = token_to_full_entity.get(tail_idx, rel.get('tail_text')) head = clean_entity_text(raw_head) tail = clean_entity_text(raw_tail) if head and tail and head != tail: current_score = rel['score'] relation_label = rel['label'] pair = sorted([head, tail]) unique_key = (pair[0], pair[1], relation_label) if unique_key not in best_relations: best_relations[unique_key] = { "source": head, "target": tail, "relation": relation_label, "confidence": current_score } else: if current_score > best_relations[unique_key]['confidence']: best_relations[unique_key] = { "source": head, "target": tail, "relation": relation_label, "confidence": current_score } return list(best_relations.values()) # ========================================== # 5. UI APLIKACE # ========================================== with st.sidebar: st.title("🧭 Navigation") page = st.radio("Go to:", ["Analyzer", "Visualizations"]) st.markdown("---") if page == "Analyzer": st.subheader("⚙️ Settings") confidence_threshold = st.slider( "Relation Confidence (%)", min_value=0, max_value=100, value=59, help="Zobrazí jen vztahy, kde si je model jistý na více než X %." ) if page == "Analyzer": st.title("CTI Analyzer") with st.spinner("Loading models..."): ner_pipe = load_ner_model() col1, col2 = st.columns([1, 2]) with col1: st.subheader("📂 Input") uploaded_file = st.file_uploader("Upload PDF Report", type=["pdf"]) with col2: # ZMĚNA ZDE: NOVÝ DEFAULT TEXT default_text = r"""Lazarus Group, often linked to the North Korean government, has been observed targeting the financial sector and cryptocurrency exchanges in Japan. The threat actor uses AppleJeus malware to infiltrate networks. The malware was found located at C:\Windows\Temp\update.exe. Security researchers attributed this campaign to Hidden Cobra. In a recent incident, Lazarus Group also targeted Sony Pictures.""" if uploaded_file: with st.spinner("Reading PDF..."): txt = extract_text_from_pdf(uploaded_file) st.info(f"Loaded {len(txt)} characters from PDF.") st.text_area("Preview", value=txt[:500] + "...", height=150, disabled=True) else: txt = st.text_area("Or Paste Text Here", value=default_text, height=150) st.divider() if st.button("Analyze", type="primary"): if not txt.strip(): st.warning("Please enter some text or upload a PDF.") else: with st.status("Running analysis...") as status: # 1. NER - SCAN CELÉHO DOKUMENTU status.write("1. Scanning full document for Entities...") full_raw_ents = analyze_ner_batched(ner_pipe, txt) # 2. FILTRACE TEXTU status.write("2. Selecting key sentences...") optimized_text = filter_text_smartly(txt, full_raw_ents) if not optimized_text: optimized_text = txt[:2000] st.info(f"Text optimized from {len(txt)} to {len(optimized_text)} chars.") # 3. RE-ALIGNMENT status.write("3. Re-aligning entities...") final_raw_ents = analyze_ner_batched(ner_pipe, optimized_text) final_ents = merge_close_entities(final_raw_ents, optimized_text) # 4. GLIREL status.write("4. Extracting Relations...") glirel = load_glirel_model() threshold_float = confidence_threshold / 100.0 rels = align_and_predict_relations(glirel, optimized_text, final_ents, threshold=threshold_float) status.update(label="Done!", state="complete") # Uložení výsledků df_ents = pd.DataFrame([{ "Entity": optimized_text[e['start']:e['end']], "Type": e['entity_group'], "Confidence": e['score'] } for e in final_ents]) # Konverze NER dat na JSON string pro download ner_json = df_ents.to_json(orient="records", indent=4) # Čistění duplicit ve vztazích df_rels = pd.DataFrame(rels) if not df_rels.empty: df_rels = df_rels.drop_duplicates(subset=["source", "target", "relation"]) st.session_state['data'] = df_ents st.session_state['rels'] = df_rels c1, c2 = st.columns(2) with c1: st.subheader(f"Entities ({len(df_ents)})") st.dataframe(df_ents, use_container_width=True) # --- TLAČÍTKO PRO DOWNLOAD JSON --- st.download_button( label="📥 Download NER JSON", data=ner_json, file_name="ner_entities.json", mime="application/json" ) # ---------------------------------- with c2: st.subheader(f"Relations ({len(df_rels)})") st.dataframe(st.session_state['rels'], use_container_width=True) elif page == "Visualizations": st.title("Knowledge Graph") if 'data' in st.session_state and not st.session_state['data'].empty: nodes, edges = [], [] added = set() type_colors = {"MALWARE": "#ff4b4b", "ACTOR": "#ffa421", "TOOL": "#1c83e1", "IP": "#21c354"} for _, row in st.session_state['data'].iterrows(): ent = row['Entity'] if ent not in added: color = type_colors.get(row['Type'], "#888") nodes.append(Node(id=ent, label=ent, size=20, color=color)) added.add(ent) if 'rels' in st.session_state and not st.session_state['rels'].empty: for _, row in st.session_state['rels'].iterrows(): if row['source'] not in added: nodes.append(Node(id=row['source'], label=row['source'], size=20, color="#888")) added.add(row['source']) if row['target'] not in added: nodes.append(Node(id=row['target'], label=row['target'], size=20, color="#888")) added.add(row['target']) edges.append(Edge( source=row['source'], target=row['target'], label=row['relation'], color="red", arrows="to" )) config = Config(width="100%", height=600, directed=True, physics=True) agraph(nodes=nodes, edges=edges, config=config) else: st.warning("No data found. Please run analysis first.")