File size: 2,585 Bytes
2483751
 
 
 
 
 
20f8090
6e0c450
 
20f8090
6e0c450
 
 
 
 
 
 
ed2cbf7
2483751
6e0c450
20f8090
6e0c450
 
 
 
 
 
20f8090
6e0c450
 
 
 
 
 
 
20f8090
6e0c450
20f8090
6e0c450
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20f8090
6e0c450
 
 
 
 
 
 
 
20f8090
 
6e0c450
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
from huggingface_hub import HfApi
import os
import shutil

# Purge the locally cached copy of the model so the latest revision is
# re-downloaded on startup.  shutil.rmtree (instead of `os.system("rm -rf ...")`)
# avoids shelling out, expands "~" explicitly, and tolerates a missing
# directory instead of silently ignoring the shell's exit status.
# NOTE(review): doing this on every launch forces a full re-download of the
# model weights; consider removing once the model revision is stable.
shutil.rmtree(
    os.path.expanduser("~/.cache/huggingface/hub/models--dsuyu1--rascal"),
    ignore_errors=True,
)

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load model and tokenizer
model_name = "dsuyu1/rascal"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # NOTE(review): ignored when load_in_8bit=True — confirm intent
    device_map="auto",
    load_in_8bit=True,          # 8-bit quantization; requires the bitsandbytes package
    use_auth_token=False        # NOTE(review): deprecated kwarg in recent transformers (use token=...)
)

def generate_playbook(incident_type, target_asset, detection_source, 
                     initial_vector, severity, tactics):
    """Generate an incident response playbook from incident details.

    Builds an instruction-style prompt from the six incident fields, runs
    the module-level ``model``/``tokenizer`` to generate up to 512 new
    tokens, and returns only the text that follows the "### Response:"
    marker.

    Args:
        incident_type: Free-text incident category (e.g. "Ransomware").
        target_asset: Affected system or asset description.
        detection_source: How the incident was detected (e.g. "EDR Alert").
        initial_vector: Suspected initial access vector.
        severity: Severity label (e.g. "Low" ... "Critical").
        tactics: Free-text MITRE-style tactics & techniques summary.

    Returns:
        The generated playbook text as a string.
    """
    
    prompt = f"""### Instruction:
Generate an incident response playbook for the following incident.

### Input:
Incident Type: {incident_type}
Target Asset: {target_asset}
Detection Source: {detection_source}
Initial Vector: {initial_vector}
Severity: {severity}
Tactics & Techniques: {tactics}

### Response:
"""
    
    # Tokenize and move the tensors onto the same device as the model
    # (device_map="auto" may have placed it on GPU).
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    
    # Inference only — disable autograd bookkeeping.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=512,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )
    
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # The decoded text echoes the prompt; keep only what follows the last
    # "### Response:" marker.  Using [-1] instead of [1] avoids an
    # IndexError if the marker is ever absent from the decoded output
    # (e.g. after tokenizer round-trip quirks) while behaving identically
    # when it is present once.
    return response.split("### Response:")[-1].strip()

# Assemble the Gradio UI: six incident-detail fields feeding the playbook
# generator, with one multi-line textbox for the result.
incident_fields = [
    gr.Textbox(label="Incident Type", placeholder="e.g., Ransomware"),
    gr.Textbox(label="Target Asset", placeholder="e.g., Windows AD Server"),
    gr.Textbox(label="Detection Source", placeholder="e.g., EDR Alert"),
    gr.Textbox(label="Initial Vector", placeholder="e.g., Email Phishing"),
    gr.Dropdown(["Low", "Medium", "High", "Critical"], label="Severity"),
    gr.Textbox(label="Tactics & Techniques", 
              placeholder="e.g., Initial Access: Phishing, Execution: User Execution"),
]

playbook_output = gr.Textbox(label="Generated Playbook", lines=15)

# One pre-filled example row matching the input order above.
example_rows = [
    ["Ransomware", "Windows AD Server", "EDR Alert - Encryption", 
     "Email Phishing", "High", 
     "Initial Access: Phishing, Execution: User Execution, Impact: Data Encrypted"],
]

demo = gr.Interface(
    fn=generate_playbook,
    inputs=incident_fields,
    outputs=playbook_output,
    title="🚨 RASCAL - Incident Response Playbook Generator",
    description="Generate detailed incident response playbooks based on incident details.",
    examples=example_rows,
)

demo.launch()