File size: 2,230 Bytes
e908c46
7df9b2b
 
 
 
e908c46
 
 
 
7df9b2b
e908c46
7df9b2b
 
 
 
 
 
 
 
 
 
 
e908c46
7df9b2b
 
 
 
 
 
 
 
 
e908c46
7df9b2b
 
e908c46
7df9b2b
 
 
 
e908c46
7df9b2b
e908c46
7df9b2b
e908c46
7df9b2b
e908c46
7df9b2b
 
e908c46
7df9b2b
e908c46
 
 
 
7df9b2b
 
 
 
 
 
 
 
 
e908c46
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
import gradio as gr
from huggingface_hub import InferenceClient, login
from transformers import TextStreamer
import torch
from unsloth import FastLanguageModel

"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
login()

# Load the fine-tuned Llama-3.1-8B threat-intelligence model and its
# tokenizer via Unsloth. max_seq_length caps the tokenized context length;
# device_map="auto" lets the loader place weights on available GPU(s)/CPU.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "aayushpuri01/Llama-3.1-8B-Threat-Intelligent-v2",
    max_seq_length = 2048,
    device_map = "auto"
)


# Switch the model into Unsloth's optimized inference mode (disables
# training-only machinery; required before calling model.generate).
FastLanguageModel.for_inference(model)

# Alpaca-style prompt template: the first {} receives the user's scenario
# (Input section); the second {} is left empty so the model continues
# generation from the "### Output:" header.
prompt_style = """You are a cybersecurity genius and expert threat hunter and analyst who can answer about any level of cybersecurity scenarios.
Based on the given Instruction and Input, generate appropriate Output.

### Instruction:
Please analyse the given scenario, provide diagnosis of the situation in between <diagnosis></diagnosis>. Write Solutions in between <solution></solution>.

### Input:
{}

### Output:
{}
"""

def _extract_output(response):
    """Return the text following the '### Output:' marker in *response*.

    Falls back to the whole response (stripped) when the marker is absent,
    instead of slicing from a bogus offset as `find() == -1` would produce.
    """
    marker = "### Output:"
    idx = response.find(marker)
    if idx == -1:
        # Marker missing (e.g. decode anomaly or truncated prompt echo):
        # return everything rather than a garbage slice.
        return response.strip()
    return response[idx + len(marker):].strip()


def generate_response(scenario):
    """Generate a threat analysis for a cybersecurity scenario.

    Args:
        scenario: Free-text description of the threat situation; it is
            substituted into the Input section of `prompt_style`.

    Returns:
        The model's generated Output section (diagnosis + solutions) as a
        plain string.
    """
    formatted_prompt = prompt_style.format(scenario, "")

    # Tokenize and move tensors to the same device family the model was
    # placed on (device_map="auto" above).
    device = "cuda" if torch.cuda.is_available() else "cpu"
    inputs = tokenizer(
        [formatted_prompt],
        return_tensors="pt",
    ).to(device)

    # Streams tokens to stdout as they are produced (server-side visibility
    # only; the UI receives the final string returned below).
    text_streamer = TextStreamer(tokenizer)

    outputs = model.generate(**inputs, streamer=text_streamer, max_new_tokens=1028)

    # skip_special_tokens drops BOS/EOS markers from the decoded text.
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    return _extract_output(response)
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    fn = generate_response,
    inputs=gr.Textbox(
        label="Cyberthreat scenario",
        placeholder="Enter a scene (e.g, 'Cryptowall 2.0 began using the Tor anonymity network...')",
        lines=5),
    outputs = gr.Markdown(label="Analysis and Solutions"),
    title = "Threat Intelligence with Llama-3.1-8B",
    description = "Enter a cybersecurity scenario to get a detailed analysis and solutions from a fine tuned LLM model",
    theme = "huggingface"
)


if __name__ == "__main__":
    demo.launch()