Hmehdi515 committed on
Commit
a61db6a
·
verified ·
1 Parent(s): bd7bfb6

Upload 5 files

Browse files
Files changed (5) hide show
  1. demo.py +161 -0
  2. evaluate.py +203 -0
  3. split_dataset.py +80 -0
  4. test.py +140 -0
  5. train.py +196 -0
demo.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ from unsloth import FastLanguageModel
3
+ from transformers import TextStreamer
4
+ import torch
5
+ import random
6
+ import logging
7
+
8
def setup_logging():
    """Configure root logging (INFO, timestamped) and return this module's logger."""
    log_format = '%(asctime)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_format)
    return logging.getLogger(__name__)
14
+
15
def print_separator(title="", char="=", length=80):
    """Print a horizontal rule of `char`, optionally with a centered title."""
    if not title:
        print(char * length)
        return
    # Pad symmetrically around " title " so the rule stays ~`length` wide.
    pad = (length - len(title) - 2) // 2
    print(f"{char * pad} {title} {char * pad}")
22
+
23
def format_flow_prompt(row):
    """Render one flow record as a LLaMA-3 chat prompt (user turn + open assistant turn)."""
    src_ip, src_port = row['IPV4_SRC_ADDR'], row['L4_SRC_PORT']
    dst_ip, dst_port = row['IPV4_DST_ADDR'], row['L4_DST_PORT']
    flow_text = f"""Network Flow Description:
Source: {src_ip} (Port: {src_port})
Destination: {dst_ip} (Port: {dst_port})
Protocol Information:
- Protocol ID: {row['PROTOCOL']}
- Layer 7 Protocol: {row['L7_PROTO']}
- TCP Flags: {row['TCP_FLAGS']}
Traffic Metrics:
- Bytes: {row['IN_BYTES']} inbound, {row['OUT_BYTES']} outbound
- Packets: {row['IN_PKTS']} inbound, {row['OUT_PKTS']} outbound
- Duration: {row['FLOW_DURATION_MILLISECONDS']} milliseconds"""

    # Wrap the description in the LLaMA-3 special-token chat template.
    return (
        "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n"
        "Analyze this network flow for potential security threats:\n\n"
        f"{flow_text}<|eot_id|><|start_header_id|>assistant<|end_header_id|>"
    )
42
+
43
def analyze_single_flow(model_path="cybersec_model_output/checkpoint-4329",
                       test_file="data/test.csv",
                       index=None,
                       attack_type=None):
    """Analyze a single network flow and show the model's complete response.

    Args:
        model_path: Directory of the fine-tuned checkpoint, loaded via unsloth
            in 4-bit mode.
        test_file: CSV of flows; must contain the feature columns used by
            format_flow_prompt plus an 'Attack' label column.
        index: Optional integer position (iloc) of the row to analyze.
        attack_type: Optional label; a random row of that class is chosen.
            Takes precedence over `index`; if both are None a random row is used.

    Raises:
        ValueError: If `attack_type` matches no rows in the test set.

    Side effects: streams the generation to stdout and writes model_output.txt.
    NOTE(review): moves inputs to "cuda" unconditionally — assumes a GPU is
    available; confirm before running on CPU-only hosts.
    """
    logger = setup_logging()

    print_separator("LOADING DATA AND MODEL", "=")
    # Load test data
    logger.info(f"Loading test data from {test_file}")
    test_df = pd.read_csv(test_file)

    # Select sample based on criteria (priority: attack_type > index > random)
    if attack_type:
        attack_samples = test_df[test_df['Attack'].str.lower() == attack_type.lower()]
        if len(attack_samples) == 0:
            raise ValueError(f"No samples found for attack type: {attack_type}")
        sample = attack_samples.iloc[random.randint(0, len(attack_samples)-1)]
    elif index is not None:
        sample = test_df.iloc[index]
    else:
        sample = test_df.iloc[random.randint(0, len(test_df)-1)]

    # Load model (4-bit quantized; sequence budget matches training config)
    logger.info(f"Loading model from {model_path}")
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_path,
        max_seq_length=2048,
        load_in_4bit=True,
    )

    # Set up tokenizer; unsloth inference mode enables the fast decoding path
    tokenizer.pad_token = tokenizer.eos_token
    tokenizer.padding_side = "right"
    FastLanguageModel.for_inference(model)

    print_separator("SAMPLE INFORMATION", "=")
    logger.info(f"Selected flow index: {sample.name}")
    logger.info(f"True label: {sample['Attack']}")

    # Prepare input (truncated to the model's 2048-token window)
    prompt = format_flow_prompt(sample)
    inputs = tokenizer(
        prompt,
        return_tensors="pt",
        truncation=True,
        max_length=2048
    ).to("cuda")

    # Set up streamer for real-time output
    streamer = TextStreamer(tokenizer)

    with open("model_output.txt", "w") as f:
        # Write separators and metadata
        f.write("=" * 80 + "\n")
        f.write(f"NETWORK FLOW ANALYSIS\n")
        f.write("=" * 80 + "\n\n")

        f.write("-" * 80 + "\n")
        f.write("METADATA\n")
        f.write("-" * 80 + "\n")
        f.write(f"Flow Index: {sample.name}\n")
        f.write(f"True Label: {sample['Attack']}\n\n")

        f.write("-" * 80 + "\n")
        f.write("INPUT PROMPT\n")
        f.write("-" * 80 + "\n")
        f.write(f"{prompt}\n\n")

        print_separator("MODEL OUTPUT", "=")
        logger.info("Generating analysis...")

        # Generate and capture output (streams to stdout while the file is open)
        outputs = model.generate(
            **inputs,
            max_new_tokens=256,
            streamer=streamer,
            use_cache=True
        )

        # Write the complete output
        f.write("-" * 80 + "\n")
        f.write("COMPLETE OUTPUT (including special tokens)\n")
        f.write("-" * 80 + "\n")
        full_output = tokenizer.decode(outputs[0], skip_special_tokens=False)
        f.write(f"{full_output}\n\n")

        # Write cleaned output
        f.write("-" * 80 + "\n")
        f.write("CLEANED OUTPUT (without special tokens)\n")
        f.write("-" * 80 + "\n")
        cleaned_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
        f.write(cleaned_output)

        f.write("\n" + "=" * 80 + "\n")

    print_separator()
    logger.info("Output saved to model_output.txt")
    print_separator()
142
+
143
def main():
    """Entry point: run one single-flow analysis end to end."""
    print_separator("STARTING ANALYSIS", "=")

    # Pick exactly one of the sampling strategies below:
    analyze_single_flow()                        # 1. random sample
    # analyze_single_flow(attack_type="ddos")    # 2. specific attack type
    # analyze_single_flow(index=42)              # 3. specific row index

    print_separator("ANALYSIS COMPLETE", "=")


if __name__ == "__main__":
    main()
evaluate.py ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import pandas as pd
3
+ from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
4
+ import numpy as np
5
+ import re
6
+ from typing import Dict, List, Tuple
7
+ import logging
8
+ from collections import defaultdict
9
+
10
def setup_logging():
    """Configure root logging (INFO, timestamped) and return this module's logger."""
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(message)s',
        level=logging.INFO,
    )
    return logging.getLogger(__name__)
16
+
17
class ModelEvaluator:
    """Scores the LLM's free-text flow analyses against ground-truth labels.

    Reads entries produced by test.py (flow_id, analysis, ...), maps each
    analysis back to a canonical attack label, and computes accuracy,
    per-class metrics, a confusion matrix, and an error breakdown.
    """

    def __init__(self):
        self.logger = setup_logging()
        # Canonical label -> phrases that signal it in the model's free text.
        self.attack_types = {
            'benign': ['benign', 'normal', 'legitimate'],
            'dos': ['dos', 'denial of service'],
            'ddos': ['ddos', 'distributed denial of service'],
            'injection': ['injection', 'sql injection', 'command injection'],
            'scanning': ['scanning', 'port scan', 'network scan'],
            'password': ['password', 'brute force', 'credential'],
            'mitm': ['mitm', 'man in the middle'],
            'backdoor': ['backdoor'],
            'ransomware': ['ransomware'],
            'xss': ['xss', 'cross site scripting']
        }

    def extract_prediction(self, analysis_text: str) -> str:
        """Map free-text model output to a canonical attack label.

        Bug fix: the previous scan iterated classes in dict order and matched
        short substrings first, so 'dos' (a substring of 'ddos' and present in
        DDoS descriptions via 'denial of service') shadowed the 'ddos' class.
        Keywords are now matched longest-first across all classes.

        Returns 'benign' when nothing matches.
        """
        analysis_text = analysis_text.lower()

        # An explicit "classified as X" statement wins when X is recognizable.
        match = re.search(r"classified as (\w+)", analysis_text)
        if match:
            word = match.group(1)
            if word in self.attack_types:
                return word
            for attack_type, keywords in self.attack_types.items():
                if word in keywords:
                    return attack_type
            # Unrecognized word: fall through to the keyword scan instead of
            # returning a label outside the evaluation vocabulary.

        # Longest keyword first so no class shadows another via substrings.
        keyword_map = [(kw, label)
                       for label, kws in self.attack_types.items()
                       for kw in kws]
        keyword_map.sort(key=lambda pair: len(pair[0]), reverse=True)
        for keyword, attack_type in keyword_map:
            if keyword in analysis_text:
                return attack_type

        # Default to benign if no attack type is found
        return 'benign'

    def evaluate_results(self, results_file: str, test_data_file: str) -> Dict:
        """Evaluate model predictions against ground truth.

        Args:
            results_file: JSON list of per-flow results with 'flow_id' and
                'analysis' keys (as written by test.py).
            test_data_file: CSV with an 'Attack' column; rows are matched to
                results by positional index (flow_id -> iloc).

        Returns:
            JSON-serializable dict with keys: accuracy, classification_report,
            confusion_matrix, average_confidence, error_analysis.
        """
        # Load results and test data
        with open(results_file, 'r') as f:
            results = json.load(f)
        test_df = pd.read_csv(test_data_file)

        # Extract predictions and true labels
        predictions = []
        true_labels = []
        confidence_scores = defaultdict(list)

        for result in results:
            flow_id = result['flow_id']
            true_label = test_df.iloc[flow_id]['Attack'].lower()
            prediction = self.extract_prediction(result['analysis'])

            predictions.append(prediction)
            true_labels.append(true_label)

            # Heuristic confidence: hedging language lowers the score.
            confidence = 1.0
            if 'possibly' in result['analysis'].lower() or 'potential' in result['analysis'].lower():
                confidence = 0.7
            if 'suspicious' in result['analysis'].lower():
                confidence = 0.8
            confidence_scores[prediction].append(confidence)

        # Calculate metrics over the fixed label vocabulary
        accuracy = float(accuracy_score(true_labels, predictions))
        class_report = classification_report(true_labels, predictions,
                                             labels=list(self.attack_types.keys()),
                                             output_dict=True)
        conf_matrix = confusion_matrix(true_labels, predictions,
                                       labels=list(self.attack_types.keys()))

        # Calculate average confidence per class
        avg_confidence = {k: float(np.mean(v)) if v else 0.0
                          for k, v in confidence_scores.items()}

        # Analyze error patterns
        error_analysis = self.analyze_errors(true_labels, predictions, results)

        # Prepare metrics for JSON serialization
        metrics = {
            'accuracy': accuracy,
            'classification_report': {},
            'confusion_matrix': conf_matrix.tolist(),
            'average_confidence': avg_confidence,
            'error_analysis': error_analysis
        }

        # Cast numpy scalars to plain floats so json.dump succeeds.
        for class_name, metrics_dict in class_report.items():
            if isinstance(metrics_dict, dict):
                metrics['classification_report'][class_name] = {
                    k: float(v) for k, v in metrics_dict.items()
                    if isinstance(v, (int, float, np.number))
                }

        return metrics

    def analyze_errors(self, true_labels: List[str], predictions: List[str],
                       results: List[Dict]) -> Dict:
        """Group misclassified flows into false positives/negatives.

        A 'false positive' here means a benign flow predicted as an attack;
        a 'false negative' means an attack flow predicted as anything else
        (keyed by the true attack type).
        """
        error_patterns = {
            'false_positives': defaultdict(list),
            'false_negatives': defaultdict(list),
        }

        for true, pred, result in zip(true_labels, predictions, results):
            if true != pred:
                # Track the specific flow details for this error
                flow_details = {
                    'flow_id': result['flow_id'],
                    'source_ip': result['source_ip'],
                    'destination_ip': result['destination_ip'],
                    'model_explanation': result['analysis']
                }

                # Categorize error type
                if true == 'benign':
                    error_patterns['false_positives'][pred].append(flow_details)
                else:
                    error_patterns['false_negatives'][true].append(flow_details)

        # Convert defaultdict to regular dict for JSON serialization
        return {
            'false_positives': dict(error_patterns['false_positives']),
            'false_negatives': dict(error_patterns['false_negatives'])
        }

    def print_evaluation_summary(self, metrics: Dict):
        """Log a human-readable summary of the metrics dict from evaluate_results."""
        self.logger.info("\n=== Model Evaluation Summary ===")

        # Overall Accuracy
        self.logger.info(f"\nOverall Accuracy: {metrics['accuracy']:.4f}")

        # Per-class Performance
        self.logger.info("\nPer-class Performance:")
        class_metrics = metrics['classification_report']
        for class_name in self.attack_types.keys():
            if class_name in class_metrics:
                metrics_dict = class_metrics[class_name]
                self.logger.info(f"\n{class_name.upper()}:")
                self.logger.info(f"  Precision: {metrics_dict.get('precision', 0):.4f}")
                self.logger.info(f"  Recall: {metrics_dict.get('recall', 0):.4f}")
                self.logger.info(f"  F1-Score: {metrics_dict.get('f1-score', 0):.4f}")
                if class_name in metrics['average_confidence']:
                    self.logger.info(f"  Avg Confidence: {metrics['average_confidence'][class_name]:.4f}")

        # Error Analysis Summary
        self.logger.info("\nError Analysis Summary:")
        error_analysis = metrics['error_analysis']

        # False Positives
        self.logger.info("\nMost Common False Positives:")
        for attack_type, errors in error_analysis['false_positives'].items():
            self.logger.info(f"  {attack_type}: {len(errors)} instances")

        # False Negatives
        self.logger.info("\nMost Common False Negatives:")
        for attack_type, errors in error_analysis['false_negatives'].items():
            self.logger.info(f"  {attack_type}: {len(errors)} instances")
176
+
177
def main():
    """CLI entry point: score saved analyses and persist the metrics."""
    logger = setup_logging()

    model_evaluator = ModelEvaluator()

    try:
        scores = model_evaluator.evaluate_results(
            results_file='analysis_results.json',
            test_data_file='data/test.csv'
        )
        model_evaluator.print_evaluation_summary(scores)

        # Persist the full metric payload for later inspection.
        with open('evaluation_metrics.json', 'w') as f:
            json.dump(scores, f, indent=2)
        logger.info("\nSaved detailed metrics to evaluation_metrics.json")
    except Exception as e:
        # Log, then re-raise so the process still exits non-zero.
        logger.error(f"Error during evaluation: {str(e)}")
        raise


if __name__ == "__main__":
    main()
split_dataset.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import numpy as np
3
+ from pathlib import Path
4
+ import argparse
5
+ import logging
6
+ from sklearn.model_selection import train_test_split
7
+
8
def setup_logging():
    """Configure root logging (INFO, timestamped) and return this module's logger."""
    fmt = '%(asctime)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=fmt)
    return logging.getLogger(__name__)
14
+
15
def split_dataset(input_path: str, output_dir: str, chunk_size: int = 100000):
    """
    Split a large CSV file into train, test, and validation sets.
    Processes the file in chunks to handle large datasets efficiently.

    The split is 70% train / ~20% test / ~10% val (the 30% remainder is
    re-split 0.67/0.33), applied independently to each chunk with a fixed
    random_state. NOTE(review): the split is not stratified, so per-split
    class balance is not guaranteed — confirm this is acceptable downstream.

    Args:
        input_path: Path to input CSV file
        output_dir: Directory to save split datasets
        chunk_size: Number of rows to process at a time
    """
    logger = setup_logging()
    output_path = Path(output_dir)
    output_path.mkdir(parents=True, exist_ok=True)

    # Set random seed for reproducibility
    np.random.seed(42)

    total_rows = 0
    logger.info("Starting dataset split...")

    # Context managers guarantee the three output files are closed even if a
    # chunk fails mid-way (the previous version leaked the handles on error).
    # newline='' prevents doubled line endings on Windows when pandas writes.
    with open(output_path / 'train.csv', 'w', newline='') as train_file, \
         open(output_path / 'test.csv', 'w', newline='') as test_file, \
         open(output_path / 'val.csv', 'w', newline='') as val_file:

        is_first_chunk = True
        for chunk in pd.read_csv(input_path, chunksize=chunk_size):
            # Split chunk into train (70%), test (~20%), val (~10%)
            train_chunk, test_val_chunk = train_test_split(chunk, train_size=0.7, random_state=42)
            test_chunk, val_chunk = train_test_split(test_val_chunk, train_size=0.67, random_state=42)

            # Only the first chunk writes the CSV header.
            header = is_first_chunk
            train_chunk.to_csv(train_file, index=False, header=header)
            test_chunk.to_csv(test_file, index=False, header=header)
            val_chunk.to_csv(val_file, index=False, header=header)
            is_first_chunk = False

            total_rows += len(chunk)
            logger.info(f"Processed {total_rows} rows...")

    logger.info("Dataset splitting complete!")
    logger.info(f"Total rows processed: {total_rows}")
71
+
72
if __name__ == "__main__":
    # Command-line interface for the splitter.
    cli = argparse.ArgumentParser(description="Split large CSV dataset into train/test/val sets")
    cli.add_argument("--input_path", required=True, help="Path to input CSV file")
    cli.add_argument("--output_dir", required=True, help="Directory to save split datasets")
    cli.add_argument("--chunk_size", type=int, default=100000, help="Chunk size for processing")

    options = cli.parse_args()

    split_dataset(options.input_path, options.output_dir, options.chunk_size)
test.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from unsloth import FastLanguageModel
3
+ import pandas as pd
4
+ from transformers import TextStreamer
5
+ import logging
6
+ from typing import List, Dict
7
+ import json
8
+
9
def setup_logging():
    """Configure root logging (INFO, timestamped) and return this module's logger."""
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
    )
    return logging.getLogger(__name__)
15
+
16
class NetworkFlowAnalyzer:
    """Wraps a fine-tuned LLaMA-3 checkpoint (unsloth, 4-bit) to analyze NetFlow records.

    NOTE(review): inputs are moved to "cuda" unconditionally — assumes a GPU
    is available; confirm before running on CPU-only hosts.
    """

    def __init__(self, model_path: str):
        """Load the checkpoint at `model_path` and prepare it for inference."""
        self.logger = setup_logging()
        self.logger.info(f"Loading model from {model_path}")

        # Load model and tokenizer (4-bit quantized; 2048-token window)
        self.model, self.tokenizer = FastLanguageModel.from_pretrained(
            model_path,
            max_seq_length=2048,
            load_in_4bit=True,
        )

        # Enable faster inference
        FastLanguageModel.for_inference(self.model)

        # Set up tokenizer for generation
        self.tokenizer.pad_token = self.tokenizer.eos_token
        self.tokenizer.padding_side = "right"

    def format_flow_prompt(self, row: pd.Series) -> str:
        """Format a network flow into a LLaMA-3 chat prompt (user turn + open assistant turn)."""
        flow_text = f"""Network Flow Description:
Source: {row['IPV4_SRC_ADDR']} (Port: {row['L4_SRC_PORT']})
Destination: {row['IPV4_DST_ADDR']} (Port: {row['L4_DST_PORT']})
Protocol Information:
- Protocol ID: {row['PROTOCOL']}
- Layer 7 Protocol: {row['L7_PROTO']}
- TCP Flags: {row['TCP_FLAGS']}
Traffic Metrics:
- Bytes: {row['IN_BYTES']} inbound, {row['OUT_BYTES']} outbound
- Packets: {row['IN_PKTS']} inbound, {row['OUT_PKTS']} outbound
- Duration: {row['FLOW_DURATION_MILLISECONDS']} milliseconds"""

        # Format in LLaMA-3 style
        return f"""<|begin_of_text|><|start_header_id|>user<|end_header_id|>
Analyze this network flow for potential security threats:

{flow_text}<|eot_id|><|start_header_id|>assistant<|end_header_id|>"""

    def analyze_flow(self, row: pd.Series, max_new_tokens: int = 256, stream: bool = False) -> str:
        """Analyze a single network flow.

        Returns the decoded analysis (prompt included, special tokens removed),
        or None when stream=True — the text is then emitted to stdout by the
        streamer instead of being returned.
        """
        # Prepare input (truncated to the model's 2048-token window)
        prompt = self.format_flow_prompt(row)
        inputs = self.tokenizer(
            prompt,
            return_tensors="pt",
            truncation=True,
            max_length=2048
        ).to("cuda")

        # Set up streamer if requested
        streamer = TextStreamer(self.tokenizer) if stream else None

        # Generate prediction
        outputs = self.model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            streamer=streamer,
            use_cache=True
        )

        if not stream:
            return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        return None

    def analyze_batch(self, df: pd.DataFrame, output_file: str = None) -> List[Dict]:
        """Analyze a batch of network flows.

        Per-flow failures are logged and skipped, so the returned list may be
        shorter than `df`. If `output_file` is given, the results are also
        dumped as JSON. NOTE(review): `idx` is the DataFrame *label*, so the
        "{idx+1}/{len(df)}" progress message assumes a RangeIndex.
        """
        results = []

        for idx, row in df.iterrows():
            self.logger.info(f"Analyzing flow {idx+1}/{len(df)}")

            try:
                analysis = self.analyze_flow(row)
                result = {
                    "flow_id": idx,
                    "source_ip": row["IPV4_SRC_ADDR"],
                    "destination_ip": row["IPV4_DST_ADDR"],
                    "analysis": analysis,
                    # Labels default to "Unknown" when the CSV lacks the columns
                    "true_label": row.get("Label", "Unknown"),
                    "attack_type": row.get("Attack", "Unknown")
                }
                results.append(result)

            except Exception as e:
                self.logger.error(f"Error analyzing flow {idx}: {str(e)}")
                continue

        if output_file:
            with open(output_file, 'w') as f:
                json.dump(results, f, indent=2)

        return results
109
+
110
def main():
    """Script entry point: smoke-test the analyzer on the held-out test split."""
    logger = setup_logging()

    flow_analyzer = NetworkFlowAnalyzer("cybersec_model_output/checkpoint-4329")

    test_df = pd.read_csv("data/test.csv")
    logger.info(f"Loaded {len(test_df)} test samples")

    # Demo 1: stream a single flow's analysis straight to stdout.
    logger.info("\nAnalyzing single flow with streaming:")
    flow_analyzer.analyze_flow(test_df.iloc[0], stream=True)

    # Demo 2: batch-analyze a capped number of flows and persist the results.
    logger.info("\nAnalyzing batch of flows:")
    batch = test_df.head(min(100, len(test_df)))
    results = flow_analyzer.analyze_batch(batch, output_file="analysis_results.json")

    # Report simple statistics on the run.
    logger.info(f"\nAnalyzed {len(results)} flows")
    if "Label" in test_df.columns:
        flagged = [r for r in results
                   if "malicious" in r["analysis"].lower() and r["true_label"] == 1]
        true_positives = len(flagged)
        logger.info(f"Detected {true_positives} potentially malicious flows")


if __name__ == "__main__":
    main()
train.py ADDED
@@ -0,0 +1,196 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ os.environ["CUDA_VISIBLE_DEVICES"]="3"
3
+ import torch
4
+ from datasets import Dataset
5
+ from unsloth import FastLanguageModel
6
+ import pandas as pd
7
+ import numpy as np
8
+ from sklearn.preprocessing import MinMaxScaler
9
+ from transformers import TrainingArguments
10
+ from trl import SFTTrainer
11
+ import logging
12
+ from typing import List, Dict
13
+ import json
14
+
15
def setup_logging():
    """Configure root logging (INFO, timestamped) and return this module's logger."""
    message_format = '%(asctime)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=message_format)
    return logging.getLogger(__name__)
21
def sample_balanced_dataset(df, max_samples_per_class=1000):
    """Cap every class (benign first, then each attack type) at max_samples_per_class rows.

    Classes already at or under the cap are kept whole and in order; larger
    classes are down-sampled deterministically (random_state=42).
    """
    def cap(group):
        # Down-sample only when the class exceeds the cap.
        if len(group) > max_samples_per_class:
            return group.sample(n=max_samples_per_class, random_state=42)
        return group

    is_benign = df['Attack'].str.lower() == 'benign'
    parts = [cap(df[is_benign])]

    attacks = df[~is_benign]
    for label in attacks['Attack'].unique():
        parts.append(cap(attacks[attacks['Attack'] == label]))

    return pd.concat(parts, ignore_index=True)
46
+
47
class NetworkFlowDataProcessor:
    """Turns raw NetFlow rows into LLaMA-3 chat-formatted supervised training text."""

    # Canned explanation appended after the classification sentence.
    _DESCRIPTIONS = {
        "benign": "This is normal network traffic with no malicious intent.",
        "ddos": "A Distributed Denial of Service attack attempting to overwhelm network resources.",
        "dos": "A Denial of Service attack targeting system availability.",
        "injection": "An attack attempting to inject malicious code or commands.",
        "scanning": "Network scanning activity to discover vulnerabilities.",
        "backdoor": "Malicious activity indicating backdoor access attempts.",
        "mitm": "Man-in-the-Middle attack intercepting network communications.",
        "password": "Password-based attack attempting unauthorized access.",
        "ransomware": "Ransomware-related network activity.",
        "xss": "Cross-Site Scripting attack targeting web applications."
    }

    def __init__(self):
        self.logger = setup_logging()
        self.scaler = MinMaxScaler()
        # Feature columns referenced when rendering flow text.
        self.numerical_features = [
            'L4_SRC_PORT', 'L4_DST_PORT', 'PROTOCOL', 'L7_PROTO',
            'IN_BYTES', 'OUT_BYTES', 'IN_PKTS', 'OUT_PKTS',
            'TCP_FLAGS', 'FLOW_DURATION_MILLISECONDS'
        ]
        self.categorical_features = ['IPV4_SRC_ADDR', 'IPV4_DST_ADDR']

    def process_ip_address(self, ip: str) -> str:
        """Rewrite an IPv4 address as an internal/external network tag (192.168.x.y only)."""
        octets = ip.split('.')
        if octets[0] == '192' and octets[1] == '168':
            return f"internal_network_{octets[2]}_{octets[3]}"
        return f"external_network_{ip}"

    def format_flow_data(self, row: pd.Series) -> str:
        """Render one flow row as the human-readable description block."""
        src = self.process_ip_address(row['IPV4_SRC_ADDR'])
        dst = self.process_ip_address(row['IPV4_DST_ADDR'])
        return f"""Network Flow Description:
Source: {src} (Port: {row['L4_SRC_PORT']})
Destination: {dst} (Port: {row['L4_DST_PORT']})
Protocol Information:
- Protocol ID: {row['PROTOCOL']}
- Layer 7 Protocol: {row['L7_PROTO']}
- TCP Flags: {row['TCP_FLAGS']}
Traffic Metrics:
- Bytes: {row['IN_BYTES']} inbound, {row['OUT_BYTES']} outbound
- Packets: {row['IN_PKTS']} inbound, {row['OUT_PKTS']} outbound
- Duration: {row['FLOW_DURATION_MILLISECONDS']} milliseconds"""

    def get_attack_description(self, attack_type: str) -> str:
        """Return the canned explanation for an attack label (case-insensitive)."""
        return self._DESCRIPTIONS.get(attack_type.lower(), "Unknown attack type")

    def prepare_training_text(self, row: pd.Series) -> str:
        """Build one complete LLaMA-3 chat example (user prompt + target answer)."""
        flow_text = self.format_flow_data(row)
        attack_type = row['Attack'].lower() if 'Attack' in row else 'benign'
        attack_desc = self.get_attack_description(attack_type)

        return f"""<|begin_of_text|><|start_header_id|>user<|end_header_id|>
Analyze this network flow for potential security threats:

{flow_text}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
This network flow is classified as {attack_type}. {attack_desc}

Key indicators from the flow data:
- Traffic volume: {row['IN_BYTES'] + row['OUT_BYTES']} total bytes
- Flow duration: {row['FLOW_DURATION_MILLISECONDS']} ms
- Protocol behavior: {row['TCP_FLAGS']} TCP flags<|eot_id|>"""
111
+
112
def load_and_process_data(train_path: str, processor: NetworkFlowDataProcessor, max_samples_per_class=50000):
    """Read the training CSV, balance classes, and wrap rendered prompts in a Dataset."""
    logger = setup_logging()
    logger.info(f"Loading data from {train_path}")

    frame = pd.read_csv(train_path)
    frame = sample_balanced_dataset(frame, max_samples_per_class)
    logger.info(f"Sampled dataset size: {len(frame)}")

    # One chat-formatted training string per flow, exposed as a 'text' column.
    prompts = [processor.prepare_training_text(record) for _, record in frame.iterrows()]
    return Dataset.from_pandas(pd.DataFrame({'text': prompts}))
126
+
127
def main():
    """Fine-tune LLaMA-3-8B-Instruct (4-bit base + LoRA) on balanced NetFlow prompts.

    Side effects: writes checkpoints to cybersec_model_output/ during training
    and the final adapter + tokenizer to cybersec_model/.
    """
    logger = setup_logging()

    # Initialize data processor
    processor = NetworkFlowDataProcessor()

    # Load and process data (each class capped at 50k rows to balance the set)
    train_dataset = load_and_process_data("data/train.csv", processor, max_samples_per_class=50000)

    # Model initialization: 4-bit quantized base checkpoint via unsloth
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name="unsloth/llama-3-8b-Instruct-bnb-4bit",
        max_seq_length=2048,
        load_in_4bit=True,
    )

    # Configure tokenizer for LLaMA-3
    tokenizer.pad_token = tokenizer.eos_token
    tokenizer.padding_side = "right"

    # Add LoRA adapters on all attention + MLP projections (r=16, alpha=16)
    model = FastLanguageModel.get_peft_model(
        model,
        r=16,
        target_modules=[
            "q_proj", "k_proj", "v_proj", "o_proj",
            "gate_proj", "up_proj", "down_proj",
        ],
        lora_alpha=16,
        lora_dropout=0,
        bias="none",
        use_gradient_checkpointing="unsloth",
        random_state=3407,
    )

    # Configure training arguments
    # (effective batch size = 64 * 4 accumulation steps = 256 sequences)
    training_args = TrainingArguments(
        output_dir="cybersec_model_output",
        num_train_epochs=3,
        per_device_train_batch_size=64,
        gradient_accumulation_steps=4,
        learning_rate=2e-4,
        bf16=True,  # Use bfloat16 for A100
        logging_steps=10,
        save_strategy="epoch",
        optim="adamw_8bit",
        lr_scheduler_type="cosine",
    )

    # Initialize trainer on the plain-text dataset column
    trainer = SFTTrainer(
        model=model,
        tokenizer=tokenizer,
        train_dataset=train_dataset,
        dataset_text_field="text",  # Changed from conversations to text
        max_seq_length=2048,
        args=training_args,
    )

    # Train the model
    logger.info("Starting training...")
    trainer.train()

    # Save the model (LoRA adapter + tokenizer)
    logger.info("Saving model...")
    model.save_pretrained("cybersec_model")
    tokenizer.save_pretrained("cybersec_model")


if __name__ == "__main__":
    main()