Canstralian committed on
Commit
1e85f11
·
verified ·
1 Parent(s): bd1558e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +72 -5
app.py CHANGED
@@ -1,14 +1,18 @@
1
  import gradio as gr
2
- from transformers import pipeline
3
  from typing import List, Dict
 
4
 
5
  # Initialize the Hugging Face pipeline (make sure to replace with your model name)
6
  model_name = "your_huggingface_model_name" # Ensure to use a valid model
 
7
  try:
8
- generator = pipeline("text-generation", model=model_name)
 
9
  except Exception as e:
10
  raise ValueError(f"Error initializing the model '{model_name}': {e}")
11
 
 
12
  def generate_attack(prompt: str, history: List[Dict[str, str]]) -> List[str]:
13
  """
14
  Simulates a Blackhat AI scenario by generating attack strategies and potential impacts.
@@ -42,20 +46,83 @@ def generate_attack(prompt: str, history: List[Dict[str, str]]) -> List[str]:
42
  except Exception as e:
43
  return [f"Error generating response: {e}"]
44
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  # Define the Gradio interface
46
  demo = gr.Interface(
47
  fn=generate_attack,
48
  inputs=[
49
  gr.Textbox(label="Prompt", placeholder="Enter your simulation prompt here..."),
50
- gr.Dataframe(headers=["user", "assistant"], label="Message History", type="array")
 
 
 
 
 
51
  ],
52
- outputs=gr.Textbox(label="Generated Response"),
53
- title="Blackhat AI Simulator",
54
  description=(
55
  "This simulator generates adversarial scenarios, analyzes attack vectors, "
56
  "and provides ethical countermeasures. Use responsibly for cybersecurity training and awareness."
57
  )
58
  )
59
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60
  if __name__ == "__main__":
61
  demo.launch()
 
1
  import gradio as gr
2
+ from transformers import pipeline, Trainer, TrainingArguments, AutoModelForCausalLM, AutoTokenizer
3
  from typing import List, Dict
4
+ import os
5
 
# Initialize the Hugging Face pipeline (make sure to replace with your model name)
model_name = "your_huggingface_model_name"  # Ensure to use a valid model
try:
    # Bug fix: the tokenizer load previously sat OUTSIDE this try-block, so an
    # invalid model name raised a raw error from AutoTokenizer instead of the
    # intended ValueError below. Both loads now share the same failure path.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
except Exception as e:
    # Chain the original exception so the underlying traceback is preserved.
    raise ValueError(f"Error initializing the model '{model_name}': {e}") from e
14
 
15
+ # Function to generate attack scenarios
16
  def generate_attack(prompt: str, history: List[Dict[str, str]]) -> List[str]:
17
  """
18
  Simulates a Blackhat AI scenario by generating attack strategies and potential impacts.
 
46
  except Exception as e:
47
  return [f"Error generating response: {e}"]
48
 
49
# Function for fine-tuning the model with the uploaded dataset
def fine_tune_model(dataset: str) -> str:
    """
    Fine-tunes the global model using the uploaded dataset.

    Args:
        dataset (str): Path to a plain-text dataset file, one training
            example per line.

    Returns:
        str: A message indicating whether fine-tuning was successful or failed.
    """
    try:
        # Read raw training lines, dropping blank ones.
        with open(dataset, "r", encoding="utf-8") as file:
            lines = [line.strip() for line in file if line.strip()]
        if not lines:
            return "Error fine-tuning the model: the dataset is empty."

        # Bug fix: Trainer cannot consume raw strings as train_dataset — each
        # example must be tokenized, with input_ids duplicated as labels for
        # causal-LM training.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
        encodings = []
        for line in lines:
            enc = tokenizer(line, truncation=True, padding="max_length", max_length=128)
            enc["labels"] = list(enc["input_ids"])
            encodings.append(enc)

        train_args = TrainingArguments(
            output_dir="./results",
            # Bug fix: evaluation_strategy="steps" with no eval_dataset crashes
            # when the first evaluation step fires; evaluation must stay off.
            evaluation_strategy="no",
            save_steps=10,
            per_device_train_batch_size=4,
            num_train_epochs=1,
            logging_dir="./logs",
        )

        trainer = Trainer(
            model=model,
            args=train_args,
            train_dataset=encodings,
            tokenizer=tokenizer,
        )

        trainer.train()
        model.save_pretrained("./fine_tuned_model")
        return "Model fine-tuned successfully!"
    except Exception as e:
        return f"Error fine-tuning the model: {e}"
85
+
86
# Define the Gradio interface.
# Bug fix: generate_attack(prompt, history) takes exactly two arguments and
# returns a single value, but the interface declared THREE inputs (including a
# gr.File) and TWO outputs — Gradio would call fn with a mismatched argument
# count. The fine-tuning upload is wired separately via handle_fine_tuning,
# not through this fn.
demo = gr.Interface(
    fn=generate_attack,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter your simulation prompt here..."),
        gr.Dataframe(headers=["user", "assistant"], label="Message History", type="array"),
    ],
    outputs=gr.Textbox(label="Generated Response"),
    title="Blackhat AI Simulator with Live Fine-Tuning",
    description=(
        "This simulator generates adversarial scenarios, analyzes attack vectors, "
        "and provides ethical countermeasures. Use responsibly for cybersecurity training and awareness."
    )
)
104
 
105
def handle_fine_tuning(dataset_file):
    """
    Trigger the fine-tuning process after a file upload.

    Args:
        dataset_file: The uploaded file object from gr.File, or None if
            nothing was uploaded.

    Returns:
        str: Status message from fine_tune_model, or a notice that no
            dataset was provided.
    """
    if dataset_file is None:
        return "No dataset uploaded."
    # Bug fix: the "uploads" directory was never created, so open() below
    # raised FileNotFoundError on a fresh deployment.
    os.makedirs("uploads", exist_ok=True)
    # basename() strips any directory components from the (untrusted)
    # uploaded filename, preventing path traversal outside uploads/.
    dataset_path = os.path.join("uploads", os.path.basename(dataset_file.name))
    with open(dataset_path, "wb") as f:
        f.write(dataset_file.read())
    return fine_tune_model(dataset_path)
116
+
117
# Add a separate fine-tuning section to the app.
# Bug fix: gr.Interface has no add_component() or interactive() method — the
# previous code raised AttributeError at startup. Instead, build a dedicated
# fine-tuning interface and merge both into a tabbed app, rebinding `demo` so
# the existing demo.launch() call keeps working unchanged.
fine_tune_demo = gr.Interface(
    fn=handle_fine_tuning,
    inputs=gr.File(label="Upload Dataset for Fine-Tuning", file_count="single"),
    outputs=gr.Textbox(label="Fine-Tuning Status"),
    title="Fine-Tune Model",
)
demo = gr.TabbedInterface(
    [demo, fine_tune_demo],
    tab_names=["Simulator", "Fine-Tuning"],
)
126
+
127
# Launch the Gradio app only when this file is executed directly,
# not when it is imported as a module.
if __name__ == "__main__":
    demo.launch()