Canstralian committed on
Commit
0a14fd6
·
verified ·
1 Parent(s): 9cccab9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -105
app.py CHANGED
@@ -1,113 +1,24 @@
1
  import gradio as gr
2
  from transformers import pipeline, Trainer, TrainingArguments, AutoModelForCausalLM, AutoTokenizer
3
- from typing import List, Dict
4
- import os
5
- from datasets import Dataset
6
 
7
# Initialize the Hugging Face text-generation pipeline.
# NOTE(review): "gpt2" is a plain language model, not a chat model — role-tagged
# messages are not understood natively; downstream code passes raw text.
model_name = "gpt2"  # Example model, replace with your own

try:
    # BUG FIX: the tokenizer load was outside the try block, so a bad model id
    # escaped the guard with a raw traceback instead of the intended ValueError.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
except Exception as e:
    # Chain the original exception so the root cause stays visible.
    raise ValueError(f"Error initializing the model '{model_name}': {e}") from e
15
 
16
# Function to generate attack scenarios
def generate_attack(prompt: str, history: List[Dict[str, str]]) -> List[str]:
    """
    Simulates a Blackhat AI scenario by generating attack strategies and potential impacts.

    Args:
        prompt: The current user prompt; must be non-empty after stripping.
        history: Prior turns as dicts with optional "user"/"assistant" keys.

    Returns:
        A single-element list containing the generated text, or an error message.
    """
    if not prompt.strip():
        return ["Error: Prompt cannot be empty."]
    if not isinstance(history, list) or not all(isinstance(h, dict) for h in history):
        return ["Error: History must be a list of dictionaries."]

    # Prepare messages for the AI
    messages = [{"role": "system", "content": f"Responding to {prompt}..."}]
    for val in history:
        if "user" in val:
            messages.append({"role": "user", "content": val["user"]})
        if "assistant" in val:
            messages.append({"role": "assistant", "content": val["assistant"]})

    # Append the current user prompt
    messages.append({"role": "user", "content": prompt})

    # BUG FIX: the original passed only messages[-1]["content"] to the
    # generator, silently discarding the history it just assembled. Flatten
    # every turn into one prompt so the plain-LM pipeline sees the whole
    # conversation.
    full_prompt = "\n".join(f"{m['role']}: {m['content']}" for m in messages)

    try:
        response = generator(full_prompt, max_length=100, num_return_sequences=1)
        return [response[0]["generated_text"]]
    except Exception as e:
        return [f"Error generating response: {e}"]
43
-
44
# Function for fine-tuning the model with the uploaded dataset
def fine_tune_model(dataset_file) -> str:
    """
    Fine-tunes the module-level model on an uploaded text dataset.

    Args:
        dataset_file: An uploaded-file object exposing .name and .read().

    Returns:
        A human-readable success or error message (errors are caught, not raised).
    """
    try:
        # BUG FIX: the original assumed "uploads/" already existed and crashed
        # with FileNotFoundError on the first upload; create it on demand.
        os.makedirs("uploads", exist_ok=True)
        dataset_path = os.path.join("uploads", dataset_file.name)
        with open(dataset_path, "wb") as f:
            f.write(dataset_file.read())

        # Load the dataset (make sure it's in the right format).
        # NOTE(review): the dataset is never tokenized here — Trainer will
        # presumably fail on raw text rows; a tokenization .map() step looks
        # missing. Confirm before relying on this path.
        dataset = Dataset.from_text(dataset_path)

        # Fine-tune the model (dummy training example for illustration)
        train_args = TrainingArguments(
            output_dir="./results",
            evaluation_strategy="steps",
            save_steps=10,
            per_device_train_batch_size=4,
            num_train_epochs=1,
            logging_dir="./logs",
        )

        trainer = Trainer(
            model=model,
            args=train_args,
            train_dataset=dataset,
            tokenizer=tokenizer,
        )

        trainer.train()
        model.save_pretrained("./fine_tuned_model")
        return "Model fine-tuned successfully!"
    except Exception as e:
        return f"Error fine-tuning the model: {e}"
80
-
81
# Define the Gradio interface.
# BUG FIX: the original declared three inputs and two outputs for
# generate_attack, which takes exactly two arguments and returns one value —
# Gradio raises on that arity mismatch at construction time. The fine-tuning
# upload belongs to its own handler, not to this Interface.
demo = gr.Interface(
    fn=generate_attack,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter your simulation prompt here..."),
        gr.Dataframe(headers=["user", "assistant"], label="Message History", type="array"),
    ],
    outputs=gr.Textbox(label="Generated Response"),
    title="Blackhat AI Simulator with Live Fine-Tuning",
    description="Generate adversarial scenarios and fine-tune the model with custom datasets.",
)
96
-
97
# Event handler for fine-tuning after dataset upload
def handle_fine_tuning(dataset_file):
    """Start fine-tuning if a dataset was uploaded; otherwise report that none was."""
    # Guard clause instead of if/else: bail out early when nothing was uploaded.
    if dataset_file is None:
        return "No dataset uploaded."
    return fine_tune_model(dataset_file)
103
-
104
# BUG FIX: gr.Interface has no `add_component` method — the original call
# raised AttributeError as soon as the module was imported. The generation UI
# defined above is complete as-is; wiring a manual fine-tune button would
# require restructuring the app as a gr.Blocks layout instead.
110
 
111
# Launch the interface only when this file is executed directly, not on import.
if __name__ == "__main__":
    demo.launch()
 
1
import gradio as gr
from transformers import pipeline, Trainer, TrainingArguments, AutoModelForCausalLM, AutoTokenizer
import torch  # NOTE(review): unused in this snippet — presumably kept for fine-tuning code; confirm

# Initialize model and tokenizer.
# BUG FIX: "huggingface/transformer_model" is a placeholder, not a real Hub id,
# so from_pretrained fails with an opaque error at import time. Guard the load
# and fail fast with a clear, attributed message.
model_name = "huggingface/transformer_model"  # Replace with the actual model name
try:
    model = AutoModelForCausalLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
except Exception as e:
    raise ValueError(f"Error initializing the model '{model_name}': {e}") from e
10
# Define Gradio interface handler.
def upload_and_finetune(file):
    """
    Acknowledge an uploaded dataset file.

    Args:
        file: Uploaded-file object exposing a .name attribute, or None.

    Returns:
        A status message string.

    NOTE(review): the actual fine-tuning (load dataset, preprocess, train) is
    still a stub — only the upload acknowledgement is implemented.
    """
    # BUG FIX: guard against a missing upload; the original raised
    # AttributeError on `None.name` when the user submitted without a file.
    if file is None:
        return "No dataset uploaded."
    return f"File {file.name} uploaded successfully!"
16
# Build the Gradio interface around the upload handler.
# NOTE(review): `type="file"` on gr.File is the Gradio 3.x API; Gradio 4.x
# accepts only "filepath" or "binary" — confirm which gradio version is pinned.
dataset_input = gr.File(
    label="Upload Dataset for Fine-Tuning",
    file_count="single",
    type="file",
)
interface = gr.Interface(
    fn=upload_and_finetune,
    inputs=[dataset_input],
    outputs="text",
)
22
 
 
23
# Start the Gradio app when this file is run as a script (not when imported).
if __name__ == "__main__":
    interface.launch()