Canstralian committed on
Commit
6123028
·
verified ·
1 Parent(s): 665e308

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -57
app.py CHANGED
@@ -1,60 +1,53 @@
1
- import spaces
2
  import gradio as gr
3
- import torch
4
- import transformers
5
- from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
6
- import os
7
-
8
- title = """# Welcome to 🌟Tonic's✨StarCoder
9
- ✨StarCoder StarCoder2-15B model is a 15B parameter model trained on 600+ programming languages from The Stack v2, with opt-out requests excluded. The model uses Grouped Query Attention, a context window of 16,384 tokens with a sliding window attention of 4,096 tokens, and was trained using the Fill-in-the-Middle objective on 4+ trillion tokens. The model was trained with NVIDIA NeMo™ Framework using the NVIDIA Eos Supercomputer built with NVIDIA DGX H100 systems. You can build with this endpoint using✨StarCoder available here : [bigcode/starcoder2-15b](https://huggingface.co/bigcode/starcoder2-15b). You can also use ✨StarCoder by cloning this space. Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic/starcoder2?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
10
- Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's 🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Huggingface:[MultiTransformer](https://huggingface.co/MultiTransformer) Math 🔍 [introspector](https://huggingface.co/introspector) On 🌐Github: [Tonic-AI](https://github.com/tonic-ai) & contribute to🌟 [SciTonic](https://github.com/Tonic-AI/scitonic)🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
11
- """
12
-
13
- model_path = "bigcode/starcoder2-15b"
14
-
15
- hf_token = os.getenv("HF_TOKEN")
16
- if not hf_token:
17
- raise ValueError("Hugging Face token not found. Please set the HF_TOKEN environment variable.")
18
-
19
- tokenizer = AutoTokenizer.from_pretrained(model_path)
20
- quantization_config = BitsAndBytesConfig(load_in_8bit=True)
21
- model = AutoModelForCausalLM.from_pretrained( model_path, quantization_config=quantization_config)
22
-
23
- @spaces.GPU
24
- def generate_text(prompt, temperature=0.9, max_length=1200):
25
- # Encode the inputs
26
- inputs = tokenizer.encode(prompt, return_tensors="pt")
27
- attention_mask = torch.ones(inputs.shape, dtype=torch.long)
28
- inputs = inputs.to("cuda")
29
- attention_mask = attention_mask.to("cuda")
30
- outputs = model.generate(
31
- inputs,
32
- attention_mask=attention_mask,
33
- max_length=max_length,
34
- top_p=0.9,
35
- temperature=temperature,
36
- do_sample=True,
37
- pad_token_id=tokenizer.eos_token_id
38
- )
39
- return tokenizer.decode(outputs[0])
40
-
41
- def gradio_app():
42
- with gr.Blocks() as demo:
43
- gr.Markdown(title)
44
- prompt = gr.Code(label="Enter your code prompt", value="def print_hello_world():")
45
- with gr.Row():
46
- temperature = gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=0.5, label="Temperature")
47
- max_length = gr.Slider(minimum=100, maximum=1024, step=10, value=450, label="Generate Length")
48
- generate_btn = gr.Button("Try✨StarCoder")
49
- output = gr.Code(label="✨StarCoder:", lines=40)
50
-
51
- generate_btn.click(
52
- fn=generate_text,
53
- inputs=[prompt, temperature, max_length],
54
- outputs=output
55
- )
56
-
57
- demo.launch()
58
 
59
  if __name__ == "__main__":
60
- gradio_app()
 
 
1
from typing import List, Dict, Any
from typing import Optional

import gradio as gr
from huggingface_hub import InferenceClient
from transformers import pipeline
5
+
6
# Initialize the InferenceClient (make sure to add your Hugging Face model)
# NOTE(review): `client` is created here but never used anywhere below —
# generation goes through a local transformers pipeline instead. Confirm
# whether the InferenceClient path is still intended, or remove this.
client = InferenceClient("your_huggingface_model_name_or_api_key")
8
+
9
def generate_attack(
    prompt: str,
    history: Optional[List[Dict[str, str]]] = None,
) -> List[str]:
    """
    Simulates a Blackhat AI scenario by generating attack strategies and potential impacts.

    Args:
        prompt (str): The user's input to the simulator.
        history (Optional[List[Dict[str, str]]]): Prior turns, each a dict
            that may carry a "user" and/or "assistant" key. Defaults to
            None so single-input UIs (e.g. a plain gr.Interface textbox)
            can call this with just the prompt without a TypeError.

    Returns:
        List[str]: A single-element list with the generated response text.
    """
    messages: List[Dict[str, str]] = [
        {
            "role": "system",
            "content": f"Responding to {prompt}...",
        }
    ]

    for turn in history or []:
        if "user" in turn:
            messages.append({"role": "user", "content": turn["user"]})
        if "assistant" in turn:
            messages.append({"role": "assistant", "content": turn["assistant"]})

    messages.append({"role": "user", "content": prompt})

    # NOTE(review): only the final user message is sent to the model — the
    # assembled `messages` history is currently ignored by the generator.
    # Confirm whether multi-turn context should be fed to the pipeline.
    generator = _get_generator()
    response = generator(messages[-1]["content"], max_length=100)

    return [response[0]["generated_text"]]  # Return the generated text


def _get_generator():
    """Lazily build and cache the text-generation pipeline (one model load).

    Constructing a transformers pipeline reloads the model weights, which is
    far too expensive to repeat on every request, so cache it on first use.
    """
    if not hasattr(_get_generator, "_cached"):
        # NOTE(review): placeholder model id — replace with a real model name.
        _get_generator._cached = pipeline(
            "text-generation", model="your_huggingface_model_name"
        )
    return _get_generator._cached
43
+
44
# The UI exposes a single prompt textbox, but generate_attack takes
# (prompt, history). Adapt with a lambda that supplies an empty history so
# submitting does not raise TypeError for a missing positional argument.
demo = gr.Interface(
    fn=lambda prompt: generate_attack(prompt, []),
    inputs=[gr.Textbox()],
    outputs=gr.Textbox(),
    title="Blackhat AI Simulator",
    description="This simulator generates adversarial scenarios, analyzes attack vectors, and provides ethical countermeasures. Use responsibly for cybersecurity training and awareness."
)
 
 
 
 
 
 
51
 
52
if __name__ == "__main__":
    # Launch the Gradio app only when run as a script (not on import).
    demo.launch()