ghosthets committed on
Commit
63d5a2c
·
verified ·
1 Parent(s): 7d26434

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -33
app.py CHANGED
@@ -1,39 +1,41 @@
1
import gradio as gr
from huggingface_hub import InferenceClient

# Hosted-inference client for the Dex model on the Hugging Face Hub.
client = InferenceClient("dexcommunity/dex")  # 👈 path to your Dex model
 
5
 
6
def respond(message, history, system_message, max_tokens, temperature, top_p):
    """Stream a reply from the Dex model for a gr.ChatInterface.

    Parameters
    ----------
    message : str
        The user's latest message.
    history : list[tuple[str, str]]
        Prior (user, bot) turns as supplied by gr.ChatInterface.
    system_message : str
        System prompt from the UI Textbox; prepended to the transcript.
    max_tokens, temperature, top_p :
        Sampling controls forwarded to the inference endpoint.

    Yields
    ------
    str
        The partial response, growing as tokens stream in.
    """
    # BUG FIX: system_message was collected from the UI but never used in
    # the prompt, so the "System Message" Textbox had no effect at all.
    prompt = f"{system_message}\n" if system_message else ""
    for user, bot in history:
        prompt += f"User: {user}\nDex: {bot}\n"
    prompt += f"User: {message}\nDex:"

    response = ""
    try:
        for token in client.text_generation(
            prompt,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=True,
        ):
            response += token
            yield response
    except Exception as e:
        # Surface endpoint failures in the chat window instead of crashing.
        yield f"❌ Error: {str(e)}"
25
 
26
# Gradio chat UI. The additional_inputs widgets are passed to respond()
# positionally after (message, history), in this exact order:
# system_message, max_tokens, temperature, top_p.
demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        gr.Textbox(value="You are Dex AI 🤖", label="System Message"),
        gr.Slider(1, 2048, value=512, step=1, label="Max Tokens"),
        gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p"),
    ],
    title="Dex 🛡️ — Your Cybersecurity AI",
    description="Talk to Dex, your friendly cybersecurity and hacking assistant powered by HuggingFace 🤖"
)
37
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
# Launch the gradio app only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()
 
 
 
 
 
 
1
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load your own model from Hugging Face
model_id = "dexcommunity/dex"  # 👈 put your own model path here

# Load tokenizer and model once at import time; both are reused by ask_dex().
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Check device (GPU or CPU) and move the model there up front.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
 
 
 
 
 
 
 
 
 
 
14
 
15
# Chat function
def ask_dex(prompt, max_length=256):
    """Generate a single Dex reply for *prompt*.

    Parameters
    ----------
    prompt : str
        The user's message.
    max_length : int, optional
        Upper bound on the total sequence length (prompt + reply tokens)
        forwarded to model.generate(). Default 256.

    Returns
    -------
    str
        The text the model produced after the "Dex:" marker, truncated
        before any fabricated follow-up "User:" turn.
    """
    input_text = f"User: {prompt}\nDex:"
    inputs = tokenizer(input_text, return_tensors="pt").to(device)

    # FIX: generation is pure inference — run without gradient tracking so
    # autograd state is not accumulated (saves memory, slightly faster).
    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_length=max_length,
            do_sample=True,
            top_k=50,
            top_p=0.9,
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id,
        )

    decoded = tokenizer.decode(output[0], skip_special_tokens=True)
    # FIX: split("Dex:")[-1] returned text after the LAST "Dex:". Sampled
    # continuations often invent further "User: ... Dex: ..." turns, so the
    # old code could return a hallucinated later turn instead of the answer.
    # Take the text after the FIRST "Dex:" and cut at the first invented
    # "User:" turn.
    reply = decoded.split("Dex:", 1)[-1]
    reply = reply.split("\nUser:")[0].strip()
    return reply
33
+
34
# Example use: a minimal terminal REPL for talking to Dex.
if __name__ == "__main__":
    while True:
        try:
            user_input = input("You: ")
        except (EOFError, KeyboardInterrupt):
            # FIX: Ctrl-D / Ctrl-C previously killed the script with a
            # traceback; treat them as a normal way to leave the chat.
            print()
            break
        if user_input.lower() in ["exit", "quit"]:
            break
        response = ask_dex(user_input)
        print("Dex:", response)