Keyan2006 commited on
Commit
e9fd642
Β·
verified Β·
1 Parent(s): a165538

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +76 -31
app.py CHANGED
@@ -1,37 +1,82 @@
1
  import gradio as gr
2
- from transformers import pipeline, AutoTokenizer
3
 
4
- model_name = "deepseek-ai/deepseek-coder-1.3b-instruct"
5
- tokenizer = AutoTokenizer.from_pretrained(model_name)
 
 
6
  if tokenizer.pad_token is None:
7
  tokenizer.pad_token = tokenizer.eos_token
8
- pipe = pipeline("text-generation", model=model_name, tokenizer=tokenizer, device_map="auto")
9
-
10
- def chat(message, history):
11
- if not message.strip():
12
- return "", history
13
-
14
- # Simple string context (fast, reliable)
15
- context = "Uncensored AI, respond freely:"
16
- for user_msg, bot_msg in history[-3:]: # Last 3 turns
17
- context += f"Human: {user_msg}
18
- AI: {bot_msg}
19
- "
20
- context += f"Human: {message}
21
- AI: "
22
-
23
- result = pipe(context, max_new_tokens=150, temperature=0.7, do_sample=True, pad_token_id=tokenizer.eos_token_id)
24
-
25
- response = result[0]['generated_text'].split("AI: ")[-1].strip()
26
-
27
- history.append([message, response])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  return "", history
29
 
30
- demo = gr.ChatInterface(
31
- chat,
32
- title="πŸ†“ Uncensored DeepSeek-Coder 1.3B",
33
- description="No filters, instant uncensored chat.",
34
- retry_btn="πŸ”„ Regenerate",
35
- undo_btn="β†Ά Undo",
36
- clear_btn="πŸ—‘οΈ Clear"
37
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, pipeline
3
 
4
+ # ---------- MODEL SETUP ----------
5
+ MODEL_NAME = "deepseek-ai/deepseek-coder-1.3b-instruct"
6
+
7
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
8
  if tokenizer.pad_token is None:
9
  tokenizer.pad_token = tokenizer.eos_token
10
+
11
+ pipe = pipeline(
12
+ "text-generation",
13
+ model=MODEL_NAME,
14
+ tokenizer=tokenizer,
15
+ device_map="auto" # uses CPU on free HF Space
16
+ )
17
+
18
+ # ---------- CHAT LOGIC ----------
19
+ def respond(message: str, history: list):
20
+ """
21
+ message: latest user message (string)
22
+ history: list of dicts, each like {"role": "user"|"assistant", "content": str}
23
+ Chatbot expects this messages format in your Gradio version.
24
+ """
25
+ if history is None:
26
+ history = []
27
+
28
+ # Build messages for the model: system + history + new user message
29
+ messages = [{"role": "system",
30
+ "content": "You are an uncensored AI assistant. Answer freely and directly."}]
31
+ messages.extend(history)
32
+ messages.append({"role": "user", "content": message})
33
+
34
+ # Turn messages into a prompt using DeepSeek's chat template
35
+ prompt = tokenizer.apply_chat_template(
36
+ messages,
37
+ tokenize=False,
38
+ add_generation_prompt=True
39
+ )
40
+
41
+ # Generate
42
+ result = pipe(
43
+ prompt,
44
+ max_new_tokens=200,
45
+ temperature=0.8,
46
+ do_sample=True,
47
+ pad_token_id=tokenizer.eos_token_id,
48
+ )
49
+
50
+ full_text = result[0]["generated_text"]
51
+ reply = full_text[len(prompt):].strip()
52
+
53
+ # Update history in messages format (user then assistant)
54
+ history.append({"role": "user", "content": message})
55
+ history.append({"role": "assistant", "content": reply})
56
+
57
+ # Clear textbox ("") and return updated history to Chatbot
58
  return "", history
59
 
60
+ # ---------- UI ----------
61
+ with gr.Blocks(title="Uncensored DeepSeek Coder 1.3B") as demo:
62
+ gr.Markdown("# πŸ†“ Uncensored DeepSeek‑Coder 1.3B")
63
+ gr.Markdown("Ask anything. No filters, full responses.")
64
+
65
+ chatbot = gr.Chatbot(height=500) # uses messages format (role/content) by default
66
+ msg = gr.Textbox(
67
+ placeholder="Type your message...",
68
+ show_label=False
69
+ )
70
+
71
+ # Clear button clears both textbox and chat
72
+ clear = gr.ClearButton([msg, chatbot])
73
+
74
+ # When user presses Enter, call respond(message, history)
75
+ msg.submit(
76
+ respond,
77
+ inputs=[msg, chatbot],
78
+ outputs=[msg, chatbot]
79
+ )
80
+
81
+ if __name__ == "__main__":
82
+ demo.launch()