import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_name = "deepseek-ai/DeepSeek-V3-0324"

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,
    torch_dtype="auto",   # keep the checkpoint's native dtype rather than defaulting to float32
    device_map="auto",    # place weights across available devices (requires accelerate)
)
model.eval()

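# Chat callback: record the user turn, flatten the history into a plain-text
# prompt, sample a completion, and append it to the history as the assistant turn.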
def respond(message, history, max_tokens, temperature, top_p):
    history = history or []
    # Append user message as dict with role and content
    history.append({"role": "user", "content": message})
    
    # Create prompt by concatenating conversation history as text
    prompt = ""
    for msg in history:
        prefix = f"{msg['role'].capitalize()}: "
        prompt += prefix + msg["content"] + "\n"
    prompt += "Assistant: "
    
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=int(max_tokens),
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens; slicing the decoded string by
    # len(prompt) is unreliable because detokenizing does not always reproduce
    # the prompt text verbatim.
    prompt_length = inputs["input_ids"].shape[-1]
    reply = tokenizer.decode(outputs[0][prompt_length:], skip_special_tokens=True).strip()
    # Append assistant response
    history.append({"role": "assistant", "content": reply})
    
    return history, ""

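# UI: a messages-style chat window, a prompt box, and sliders for the sampling parameters.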
with gr.Blocks() as demo:
    gr.Markdown("# DeepSeek Coder Chatbot")
    
    chatbot = gr.Chatbot(type="messages")
    with gr.Row():
        user_input = gr.Textbox(show_label=False, placeholder="Enter your prompt and press Enter")
    with gr.Row():
        max_tokens = gr.Slider(1, 1024, value=512, step=1, label="Max Tokens")
        temperature = gr.Slider(0.1, 1.0, value=0.7, step=0.05, label="Temperature")
        top_p = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-p")

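    # Thin wrapper around respond() that ignores empty submissions.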
    def user_submit(text, history, max_tokens, temperature, top_p):
        if not text.strip():
            return history, ""
        return respond(text, history, max_tokens, temperature, top_p)
    
    user_input.submit(user_submit, inputs=[user_input, chatbot, max_tokens, temperature, top_p], outputs=[chatbot, user_input])

if __name__ == "__main__":
    demo.launch()