Javedalam committed
Commit 3cfd21e · verified · 1 Parent(s): 80ededa

Delete app.py

Files changed (1)
  1. app.py +0 -96
app.py DELETED
@@ -1,96 +0,0 @@
- import time, threading
- import gradio as gr
- import torch, spaces
- from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
-
- # ---------- Config ----------
- MODEL_ID = "WeiboAI/VibeThinker-1.5B"
- SYSTEM_PROMPT = "You are a concise solver. Give one clear final answer."
-
- MAX_INPUT_TOKENS = 384   # clip context so first token comes fast
- MAX_NEW_TOKENS = 96      # keep short to finish inside the GPU slice
- DO_SAMPLE = False        # deterministic decode = smoother & faster on ZeroGPU
- TEMPERATURE = 0.4        # only used if DO_SAMPLE=True
- TOP_P = 0.9
- FIRST_TOKEN_TIMEOUT = 3  # if no token arrives, likely no GPU slice yet
-
- # ---------- Load model once ----------
- print(f"⏳ Loading {MODEL_ID} …", flush=True)
- tok = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
- model = AutoModelForCausalLM.from_pretrained(
-     MODEL_ID,
-     trust_remote_code=True,
-     low_cpu_mem_usage=True,
-     dtype=torch.bfloat16,  # (use dtype, not torch_dtype)
-     device_map="auto",
- ).eval()
- print("✅ Model ready.", flush=True)
-
-
- def _to_inputs(messages_text: str):
-     ids = tok([messages_text], return_tensors="pt")
-     if ids["input_ids"].shape[-1] > MAX_INPUT_TOKENS:
-         ids = {k: v[:, -MAX_INPUT_TOKENS:] for k, v in ids.items()}
-     return {k: v.to(model.device) for k, v in ids.items()}
-
-
- @spaces.GPU(duration=60)  # shorter slice => scheduled more reliably
- def respond(message, history):
-     history = history or []
-     msgs = [{"role": "system", "content": SYSTEM_PROMPT}, *history,
-             {"role": "user", "content": str(message)}]
-
-     prompt_text = tok.apply_chat_template(msgs, tokenize=False, add_generation_prompt=True)
-     inputs = _to_inputs(prompt_text)
-
-     # fast, fine-grained streaming
-     streamer = TextIteratorStreamer(tok, skip_prompt=True, skip_special_tokens=True, timeout=0.05)
-
-     gen_kwargs = dict(
-         **inputs,
-         streamer=streamer,
-         do_sample=DO_SAMPLE,
-         temperature=TEMPERATURE,
-         top_p=TOP_P,
-         repetition_penalty=1.15,  # tame short loops
-         max_new_tokens=MAX_NEW_TOKENS,
-         pad_token_id=tok.eos_token_id,
-         use_cache=True,
-         eos_token_id=tok.eos_token_id,
-     )
-
-     th = threading.Thread(target=model.generate, kwargs=gen_kwargs, daemon=True)
-     th.start()
-
-     out = list(history) + [{"role": "assistant", "content": ""}]
-     got_first = False
-     start = time.time()
-
-     # yield every token (no bursting)
-     for chunk in streamer:
-         got_first = True
-         out[-1]["content"] += chunk
-         yield out
-
-     # If no token ever arrived, likely no GPU window was granted
-     if not got_first and (time.time() - start) >= FIRST_TOKEN_TIMEOUT:
-         out[-1]["content"] = "(No GPU slot yet — press Send again.)"
-         yield out
-
-
- # ---------- UI ----------
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
-     gr.Markdown("## 💡 VibeThinker-1.5B — ZeroGPU slice (smooth streaming)")
-     chat = gr.Chatbot(type="messages", height=520)
-     box = gr.Textbox(placeholder="Ask a question…")
-     send = gr.Button("Send", variant="primary")
-
-     def pipeline(msg, hist):
-         for hist in respond(msg, hist):
-             yield "", hist
-
-     box.submit(pipeline, [box, chat], [box, chat])
-     send.click(pipeline, [box, chat], [box, chat])
-
- if __name__ == "__main__":
-     demo.queue(max_size=16).launch()