Javedalam committed
Commit 4d9c9bd · verified · 1 Parent(s): 21f8a3d

Delete app.py

Files changed (1)
  1. app.py +0 -119
app.py DELETED
@@ -1,119 +0,0 @@
- import time, threading, queue
- import gradio as gr
- import torch, spaces
- from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
-
- # ---- Config ----
- MODEL_ID = "WeiboAI/VibeThinker-1.5B"
- SYSTEM_PROMPT = "You are a concise solver. Give one clear final answer."
-
- MAX_INPUT_TOKENS = 384   # cap prompt length so the first token comes fast
- MAX_NEW_TOKENS = 96      # keep generation inside the ZeroGPU slice
- DO_SAMPLE = False        # deterministic decoding = faster/steadier on ZeroGPU
- TEMPERATURE = 0.4        # used only if DO_SAMPLE=True
- TOP_P = 0.9              # used only if DO_SAMPLE=True
- FIRST_TOKEN_TIMEOUT = 3  # if no token in 3s -> likely no worker slot
- NO_TOKEN_HANG_CUTOFF = 8 # safety stop if the stream stalls mid-generation
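- # These cutoffs pair with the streamer's 0.05s poll timeout set below: 3s with
- # no first token is treated as a missed ZeroGPU slot, 8s of mid-stream silence
- # as a hang.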
-
- print(f"⏳ Loading {MODEL_ID} …", flush=True)
- tok = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
- model = AutoModelForCausalLM.from_pretrained(
-     MODEL_ID,
-     trust_remote_code=True,
-     low_cpu_mem_usage=True,
-     dtype=torch.bfloat16,  # recent transformers use `dtype` (torch_dtype is deprecated)
-     device_map="auto",
- ).eval()
- print("✅ Model ready.", flush=True)
-
-
- def _prepare_inputs(messages):
-     prompt_text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-     ids = tok([prompt_text], return_tensors="pt")
-     # clip from the left to stay within MAX_INPUT_TOKENS (keeps the newest turns,
-     # though a very long chat can push the system prompt out)
-     if ids["input_ids"].shape[-1] > MAX_INPUT_TOKENS:
-         ids = {k: v[:, -MAX_INPUT_TOKENS:] for k, v in ids.items()}
-     return {k: v.to(model.device) for k, v in ids.items()}
-
-
- @spaces.GPU(duration=60)  # request a short ZeroGPU slice (more likely to schedule)
- def respond(user_message, history):
-     history = history or []
-     msgs = [{"role": "system", "content": SYSTEM_PROMPT},
-             *history,
-             {"role": "user", "content": str(user_message)}]
-
-     inputs = _prepare_inputs(msgs)
-
-     # fine-grained streaming: with timeout=0.05 the streamer's iterator raises
-     # queue.Empty whenever no token is ready, so the read loop below can poll
-     streamer = TextIteratorStreamer(
-         tok, skip_prompt=True, skip_special_tokens=True, timeout=0.05
-     )
-
-     gen_kwargs = dict(
-         **inputs,
-         streamer=streamer,
-         do_sample=DO_SAMPLE,
-         repetition_penalty=1.15,  # tame short loops
-         max_new_tokens=MAX_NEW_TOKENS,
-         pad_token_id=tok.eos_token_id,
-         eos_token_id=tok.eos_token_id,
-         use_cache=True,
-     )
-     if DO_SAMPLE:
-         # only pass sampling knobs when sampling; transformers warns otherwise
-         gen_kwargs.update(temperature=TEMPERATURE, top_p=TOP_P)
-
-     # run generate in a daemon thread so it never blocks future calls
-     th = threading.Thread(target=model.generate, kwargs=gen_kwargs, daemon=True)
-     th.start()
-
-     # include the new user turn so the Chatbot shows it while the reply streams
-     out = list(history) + [{"role": "user", "content": str(user_message)},
-                            {"role": "assistant", "content": ""}]
-     got_first = False
-     start = time.time()
-     last_token_time = start
-
-     try:
-         while True:
-             try:
-                 chunk = next(streamer)
-             except queue.Empty:
-                 # no token within the 0.05s poll; decide whether to keep waiting
-                 now = time.time()
-                 if not got_first and (now - start) >= FIRST_TOKEN_TIMEOUT:
-                     # never saw a token: likely no ZeroGPU worker slot
-                     out[-1]["content"] = "(No ZeroGPU worker slot yet — press Send again.)"
-                     yield out
-                     return
-                 if got_first and (now - last_token_time) >= NO_TOKEN_HANG_CUTOFF:
-                     # stream stalled mid-generation: stop nicely
-                     out[-1]["content"] += f"\n\n(Stopped: no tokens for {NO_TOKEN_HANG_CUTOFF}s)"
-                     yield out
-                     return
-                 continue
-             except StopIteration:
-                 break  # generation finished
-             got_first = True
-             last_token_time = time.time()
-             out[-1]["content"] += chunk
-             # yield every token (true streaming)
-             yield out
-
-     except Exception as e:
-         out[-1]["content"] = f"⚠️ ZeroGPU worker error: {e}"
-         yield out
-
-
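- # Note: @spaces.GPU supports generator functions, so each yield in respond()
- # streams straight back to the Gradio client during the GPU slice.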
- # ---- UI ----
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
-     gr.Markdown("## 💡 VibeThinker-1.5B — ZeroGPU slice (smooth streaming)")
-
-     chat = gr.Chatbot(type="messages", height=520)  # some Gradio builds lack a 'streaming' kwarg
-     box = gr.Textbox(placeholder="Ask a question…")
-     send = gr.Button("Send", variant="primary")
-
-     def pipeline(msg, hist):
-         # bridge the generator into the Chatbot: clear the textbox, stream the history
-         for hist in respond(msg, hist):
-             yield "", hist
-
-     box.submit(pipeline, [box, chat], [box, chat])
-     send.click(pipeline, [box, chat], [box, chat])
-
- if __name__ == "__main__":
-     demo.queue(max_size=16).launch()