rahul7star committed
Commit ff89053 · verified · 1 Parent(s): 1ae007f

Update app_qwen_tts.py

Files changed (1):
  1. app_qwen_tts.py +80 -108
app_qwen_tts.py CHANGED
@@ -1,50 +1,39 @@
  import os
  import torch
  import gradio as gr
  import numpy as np
- import base64
  import requests
  from transformers import AutoTokenizer, AutoModelForCausalLM
  from sentence_transformers import SentenceTransformer
- import asyncio

- # =========================================================
  # Configuration
- # =========================================================
  MODEL_ID = "Qwen/Qwen2.5-0.5B-Instruct"
  DOC_FILE = "general.md"
  MAX_NEW_TOKENS = 200
  TOP_K = 3
- TTS_API_URL = "https://rahul7star-Chatterbox-Multilingual-TTS-API.hf.space/tts"

- # =========================================================
- # Paths
- # =========================================================
  BASE_DIR = os.path.dirname(os.path.abspath(__file__))
  DOC_PATH = os.path.join(BASE_DIR, DOC_FILE)
- if not os.path.exists(DOC_PATH):
-     raise RuntimeError(f"❌ {DOC_FILE} not found next to app.py")

- # =========================================================
- # Load Qwen Model
- # =========================================================
- tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
- model = AutoModelForCausalLM.from_pretrained(
-     MODEL_ID,
-     device_map="auto",
-     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-     trust_remote_code=True
- )
- model.eval()

- # =========================================================
- # Embedding Model
- # =========================================================
- embedder = SentenceTransformer("all-MiniLM-L6-v2")

- # =========================================================
- # Load Document
- # =========================================================
  def chunk_text(text, chunk_size=300, overlap=50):
      words = text.split()
      chunks = []
@@ -55,24 +44,33 @@ def chunk_text(text, chunk_size=300, overlap=50):
      i += chunk_size - overlap
      return chunks

- with open(DOC_PATH, "r", encoding="utf-8", errors="ignore") as f:
-     DOC_TEXT = f.read()
-
  DOC_CHUNKS = chunk_text(DOC_TEXT)
  DOC_EMBEDS = embedder.encode(DOC_CHUNKS, normalize_embeddings=True, show_progress_bar=True)

- # =========================================================
- # Retrieval
- # =========================================================
  def retrieve_context(question, k=TOP_K):
      q_emb = embedder.encode([question], normalize_embeddings=True)
      scores = np.dot(DOC_EMBEDS, q_emb[0])
      top_ids = scores.argsort()[-k:][::-1]
      return "\n\n".join([DOC_CHUNKS[i] for i in top_ids])

- # =========================================================
- # Extract final answer
- # =========================================================
  def extract_final_answer(text: str) -> str:
      text = text.strip()
      markers = ["assistant:", "assistant", "answer:", "final answer:"]
@@ -82,12 +80,11 @@ def extract_final_answer(text: str) -> str:
      lines = [l.strip() for l in text.split("\n") if l.strip()]
      return lines[-1] if lines else text

- # =========================================================
- # Generate text
- # =========================================================
- def answer_question(question):
      context = retrieve_context(question)
-
      messages = [
          {
              "role": "system",
@@ -102,93 +99,68 @@ def answer_question(question):
          },
          {"role": "user", "content": f"Context:\n{context}\n\nQuestion:\n{question}"}
      ]
-
      prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
      inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

      with torch.no_grad():
-         output = model.generate(**inputs, max_new_tokens=MAX_NEW_TOKENS, temperature=0.3, do_sample=True)

      decoded = tokenizer.decode(output[0], skip_special_tokens=True)
      return extract_final_answer(decoded)

- # =========================================================
- # TTS API call
- # =========================================================
- def tts_via_api(text: str):
      try:
-         payload = {"text": text}
-         resp = requests.post(TTS_API_URL, json=payload)
          resp.raise_for_status()
-         data = resp.json()
-         audio_b64 = data.get("audio", "")
-         if not audio_b64:
-             return None
-         audio_path = "/tmp/output.wav"
-         audio_bytes = base64.b64decode(audio_b64)
-         with open(audio_path, "wb") as f:
-             f.write(audio_bytes)
-         return audio_path
      except Exception as e:
-         print(f"TTS API error: {e}")
          return None

- # =========================================================
- # Async chat
- # =========================================================
- async def chat_async(user_message, history):
      if not user_message.strip():
          return "", history

-     # Show user message immediately
-     history.append((user_message, None))

-     try:
-         # 1️⃣ Generate text answer immediately
-         answer_text = answer_question(user_message)
-
-         # Append answer text only (audio pending)
-         history.append((None, (answer_text, None)))
-
-         # 2️⃣ Generate audio in background
-         audio_path = await asyncio.to_thread(tts_via_api, answer_text)
-
-         # Update the last bot message with audio
-         history[-1] = (None, (answer_text, audio_path))
-     except Exception as e:
-         print(e)
-         history.append((None, ("⚠️ Error generating response", None)))

      return "", history

  def reset_chat():
      return []

- # =========================================================
  # Build UI
- # =========================================================
- def build_ui():
-     with gr.Blocks(theme=gr.themes.Soft()) as demo:
-         gr.Markdown("# 📄 Qwen Document Assistant + TTS (Async Audio)")
-         gr.Markdown("Ask a question and hear the answer. Text appears immediately, audio may take a few minutes.")
-
-         # Chat history: left=User, right=Bot
-         chatbot = gr.Chatbot(height=450, type="tuples")
-
-         msg = gr.Textbox(placeholder="Ask a question...", lines=2)
-         send = gr.Button("Send")
-         clear = gr.Button("🧹 Clear")
-
-         send.click(chat_async, [msg, chatbot], [msg, chatbot])
-         msg.submit(chat_async, [msg, chatbot], [msg, chatbot])
-         clear.click(reset_chat, outputs=chatbot)
-
-         demo.launch(server_name="0.0.0.0", server_port=7860, share=False)
-
- # =========================================================
- # Entrypoint
- # =========================================================
- if __name__ == "__main__":
-     print(f"✅ Loaded {len(DOC_CHUNKS)} chunks from {DOC_FILE}")
-     print(f"✅ Model: {MODEL_ID}")
-     build_ui()

app_qwen_tts.py (new version):
  import os
+ import io
+ import base64
+ import time
  import torch
  import gradio as gr
  import numpy as np
+ import soundfile as sf
  import requests
  from transformers import AutoTokenizer, AutoModelForCausalLM
  from sentence_transformers import SentenceTransformer

+ # =======================
  # Configuration
+ # =======================
  MODEL_ID = "Qwen/Qwen2.5-0.5B-Instruct"
  DOC_FILE = "general.md"
  MAX_NEW_TOKENS = 200
  TOP_K = 3
+ TTS_API_URL = "https://rahul7star-Chatterbox-Multilingual-TTS-API.hf.space/tts"  # your FastAPI TTS endpoint
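
A small configuration note: the endpoint URL is hard-coded, so repointing the app at a different TTS Space means editing the file. A minimal sketch of an environment-variable override (the override itself is our suggestion, not part of the commit):

import os

# Hypothetical env override; falls back to the committed default when unset
TTS_API_URL = os.environ.get(
    "TTS_API_URL",
    "https://rahul7star-Chatterbox-Multilingual-TTS-API.hf.space/tts",
)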

+ # =======================
+ # Load document
+ # =======================
  BASE_DIR = os.path.dirname(os.path.abspath(__file__))
  DOC_PATH = os.path.join(BASE_DIR, DOC_FILE)

+ if not os.path.exists(DOC_PATH):
+     raise RuntimeError(f"{DOC_FILE} not found next to app.py")

+ with open(DOC_PATH, "r", encoding="utf-8", errors="ignore") as f:
+     DOC_TEXT = f.read()

+ # =======================
+ # Chunk document
+ # =======================
  def chunk_text(text, chunk_size=300, overlap=50):
      words = text.split()
      chunks = []

      i += chunk_size - overlap
      return chunks

  DOC_CHUNKS = chunk_text(DOC_TEXT)
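
The loop body of chunk_text is collapsed in this view, but the visible stride line tells the story: the window advances by chunk_size - overlap = 300 - 50 = 250 words, so consecutive chunks share 50 words of context. A self-contained sketch of an equivalent chunker, assuming that reading of the hidden lines:

def chunk_words(text, chunk_size=300, overlap=50):
    # Sliding word window; an equivalent sketch, NOT the committed code
    words = text.split()
    chunks = []
    i = 0
    while i < len(words):
        chunks.append(" ".join(words[i:i + chunk_size]))
        i += chunk_size - overlap  # advance by the stride, re-covering `overlap` words
    return chunks

print(len(chunk_words("word " * 600)))  # windows start at 0, 250, 500 -> 3 chunks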
+
+ # =======================
+ # Load models
+ # =======================
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
+
+ model = AutoModelForCausalLM.from_pretrained(
+     MODEL_ID,
+     device_map="auto",
+     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+     trust_remote_code=True
+ )
+ model.eval()
+
+ embedder = SentenceTransformer("all-MiniLM-L6-v2")
  DOC_EMBEDS = embedder.encode(DOC_CHUNKS, normalize_embeddings=True, show_progress_bar=True)
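
Both models now load at import time (the old version loaded them before reading the document; the order is swapped but the cost is the same). A quick sanity check you can run right after these lines to confirm what device_map="auto" and the dtype ternary actually selected:

# Confirm placement and precision chosen above
print("LLM device:", model.device)
print("LLM dtype:", next(model.parameters()).dtype)  # float16 on GPU, float32 on CPU
print("Embedding dim:", embedder.get_sentence_embedding_dimension())  # 384 for all-MiniLM-L6-v2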

+ # =======================
+ # Utilities
+ # =======================
  def retrieve_context(question, k=TOP_K):
      q_emb = embedder.encode([question], normalize_embeddings=True)
      scores = np.dot(DOC_EMBEDS, q_emb[0])
      top_ids = scores.argsort()[-k:][::-1]
      return "\n\n".join([DOC_CHUNKS[i] for i in top_ids])
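
Because the chunks and the query are both encoded with normalize_embeddings=True, the plain dot product above is exactly cosine similarity, which is why no norm division appears. A tiny standalone check with toy unit vectors:

import numpy as np

a = np.array([3.0, 4.0]); a /= np.linalg.norm(a)
b = np.array([4.0, 3.0]); b /= np.linalg.norm(b)
cosine = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
assert np.isclose(np.dot(a, b), cosine)  # identical once both norms are 1
print(np.dot(a, b))  # 0.96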

  def extract_final_answer(text: str) -> str:
      text = text.strip()
      markers = ["assistant:", "assistant", "answer:", "final answer:"]

      lines = [l.strip() for l in text.split("\n") if l.strip()]
      return lines[-1] if lines else text

+ # =======================
+ # Qwen inference
+ # =======================
+ def answer_question(question: str) -> str:
      context = retrieve_context(question)
      messages = [
          {
              "role": "system",

          },
          {"role": "user", "content": f"Context:\n{context}\n\nQuestion:\n{question}"}
      ]
      prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
      inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

      with torch.no_grad():
+         output = model.generate(
+             **inputs,
+             max_new_tokens=MAX_NEW_TOKENS,
+             temperature=0.3,
+             do_sample=True
+         )

      decoded = tokenizer.decode(output[0], skip_special_tokens=True)
      return extract_final_answer(decoded)
 
116
+ # =======================
117
+ # TTS via FastAPI
118
+ # =======================
119
+ def generate_tts_base64(text: str, language_id="en") -> str:
120
  try:
121
+ payload = {"text": text, "language_id": language_id, "mode": "Speak 🗣️"}
122
+ resp = requests.post(TTS_API_URL, json=payload, timeout=None) # no timeout
123
  resp.raise_for_status()
124
+ audio_b64 = resp.json().get("audio", "")
125
+ return audio_b64
 
 
 
 
 
 
 
126
  except Exception as e:
127
+ print(f"TTS error: {e}")
128
  return None
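
Two caveats here: despite the -> str annotation the function returns None on failure, so callers should treat the result as Optional[str], and the {"audio": "<base64>"} response shape is assumed by this client rather than documented in the diff. Under that same assumption, decoding the payload to a playable file is short:

import base64

def save_tts_wav(audio_b64, path="/tmp/answer.wav"):
    # Decode the API's base64 "audio" field to disk; assumes the bytes are WAV
    with open(path, "wb") as f:
        f.write(base64.b64decode(audio_b64))
    return path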

+ # =======================
+ # Chat function for Gradio
+ # =======================
+ def chat(user_message, history):
      if not user_message.strip():
          return "", history

+     # 1️⃣ Text answer immediately
+     answer_text = answer_question(user_message)
+     history.append((user_message, [answer_text, None]))  # audio placeholder

+     # 2️⃣ Generate audio asynchronously
+     audio_b64 = generate_tts_base64(answer_text)
+     if audio_b64:
+         history[-1][1][1] = f"data:audio/wav;base64,{audio_b64}"

      return "", history
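
Two cautions on this handler. First, despite the "Generate audio asynchronously" comment, generate_tts_base64 is a blocking call here: the handler returns only after the TTS API responds (the old version ran it in a thread via asyncio.to_thread). Second, with type="tuples", gr.Chatbot generally expects each side of a pair to be a string, None, or a (file_path,) tuple rendered as media, so whether the [text, data-URI] list above displays an audio player depends on the Gradio version. A sketch of the more conventional shape, using the save_tts_wav helper sketched earlier (not part of the commit) and appending the audio as its own bot turn:

# Hypothetical rework of the body of chat()
answer_text = answer_question(user_message)
history.append((user_message, answer_text))      # text turn
audio_b64 = generate_tts_base64(answer_text)
if audio_b64:
    wav_path = save_tts_wav(audio_b64)           # write /tmp/answer.wav
    history.append((None, (wav_path,)))          # media turn rendered with a player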

  def reset_chat():
      return []

+ # =======================
  # Build UI
+ # =======================
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
+     gr.Markdown("## 📄 Qwen Document Assistant + TTS\nText appears instantly; audio plays once ready.")
+
+     chatbot = gr.Chatbot(height=450, type="tuples")
+     msg = gr.Textbox(placeholder="Ask a question...", lines=2)
+     send = gr.Button("Send")
+     clear = gr.Button("Clear")
+
+     send.click(chat, [msg, chatbot], [msg, chatbot])
+     msg.submit(chat, [msg, chatbot], [msg, chatbot])
+     clear.click(reset_chat, outputs=chatbot)
+
+     demo.launch(server_name="0.0.0.0", server_port=7860, share=False)
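
The TTS Space can also be smoke-tested on its own before launching the UI. A minimal client that reuses the exact payload shape this commit sends (the schema is inferred from the code above, not from API docs, and the finite timeout is our substitution for the file's timeout=None):

import base64
import requests

url = "https://rahul7star-Chatterbox-Multilingual-TTS-API.hf.space/tts"
payload = {"text": "Hello from the smoke test.", "language_id": "en", "mode": "Speak 🗣️"}
resp = requests.post(url, json=payload, timeout=300)
resp.raise_for_status()
audio_b64 = resp.json().get("audio", "")
print("decoded bytes:", len(base64.b64decode(audio_b64)) if audio_b64 else 0)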