Update app.py
app.py CHANGED
@@ -113,27 +113,7 @@ def smart_truncate(text, max_length=3000):
     if len(sentences) > 1:
         return ' '.join(sentences[:-1]) + "... [Response truncated - ask for continuation]"
     # Otherwise, split by word
-
-    words = text[:max_length].split()
-    return ' '.join(words[:-1]) + "... [Response truncated - ask for continuation]"
-
-def detect_tool_request(text):
-    """Simple heuristic to detect when a graph might be helpful."""
-    graph_keywords = [
-        "chart", "graph", "plot", "visualize", "show data", "bar chart",
-        "line graph", "pie chart", "diagram", "compare", "data visualization"
-    ]
-    text_lower = text.lower()
-    return any(keyword in text_lower for keyword in graph_keywords)
-
-def call_hf_api(messages, max_retries=3):
-    """Call Hugging Face API with retry logic."""
-    payload = {
-        "inputs": format_messages_for_hf(messages),
-        "parameters": {
-            "max_new_tokens": 1024,
-            "temperature": 0.7,
-            "top_p": 0.9,
+            "top_p": 0.9,
             "return_full_text": False
         }
     }
@@ -237,69 +217,59 @@ def process_response_for_tools(response_text, original_query):
     return response_text
 
 def respond_with_enhanced_streaming(message, history):
-    """
+    """Streams the bot's response, with support for graph generation."""
     timing_context = metrics_tracker.start_timing()
     error_occurred = False
-
+    response = ""
+    mode = ""
+
     try:
-        # [… removed streaming implementation; these lines are not preserved in this view …]
-        full_response = current_response.strip()
-        yield full_response
-
-        logger.info(f"Response completed. Length: {len(full_response)} characters")
-
+        # ---- Call HuggingFace Inference ----
+        payload = {
+            "inputs": message,
+            "parameters": {"return_full_text": False},
+        }
+        headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
+        api_response = requests.post(HF_API_URL, headers=headers, json=payload)
+        api_response.raise_for_status()
+        outputs = api_response.json()
+
+        # HuggingFace might return [{"generated_text": "..."}]
+        model_output = outputs[0].get("generated_text", "").strip()
+
+        # ---- Check if it's a tool call ----
+        try:
+            parsed = json.loads(model_output)
+            if isinstance(parsed, dict) and parsed.get("name") == "create_graph":
+                args = parsed.get("arguments", {})
+                graph_html = generate_plot(
+                    args.get("data_json", "{}"),
+                    args.get("labels_json", "[]"),
+                    args.get("plot_type", "bar"),
+                    args.get("title", "Untitled"),
+                    args.get("x_label", ""),
+                    args.get("y_label", "")
+                )
+                response = graph_html
+                mode = "graph"
+            else:
+                response = model_output
+                mode = "text"
+        except json.JSONDecodeError:
+            # Not JSON → just plain text
+            response = model_output
+            mode = "text"
+
     except Exception as e:
         error_occurred = True
-
-        full_response = "Sorry, an error occurred while generating the response."
-        yield full_response
+        response = f"⚠️ Error: {str(e)}"
+        mode = "error"
 
     finally:
-        metrics_tracker.
-        [… call arguments not preserved in this view …]
-            error_occurred=error_occurred,
-            error_message=error_message,
-        )
+        metrics_tracker.stop_timing(timing_context, error_occurred)
+
+    return response, mode
+
 
 # ===============================================================================
 # UI CONFIGURATION SECTION - ALL UI RELATED CODE CENTRALIZED HERE
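For reference, the tool-call branch added in this hunk expects the model to reply with a JSON object carrying a "name" and an "arguments" dict. The snippet below is a minimal, self-contained sketch of that detection step; only the key names and argument names are taken from the diff, and the sample values are illustrative.

import json

# Illustrative model reply in the create_graph tool-call shape parsed above.
model_output = json.dumps({
    "name": "create_graph",
    "arguments": {
        "data_json": '{"Q1": 10, "Q2": 15, "Q3": 12}',
        "labels_json": '["Q1", "Q2", "Q3"]',
        "plot_type": "bar",
        "title": "Quarterly sales",
        "x_label": "Quarter",
        "y_label": "Units",
    },
})

try:
    parsed = json.loads(model_output)
    # Same check as in the diff: a dict named "create_graph" is routed to the graph path.
    is_tool_call = isinstance(parsed, dict) and parsed.get("name") == "create_graph"
except json.JSONDecodeError:
    is_tool_call = False  # anything that is not valid JSON stays on the plain-text path

print("graph" if is_tool_call else "text")  # prints "graph" for this sample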