teoat committed on
Commit
ea75307
·
verified ·
1 Parent(s): 95c4c3b

Upload app/performance.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app/performance.py +21 -7
app/performance.py CHANGED
@@ -80,7 +80,9 @@ class PerformanceProfiler:
80
  tasks.append((task, task_start))
81
 
82
  # Run batch concurrently
83
- results = await asyncio.gather(*[t[0] for t in tasks], return_exceptions=True)
 
 
84
 
85
  # Record times
86
  for idx, result in enumerate(results):
@@ -104,8 +106,12 @@ class PerformanceProfiler:
104
  min_response_time_ms=round(min(response_times), 2),
105
  max_response_time_ms=round(max(response_times), 2),
106
  p50_response_time_ms=round(statistics.median(response_times), 2),
107
- p95_response_time_ms=round(response_times[int(len(response_times) * 0.95)], 2),
108
- p99_response_time_ms=round(response_times[int(len(response_times) * 0.99)], 2),
 
 
 
 
109
  error_count=errors,
110
  error_rate=round(errors / num_requests * 100, 2),
111
  )
@@ -183,7 +189,9 @@ class PerformanceProfiler:
183
  "sql": q["sql"][:100] + "..." if len(q["sql"]) > 100 else q["sql"],
184
  "duration_ms": round(q["duration"], 2),
185
  }
186
- for q in sorted(slow_queries, key=lambda x: x["duration"], reverse=True)[:10]
 
 
187
  ],
188
  "query_types": {
189
  qtype: {
@@ -253,7 +261,9 @@ class PerformanceBenchmark:
253
  for count in node_counts:
254
  # Simulate graph with N nodes
255
  [{"id": str(i), "label": f"Node {i}"} for i in range(count)]
256
- links = [{"source": str(i), "target": str((i + 1) % count)} for i in range(count)]
 
 
257
 
258
  # Time the layout calculation (simulated)
259
  start = time.time()
@@ -266,7 +276,9 @@ class PerformanceBenchmark:
266
  "nodes": count,
267
  "links": len(links),
268
  "duration_ms": round(duration * 1000, 2),
269
- "fps_estimate": round(1 / (duration / 60) if duration > 0 else 60, 1),
 
 
270
  }
271
  )
272
 
@@ -299,7 +311,9 @@ def generate_performance_report(results: list[PerformanceResult]) -> str:
299
  report.append(f" P50 (Median): {result.p50_response_time_ms}")
300
  report.append(f" P95: {result.p95_response_time_ms}")
301
  report.append(f" P99: {result.p99_response_time_ms}")
302
- report.append(f"\n_errors: {result.error_count} ({result.error_rate}%)")
 
 
303
 
304
  # Performance assessment
305
  if result.requests_per_second > 100:
 
80
  tasks.append((task, task_start))
81
 
82
  # Run batch concurrently
83
+ results = await asyncio.gather(
84
+ *[t[0] for t in tasks], return_exceptions=True
85
+ )
86
 
87
  # Record times
88
  for idx, result in enumerate(results):
 
106
  min_response_time_ms=round(min(response_times), 2),
107
  max_response_time_ms=round(max(response_times), 2),
108
  p50_response_time_ms=round(statistics.median(response_times), 2),
109
+ p95_response_time_ms=round(
110
+ response_times[int(len(response_times) * 0.95)], 2
111
+ ),
112
+ p99_response_time_ms=round(
113
+ response_times[int(len(response_times) * 0.99)], 2
114
+ ),
115
  error_count=errors,
116
  error_rate=round(errors / num_requests * 100, 2),
117
  )
 
189
  "sql": q["sql"][:100] + "..." if len(q["sql"]) > 100 else q["sql"],
190
  "duration_ms": round(q["duration"], 2),
191
  }
192
+ for q in sorted(
193
+ slow_queries, key=lambda x: x["duration"], reverse=True
194
+ )[:10]
195
  ],
196
  "query_types": {
197
  qtype: {
 
261
  for count in node_counts:
262
  # Simulate graph with N nodes
263
  [{"id": str(i), "label": f"Node {i}"} for i in range(count)]
264
+ links = [
265
+ {"source": str(i), "target": str((i + 1) % count)} for i in range(count)
266
+ ]
267
 
268
  # Time the layout calculation (simulated)
269
  start = time.time()
 
276
  "nodes": count,
277
  "links": len(links),
278
  "duration_ms": round(duration * 1000, 2),
279
+ "fps_estimate": round(
280
+ 1 / (duration / 60) if duration > 0 else 60, 1
281
+ ),
282
  }
283
  )
284
 
 
311
  report.append(f" P50 (Median): {result.p50_response_time_ms}")
312
  report.append(f" P95: {result.p95_response_time_ms}")
313
  report.append(f" P99: {result.p99_response_time_ms}")
314
+ report.append(
315
+ f"\n_errors: {result.error_count} ({result.error_rate}%)"
316
+ )
317
 
318
  # Performance assessment
319
  if result.requests_per_second > 100: