# File size: 2,199 Bytes
# 80692f2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
---
name: 05_topk_bitonic
display_name: "TopK via Bitonic Sort"
precision: fp32
regime: memory

# Top-k is dominated by the input read (small output, no reduction over k).
# Comparator-network FLOPs are not the bottleneck on real hardware, so we
# track them but score on bandwidth.
flops_formula: "batch * n * 4"            # ~O(n log^2 n) compares total, but the
                                          # bitonic network is so cheap relative
                                          # to memory that a coarse 4*n estimate
                                          # is fine for telemetry only.
bytes_formula: "batch * n * 4 + batch * k * (4 + 8)"  # fp32 input read + (fp32 value + int64 idx) output

hardware: [RTX_PRO_6000]
peak_tflops_key: fp32
peak_bandwidth_key: dram

# Top-k correctness:
#   - VALUES must match within fp32 atol/rtol (the kth largest value is
#     well-defined modulo float-equal ties, so we use a loose-ish tol).
#   - INDICES are checked leniently: for each row, the multiset of returned
#     indices must select values that match ref values within tol. Direct
#     index equality is NOT required (ties in x can yield different valid
#     index sets).
tolerance:
  float32: 1.0e-4

# Forbidden ops — using any of these in solution.py fails correctness post-hoc.
# This problem is about IMPLEMENTING the selection, not dispatching to PyTorch's
# tuned top-k. torch.sort is also banned because torch.topk falls back to it.
forbidden:
  - "torch.topk"
  - "torch.kthvalue"
  - "torch.sort"
  - "torch.argsort"
  - "Tensor.topk"
  - "Tensor.kthvalue"
  - "Tensor.sort"
  - "Tensor.argsort"
  - "torch.ops.aten.topk"
  - "torch.ops.aten.sort"
  - "torch.ops.aten.kthvalue"

sota:
  name: "torch.topk (cuTOPK / CUB internals)"
  url: "https://github.com/pytorch/pytorch/blob/main/aten/src/ATen/native/cuda/TensorTopK.cu"
  function: "torch.topk"
  deps: []
  # Informational: torch.topk dispatches to a radix-select kernel for moderate
  # k and to a bitonic sort kernel for small n. Beating it on the (1, 131072,
  # 64) decoder shape requires saturating DRAM bandwidth on the input read.
  reference_throughput_gbps_h100: 2400

num_correct_trials: 3
num_perf_trials: 50