TylerHilbert commited on
Commit
0d7e51f
·
1 Parent(s): bb96ee5

Renamed columns

Browse files
Files changed (1) hide show
  1. PyTorchConference2025_GithubRepos.json +248 -248
PyTorchConference2025_GithubRepos.json CHANGED
@@ -1,835 +1,835 @@
1
  [
2
  {
3
  "repo_name": "pytorch",
4
- "github_repo_link": "https://github.com/pytorch/pytorch",
5
  "category": "machine learning framework",
6
- "repo_description": "Tensors and Dynamic neural networks in Python with strong GPU acceleration",
7
  "homepage_link": "https://pytorch.org",
8
  "github_topic_closest_fit": "machine-learning"
9
  },
10
  {
11
  "repo_name": "triton",
12
- "github_repo_link": "https://github.com/triton-lang/triton",
13
  "category": "parallel computing dsl",
14
- "repo_description": "Development repository for the Triton language and compiler",
15
  "homepage_link": "https://triton-lang.org/",
16
  "github_topic_closest_fit": "parallel-programming"
17
  },
18
  {
19
  "repo_name": "cutlass",
20
- "github_repo_link": "https://github.com/NVIDIA/cutlass",
21
  "category": "parallel computing",
22
- "repo_description": "CUDA Templates and Python DSLs for High-Performance Linear Algebra",
23
  "homepage_link": "https://docs.nvidia.com/cutlass/index.html",
24
  "github_topic_closest_fit": "parallel-programming"
25
  },
26
  {
27
  "repo_name": "tilelang",
28
- "github_repo_link": "https://github.com/tile-ai/tilelang",
29
  "category": "parallel computing dsl",
30
- "repo_description": "Domain-specific language designed to streamline the development of high-performance GPU/CPU/Accelerators kernels",
31
  "homepage_link": "https://tilelang.com",
32
  "github_topic_closest_fit": "parallel-programming"
33
  },
34
  {
35
  "repo_name": "ThunderKittens",
36
- "github_repo_link": "https://github.com/HazyResearch/ThunderKittens",
37
  "category": "parallel computing",
38
- "repo_description": "Tile primitives for speedy kernels",
39
  "homepage_link": "https://hazyresearch.stanford.edu/blog/2024-10-29-tk2",
40
  "github_topic_closest_fit": "parallel-programming"
41
  },
42
  {
43
  "repo_name": "helion",
44
- "github_repo_link": "https://github.com/pytorch/helion",
45
  "category": "parallel computing dsl",
46
- "repo_description": "A Python-embedded DSL that makes it easy to write fast, scalable ML kernels with minimal boilerplate.",
47
  "homepage_link": "https://helionlang.com",
48
  "github_topic_closest_fit": "parallel-programming"
49
  },
50
  {
51
  "repo_name": "TileIR",
52
- "github_repo_link": "https://github.com/microsoft/TileIR",
53
  "category": "parallel computing dsl",
54
- "repo_description": "TileIR (tile-ir) is a concise domain-specific IR designed to streamline the development of high-performance GPU/CPU kernels (e.g., GEMM, Dequant GEMM, FlashAttention, LinearAttention). By employing a Pythonic syntax with an underlying compiler infrastructure on top of TVM, TileIR allows developers to focus on productivity without sacrificing the low-level optimizations necessary for state-of-the-art performance.",
55
  "github_topic_closest_fit": "parallel-programming"
56
  },
57
  {
58
  "repo_name": "BitBLAS",
59
- "github_repo_link": "https://github.com/microsoft/BitBLAS",
60
- "repo_description": "BitBLAS is a library to support mixed-precision matrix multiplications, especially for quantized LLM deployment."
61
  },
62
  {
63
  "repo_name": "tensorflow",
64
- "github_repo_link": "https://github.com/tensorflow/tensorflow",
65
  "category": "machine learning framework",
66
- "repo_description": "An Open Source Machine Learning Framework for Everyone",
67
  "homepage_link": "https://tensorflow.org",
68
  "github_topic_closest_fit": "machine-learning"
69
  },
70
  {
71
  "repo_name": "vllm",
72
- "github_repo_link": "https://github.com/vllm-project/vllm",
73
  "category": "inference engine",
74
- "repo_description": "A high-throughput and memory-efficient inference and serving engine for LLMs",
75
  "homepage_link": "https://docs.vllm.ai",
76
  "github_topic_closest_fit": "inference"
77
  },
78
  {
79
  "repo_name": "ollama",
80
- "github_repo_link": "https://github.com/ollama/ollama",
81
  "category": "inference engine",
82
- "repo_description": "Get up and running with OpenAI gpt-oss, DeepSeek-R1, Gemma 3 and other models.",
83
  "homepage_link": "https://ollama.com",
84
  "github_topic_closest_fit": "inference"
85
  },
86
  {
87
  "repo_name": "llama.cpp",
88
- "github_repo_link": "https://github.com/ggml-org/llama.cpp",
89
  "category": "inference engine",
90
- "repo_description": "LLM inference in C/C++",
91
  "homepage_link": "https://ggml.ai",
92
  "github_topic_closest_fit": "inference"
93
  },
94
  {
95
  "repo_name": "sglang",
96
- "github_repo_link": "https://github.com/sgl-project/sglang",
97
  "category": "inference engine",
98
- "repo_description": "SGLang is a fast serving framework for large language models and vision language models.",
99
  "homepage_link": "https://docs.sglang.ai",
100
  "github_topic_closest_fit": "inference"
101
  },
102
  {
103
  "repo_name": "onnx",
104
- "github_repo_link": "https://github.com/onnx/onnx",
105
  "category": "machine learning framework",
106
- "repo_description": "Open standard for machine learning interoperability",
107
  "homepage_link": "https://onnx.ai/",
108
  "github_topic_closest_fit": "onnx"
109
  },
110
  {
111
  "repo_name": "executorch",
112
- "github_repo_link": "https://github.com/pytorch/executorch",
113
  "category": "model compiler",
114
- "repo_description": "On-device AI across mobile, embedded and edge for PyTorch",
115
  "homepage_link": "https://executorch.ai",
116
  "github_topic_closest_fit": "compiler"
117
  },
118
  {
119
- "github_repo_link": "https://github.com/ray-project/ray",
120
  "repo_name": "ray",
121
- "repo_description": "Ray is an AI compute engine. Ray consists of a core distributed runtime and a set of AI Libraries for accelerating ML workloads.",
122
  "homepage_link": "https://ray.io",
123
  "github_topic_closest_fit": "machine-learning"
124
  },
125
  {
126
- "github_repo_link": "https://github.com/jax-ml/jax",
127
  "repo_name": "jax",
128
- "repo_description": "Composable transformations of Python+NumPy programs: differentiate, vectorize, JIT to GPU/TPU, and more",
129
  "homepage_link": "https://docs.jax.dev",
130
  "github_topic_closest_fit": "jax"
131
  },
132
  {
133
- "github_repo_link": "https://github.com/llvm/llvm-project",
134
  "repo_name": "llvm-project",
135
- "repo_description": "The LLVM Project is a collection of modular and reusable compiler and toolchain technologies.",
136
  "homepage_link": "http://llvm.org",
137
  "category": "compiler"
138
  },
139
  {
140
- "github_repo_link": "https://github.com/NVIDIA/TensorRT",
141
  "repo_name": "TensorRT",
142
- "repo_description": "NVIDIA® TensorRT™ is an SDK for high-performance deep learning inference on NVIDIA GPUs. This repository contains the open source components of TensorRT.",
143
  "homepage_link": "https://developer.nvidia.com/tensorrt",
144
  "github_topic_closest_fit": "inference"
145
  },
146
  {
147
- "github_repo_link": "https://github.com/pytorch/ao",
148
  "repo_name": "ao",
149
- "repo_description": "PyTorch native quantization and sparsity for training and inference",
150
  "homepage_link": "https://pytorch.org/ao/stable/index.html",
151
  "github_topic_closest_fit": "quantization"
152
  },
153
  {
154
- "github_repo_link": "https://github.com/AMD-AGI/GEAK-agent",
155
  "repo_name": "GEAK-agent",
156
- "repo_description": "It is an LLM-based AI agent, which can write correct and efficient gpu kernels automatically."
157
  },
158
  {
159
- "github_repo_link": "https://github.com/block/goose",
160
  "repo_name": "goose",
161
- "repo_description": "an open source, extensible AI agent that goes beyond code suggestions - install, execute, edit, and test with any LLM",
162
  "homepage_link": "https://block.github.io/goose/",
163
  "github_topic_closest_fit": "mcp",
164
  "category": "agent"
165
  },
166
  {
167
- "github_repo_link": "https://github.com/codelion/openevolve",
168
  "repo_name": "openevolve",
169
- "repo_description": "Open-source implementation of AlphaEvolve",
170
  "github_topic_closest_fit": "genetic-algorithm"
171
  },
172
  {
173
- "github_repo_link": "https://github.com/volcengine/verl",
174
  "repo_name": "verl",
175
- "repo_description": "verl: Volcano Engine Reinforcement Learning for LLMs",
176
  "homepage_link": "https://verl.readthedocs.io/en/latest/index.html"
177
  },
178
  {
179
- "github_repo_link": "https://github.com/huggingface/peft",
180
  "repo_name": "peft",
181
- "repo_description": "🤗 PEFT: State-of-the-art Parameter-Efficient Fine-Tuning.",
182
  "homepage_link": "https://huggingface.co/docs/peft",
183
  "github_topic_closest_fit": "lora"
184
  },
185
  {
186
- "github_repo_link": "https://github.com/Dao-AILab/quack",
187
  "repo_name": "quack",
188
- "repo_description": "A Quirky Assortment of CuTe Kernels",
189
  "category": "kernels"
190
  },
191
  {
192
- "github_repo_link": "https://github.com/AMDResearch/intelliperf",
193
  "repo_name": "intelliperf",
194
- "repo_description": "Automated bottleneck detection and solution orchestration",
195
  "github_topic_closest_fit": "performance"
196
  },
197
  {
198
- "github_repo_link": "https://github.com/letta-ai/letta",
199
  "repo_name": "letta",
200
- "repo_description": "Letta is the platform for building stateful agents: open AI with advanced memory that can learn and self-improve over time.",
201
  "homepage_link": "https://docs.letta.com/",
202
  "github_topic_closest_fit": "ai-agents"
203
  },
204
  {
205
- "github_repo_link": "https://github.com/lastmile-ai/mcp-agent",
206
  "repo_name": "mcp-agent",
207
- "repo_description": "Build effective agents using Model Context Protocol and simple workflow patterns",
208
  "github_topic_closest_fit": "ai-agents"
209
  },
210
  {
211
- "github_repo_link": "https://github.com/modular/modular",
212
  "repo_name": "modular",
213
- "repo_description": "The Modular Platform (includes MAX & Mojo)",
214
  "homepage_link": "https://docs.modular.com/",
215
  "github_topic_closest_fit": "mojo"
216
  },
217
  {
218
- "github_repo_link": "https://github.com/ScalingIntelligence/KernelBench",
219
  "repo_name": "KernelBench",
220
- "repo_description": "KernelBench: Can LLMs Write GPU Kernels? - Benchmark with Torch -> CUDA problems",
221
  "homepage_link": "https://scalingintelligence.stanford.edu/blogs/kernelbench/",
222
  "github_topic_closest_fit": "benchmark",
223
  "category": "benchmark"
224
  },
225
  {
226
- "github_repo_link": "https://github.com/thunlp/TritonBench",
227
  "repo_name": "TritonBench",
228
- "repo_description": "TritonBench: Benchmarking Large Language Model Capabilities for Generating Triton Operators",
229
  "category": "benchmark"
230
  },
231
  {
232
- "github_repo_link": "https://github.com/flashinfer-ai/flashinfer-bench",
233
  "repo_name": "flashinfer-bench",
234
- "repo_description": "Building the Virtuous Cycle for AI-driven LLM Systems",
235
  "homepage_link": "https://bench.flashinfer.ai",
236
  "category": "benchmark"
237
  },
238
  {
239
- "github_repo_link": "https://github.com/laude-institute/terminal-bench",
240
  "repo_name": "terminal-bench",
241
- "repo_description": "A benchmark for LLMs on complicated tasks in the terminal",
242
  "homepage_link": "https://www.tbench.ai",
243
  "category": "benchmark"
244
  },
245
  {
246
- "github_repo_link": "https://github.com/SWE-bench/SWE-bench",
247
  "repo_name": "SWE-bench",
248
- "repo_description": "SWE-bench: Can Language Models Resolve Real-world Github Issues?",
249
  "homepage_link": "https://www.swebench.com",
250
  "github_topic_closest_fit": "benchmark",
251
  "category": "benchmark"
252
  },
253
  {
254
- "github_repo_link": "https://github.com/gpu-mode/reference-kernels",
255
  "repo_name": "reference-kernels",
256
- "repo_description": "Official Problem Sets / Reference Kernels for the GPU MODE Leaderboard!",
257
  "homepage_link": "https://gpumode.com",
258
  "github_topic_closest_fit": "gpu",
259
  "category": "kernels"
260
  },
261
  {
262
- "github_repo_link": "https://github.com/linkedin/Liger-Kernel",
263
  "repo_name": "Liger-Kernel",
264
- "repo_description": "Efficient Triton Kernels for LLM Training",
265
  "homepage_link": "https://openreview.net/pdf?id=36SjAIT42G",
266
  "github_topic_closest_fit": "triton",
267
  "category": "kernels"
268
  },
269
  {
270
- "github_repo_link": "https://github.com/huggingface/kernels",
271
  "repo_name": "kernels",
272
- "repo_description": "Load compute kernels from the Hub",
273
  "category": "kernels"
274
  },
275
  {
276
- "github_repo_link": "https://github.com/huggingface/kernels-community",
277
  "repo_name": "kernels-community",
278
- "repo_description": "Kernel sources for https://huggingface.co/kernels-community",
279
  "category": "kernels"
280
  },
281
  {
282
- "github_repo_link": "https://github.com/unslothai/unsloth",
283
  "repo_name": "unsloth",
284
- "repo_description": "Fine-tuning & Reinforcement Learning for LLMs. 🦥 Train OpenAI gpt-oss, DeepSeek-R1, Qwen3, Gemma 3, TTS 2x faster with 70% less VRAM.",
285
  "homepage_link": "https://docs.unsloth.ai/",
286
  "github_topic_closest_fit": "unsloth"
287
  },
288
  {
289
- "github_repo_link": "https://github.com/jupyterlab/jupyterlab",
290
  "repo_name": "jupyterlab",
291
- "repo_description": "JupyterLab computational environment.",
292
  "homepage_link": "https://jupyterlab.readthedocs.io/",
293
  "github_topic_closest_fit": "jupyter",
294
  "category": "ui"
295
  },
296
  {
297
- "github_repo_link": "https://github.com/ROCm/rocm-systems",
298
  "repo_name": "rocm-systems",
299
- "repo_description": "super repo for rocm systems projects"
300
  },
301
  {
302
- "github_repo_link": "https://github.com/ROCm/hip",
303
  "repo_name": "hip",
304
- "repo_description": "HIP: C++ Heterogeneous-Compute Interface for Portability",
305
  "homepage_link": "https://rocmdocs.amd.com/projects/HIP/",
306
  "github_topic_closest_fit": "hip"
307
  },
308
  {
309
- "github_repo_link": "https://github.com/ROCm/ROCm",
310
  "repo_name": "ROCm",
311
- "repo_description": "AMD ROCm™ Software - GitHub Home",
312
  "homepage_link": "https://rocm.docs.amd.com",
313
  "github_topic_closest_fit": "documentation"
314
  },
315
  {
316
- "github_repo_link": "https://github.com/ROCm/omnitrace",
317
  "repo_name": "omnitrace",
318
- "repo_description": "Omnitrace: Application Profiling, Tracing, and Analysis",
319
  "homepage_link": "https://rocm.docs.amd.com/projects/omnitrace/en/docs-6.2.4/",
320
  "github_topic_closest_fit": "performance-analysis"
321
  },
322
  {
323
- "github_repo_link": "https://github.com/vosen/ZLUDA",
324
  "repo_name": "ZLUDA",
325
- "repo_description": "CUDA on non-NVIDIA GPUs",
326
  "homepage_link": "https://vosen.github.io/ZLUDA/",
327
  "github_topic_closest_fit": "cuda"
328
  },
329
  {
330
- "github_repo_link": "https://github.com/vtsynergy/CU2CL",
331
  "repo_name": "CU2CL",
332
- "repo_description": "A prototype CUDA-to-OpenCL source-to-source translator, built on the Clang compiler framework",
333
  "homepage_link": "http://chrec.cs.vt.edu/cu2cl"
334
  },
335
  {
336
- "github_repo_link": "https://github.com/pocl/pocl",
337
  "repo_name": "pocl",
338
- "repo_description": "pocl - Portable Computing Language",
339
  "homepage_link": "https://portablecl.org",
340
  "github_topic_closest_fit": "opencl"
341
  },
342
  {
343
- "github_repo_link": "https://github.com/cwpearson/cupti",
344
  "repo_name": "cupti",
345
- "repo_description": "Profile how CUDA applications create and modify data in memory.",
346
  "category": "profiler"
347
  },
348
  {
349
- "github_repo_link": "https://github.com/LLNL/hatchet",
350
  "repo_name": "hatchet",
351
- "repo_description": "Graph-indexed Pandas DataFrames for analyzing hierarchical performance data",
352
  "homepage_link": "https://llnl-hatchet.readthedocs.io",
353
  "github_topic_closest_fit": "performance",
354
  "category": "profiler"
355
  },
356
  {
357
- "github_repo_link": "https://github.com/toyaix/triton-runner",
358
  "repo_name": "triton-runner",
359
- "repo_description": "Multi-Level Triton Runner supporting Python, IR, PTX, and cubin.",
360
  "homepage_link": "https://triton-runner.org",
361
  "github_topic_closest_fit": "triton"
362
  },
363
  {
364
- "github_repo_link": "https://github.com/ByteDance-Seed/Triton-distributed",
365
  "repo_name": "Triton-distributed",
366
- "repo_description": "Distributed Compiler based on Triton for Parallel Systems",
367
  "homepage_link": "https://triton-distributed.readthedocs.io/en/latest/",
368
  "category": "model compiler"
369
  },
370
  {
371
- "github_repo_link": "https://github.com/meta-pytorch/tritonparse",
372
  "repo_name": "tritonparse",
373
- "repo_description": "TritonParse: A Compiler Tracer, Visualizer, and Reproducer for Triton Kernels",
374
  "homepage_link": "https://meta-pytorch.org/tritonparse/",
375
  "github_topic_closest_fit": "triton"
376
  },
377
  {
378
- "github_repo_link": "https://github.com/numpy/numpy",
379
  "repo_name": "numpy",
380
- "repo_description": "The fundamental package for scientific computing with Python.",
381
  "homepage_link": "https://numpy.org",
382
  "github_topic_closest_fit": "python",
383
  "category": "python library"
384
  },
385
  {
386
- "github_repo_link": "https://github.com/scipy/scipy",
387
  "repo_name": "scipy",
388
- "repo_description": "SciPy library main repository",
389
  "homepage_link": "https://scipy.org",
390
  "github_topic_closest_fit": "python",
391
  "category": "python library"
392
  },
393
  {
394
- "github_repo_link": "https://github.com/numba/numba",
395
  "repo_name": "numba",
396
- "repo_description": "NumPy aware dynamic Python compiler using LLVM",
397
  "homepage_link": "https://numba.pydata.org/",
398
  "github_topic_closest_fit": "compiler"
399
  },
400
  {
401
- "github_repo_link": "https://github.com/Lightning-AI/lightning-thunder",
402
  "repo_name": "lightning-thunder",
403
- "repo_description": "PyTorch compiler that accelerates training and inference. Get built-in optimizations for performance, memory, parallelism, and easily write your own."
404
  },
405
  {
406
- "github_repo_link": "https://github.com/pytorch/torchdynamo",
407
  "repo_name": "torchdynamo",
408
- "repo_description": "A Python-level JIT compiler designed to make unmodified PyTorch programs faster."
409
  },
410
  {
411
- "github_repo_link": "https://github.com/NVIDIA/nccl",
412
  "repo_name": "nccl",
413
- "repo_description": "Optimized primitives for collective multi-GPU communication",
414
  "homepage_link": "https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/index.html",
415
  "github_topic_closest_fit": "cuda"
416
  },
417
  {
418
- "github_repo_link": "https://github.com/ai-dynamo/nixl",
419
  "repo_name": "nixl",
420
- "repo_description": "NVIDIA Inference Xfer Library (NIXL)"
421
  },
422
  {
423
- "github_repo_link": "https://github.com/guandeh17/Self-Forcing",
424
  "repo_name": "Self-Forcing",
425
- "repo_description": "Official codebase for \"Self Forcing: Bridging Training and Inference in Autoregressive Video Diffusion\" (NeurIPS 2025 Spotlight)"
426
 
427
  },
428
  {
429
- "github_repo_link": "https://github.com/cumulo-autumn/StreamDiffusion",
430
  "repo_name": "StreamDiffusion",
431
- "repo_description": "StreamDiffusion: A Pipeline-Level Solution for Real-Time Interactive Generation"
432
  },
433
  {
434
- "github_repo_link": "https://github.com/comfyanonymous/ComfyUI",
435
  "repo_name": "ComfyUI",
436
- "repo_description": "The most powerful and modular diffusion model GUI, api and backend with a graph/nodes interface.",
437
  "homepage_link": "https://www.comfy.org/",
438
  "github_topic_closest_fit": "stable-diffusion"
439
  },
440
  {
441
- "github_repo_link": "https://github.com/Jeff-LiangF/streamv2v",
442
  "repo_name": "streamv2v",
443
- "repo_description": "Official Pytorch implementation of StreamV2V.",
444
  "homepage_link": "https://jeff-liangf.github.io/projects/streamv2v/"
445
  },
446
  {
447
- "github_repo_link": "https://github.com/deepspeedai/DeepSpeed",
448
  "repo_name": "DeepSpeed",
449
- "repo_description": "DeepSpeed is a deep learning optimization library that makes distributed training and inference easy, efficient, and effective.",
450
  "homepage_link": "https://www.deepspeed.ai/",
451
  "github_topic_closest_fit": "gpu"
452
  },
453
  {
454
- "github_repo_link": "https://github.com/triton-inference-server/server",
455
  "repo_name": "server",
456
- "repo_description": "The Triton Inference Server provides an optimized cloud and edge inferencing solution.",
457
  "homepage_link": "https://docs.nvidia.com/deeplearning/triton-inference-server/user-guide/docs/index.html",
458
  "github_topic_closest_fit": "inference"
459
  },
460
  {
461
- "github_repo_link": "https://github.com/elastic/elasticsearch",
462
  "repo_name": "elasticsearch",
463
- "repo_description": "Free and Open Source, Distributed, RESTful Search Engine",
464
  "homepage_link": "https://www.elastic.co/products/elasticsearch",
465
  "github_topic_closest_fit": "search-engine",
466
  "category": "search engine"
467
  },
468
  {
469
- "github_repo_link": "https://github.com/kubernetes/kubernetes",
470
  "repo_name": "kubernetes",
471
- "repo_description": "Production-Grade Container Scheduling and Management",
472
  "homepage_link": "https://kubernetes.io",
473
  "github_topic_closest_fit": "containers"
474
  },
475
  {
476
- "github_repo_link": "https://github.com/modelcontextprotocol/modelcontextprotocol",
477
  "repo_name": "modelcontextprotocol",
478
- "repo_description": "Specification and documentation for the Model Context Protocol",
479
  "homepage_link": "https://modelcontextprotocol.io"
480
  },
481
  {
482
- "github_repo_link": "https://github.com/milvus-io/milvus",
483
  "repo_name": "milvus",
484
- "repo_description": "Milvus is a high-performance, cloud-native vector database built for scalable vector ANN search",
485
  "homepage_link": "https://milvus.io",
486
  "github_topic_closest_fit": "vector-search",
487
  "category": "vector database"
488
  },
489
  {
490
- "github_repo_link": "https://github.com/gaoj0017/RaBitQ",
491
  "repo_name": "RaBitQ",
492
- "repo_description": "[SIGMOD 2024] RaBitQ: Quantizing High-Dimensional Vectors with a Theoretical Error Bound for Approximate Nearest Neighbor Search",
493
  "homepage_link": "https://github.com/VectorDB-NTU/RaBitQ-Library",
494
  "github_topic_closest_fit": "nearest-neighbor-search"
495
  },
496
  {
497
- "github_repo_link": "https://github.com/Airtable/airtable.js",
498
  "repo_name": "airtable.js",
499
- "repo_description": "Airtable javascript client"
500
  },
501
  {
502
- "github_repo_link": "https://github.com/mistralai/mistral-inference",
503
  "repo_name": "mistral-inference",
504
- "repo_description": "Official inference library for Mistral models",
505
  "homepage_link": "https://mistral.ai/",
506
  "github_topic_closest_fit": "llm-inference",
507
  "category": "inference engine"
508
  },
509
  {
510
- "github_repo_link": "https://github.com/dstackai/dstack",
511
  "repo_name": "dstack",
512
- "repo_description": "dstack is an open-source control plane for running development, training, and inference jobs on GPUs—across hyperscalers, neoclouds, or on-prem.",
513
  "homepage_link": "https://dstack.ai",
514
  "github_topic_closest_fit": "orchestration"
515
  },
516
  {
517
- "github_repo_link": "https://github.com/sandialabs/torchdendrite",
518
  "repo_name": "torchdendrite",
519
- "repo_description": "Dendrites for PyTorch and SNNTorch neural networks",
520
  "category": "machine learning framework"
521
  },
522
  {
523
- "github_repo_link": "https://github.com/pytorch/torchtitan",
524
  "repo_name": "torchtitan",
525
- "repo_description": "A PyTorch native platform for training generative AI models"
526
  },
527
  {
528
- "github_repo_link": "https://github.com/NVIDIA/cudnn-frontend",
529
  "repo_name": "cudnn-frontend",
530
- "repo_description": "cudnn_frontend provides a c++ wrapper for the cudnn backend API and samples on how to use it"
531
  },
532
  {
533
- "github_repo_link": "https://github.com/pytorch/ort",
534
  "repo_name": "ort",
535
- "repo_description": "Accelerate PyTorch models with ONNX Runtime"
536
  },
537
  {
538
- "github_repo_link": "https://github.com/sgl-project/ome",
539
  "repo_name": "ome",
540
- "repo_description": "OME is a Kubernetes operator for enterprise-grade management and serving of Large Language Models (LLMs)",
541
  "homepage_link": "http://docs.sglang.ai/ome/",
542
  "github_topic_closest_fit": "k8s"
543
  },
544
  {
545
- "github_repo_link": "https://github.com/aws-neuron/neuronx-distributed-inference",
546
  "repo_name": "neuronx-distributed-inference",
547
  "category": "inference engine"
548
  },
549
  {
550
- "github_repo_link": "https://github.com/meta-pytorch/monarch",
551
  "repo_name": "monarch",
552
- "repo_description": "PyTorch Single Controller",
553
  "homepage_link": "https://meta-pytorch.org/monarch"
554
  },
555
  {
556
- "github_repo_link": "https://github.com/LMCache/LMCache",
557
  "repo_name": "LMCache",
558
- "repo_description": "Supercharge Your LLM with the Fastest KV Cache Layer",
559
  "homepage_link": "https://lmcache.ai/",
560
  "github_topic_closest_fit": "inference"
561
  },
562
  {
563
- "github_repo_link": "https://github.com/linux-rdma/rdma-core",
564
  "repo_name": "rdma-core",
565
- "repo_description": "RDMA core userspace libraries and daemons",
566
  "github_topic_closest_fit": "linux-kernel"
567
  },
568
  {
569
- "github_repo_link": "https://github.com/Cambridge-ICCS/FTorch",
570
  "repo_name": "FTorch",
571
- "repo_description": "A library for directly calling PyTorch ML models from Fortran.",
572
  "homepage_link": "https://cambridge-iccs.github.io/FTorch/",
573
  "github_topic_closest_fit": "machine-learning"
574
  },
575
  {
576
- "github_repo_link": "https://github.com/facebook/hhvm",
577
  "repo_name": "hhvm",
578
- "repo_description": "A virtual machine for executing programs written in Hack.",
579
  "homepage_link": "https://hhvm.com",
580
  "github_topic_closest_fit": "hack"
581
  },
582
  {
583
- "github_repo_link": "https://github.com/apache/spark",
584
  "repo_name": "spark",
585
- "repo_description": "Apache Spark - A unified analytics engine for large-scale data processing",
586
  "homepage_link": "https://spark.apache.org/",
587
  "github_topic_closest_fit": "big-data"
588
  },
589
  {
590
- "github_repo_link": "https://github.com/ROCm/composable_kernel",
591
  "repo_name": "composable_kernel",
592
- "repo_description": "Composable Kernel: Performance Portable Programming Model for Machine Learning Tensor Operators",
593
  "homepage_link": "https://rocm.docs.amd.com/projects/composable_kernel/en/latest/"
594
  },
595
  {
596
- "github_repo_link": "https://github.com/ROCm/aiter",
597
  "repo_name": "aiter",
598
- "repo_description": "AI Tensor Engine for ROCm"
599
  },
600
  {
601
- "github_repo_link": "https://github.com/AMD-AGI/torchtitan",
602
  "repo_name": "torchtitan",
603
- "repo_description": "A PyTorch native platform for training generative AI models"
604
  },
605
  {
606
- "github_repo_link": "https://github.com/AMD-AGI/hipBLASLt",
607
  "repo_name": "hipBLASLt",
608
- "repo_description": "hipBLASLt is a library that provides general matrix-matrix operations with a flexible API and extends functionalities beyond a traditional BLAS library",
609
  "homepage_link": "https://rocm.docs.amd.com/projects/hipBLASLt/en/latest/index.html"
610
  },
611
  {
612
- "github_repo_link": "https://github.com/AMD-AGI/rocm-torchtitan",
613
  "repo_name": "rocm-torchtitan"
614
  },
615
  {
616
- "github_repo_link": "https://github.com/HazyResearch/Megakernels",
617
  "repo_name": "Megakernels",
618
- "repo_description": "kernels, of the mega variety"
619
  },
620
  {
621
- "github_repo_link": "https://github.com/opencv/opencv",
622
  "repo_name": "opencv",
623
- "repo_description": "Open Source Computer Vision Library",
624
  "homepage_link": "https://opencv.org",
625
  "github_topic_closest_fit": "image-processing"
626
  },
627
  {
628
- "github_repo_link": "https://github.com/tracel-ai/burn",
629
  "repo_name": "burn",
630
- "repo_description": "Burn is a next generation tensor library and Deep Learning Framework that doesn't compromise on flexibility, efficiency and portability.",
631
  "homepage_link": "https://burn.dev",
632
  "github_topic_closest_fit": "machine-learning"
633
  },
634
  {
635
- "github_repo_link": "https://github.com/OSC/ondemand",
636
  "repo_name": "ondemand",
637
- "repo_description": "Supercomputing. Seamlessly. Open, Interactive HPC Via the Web",
638
  "homepage_link": "https://openondemand.org/",
639
  "github_topic_closest_fit": "hpc"
640
  },
641
  {
642
- "github_repo_link": "https://github.com/flashinfer-ai/flashinfer",
643
  "repo_name": "flashinfer",
644
- "repo_description": "FlashInfer: Kernel Library for LLM Serving",
645
  "homepage_link": "https://flashinfer.ai",
646
  "github_topic_closest_fit": "attention"
647
  },
648
  {
649
- "github_repo_link": "https://github.com/AutomataLab/cuJSON",
650
  "repo_name": "cuJSON",
651
- "repo_description": "cuJSON: A Highly Parallel JSON Parser for GPUs"
652
  },
653
  {
654
- "github_repo_link": "https://github.com/Netflix/metaflow",
655
  "repo_name": "metaflow",
656
- "repo_description": "Build, Manage and Deploy AI/ML Systems",
657
  "homepage_link": "https://metaflow.org",
658
  "github_topic_closest_fit": "machine-learning"
659
  },
660
  {
661
- "github_repo_link": "https://github.com/harmonic-ai/IMO2025",
662
  "repo_name": "IMO2025"
663
  },
664
  {
665
- "github_repo_link": "https://github.com/leanprover/lean4",
666
  "repo_name": "lean4",
667
- "repo_description": "Lean 4 programming language and theorem prover",
668
  "homepage_link": "https://lean-lang.org",
669
  "github_topic_closest_fit": "lean"
670
  },
671
  {
672
- "github_repo_link": "https://github.com/NVIDIA/warp",
673
  "repo_name": "warp",
674
- "repo_description": "A Python framework for accelerated simulation, data generation and spatial computing.",
675
  "homepage_link": "https://nvidia.github.io/warp/",
676
  "github_topic_closest_fit": "gpu"
677
  },
678
  {
679
- "github_repo_link": "https://github.com/NVIDIA/cuda-python",
680
  "repo_name": "cuda-python",
681
- "repo_description": "CUDA Python: Performance meets Productivity",
682
  "homepage_link": "https://nvidia.github.io/cuda-python/"
683
  },
684
  {
685
- "github_repo_link": "https://github.com/basetenlabs/truss",
686
  "repo_name": "truss",
687
- "repo_description": "The simplest way to serve AI/ML models in production",
688
  "homepage_link": "https://truss.baseten.co",
689
  "github_topic_closest_fit": "machine-learning"
690
  },
691
  {
692
- "github_repo_link": "https://github.com/kvcache-ai/Mooncake",
693
  "repo_name": "Mooncake",
694
- "repo_description": "Mooncake is the serving platform for Kimi, a leading LLM service provided by Moonshot AI.",
695
  "homepage_link": "https://kvcache-ai.github.io/Mooncake/",
696
  "github_topic_closest_fit": "inference"
697
  },
698
  {
699
- "github_repo_link": "https://github.com/KhronosGroup/SYCL-Docs",
700
  "repo_name": "SYCL-Docs",
701
- "repo_description": "SYCL Open Source Specification"
702
  },
703
  {
704
- "github_repo_link": "https://github.com/triSYCL/triSYCL",
705
  "repo_name": "triSYCL",
706
- "repo_description": "Generic system-wide modern C++ for heterogeneous platforms with SYCL from Khronos Group",
707
  "github_topic_closest_fit": "opencl"
708
  },
709
  {
710
- "github_repo_link": "https://github.com/pybind/pybind11",
711
  "repo_name": "pybind11",
712
- "repo_description": "Seamless operability between C++11 and Python",
713
  "homepage_link": "https://pybind11.readthedocs.io/",
714
  "github_topic_closest_fit": "bindings"
715
  },
716
  {
717
- "github_repo_link": "https://github.com/andreinechaev/nvcc4jupyter",
718
  "repo_name": "nvcc4jupyter",
719
- "repo_description": "A plugin for Jupyter Notebook to run CUDA C/C++ code",
720
  "category": "compiler"
721
  },
722
  {
723
- "github_repo_link": "https://github.com/Reference-LAPACK/lapack",
724
  "repo_name": "lapack",
725
- "repo_description": "LAPACK development repository",
726
  "github_topic_closest_fit": "linear-algebra"
727
  },
728
  {
729
- "github_repo_link": "https://github.com/ccache/ccache",
730
  "repo_name": "ccache",
731
- "repo_description": "ccache – a fast compiler cache",
732
  "homepage_link": "https://ccache.dev",
733
  "github_topic_closest_fit": "compiler",
734
  "category": "compiler"
735
  },
736
  {
737
- "github_repo_link": "https://github.com/KhronosGroup/OpenCL-SDK",
738
  "repo_name": "OpenCL-SDK",
739
- "repo_description": "OpenCL SDK"
740
  },
741
  {
742
- "github_repo_link": "https://github.com/meta-llama/synthetic-data-kit",
743
  "repo_name": "synthetic-data-kit",
744
- "repo_description": "Tool for generating high quality Synthetic datasets",
745
  "homepage_link": "https://pypi.org/project/synthetic-data-kit/",
746
  "github_topic_closest_fit": "generation"
747
  },
748
  {
749
- "github_repo_link": "https://github.com/KhronosGroup/Vulkan-Docs",
750
  "repo_name": "Vulkan-Docs",
751
- "repo_description": "The Vulkan API Specification and related tools"
752
  },
753
  {
754
- "github_repo_link": "https://github.com/tensorflow/tflite-micro",
755
  "repo_name": "tflite-micro",
756
- "repo_description": "Infrastructure to enable deployment of ML models to low-power resource-constrained embedded targets (including microcontrollers and digital signal processors)."
757
  },
758
  {
759
- "github_repo_link": "https://github.com/Wan-Video/Wan2.2",
760
  "repo_name": "Wan2.2",
761
- "repo_description": "Wan: Open and Advanced Large-Scale Video Generative Models",
762
  "homepage_link": "https://wan.video",
763
  "github_topic_closest_fit": "video-generation"
764
  },
765
  {
766
- "github_repo_link": "https://github.com/AMD-AGI/Primus-Turbo",
767
  "repo_name": "Primus-Turbo"
768
  },
769
  {
770
- "github_repo_link": "https://github.com/ROCm/hipBLAS",
771
  "repo_name": "hipBLAS",
772
- "repo_description": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
773
  "homepage_link": "https://github.com/ROCm/rocm-libraries",
774
  "github_topic_closest_fit": "hip"
775
  },
776
  {
777
- "github_repo_link": "https://github.com/ROCm/roctracer",
778
  "repo_name": "roctracer",
779
- "repo_description": "[DEPRECATED] Moved to ROCm/rocm-systems repo",
780
  "homepage_link": "https://github.com/ROCm/rocm-systems"
781
  },
782
  {
783
- "github_repo_link": "https://github.com/ROCm/rocSOLVER",
784
  "repo_name": "rocSOLVER",
785
- "repo_description": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
786
  "homepage_link": "https://github.com/ROCm/rocm-libraries",
787
  "github_topic_closest_fit": "rocm"
788
  },
789
  {
790
- "github_repo_link": "https://github.com/ROCm/Tensile",
791
  "repo_name": "Tensile",
792
- "repo_description": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
793
  "homepage_link": "https://github.com/ROCm/rocm-libraries",
794
  "github_topic_closest_fit": "gpu"
795
  },
796
  {
797
- "github_repo_link": "https://github.com/ROCm/rocPRIM",
798
  "repo_name": "rocPRIM",
799
- "repo_description": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
800
  "homepage_link": "https://github.com/ROCm/rocm-libraries",
801
  "github_topic_closest_fit": "hip"
802
  },
803
  {
804
- "github_repo_link": "https://github.com/ROCm/hipCUB",
805
  "repo_name": "hipCUB",
806
- "repo_description": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
807
  "homepage_link": "https://github.com/ROCm/rocm-libraries"
808
  },
809
  {
810
- "github_repo_link": "https://github.com/ROCm/rocFFT",
811
  "repo_name": "rocFFT",
812
- "repo_description": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
813
  "homepage_link": "https://github.com/ROCm/rocm-libraries",
814
  "github_topic_closest_fit": "hip"
815
  },
816
  {
817
- "github_repo_link": "https://github.com/ROCm/rocSPARSE",
818
  "repo_name": "rocSPARSE",
819
- "repo_description": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
820
  "homepage_link": "https://github.com/ROCm/rocm-libraries"
821
  },
822
  {
823
- "github_repo_link": "https://github.com/ROCm/rocRAND",
824
  "repo_name": "rocRAND",
825
- "repo_description": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
826
  "homepage_link": "https://github.com/ROCm/rocm-libraries",
827
  "github_topic_closest_fit": "hip"
828
  },
829
  {
830
- "github_repo_link": "https://github.com/ROCm/MIOpen",
831
  "repo_name": "MIOpen",
832
- "repo_description": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
833
  "homepage_link": "https://github.com/ROCm/rocm-libraries"
834
  }
835
  ]
 
1
  [
2
  {
3
  "repo_name": "pytorch",
4
+ "repo_link": "https://github.com/pytorch/pytorch",
5
  "category": "machine learning framework",
6
+ "github_about_section": "Tensors and Dynamic neural networks in Python with strong GPU acceleration",
7
  "homepage_link": "https://pytorch.org",
8
  "github_topic_closest_fit": "machine-learning"
9
  },
10
  {
11
  "repo_name": "triton",
12
+ "repo_link": "https://github.com/triton-lang/triton",
13
  "category": "parallel computing dsl",
14
+ "github_about_section": "Development repository for the Triton language and compiler",
15
  "homepage_link": "https://triton-lang.org/",
16
  "github_topic_closest_fit": "parallel-programming"
17
  },
18
  {
19
  "repo_name": "cutlass",
20
+ "repo_link": "https://github.com/NVIDIA/cutlass",
21
  "category": "parallel computing",
22
+ "github_about_section": "CUDA Templates and Python DSLs for High-Performance Linear Algebra",
23
  "homepage_link": "https://docs.nvidia.com/cutlass/index.html",
24
  "github_topic_closest_fit": "parallel-programming"
25
  },
26
  {
27
  "repo_name": "tilelang",
28
+ "repo_link": "https://github.com/tile-ai/tilelang",
29
  "category": "parallel computing dsl",
30
+ "github_about_section": "Domain-specific language designed to streamline the development of high-performance GPU/CPU/Accelerators kernels",
31
  "homepage_link": "https://tilelang.com",
32
  "github_topic_closest_fit": "parallel-programming"
33
  },
34
  {
35
  "repo_name": "ThunderKittens",
36
+ "repo_link": "https://github.com/HazyResearch/ThunderKittens",
37
  "category": "parallel computing",
38
+ "github_about_section": "Tile primitives for speedy kernels",
39
  "homepage_link": "https://hazyresearch.stanford.edu/blog/2024-10-29-tk2",
40
  "github_topic_closest_fit": "parallel-programming"
41
  },
42
  {
43
  "repo_name": "helion",
44
+ "repo_link": "https://github.com/pytorch/helion",
45
  "category": "parallel computing dsl",
46
+ "github_about_section": "A Python-embedded DSL that makes it easy to write fast, scalable ML kernels with minimal boilerplate.",
47
  "homepage_link": "https://helionlang.com",
48
  "github_topic_closest_fit": "parallel-programming"
49
  },
50
  {
51
  "repo_name": "TileIR",
52
+ "repo_link": "https://github.com/microsoft/TileIR",
53
  "category": "parallel computing dsl",
54
+ "github_about_section": "TileIR (tile-ir) is a concise domain-specific IR designed to streamline the development of high-performance GPU/CPU kernels (e.g., GEMM, Dequant GEMM, FlashAttention, LinearAttention). By employing a Pythonic syntax with an underlying compiler infrastructure on top of TVM, TileIR allows developers to focus on productivity without sacrificing the low-level optimizations necessary for state-of-the-art performance.",
55
  "github_topic_closest_fit": "parallel-programming"
56
  },
57
  {
58
  "repo_name": "BitBLAS",
59
+ "repo_link": "https://github.com/microsoft/BitBLAS",
60
+ "github_about_section": "BitBLAS is a library to support mixed-precision matrix multiplications, especially for quantized LLM deployment."
61
  },
62
  {
63
  "repo_name": "tensorflow",
64
+ "repo_link": "https://github.com/tensorflow/tensorflow",
65
  "category": "machine learning framework",
66
+ "github_about_section": "An Open Source Machine Learning Framework for Everyone",
67
  "homepage_link": "https://tensorflow.org",
68
  "github_topic_closest_fit": "machine-learning"
69
  },
70
  {
71
  "repo_name": "vllm",
72
+ "repo_link": "https://github.com/vllm-project/vllm",
73
  "category": "inference engine",
74
+ "github_about_section": "A high-throughput and memory-efficient inference and serving engine for LLMs",
75
  "homepage_link": "https://docs.vllm.ai",
76
  "github_topic_closest_fit": "inference"
77
  },
78
  {
79
  "repo_name": "ollama",
80
+ "repo_link": "https://github.com/ollama/ollama",
81
  "category": "inference engine",
82
+ "github_about_section": "Get up and running with OpenAI gpt-oss, DeepSeek-R1, Gemma 3 and other models.",
83
  "homepage_link": "https://ollama.com",
84
  "github_topic_closest_fit": "inference"
85
  },
86
  {
87
  "repo_name": "llama.cpp",
88
+ "repo_link": "https://github.com/ggml-org/llama.cpp",
89
  "category": "inference engine",
90
+ "github_about_section": "LLM inference in C/C++",
91
  "homepage_link": "https://ggml.ai",
92
  "github_topic_closest_fit": "inference"
93
  },
94
  {
95
  "repo_name": "sglang",
96
+ "repo_link": "https://github.com/sgl-project/sglang",
97
  "category": "inference engine",
98
+ "github_about_section": "SGLang is a fast serving framework for large language models and vision language models.",
99
  "homepage_link": "https://docs.sglang.ai",
100
  "github_topic_closest_fit": "inference"
101
  },
102
  {
103
  "repo_name": "onnx",
104
+ "repo_link": "https://github.com/onnx/onnx",
105
  "category": "machine learning framework",
106
+ "github_about_section": "Open standard for machine learning interoperability",
107
  "homepage_link": "https://onnx.ai/",
108
  "github_topic_closest_fit": "onnx"
109
  },
110
  {
111
  "repo_name": "executorch",
112
+ "repo_link": "https://github.com/pytorch/executorch",
113
  "category": "model compiler",
114
+ "github_about_section": "On-device AI across mobile, embedded and edge for PyTorch",
115
  "homepage_link": "https://executorch.ai",
116
  "github_topic_closest_fit": "compiler"
117
  },
118
  {
119
+ "repo_link": "https://github.com/ray-project/ray",
120
  "repo_name": "ray",
121
+ "github_about_section": "Ray is an AI compute engine. Ray consists of a core distributed runtime and a set of AI Libraries for accelerating ML workloads.",
122
  "homepage_link": "https://ray.io",
123
  "github_topic_closest_fit": "machine-learning"
124
  },
125
  {
126
+ "repo_link": "https://github.com/jax-ml/jax",
127
  "repo_name": "jax",
128
+ "github_about_section": "Composable transformations of Python+NumPy programs: differentiate, vectorize, JIT to GPU/TPU, and more",
129
  "homepage_link": "https://docs.jax.dev",
130
  "github_topic_closest_fit": "jax"
131
  },
132
  {
133
+ "repo_link": "https://github.com/llvm/llvm-project",
134
  "repo_name": "llvm-project",
135
+ "github_about_section": "The LLVM Project is a collection of modular and reusable compiler and toolchain technologies.",
136
  "homepage_link": "http://llvm.org",
137
  "category": "compiler"
138
  },
139
  {
140
+ "repo_link": "https://github.com/NVIDIA/TensorRT",
141
  "repo_name": "TensorRT",
142
+ "github_about_section": "NVIDIA® TensorRT™ is an SDK for high-performance deep learning inference on NVIDIA GPUs. This repository contains the open source components of TensorRT.",
143
  "homepage_link": "https://developer.nvidia.com/tensorrt",
144
  "github_topic_closest_fit": "inference"
145
  },
146
  {
147
+ "repo_link": "https://github.com/pytorch/ao",
148
  "repo_name": "ao",
149
+ "github_about_section": "PyTorch native quantization and sparsity for training and inference",
150
  "homepage_link": "https://pytorch.org/ao/stable/index.html",
151
  "github_topic_closest_fit": "quantization"
152
  },
153
  {
154
+ "repo_link": "https://github.com/AMD-AGI/GEAK-agent",
155
  "repo_name": "GEAK-agent",
156
+ "github_about_section": "It is an LLM-based AI agent, which can write correct and efficient gpu kernels automatically."
157
  },
158
  {
159
+ "repo_link": "https://github.com/block/goose",
160
  "repo_name": "goose",
161
+ "github_about_section": "an open source, extensible AI agent that goes beyond code suggestions - install, execute, edit, and test with any LLM",
162
  "homepage_link": "https://block.github.io/goose/",
163
  "github_topic_closest_fit": "mcp",
164
  "category": "agent"
165
  },
166
  {
167
+ "repo_link": "https://github.com/codelion/openevolve",
168
  "repo_name": "openevolve",
169
+ "github_about_section": "Open-source implementation of AlphaEvolve",
170
  "github_topic_closest_fit": "genetic-algorithm"
171
  },
172
  {
173
+ "repo_link": "https://github.com/volcengine/verl",
174
  "repo_name": "verl",
175
+ "github_about_section": "verl: Volcano Engine Reinforcement Learning for LLMs",
176
  "homepage_link": "https://verl.readthedocs.io/en/latest/index.html"
177
  },
178
  {
179
+ "repo_link": "https://github.com/huggingface/peft",
180
  "repo_name": "peft",
181
+ "github_about_section": "🤗 PEFT: State-of-the-art Parameter-Efficient Fine-Tuning.",
182
  "homepage_link": "https://huggingface.co/docs/peft",
183
  "github_topic_closest_fit": "lora"
184
  },
185
  {
186
+ "repo_link": "https://github.com/Dao-AILab/quack",
187
  "repo_name": "quack",
188
+ "github_about_section": "A Quirky Assortment of CuTe Kernels",
189
  "category": "kernels"
190
  },
191
  {
192
+ "repo_link": "https://github.com/AMDResearch/intelliperf",
193
  "repo_name": "intelliperf",
194
+ "github_about_section": "Automated bottleneck detection and solution orchestration",
195
  "github_topic_closest_fit": "performance"
196
  },
197
  {
198
+ "repo_link": "https://github.com/letta-ai/letta",
199
  "repo_name": "letta",
200
+ "github_about_section": "Letta is the platform for building stateful agents: open AI with advanced memory that can learn and self-improve over time.",
201
  "homepage_link": "https://docs.letta.com/",
202
  "github_topic_closest_fit": "ai-agents"
203
  },
204
  {
205
+ "repo_link": "https://github.com/lastmile-ai/mcp-agent",
206
  "repo_name": "mcp-agent",
207
+ "github_about_section": "Build effective agents using Model Context Protocol and simple workflow patterns",
208
  "github_topic_closest_fit": "ai-agents"
209
  },
210
  {
211
+ "repo_link": "https://github.com/modular/modular",
212
  "repo_name": "modular",
213
+ "github_about_section": "The Modular Platform (includes MAX & Mojo)",
214
  "homepage_link": "https://docs.modular.com/",
215
  "github_topic_closest_fit": "mojo"
216
  },
217
  {
218
+ "repo_link": "https://github.com/ScalingIntelligence/KernelBench",
219
  "repo_name": "KernelBench",
220
+ "github_about_section": "KernelBench: Can LLMs Write GPU Kernels? - Benchmark with Torch -> CUDA problems",
221
  "homepage_link": "https://scalingintelligence.stanford.edu/blogs/kernelbench/",
222
  "github_topic_closest_fit": "benchmark",
223
  "category": "benchmark"
224
  },
225
  {
226
+ "repo_link": "https://github.com/thunlp/TritonBench",
227
  "repo_name": "TritonBench",
228
+ "github_about_section": "TritonBench: Benchmarking Large Language Model Capabilities for Generating Triton Operators",
229
  "category": "benchmark"
230
  },
231
  {
232
+ "repo_link": "https://github.com/flashinfer-ai/flashinfer-bench",
233
  "repo_name": "flashinfer-bench",
234
+ "github_about_section": "Building the Virtuous Cycle for AI-driven LLM Systems",
235
  "homepage_link": "https://bench.flashinfer.ai",
236
  "category": "benchmark"
237
  },
238
  {
239
+ "repo_link": "https://github.com/laude-institute/terminal-bench",
240
  "repo_name": "terminal-bench",
241
+ "github_about_section": "A benchmark for LLMs on complicated tasks in the terminal",
242
  "homepage_link": "https://www.tbench.ai",
243
  "category": "benchmark"
244
  },
245
  {
246
+ "repo_link": "https://github.com/SWE-bench/SWE-bench",
247
  "repo_name": "SWE-bench",
248
+ "github_about_section": "SWE-bench: Can Language Models Resolve Real-world Github Issues?",
249
  "homepage_link": "https://www.swebench.com",
250
  "github_topic_closest_fit": "benchmark",
251
  "category": "benchmark"
252
  },
253
  {
254
+ "repo_link": "https://github.com/gpu-mode/reference-kernels",
255
  "repo_name": "reference-kernels",
256
+ "github_about_section": "Official Problem Sets / Reference Kernels for the GPU MODE Leaderboard!",
257
  "homepage_link": "https://gpumode.com",
258
  "github_topic_closest_fit": "gpu",
259
  "category": "kernels"
260
  },
261
  {
262
+ "repo_link": "https://github.com/linkedin/Liger-Kernel",
263
  "repo_name": "Liger-Kernel",
264
+ "github_about_section": "Efficient Triton Kernels for LLM Training",
265
  "homepage_link": "https://openreview.net/pdf?id=36SjAIT42G",
266
  "github_topic_closest_fit": "triton",
267
  "category": "kernels"
268
  },
269
  {
270
+ "repo_link": "https://github.com/huggingface/kernels",
271
  "repo_name": "kernels",
272
+ "github_about_section": "Load compute kernels from the Hub",
273
  "category": "kernels"
274
  },
275
  {
276
+ "repo_link": "https://github.com/huggingface/kernels-community",
277
  "repo_name": "kernels-community",
278
+ "github_about_section": "Kernel sources for https://huggingface.co/kernels-community",
279
  "category": "kernels"
280
  },
281
  {
282
+ "repo_link": "https://github.com/unslothai/unsloth",
283
  "repo_name": "unsloth",
284
+ "github_about_section": "Fine-tuning & Reinforcement Learning for LLMs. 🦥 Train OpenAI gpt-oss, DeepSeek-R1, Qwen3, Gemma 3, TTS 2x faster with 70% less VRAM.",
285
  "homepage_link": "https://docs.unsloth.ai/",
286
  "github_topic_closest_fit": "unsloth"
287
  },
288
  {
289
+ "repo_link": "https://github.com/jupyterlab/jupyterlab",
290
  "repo_name": "jupyterlab",
291
+ "github_about_section": "JupyterLab computational environment.",
292
  "homepage_link": "https://jupyterlab.readthedocs.io/",
293
  "github_topic_closest_fit": "jupyter",
294
  "category": "ui"
295
  },
296
  {
297
+ "repo_link": "https://github.com/ROCm/rocm-systems",
298
  "repo_name": "rocm-systems",
299
+ "github_about_section": "super repo for rocm systems projects"
300
  },
301
  {
302
+ "repo_link": "https://github.com/ROCm/hip",
303
  "repo_name": "hip",
304
+ "github_about_section": "HIP: C++ Heterogeneous-Compute Interface for Portability",
305
  "homepage_link": "https://rocmdocs.amd.com/projects/HIP/",
306
  "github_topic_closest_fit": "hip"
307
  },
308
  {
309
+ "repo_link": "https://github.com/ROCm/ROCm",
310
  "repo_name": "ROCm",
311
+ "github_about_section": "AMD ROCm™ Software - GitHub Home",
312
  "homepage_link": "https://rocm.docs.amd.com",
313
  "github_topic_closest_fit": "documentation"
314
  },
315
  {
316
+ "repo_link": "https://github.com/ROCm/omnitrace",
317
  "repo_name": "omnitrace",
318
+ "github_about_section": "Omnitrace: Application Profiling, Tracing, and Analysis",
319
  "homepage_link": "https://rocm.docs.amd.com/projects/omnitrace/en/docs-6.2.4/",
320
  "github_topic_closest_fit": "performance-analysis"
321
  },
322
  {
323
+ "repo_link": "https://github.com/vosen/ZLUDA",
324
  "repo_name": "ZLUDA",
325
+ "github_about_section": "CUDA on non-NVIDIA GPUs",
326
  "homepage_link": "https://vosen.github.io/ZLUDA/",
327
  "github_topic_closest_fit": "cuda"
328
  },
329
  {
330
+ "repo_link": "https://github.com/vtsynergy/CU2CL",
331
  "repo_name": "CU2CL",
332
+ "github_about_section": "A prototype CUDA-to-OpenCL source-to-source translator, built on the Clang compiler framework",
333
  "homepage_link": "http://chrec.cs.vt.edu/cu2cl"
334
  },
335
  {
336
+ "repo_link": "https://github.com/pocl/pocl",
337
  "repo_name": "pocl",
338
+ "github_about_section": "pocl - Portable Computing Language",
339
  "homepage_link": "https://portablecl.org",
340
  "github_topic_closest_fit": "opencl"
341
  },
342
  {
343
+ "repo_link": "https://github.com/cwpearson/cupti",
344
  "repo_name": "cupti",
345
+ "github_about_section": "Profile how CUDA applications create and modify data in memory.",
346
  "category": "profiler"
347
  },
348
  {
349
+ "repo_link": "https://github.com/LLNL/hatchet",
350
  "repo_name": "hatchet",
351
+ "github_about_section": "Graph-indexed Pandas DataFrames for analyzing hierarchical performance data",
352
  "homepage_link": "https://llnl-hatchet.readthedocs.io",
353
  "github_topic_closest_fit": "performance",
354
  "category": "profiler"
355
  },
356
  {
357
+ "repo_link": "https://github.com/toyaix/triton-runner",
358
  "repo_name": "triton-runner",
359
+ "github_about_section": "Multi-Level Triton Runner supporting Python, IR, PTX, and cubin.",
360
  "homepage_link": "https://triton-runner.org",
361
  "github_topic_closest_fit": "triton"
362
  },
363
  {
364
+ "repo_link": "https://github.com/ByteDance-Seed/Triton-distributed",
365
  "repo_name": "Triton-distributed",
366
+ "github_about_section": "Distributed Compiler based on Triton for Parallel Systems",
367
  "homepage_link": "https://triton-distributed.readthedocs.io/en/latest/",
368
  "category": "model compiler"
369
  },
370
  {
371
+ "repo_link": "https://github.com/meta-pytorch/tritonparse",
372
  "repo_name": "tritonparse",
373
+ "github_about_section": "TritonParse: A Compiler Tracer, Visualizer, and Reproducer for Triton Kernels",
374
  "homepage_link": "https://meta-pytorch.org/tritonparse/",
375
  "github_topic_closest_fit": "triton"
376
  },
377
  {
378
+ "repo_link": "https://github.com/numpy/numpy",
379
  "repo_name": "numpy",
380
+ "github_about_section": "The fundamental package for scientific computing with Python.",
381
  "homepage_link": "https://numpy.org",
382
  "github_topic_closest_fit": "python",
383
  "category": "python library"
384
  },
385
  {
386
+ "repo_link": "https://github.com/scipy/scipy",
387
  "repo_name": "scipy",
388
+ "github_about_section": "SciPy library main repository",
389
  "homepage_link": "https://scipy.org",
390
  "github_topic_closest_fit": "python",
391
  "category": "python library"
392
  },
393
  {
394
+ "repo_link": "https://github.com/numba/numba",
395
  "repo_name": "numba",
396
+ "github_about_section": "NumPy aware dynamic Python compiler using LLVM",
397
  "homepage_link": "https://numba.pydata.org/",
398
  "github_topic_closest_fit": "compiler"
399
  },
400
  {
401
+ "repo_link": "https://github.com/Lightning-AI/lightning-thunder",
402
  "repo_name": "lightning-thunder",
403
+ "github_about_section": "PyTorch compiler that accelerates training and inference. Get built-in optimizations for performance, memory, parallelism, and easily write your own."
404
  },
405
  {
406
+ "repo_link": "https://github.com/pytorch/torchdynamo",
407
  "repo_name": "torchdynamo",
408
+ "github_about_section": "A Python-level JIT compiler designed to make unmodified PyTorch programs faster."
409
  },
410
  {
411
+ "repo_link": "https://github.com/NVIDIA/nccl",
412
  "repo_name": "nccl",
413
+ "github_about_section": "Optimized primitives for collective multi-GPU communication",
414
  "homepage_link": "https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/index.html",
415
  "github_topic_closest_fit": "cuda"
416
  },
417
  {
418
+ "repo_link": "https://github.com/ai-dynamo/nixl",
419
  "repo_name": "nixl",
420
+ "github_about_section": "NVIDIA Inference Xfer Library (NIXL)"
421
  },
422
  {
423
+ "repo_link": "https://github.com/guandeh17/Self-Forcing",
424
  "repo_name": "Self-Forcing",
425
+ "github_about_section": "Official codebase for \"Self Forcing: Bridging Training and Inference in Autoregressive Video Diffusion\" (NeurIPS 2025 Spotlight)"
426
 
427
  },
428
  {
429
+ "repo_link": "https://github.com/cumulo-autumn/StreamDiffusion",
430
  "repo_name": "StreamDiffusion",
431
+ "github_about_section": "StreamDiffusion: A Pipeline-Level Solution for Real-Time Interactive Generation"
432
  },
433
  {
434
+ "repo_link": "https://github.com/comfyanonymous/ComfyUI",
435
  "repo_name": "ComfyUI",
436
+ "github_about_section": "The most powerful and modular diffusion model GUI, api and backend with a graph/nodes interface.",
437
  "homepage_link": "https://www.comfy.org/",
438
  "github_topic_closest_fit": "stable-diffusion"
439
  },
440
  {
441
+ "repo_link": "https://github.com/Jeff-LiangF/streamv2v",
442
  "repo_name": "streamv2v",
443
+ "github_about_section": "Official Pytorch implementation of StreamV2V.",
444
  "homepage_link": "https://jeff-liangf.github.io/projects/streamv2v/"
445
  },
446
  {
447
+ "repo_link": "https://github.com/deepspeedai/DeepSpeed",
448
  "repo_name": "DeepSpeed",
449
+ "github_about_section": "DeepSpeed is a deep learning optimization library that makes distributed training and inference easy, efficient, and effective.",
450
  "homepage_link": "https://www.deepspeed.ai/",
451
  "github_topic_closest_fit": "gpu"
452
  },
453
  {
454
+ "repo_link": "https://github.com/triton-inference-server/server",
455
  "repo_name": "server",
456
+ "github_about_section": "The Triton Inference Server provides an optimized cloud and edge inferencing solution.",
457
  "homepage_link": "https://docs.nvidia.com/deeplearning/triton-inference-server/user-guide/docs/index.html",
458
  "github_topic_closest_fit": "inference"
459
  },
460
  {
461
+ "repo_link": "https://github.com/elastic/elasticsearch",
462
  "repo_name": "elasticsearch",
463
+ "github_about_section": "Free and Open Source, Distributed, RESTful Search Engine",
464
  "homepage_link": "https://www.elastic.co/products/elasticsearch",
465
  "github_topic_closest_fit": "search-engine",
466
  "category": "search engine"
467
  },
468
  {
469
+ "repo_link": "https://github.com/kubernetes/kubernetes",
470
  "repo_name": "kubernetes",
471
+ "github_about_section": "Production-Grade Container Scheduling and Management",
472
  "homepage_link": "https://kubernetes.io",
473
  "github_topic_closest_fit": "containers"
474
  },
475
  {
476
+ "repo_link": "https://github.com/modelcontextprotocol/modelcontextprotocol",
477
  "repo_name": "modelcontextprotocol",
478
+ "github_about_section": "Specification and documentation for the Model Context Protocol",
479
  "homepage_link": "https://modelcontextprotocol.io"
480
  },
481
  {
482
+ "repo_link": "https://github.com/milvus-io/milvus",
483
  "repo_name": "milvus",
484
+ "github_about_section": "Milvus is a high-performance, cloud-native vector database built for scalable vector ANN search",
485
  "homepage_link": "https://milvus.io",
486
  "github_topic_closest_fit": "vector-search",
487
  "category": "vector database"
488
  },
489
  {
490
+ "repo_link": "https://github.com/gaoj0017/RaBitQ",
491
  "repo_name": "RaBitQ",
492
+ "github_about_section": "[SIGMOD 2024] RaBitQ: Quantizing High-Dimensional Vectors with a Theoretical Error Bound for Approximate Nearest Neighbor Search",
493
  "homepage_link": "https://github.com/VectorDB-NTU/RaBitQ-Library",
494
  "github_topic_closest_fit": "nearest-neighbor-search"
495
  },
496
  {
497
+ "repo_link": "https://github.com/Airtable/airtable.js",
498
  "repo_name": "airtable.js",
499
+ "github_about_section": "Airtable javascript client"
500
  },
501
  {
502
+ "repo_link": "https://github.com/mistralai/mistral-inference",
503
  "repo_name": "mistral-inference",
504
+ "github_about_section": "Official inference library for Mistral models",
505
  "homepage_link": "https://mistral.ai/",
506
  "github_topic_closest_fit": "llm-inference",
507
  "category": "inference engine"
508
  },
509
  {
510
+ "repo_link": "https://github.com/dstackai/dstack",
511
  "repo_name": "dstack",
512
+ "github_about_section": "dstack is an open-source control plane for running development, training, and inference jobs on GPUs—across hyperscalers, neoclouds, or on-prem.",
513
  "homepage_link": "https://dstack.ai",
514
  "github_topic_closest_fit": "orchestration"
515
  },
516
  {
517
+ "repo_link": "https://github.com/sandialabs/torchdendrite",
518
  "repo_name": "torchdendrite",
519
+ "github_about_section": "Dendrites for PyTorch and SNNTorch neural networks",
520
  "category": "machine learning framework"
521
  },
522
  {
523
+ "repo_link": "https://github.com/pytorch/torchtitan",
524
  "repo_name": "torchtitan",
525
+ "github_about_section": "A PyTorch native platform for training generative AI models"
526
  },
527
  {
528
+ "repo_link": "https://github.com/NVIDIA/cudnn-frontend",
529
  "repo_name": "cudnn-frontend",
530
+ "github_about_section": "cudnn_frontend provides a c++ wrapper for the cudnn backend API and samples on how to use it"
531
  },
532
  {
533
+ "repo_link": "https://github.com/pytorch/ort",
534
  "repo_name": "ort",
535
+ "github_about_section": "Accelerate PyTorch models with ONNX Runtime"
536
  },
537
  {
538
+ "repo_link": "https://github.com/sgl-project/ome",
539
  "repo_name": "ome",
540
+ "github_about_section": "OME is a Kubernetes operator for enterprise-grade management and serving of Large Language Models (LLMs)",
541
  "homepage_link": "http://docs.sglang.ai/ome/",
542
  "github_topic_closest_fit": "k8s"
543
  },
544
  {
545
+ "repo_link": "https://github.com/aws-neuron/neuronx-distributed-inference",
546
  "repo_name": "neuronx-distributed-inference",
547
  "category": "inference engine"
548
  },
549
  {
550
+ "repo_link": "https://github.com/meta-pytorch/monarch",
551
  "repo_name": "monarch",
552
+ "github_about_section": "PyTorch Single Controller",
553
  "homepage_link": "https://meta-pytorch.org/monarch"
554
  },
555
  {
556
+ "repo_link": "https://github.com/LMCache/LMCache",
557
  "repo_name": "LMCache",
558
+ "github_about_section": "Supercharge Your LLM with the Fastest KV Cache Layer",
559
  "homepage_link": "https://lmcache.ai/",
560
  "github_topic_closest_fit": "inference"
561
  },
562
  {
563
+ "repo_link": "https://github.com/linux-rdma/rdma-core",
564
  "repo_name": "rdma-core",
565
+ "github_about_section": "RDMA core userspace libraries and daemons",
566
  "github_topic_closest_fit": "linux-kernel"
567
  },
568
  {
569
+ "repo_link": "https://github.com/Cambridge-ICCS/FTorch",
570
  "repo_name": "FTorch",
571
+ "github_about_section": "A library for directly calling PyTorch ML models from Fortran.",
572
  "homepage_link": "https://cambridge-iccs.github.io/FTorch/",
573
  "github_topic_closest_fit": "machine-learning"
574
  },
575
  {
576
+ "repo_link": "https://github.com/facebook/hhvm",
577
  "repo_name": "hhvm",
578
+ "github_about_section": "A virtual machine for executing programs written in Hack.",
579
  "homepage_link": "https://hhvm.com",
580
  "github_topic_closest_fit": "hack"
581
  },
582
  {
583
+ "repo_link": "https://github.com/apache/spark",
584
  "repo_name": "spark",
585
+ "github_about_section": "Apache Spark - A unified analytics engine for large-scale data processing",
586
  "homepage_link": "https://spark.apache.org/",
587
  "github_topic_closest_fit": "big-data"
588
  },
589
  {
590
+ "repo_link": "https://github.com/ROCm/composable_kernel",
591
  "repo_name": "composable_kernel",
592
+ "github_about_section": "Composable Kernel: Performance Portable Programming Model for Machine Learning Tensor Operators",
593
  "homepage_link": "https://rocm.docs.amd.com/projects/composable_kernel/en/latest/"
594
  },
595
  {
596
+ "repo_link": "https://github.com/ROCm/aiter",
597
  "repo_name": "aiter",
598
+ "github_about_section": "AI Tensor Engine for ROCm"
599
  },
  {
    "repo_link": "https://github.com/AMD-AGI/torchtitan",
    "repo_name": "torchtitan",
    "github_about_section": "A PyTorch native platform for training generative AI models"
  },
  {
    "repo_link": "https://github.com/AMD-AGI/hipBLASLt",
    "repo_name": "hipBLASLt",
    "github_about_section": "hipBLASLt is a library that provides general matrix-matrix operations with a flexible API and extends functionalities beyond a traditional BLAS library",
    "homepage_link": "https://rocm.docs.amd.com/projects/hipBLASLt/en/latest/index.html"
  },
  {
    "repo_link": "https://github.com/AMD-AGI/rocm-torchtitan",
    "repo_name": "rocm-torchtitan"
  },
  {
    "repo_link": "https://github.com/HazyResearch/Megakernels",
    "repo_name": "Megakernels",
    "github_about_section": "kernels, of the mega variety"
  },
  {
    "repo_link": "https://github.com/opencv/opencv",
    "repo_name": "opencv",
    "github_about_section": "Open Source Computer Vision Library",
    "homepage_link": "https://opencv.org",
    "github_topic_closest_fit": "image-processing"
  },
  {
    "repo_link": "https://github.com/tracel-ai/burn",
    "repo_name": "burn",
    "github_about_section": "Burn is a next generation tensor library and Deep Learning Framework that doesn't compromise on flexibility, efficiency and portability.",
    "homepage_link": "https://burn.dev",
    "github_topic_closest_fit": "machine-learning"
  },
  {
    "repo_link": "https://github.com/OSC/ondemand",
    "repo_name": "ondemand",
    "github_about_section": "Supercomputing. Seamlessly. Open, Interactive HPC Via the Web",
    "homepage_link": "https://openondemand.org/",
    "github_topic_closest_fit": "hpc"
  },
  {
    "repo_link": "https://github.com/flashinfer-ai/flashinfer",
    "repo_name": "flashinfer",
    "github_about_section": "FlashInfer: Kernel Library for LLM Serving",
    "homepage_link": "https://flashinfer.ai",
    "github_topic_closest_fit": "attention"
  },
  {
    "repo_link": "https://github.com/AutomataLab/cuJSON",
    "repo_name": "cuJSON",
    "github_about_section": "cuJSON: A Highly Parallel JSON Parser for GPUs"
  },
  {
    "repo_link": "https://github.com/Netflix/metaflow",
    "repo_name": "metaflow",
    "github_about_section": "Build, Manage and Deploy AI/ML Systems",
    "homepage_link": "https://metaflow.org",
    "github_topic_closest_fit": "machine-learning"
  },
  {
    "repo_link": "https://github.com/harmonic-ai/IMO2025",
    "repo_name": "IMO2025"
  },
  {
    "repo_link": "https://github.com/leanprover/lean4",
    "repo_name": "lean4",
    "github_about_section": "Lean 4 programming language and theorem prover",
    "homepage_link": "https://lean-lang.org",
    "github_topic_closest_fit": "lean"
  },
  {
    "repo_link": "https://github.com/NVIDIA/warp",
    "repo_name": "warp",
    "github_about_section": "A Python framework for accelerated simulation, data generation and spatial computing.",
    "homepage_link": "https://nvidia.github.io/warp/",
    "github_topic_closest_fit": "gpu"
  },
  {
    "repo_link": "https://github.com/NVIDIA/cuda-python",
    "repo_name": "cuda-python",
    "github_about_section": "CUDA Python: Performance meets Productivity",
    "homepage_link": "https://nvidia.github.io/cuda-python/"
  },
  {
    "repo_link": "https://github.com/basetenlabs/truss",
    "repo_name": "truss",
    "github_about_section": "The simplest way to serve AI/ML models in production",
    "homepage_link": "https://truss.baseten.co",
    "github_topic_closest_fit": "machine-learning"
  },
  {
    "repo_link": "https://github.com/kvcache-ai/Mooncake",
    "repo_name": "Mooncake",
    "github_about_section": "Mooncake is the serving platform for Kimi, a leading LLM service provided by Moonshot AI.",
    "homepage_link": "https://kvcache-ai.github.io/Mooncake/",
    "github_topic_closest_fit": "inference"
  },
  {
    "repo_link": "https://github.com/KhronosGroup/SYCL-Docs",
    "repo_name": "SYCL-Docs",
    "github_about_section": "SYCL Open Source Specification"
  },
  {
    "repo_link": "https://github.com/triSYCL/triSYCL",
    "repo_name": "triSYCL",
    "github_about_section": "Generic system-wide modern C++ for heterogeneous platforms with SYCL from Khronos Group",
    "github_topic_closest_fit": "opencl"
  },
  {
    "repo_link": "https://github.com/pybind/pybind11",
    "repo_name": "pybind11",
    "github_about_section": "Seamless operability between C++11 and Python",
    "homepage_link": "https://pybind11.readthedocs.io/",
    "github_topic_closest_fit": "bindings"
  },
  {
    "repo_link": "https://github.com/andreinechaev/nvcc4jupyter",
    "repo_name": "nvcc4jupyter",
    "github_about_section": "A plugin for Jupyter Notebook to run CUDA C/C++ code",
    "category": "compiler"
  },
  {
    "repo_link": "https://github.com/Reference-LAPACK/lapack",
    "repo_name": "lapack",
    "github_about_section": "LAPACK development repository",
    "github_topic_closest_fit": "linear-algebra"
  },
  {
    "repo_link": "https://github.com/ccache/ccache",
    "repo_name": "ccache",
    "github_about_section": "ccache – a fast compiler cache",
    "homepage_link": "https://ccache.dev",
    "github_topic_closest_fit": "compiler",
    "category": "compiler"
  },
  {
    "repo_link": "https://github.com/KhronosGroup/OpenCL-SDK",
    "repo_name": "OpenCL-SDK",
    "github_about_section": "OpenCL SDK"
  },
  {
    "repo_link": "https://github.com/meta-llama/synthetic-data-kit",
    "repo_name": "synthetic-data-kit",
    "github_about_section": "Tool for generating high quality Synthetic datasets",
    "homepage_link": "https://pypi.org/project/synthetic-data-kit/",
    "github_topic_closest_fit": "generation"
  },
  {
    "repo_link": "https://github.com/KhronosGroup/Vulkan-Docs",
    "repo_name": "Vulkan-Docs",
    "github_about_section": "The Vulkan API Specification and related tools"
  },
  {
    "repo_link": "https://github.com/tensorflow/tflite-micro",
    "repo_name": "tflite-micro",
    "github_about_section": "Infrastructure to enable deployment of ML models to low-power resource-constrained embedded targets (including microcontrollers and digital signal processors)."
  },
  {
    "repo_link": "https://github.com/Wan-Video/Wan2.2",
    "repo_name": "Wan2.2",
    "github_about_section": "Wan: Open and Advanced Large-Scale Video Generative Models",
    "homepage_link": "https://wan.video",
    "github_topic_closest_fit": "video-generation"
  },
  {
    "repo_link": "https://github.com/AMD-AGI/Primus-Turbo",
    "repo_name": "Primus-Turbo"
  },
  {
    "repo_link": "https://github.com/ROCm/hipBLAS",
    "repo_name": "hipBLAS",
    "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
    "homepage_link": "https://github.com/ROCm/rocm-libraries",
    "github_topic_closest_fit": "hip"
  },
  {
    "repo_link": "https://github.com/ROCm/roctracer",
    "repo_name": "roctracer",
    "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-systems repo",
    "homepage_link": "https://github.com/ROCm/rocm-systems"
  },
  {
    "repo_link": "https://github.com/ROCm/rocSOLVER",
    "repo_name": "rocSOLVER",
    "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
    "homepage_link": "https://github.com/ROCm/rocm-libraries",
    "github_topic_closest_fit": "rocm"
  },
  {
    "repo_link": "https://github.com/ROCm/Tensile",
    "repo_name": "Tensile",
    "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
    "homepage_link": "https://github.com/ROCm/rocm-libraries",
    "github_topic_closest_fit": "gpu"
  },
  {
    "repo_link": "https://github.com/ROCm/rocPRIM",
    "repo_name": "rocPRIM",
    "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
    "homepage_link": "https://github.com/ROCm/rocm-libraries",
    "github_topic_closest_fit": "hip"
  },
  {
    "repo_link": "https://github.com/ROCm/hipCUB",
    "repo_name": "hipCUB",
    "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
    "homepage_link": "https://github.com/ROCm/rocm-libraries"
  },
  {
    "repo_link": "https://github.com/ROCm/rocFFT",
    "repo_name": "rocFFT",
    "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
    "homepage_link": "https://github.com/ROCm/rocm-libraries",
    "github_topic_closest_fit": "hip"
  },
  {
    "repo_link": "https://github.com/ROCm/rocSPARSE",
    "repo_name": "rocSPARSE",
    "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
    "homepage_link": "https://github.com/ROCm/rocm-libraries"
  },
  {
    "repo_link": "https://github.com/ROCm/rocRAND",
    "repo_name": "rocRAND",
    "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
    "homepage_link": "https://github.com/ROCm/rocm-libraries",
    "github_topic_closest_fit": "hip"
  },
  {
    "repo_link": "https://github.com/ROCm/MIOpen",
    "repo_name": "MIOpen",
    "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
    "homepage_link": "https://github.com/ROCm/rocm-libraries"
  }
]