TylerHilbert committed on
Commit
0a7ff39
·
1 Parent(s): 9760adf

Cleanup + truss and unsloth

Browse files
PyTorchConference2025_GithubRepos.json CHANGED
@@ -10,10 +10,8 @@
10
  {
11
  "repo_name": "ray",
12
  "repo_link": "https://github.com/ray-project/ray",
13
- "category": "ai compute engine",
14
  "github_about_section": "Ray is an AI compute engine. Ray consists of a core distributed runtime and a set of AI Libraries for accelerating ML workloads.",
15
- "homepage_link": "https://ray.io",
16
- "github_topic_closest_fit": "machine-learning"
17
  },
18
  {
19
  "repo_name": "flashinfer-bench",
@@ -80,10 +78,8 @@
80
  {
81
  "repo_name": "AdaptiveCpp",
82
  "repo_link": "https://github.com/AdaptiveCpp/AdaptiveCpp",
83
- "category": "compiler",
84
  "github_about_section": "Compiler for multiple programming models (SYCL, C++ standard parallelism, HIP/CUDA) for CPUs and GPUs from all vendors: The independent, community-driven compiler for C++-based heterogeneous programming models. Lets applications adapt themselves to all the hardware in the system - even at runtime!",
85
- "homepage_link": "https://adaptivecpp.github.io",
86
- "github_topic_closest_fit": "compiler"
87
  },
88
  {
89
  "repo_name": "llvm-project",
@@ -96,24 +92,18 @@
96
  {
97
  "repo_name": "numba",
98
  "repo_link": "https://github.com/numba/numba",
99
- "category": "compiler",
100
  "github_about_section": "NumPy aware dynamic Python compiler using LLVM",
101
- "homepage_link": "https://numba.pydata.org",
102
- "github_topic_closest_fit": "compiler"
103
  },
104
  {
105
  "repo_name": "nvcc4jupyter",
106
  "repo_link": "https://github.com/andreinechaev/nvcc4jupyter",
107
- "category": "compiler",
108
  "github_about_section": "A plugin for Jupyter Notebook to run CUDA C/C++ code",
109
- "homepage_link": "https://nvcc4jupyter.readthedocs.io",
110
- "github_topic_closest_fit": "compiler"
111
-
112
  },
113
  {
114
  "repo_name": "CU2CL",
115
  "repo_link": "https://github.com/vtsynergy/CU2CL",
116
- "category": "CUDA / OpenCL",
117
  "github_about_section": "A prototype CUDA-to-OpenCL source-to-source translator, built on the Clang compiler framework",
118
  "homepage_link": "http://chrec.cs.vt.edu/cu2cl",
119
  "github_topic_closest_fit": "parallel-programming"
@@ -121,7 +111,6 @@
121
  {
122
  "repo_name": "cuda-python",
123
  "repo_link": "https://github.com/NVIDIA/cuda-python",
124
- "category": "CUDA / OpenCL",
125
  "github_about_section": "CUDA Python: Performance meets Productivity",
126
  "homepage_link": "https://nvidia.github.io/cuda-python",
127
  "github_topic_closest_fit": "parallel-programming"
@@ -129,15 +118,13 @@
129
  {
130
  "repo_name": "OpenCL-SDK",
131
  "repo_link": "https://github.com/KhronosGroup/OpenCL-SDK",
132
- "category": "CUDA / OpenCL",
133
  "github_about_section": "OpenCL SDK",
134
- "homepage_link": "https://khronos.org/opencl/",
135
  "github_topic_closest_fit": "parallel-programming"
136
  },
137
  {
138
  "repo_name": "pocl",
139
  "repo_link": "https://github.com/pocl/pocl",
140
- "category": "CUDA / OpenCL",
141
  "github_about_section": "pocl - Portable Computing Language",
142
  "homepage_link": "https://portablecl.org",
143
  "github_topic_closest_fit": "parallel-programming"
@@ -145,7 +132,6 @@
145
  {
146
  "repo_name": "SYCL-Docs",
147
  "repo_link": "https://github.com/KhronosGroup/SYCL-Docs",
148
- "category": "CUDA / OpenCL",
149
  "github_about_section": "SYCL Open Source Specification",
150
  "homepage_link": "https://khronos.org/sycl",
151
  "github_topic_closest_fit": "parallel-programming"
@@ -153,7 +139,6 @@
153
  {
154
  "repo_name": "triSYCL",
155
  "repo_link": "https://github.com/triSYCL/triSYCL",
156
- "category": "CUDA / OpenCL",
157
  "github_about_section": "Generic system-wide modern C++ for heterogeneous platforms with SYCL from Khronos Group",
158
  "homepage_link": "https://trisycl.github.io/triSYCL/Doxygen/triSYCL/html/index.html",
159
  "github_topic_closest_fit": "parallel-programming"
@@ -161,7 +146,6 @@
161
  {
162
  "repo_name": "ZLUDA",
163
  "repo_link": "https://github.com/vosen/ZLUDA",
164
- "category": "CUDA / OpenCL",
165
  "github_about_section": "CUDA on non-NVIDIA GPUs",
166
  "homepage_link": "https://vosen.github.io/ZLUDA",
167
  "github_topic_closest_fit": "parallel-programming"
@@ -180,7 +164,7 @@
180
  "category": "inference engine",
181
  "github_about_section": "Official inference library for Mistral models",
182
  "homepage_link": "https://mistral.ai",
183
- "github_topic_closest_fit": "llm-inference"
184
  },
185
  {
186
  "repo_name": "ollama",
@@ -201,10 +185,8 @@
201
  {
202
  "repo_name": "TensorRT",
203
  "repo_link": "https://github.com/NVIDIA/TensorRT",
204
- "category": "inference engine",
205
  "github_about_section": "NVIDIA TensorRT is an SDK for high-performance deep learning inference on NVIDIA GPUs. This repository contains the open source components of TensorRT.",
206
- "homepage_link": "https://developer.nvidia.com/tensorrt",
207
- "github_topic_closest_fit": "inference"
208
  },
209
  {
210
  "repo_name": "vllm",
@@ -246,8 +228,7 @@
246
  "repo_link": "https://github.com/gpu-mode/reference-kernels",
247
  "category": "kernel examples",
248
  "github_about_section": "Official Problem Sets / Reference Kernels for the GPU MODE Leaderboard!",
249
- "homepage_link": "https://gpumode.com",
250
- "github_topic_closest_fit": "gpu"
251
  },
252
  {
253
  "repo_name": "pytorch",
@@ -284,8 +265,7 @@
284
  "repo_link": "https://github.com/pytorch/executorch",
285
  "category": "model compiler",
286
  "github_about_section": "On-device AI across mobile, embedded and edge for PyTorch",
287
- "homepage_link": "https://executorch.ai",
288
- "github_topic_closest_fit": "compiler"
289
  },
290
  {
291
  "repo_name": "cutlass",
@@ -416,18 +396,14 @@
416
  {
417
  "repo_name": "accelerate",
418
  "repo_link": "https://github.com/huggingface/accelerate",
419
- "category": "training framework",
420
  "github_about_section": "A simple way to launch, train, and use PyTorch models on almost any device and distributed configuration, automatic mixed precision (including fp8), and easy-to-configure FSDP and DeepSpeed support.",
421
- "homepage_link": "https://huggingface.co/docs/accelerate",
422
- "github_topic_closest_fit": "gpu-acceleration"
423
  },
424
  {
425
  "repo_name": "aiter",
426
  "repo_link": "https://github.com/ROCm/aiter",
427
- "category": "ai tensor engine",
428
  "github_about_section": "AI Tensor Engine for ROCm",
429
- "homepage_link": "https://rocm.blogs.amd.com/software-tools-optimization/aiter-ai-tensor-engine/README.html",
430
- "github_topic_closest_fit": "gpu-acceleration"
431
  },
432
  {
433
  "repo_name": "ao",
@@ -440,8 +416,7 @@
440
  "repo_name": "burn",
441
  "repo_link": "https://github.com/tracel-ai/burn",
442
  "github_about_section": "Burn is a next generation tensor library and Deep Learning Framework that doesn't compromise on flexibility, efficiency and portability.",
443
- "homepage_link": "https://burn.dev",
444
- "github_topic_closest_fit": "machine-learning"
445
  },
446
  {
447
  "repo_name": "ccache",
@@ -483,10 +458,8 @@
483
  {
484
  "repo_name": "DeepSpeed",
485
  "repo_link": "https://github.com/deepspeedai/DeepSpeed",
486
- "category": "training framework",
487
  "github_about_section": "DeepSpeed is a deep learning optimization library that makes distributed training and inference easy, efficient, and effective.",
488
- "homepage_link": "https://deepspeed.ai",
489
- "github_topic_closest_fit": "gpu-acceleration"
490
  },
491
  {
492
  "repo_name": "dstack",
@@ -523,15 +496,13 @@
523
  "repo_name": "hhvm",
524
  "repo_link": "https://github.com/facebook/hhvm",
525
  "github_about_section": "A virtual machine for executing programs written in Hack.",
526
- "homepage_link": "https://hhvm.com",
527
- "github_topic_closest_fit": "hack"
528
  },
529
  {
530
  "repo_name": "hip",
531
  "repo_link": "https://github.com/ROCm/hip",
532
  "github_about_section": "HIP: C++ Heterogeneous-Compute Interface for Portability",
533
- "homepage_link": "https://rocmdocs.amd.com/projects/HIP",
534
- "github_topic_closest_fit": "hip"
535
  },
536
  {
537
  "repo_name": "hipCUB",
@@ -582,16 +553,13 @@
582
  {
583
  "repo_name": "lightning-thunder",
584
  "repo_link": "https://github.com/Lightning-AI/lightning-thunder",
585
- "category": "model compiler",
586
- "github_about_section": "PyTorch compiler that accelerates training and inference. Get built-in optimizations for performance, memory, parallelism, and easily write your own.",
587
- "github_topic_closest_fit": "compiler"
588
  },
589
  {
590
  "repo_name": "LMCache",
591
  "repo_link": "https://github.com/LMCache/LMCache",
592
  "github_about_section": "Supercharge Your LLM with the Fastest KV Cache Layer",
593
- "homepage_link": "https://lmcache.ai",
594
- "github_topic_closest_fit": "inference"
595
  },
596
  {
597
  "repo_name": "mcp-agent",
@@ -604,8 +572,7 @@
604
  "repo_name": "metaflow",
605
  "repo_link": "https://github.com/Netflix/metaflow",
606
  "github_about_section": "Build, Manage and Deploy AI/ML Systems",
607
- "homepage_link": "https://metaflow.org",
608
- "github_topic_closest_fit": "machine-learning"
609
  },
610
  {
611
  "repo_name": "MIOpen",
@@ -716,22 +683,19 @@
716
  {
717
  "repo_name": "rdma-core",
718
  "repo_link": "https://github.com/linux-rdma/rdma-core",
719
- "github_about_section": "RDMA core userspace libraries and daemons",
720
- "github_topic_closest_fit": "linux-kernel"
721
  },
722
  {
723
  "repo_name": "rocFFT",
724
  "repo_link": "https://github.com/ROCm/rocFFT",
725
  "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
726
- "homepage_link": "https://github.com/ROCm/rocm-libraries",
727
- "github_topic_closest_fit": "hip"
728
  },
729
  {
730
  "repo_name": "ROCm",
731
  "repo_link": "https://github.com/ROCm/ROCm",
732
  "github_about_section": "AMD ROCm Software - GitHub Home",
733
- "homepage_link": "https://rocm.docs.amd.com",
734
- "github_topic_closest_fit": "documentation"
735
  },
736
  {
737
  "repo_name": "rocm-systems",
@@ -742,22 +706,19 @@
742
  "repo_name": "rocPRIM",
743
  "repo_link": "https://github.com/ROCm/rocPRIM",
744
  "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
745
- "homepage_link": "https://github.com/ROCm/rocm-libraries",
746
- "github_topic_closest_fit": "hip"
747
  },
748
  {
749
  "repo_name": "rocRAND",
750
  "repo_link": "https://github.com/ROCm/rocRAND",
751
  "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
752
- "homepage_link": "https://github.com/ROCm/rocm-libraries",
753
- "github_topic_closest_fit": "hip"
754
  },
755
  {
756
  "repo_name": "rocSOLVER",
757
  "repo_link": "https://github.com/ROCm/rocSOLVER",
758
  "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
759
- "homepage_link": "https://github.com/ROCm/rocm-libraries",
760
- "github_topic_closest_fit": "rocm"
761
  },
762
  {
763
  "repo_name": "rocSPARSE",
@@ -821,8 +782,7 @@
821
  "repo_name": "Tensile",
822
  "repo_link": "https://github.com/ROCm/Tensile",
823
  "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
824
- "homepage_link": "https://github.com/ROCm/rocm-libraries",
825
- "github_topic_closest_fit": "gpu"
826
  },
827
  {
828
  "repo_name": "tflite-micro",
@@ -860,15 +820,13 @@
860
  "repo_name": "triton-runner",
861
  "repo_link": "https://github.com/toyaix/triton-runner",
862
  "github_about_section": "Multi-Level Triton Runner supporting Python, IR, PTX, and cubin.",
863
- "homepage_link": "https://triton-runner.org",
864
- "github_topic_closest_fit": "triton"
865
  },
866
  {
867
  "repo_name": "tritonparse",
868
  "repo_link": "https://github.com/meta-pytorch/tritonparse",
869
  "github_about_section": "TritonParse: A Compiler Tracer, Visualizer, and Reproducer for Triton Kernels",
870
- "homepage_link": "https://meta-pytorch.org/tritonparse",
871
- "github_topic_closest_fit": "triton"
872
  },
873
  {
874
  "repo_name": "trl",
@@ -879,15 +837,18 @@
879
  {
880
  "repo_name": "truss",
881
  "repo_link": "https://github.com/basetenlabs/truss",
 
882
  "github_about_section": "The simplest way to serve AI/ML models in production",
883
  "homepage_link": "https://truss.baseten.co",
884
- "github_topic_closest_fit": "machine-learning"
885
  },
886
  {
887
  "repo_name": "unsloth",
888
  "repo_link": "https://github.com/unslothai/unsloth",
 
889
  "github_about_section": "Fine-tuning & Reinforcement Learning for LLMs. Train OpenAI gpt-oss, DeepSeek-R1, Qwen3, Gemma 3, TTS 2x faster with 70% less VRAM.",
890
- "homepage_link": "https://docs.unsloth.ai"
 
891
  },
892
  {
893
  "repo_name": "verl",
 
10
  {
11
  "repo_name": "ray",
12
  "repo_link": "https://github.com/ray-project/ray",
 
13
  "github_about_section": "Ray is an AI compute engine. Ray consists of a core distributed runtime and a set of AI Libraries for accelerating ML workloads.",
14
+ "homepage_link": "https://ray.io"
 
15
  },
16
  {
17
  "repo_name": "flashinfer-bench",
 
78
  {
79
  "repo_name": "AdaptiveCpp",
80
  "repo_link": "https://github.com/AdaptiveCpp/AdaptiveCpp",
 
81
  "github_about_section": "Compiler for multiple programming models (SYCL, C++ standard parallelism, HIP/CUDA) for CPUs and GPUs from all vendors: The independent, community-driven compiler for C++-based heterogeneous programming models. Lets applications adapt themselves to all the hardware in the system - even at runtime!",
82
+ "homepage_link": "https://adaptivecpp.github.io"
 
83
  },
84
  {
85
  "repo_name": "llvm-project",
 
92
  {
93
  "repo_name": "numba",
94
  "repo_link": "https://github.com/numba/numba",
 
95
  "github_about_section": "NumPy aware dynamic Python compiler using LLVM",
96
+ "homepage_link": "https://numba.pydata.org"
 
97
  },
98
  {
99
  "repo_name": "nvcc4jupyter",
100
  "repo_link": "https://github.com/andreinechaev/nvcc4jupyter",
 
101
  "github_about_section": "A plugin for Jupyter Notebook to run CUDA C/C++ code",
102
+ "homepage_link": "https://nvcc4jupyter.readthedocs.io"
 
 
103
  },
104
  {
105
  "repo_name": "CU2CL",
106
  "repo_link": "https://github.com/vtsynergy/CU2CL",
 
107
  "github_about_section": "A prototype CUDA-to-OpenCL source-to-source translator, built on the Clang compiler framework",
108
  "homepage_link": "http://chrec.cs.vt.edu/cu2cl",
109
  "github_topic_closest_fit": "parallel-programming"
 
111
  {
112
  "repo_name": "cuda-python",
113
  "repo_link": "https://github.com/NVIDIA/cuda-python",
 
114
  "github_about_section": "CUDA Python: Performance meets Productivity",
115
  "homepage_link": "https://nvidia.github.io/cuda-python",
116
  "github_topic_closest_fit": "parallel-programming"
 
118
  {
119
  "repo_name": "OpenCL-SDK",
120
  "repo_link": "https://github.com/KhronosGroup/OpenCL-SDK",
 
121
  "github_about_section": "OpenCL SDK",
122
+ "homepage_link": "https://khronos.org/opencl",
123
  "github_topic_closest_fit": "parallel-programming"
124
  },
125
  {
126
  "repo_name": "pocl",
127
  "repo_link": "https://github.com/pocl/pocl",
 
128
  "github_about_section": "pocl - Portable Computing Language",
129
  "homepage_link": "https://portablecl.org",
130
  "github_topic_closest_fit": "parallel-programming"
 
132
  {
133
  "repo_name": "SYCL-Docs",
134
  "repo_link": "https://github.com/KhronosGroup/SYCL-Docs",
 
135
  "github_about_section": "SYCL Open Source Specification",
136
  "homepage_link": "https://khronos.org/sycl",
137
  "github_topic_closest_fit": "parallel-programming"
 
139
  {
140
  "repo_name": "triSYCL",
141
  "repo_link": "https://github.com/triSYCL/triSYCL",
 
142
  "github_about_section": "Generic system-wide modern C++ for heterogeneous platforms with SYCL from Khronos Group",
143
  "homepage_link": "https://trisycl.github.io/triSYCL/Doxygen/triSYCL/html/index.html",
144
  "github_topic_closest_fit": "parallel-programming"
 
146
  {
147
  "repo_name": "ZLUDA",
148
  "repo_link": "https://github.com/vosen/ZLUDA",
 
149
  "github_about_section": "CUDA on non-NVIDIA GPUs",
150
  "homepage_link": "https://vosen.github.io/ZLUDA",
151
  "github_topic_closest_fit": "parallel-programming"
 
164
  "category": "inference engine",
165
  "github_about_section": "Official inference library for Mistral models",
166
  "homepage_link": "https://mistral.ai",
167
+ "github_topic_closest_fit": "inference"
168
  },
169
  {
170
  "repo_name": "ollama",
 
185
  {
186
  "repo_name": "TensorRT",
187
  "repo_link": "https://github.com/NVIDIA/TensorRT",
 
188
  "github_about_section": "NVIDIA TensorRT is an SDK for high-performance deep learning inference on NVIDIA GPUs. This repository contains the open source components of TensorRT.",
189
+ "homepage_link": "https://developer.nvidia.com/tensorrt"
 
190
  },
191
  {
192
  "repo_name": "vllm",
 
228
  "repo_link": "https://github.com/gpu-mode/reference-kernels",
229
  "category": "kernel examples",
230
  "github_about_section": "Official Problem Sets / Reference Kernels for the GPU MODE Leaderboard!",
231
+ "homepage_link": "https://gpumode.com"
 
232
  },
233
  {
234
  "repo_name": "pytorch",
 
265
  "repo_link": "https://github.com/pytorch/executorch",
266
  "category": "model compiler",
267
  "github_about_section": "On-device AI across mobile, embedded and edge for PyTorch",
268
+ "homepage_link": "https://executorch.ai"
 
269
  },
270
  {
271
  "repo_name": "cutlass",
 
396
  {
397
  "repo_name": "accelerate",
398
  "repo_link": "https://github.com/huggingface/accelerate",
 
399
  "github_about_section": "A simple way to launch, train, and use PyTorch models on almost any device and distributed configuration, automatic mixed precision (including fp8), and easy-to-configure FSDP and DeepSpeed support.",
400
+ "homepage_link": "https://huggingface.co/docs/accelerate"
 
401
  },
402
  {
403
  "repo_name": "aiter",
404
  "repo_link": "https://github.com/ROCm/aiter",
 
405
  "github_about_section": "AI Tensor Engine for ROCm",
406
+ "homepage_link": "https://rocm.blogs.amd.com/software-tools-optimization/aiter-ai-tensor-engine/README.html"
 
407
  },
408
  {
409
  "repo_name": "ao",
 
416
  "repo_name": "burn",
417
  "repo_link": "https://github.com/tracel-ai/burn",
418
  "github_about_section": "Burn is a next generation tensor library and Deep Learning Framework that doesn't compromise on flexibility, efficiency and portability.",
419
+ "homepage_link": "https://burn.dev"
 
420
  },
421
  {
422
  "repo_name": "ccache",
 
458
  {
459
  "repo_name": "DeepSpeed",
460
  "repo_link": "https://github.com/deepspeedai/DeepSpeed",
 
461
  "github_about_section": "DeepSpeed is a deep learning optimization library that makes distributed training and inference easy, efficient, and effective.",
462
+ "homepage_link": "https://deepspeed.ai"
 
463
  },
464
  {
465
  "repo_name": "dstack",
 
496
  "repo_name": "hhvm",
497
  "repo_link": "https://github.com/facebook/hhvm",
498
  "github_about_section": "A virtual machine for executing programs written in Hack.",
499
+ "homepage_link": "https://hhvm.com"
 
500
  },
501
  {
502
  "repo_name": "hip",
503
  "repo_link": "https://github.com/ROCm/hip",
504
  "github_about_section": "HIP: C++ Heterogeneous-Compute Interface for Portability",
505
+ "homepage_link": "https://rocmdocs.amd.com/projects/HIP"
 
506
  },
507
  {
508
  "repo_name": "hipCUB",
 
553
  {
554
  "repo_name": "lightning-thunder",
555
  "repo_link": "https://github.com/Lightning-AI/lightning-thunder",
556
+ "github_about_section": "PyTorch compiler that accelerates training and inference. Get built-in optimizations for performance, memory, parallelism, and easily write your own."
 
 
557
  },
558
  {
559
  "repo_name": "LMCache",
560
  "repo_link": "https://github.com/LMCache/LMCache",
561
  "github_about_section": "Supercharge Your LLM with the Fastest KV Cache Layer",
562
+ "homepage_link": "https://lmcache.ai"
 
563
  },
564
  {
565
  "repo_name": "mcp-agent",
 
572
  "repo_name": "metaflow",
573
  "repo_link": "https://github.com/Netflix/metaflow",
574
  "github_about_section": "Build, Manage and Deploy AI/ML Systems",
575
+ "homepage_link": "https://metaflow.org"
 
576
  },
577
  {
578
  "repo_name": "MIOpen",
 
683
  {
684
  "repo_name": "rdma-core",
685
  "repo_link": "https://github.com/linux-rdma/rdma-core",
686
+ "github_about_section": "RDMA core userspace libraries and daemons"
 
687
  },
688
  {
689
  "repo_name": "rocFFT",
690
  "repo_link": "https://github.com/ROCm/rocFFT",
691
  "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
692
+ "homepage_link": "https://github.com/ROCm/rocm-libraries"
 
693
  },
694
  {
695
  "repo_name": "ROCm",
696
  "repo_link": "https://github.com/ROCm/ROCm",
697
  "github_about_section": "AMD ROCm Software - GitHub Home",
698
+ "homepage_link": "https://rocm.docs.amd.com"
 
699
  },
700
  {
701
  "repo_name": "rocm-systems",
 
706
  "repo_name": "rocPRIM",
707
  "repo_link": "https://github.com/ROCm/rocPRIM",
708
  "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
709
+ "homepage_link": "https://github.com/ROCm/rocm-libraries"
 
710
  },
711
  {
712
  "repo_name": "rocRAND",
713
  "repo_link": "https://github.com/ROCm/rocRAND",
714
  "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
715
+ "homepage_link": "https://github.com/ROCm/rocm-libraries"
 
716
  },
717
  {
718
  "repo_name": "rocSOLVER",
719
  "repo_link": "https://github.com/ROCm/rocSOLVER",
720
  "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
721
+ "homepage_link": "https://github.com/ROCm/rocm-libraries"
 
722
  },
723
  {
724
  "repo_name": "rocSPARSE",
 
782
  "repo_name": "Tensile",
783
  "repo_link": "https://github.com/ROCm/Tensile",
784
  "github_about_section": "[DEPRECATED] Moved to ROCm/rocm-libraries repo",
785
+ "homepage_link": "https://github.com/ROCm/rocm-libraries"
 
786
  },
787
  {
788
  "repo_name": "tflite-micro",
 
820
  "repo_name": "triton-runner",
821
  "repo_link": "https://github.com/toyaix/triton-runner",
822
  "github_about_section": "Multi-Level Triton Runner supporting Python, IR, PTX, and cubin.",
823
+ "homepage_link": "https://triton-runner.org"
 
824
  },
825
  {
826
  "repo_name": "tritonparse",
827
  "repo_link": "https://github.com/meta-pytorch/tritonparse",
828
  "github_about_section": "TritonParse: A Compiler Tracer, Visualizer, and Reproducer for Triton Kernels",
829
+ "homepage_link": "https://meta-pytorch.org/tritonparse"
 
830
  },
831
  {
832
  "repo_name": "trl",
 
837
  {
838
  "repo_name": "truss",
839
  "repo_link": "https://github.com/basetenlabs/truss",
840
+ "category": "inference engine",
841
  "github_about_section": "The simplest way to serve AI/ML models in production",
842
  "homepage_link": "https://truss.baseten.co",
843
+ "github_topic_closest_fit": "inference"
844
  },
845
  {
846
  "repo_name": "unsloth",
847
  "repo_link": "https://github.com/unslothai/unsloth",
848
+ "category": "fine tuning",
849
  "github_about_section": "Fine-tuning & Reinforcement Learning for LLMs. Train OpenAI gpt-oss, DeepSeek-R1, Qwen3, Gemma 3, TTS 2x faster with 70% less VRAM.",
850
+ "homepage_link": "https://docs.unsloth.ai",
851
+ "github_topic_closest_fit": "fine-tuning"
852
  },
853
  {
854
  "repo_name": "verl",