[ { "github_repo_link": "https://github.com/pytorch/pytorch", "repo_name": "pytorch", "repo_description": "Tensors and Dynamic neural networks in Python with strong GPU acceleration", "homepage_link": "https://pytorch.org", "repo_tags": [ "autograd", "deep-learning", "gpu", "machine-learning", "neural-network", "numpy", "python", "tensor" ], "category": "machine learning framework" },
{ "github_repo_link": "https://github.com/pytorch/executorch", "repo_name": "executorch", "repo_description": "On-device AI across mobile, embedded and edge for PyTorch", "homepage_link": "https://executorch.ai", "repo_tags": [ "deep-learning", "embedded", "gpu", "machine-learning", "mobile", "neural-network", "tensor" ], "category": "model compiler" },
{ "github_repo_link": "https://github.com/ggml-org/llama.cpp", "repo_name": "llama.cpp", "repo_description": "LLM inference in C/C++", "homepage_link": "", "repo_tags": [ "ggml" ], "category": "inference engine" },
{ "github_repo_link": "https://github.com/onnx/onnx", "repo_name": "onnx", "repo_description": "Open standard for machine learning interoperability", "homepage_link": "https://onnx.ai/", "repo_tags": [ "deep-learning", "deep-neural-networks", "dnn", "keras", "machine-learning", "ml", "neural-network", "onnx", "pytorch", "scikit-learn", "tensorflow" ] },
{ "github_repo_link": "https://github.com/ray-project/ray", "repo_name": "ray", "repo_description": "Ray is an AI compute engine. Ray consists of a core distributed runtime and a set of AI Libraries for accelerating ML workloads.", "homepage_link": "https://ray.io", "repo_tags": [ "data-science", "deep-learning", "deployment", "distributed", "hyperparameter-optimization", "hyperparameter-search", "large-language-models", "llm", "llm-inference", "llm-serving", "machine-learning", "optimization", "parallel", "python", "pytorch", "ray", "reinforcement-learning", "rllib", "serving", "tensorflow" ] },
{ "github_repo_link": "https://github.com/vllm-project/vllm", "repo_name": "vllm", "repo_description": "A high-throughput and memory-efficient inference and serving engine for LLMs", "homepage_link": "https://docs.vllm.ai", "repo_tags": [ "amd", "blackwell", "cuda", "deepseek", "deepseek-v3", "gpt", "gpt-oss", "inference", "kimi", "llama", "llm", "llm-serving", "model-serving", "moe", "openai", "pytorch", "qwen", "qwen3", "tpu", "transformer" ], "category": "inference engine" },
{ "github_repo_link": "https://github.com/ollama/ollama", "repo_name": "ollama", "repo_description": "Get up and running with OpenAI gpt-oss, DeepSeek-R1, Gemma 3 and other models.", "homepage_link": "https://ollama.com", "repo_tags": [ "deepseek", "gemma", "gemma3", "gemma3n", "go", "golang", "gpt-oss", "llama", "llama2", "llama3", "llava", "llm", "llms", "mistral", "ollama", "phi4", "qwen" ], "category": "inference engine" },
{ "github_repo_link": "https://github.com/sgl-project/sglang", "repo_name": "sglang", "repo_description": "SGLang is a fast serving framework for large language models and vision language models.", "homepage_link": "https://docs.sglang.ai/", "repo_tags": [ "blackwell", "cuda", "deepseek", "deepseek-r1", "deepseek-v3", "deepseek-v3-2", "gpt-oss", "inference", "kimi", "llama", "llama3", "llava", "llm", "llm-serving", "moe", "openai", "pytorch", "qwen3", "transformer", "vlm" ], "category": "inference engine" },
{ "github_repo_link": "https://github.com/pytorch/ao", "repo_name": "ao", "repo_description":
"PyTorch native quantization and sparsity for training and inference", "homepage_link": "https://pytorch.org/ao/stable/index.html", "repo_tags": [ "brrr", "cuda", "dtypes", "float8", "inference", "llama", "mx", "offloading", "optimizer", "pytorch", "quantization", "sparsity", "training", "transformer" ] },
{ "github_repo_link": "https://github.com/triton-lang/triton", "repo_name": "triton", "repo_description": "Development repository for the Triton language and compiler", "homepage_link": "https://triton-lang.org/", "repo_tags": [], "category": "dsl" },
{ "github_repo_link": "https://github.com/HazyResearch/ThunderKittens", "repo_name": "ThunderKittens", "repo_description": "Tile primitives for speedy kernels", "homepage_link": null, "repo_tags": [] },
{ "github_repo_link": "https://github.com/gpu-mode/reference-kernels", "repo_name": "reference-kernels", "repo_description": "Official Problem Sets / Reference Kernels for the GPU MODE Leaderboard!", "homepage_link": "https://gpumode.com", "repo_tags": [ "cuda", "gpu", "leaderboard", "triton" ], "category": "kernels" },
{ "github_repo_link": "https://github.com/guandeh17/Self-Forcing", "repo_name": "Self-Forcing", "repo_description": "Official codebase for \"Self Forcing: Bridging Training and Inference in Autoregressive Video Diffusion\" (NeurIPS 2025 Spotlight)", "homepage_link": "", "repo_tags": [] },
{ "github_repo_link": "https://github.com/chenfengxu714/StreamDiffusionV2", "repo_name": "StreamDiffusionV2", "repo_description": "StreamDiffusion, Live Stream APP", "homepage_link": "", "repo_tags": [] },
{ "github_repo_link": "https://github.com/cumulo-autumn/StreamDiffusion", "repo_name": "StreamDiffusion", "repo_description": "StreamDiffusion: A Pipeline-Level Solution for Real-Time Interactive Generation", "homepage_link": "", "repo_tags": [] },
{ "github_repo_link": "https://github.com/comfyanonymous/ComfyUI", "repo_name": "ComfyUI", "repo_description": "The most powerful and modular diffusion model GUI, api and backend with a graph/nodes interface.", "homepage_link": "https://www.comfy.org/", "repo_tags": [ "ai", "comfy", "comfyui", "python", "pytorch", "stable-diffusion" ] },
{ "github_repo_link": "https://github.com/Jeff-LiangF/streamv2v", "repo_name": "streamv2v", "repo_description": "Official Pytorch implementation of StreamV2V. ", "homepage_link": "https://jeff-liangf.github.io/projects/streamv2v/", "repo_tags": [] },
{ "github_repo_link": "https://github.com/letta-ai/letta", "repo_name": "letta", "repo_description": "Letta is the platform for building stateful agents: open AI with advanced memory that can learn and self-improve over time.", "homepage_link": "https://docs.letta.com/", "repo_tags": [ "ai", "ai-agents", "llm", "llm-agent" ] },
{ "github_repo_link": "https://github.com/jupyterlab/jupyterlab", "repo_name": "jupyterlab", "repo_description": "JupyterLab computational environment.", "homepage_link": "https://jupyterlab.readthedocs.io/", "repo_tags": [ "jupyter", "jupyterlab" ], "category": "ui" },
{ "github_repo_link": "https://github.com/ROCm/rocm-systems", "repo_name": "rocm-systems", "repo_description": "super repo for rocm systems projects", "homepage_link": "", "repo_tags": [] },
{ "github_repo_link": "https://github.com/NVIDIA/cutlass", "repo_name": "cutlass", "repo_description": "CUDA Templates and Python DSLs for High-Performance Linear Algebra", "homepage_link": "https://docs.nvidia.com/cutlass/index.html", "repo_tags": [ "cpp", "cuda", "deep-learning", "deep-learning-library", "gpu", "nvidia", "python" ] },
{ "github_repo_link": "https://github.com/pytorch/helion", "repo_name": "helion", "repo_description": "A Python-embedded DSL that makes it easy to write fast, scalable ML kernels with minimal boilerplate.", "homepage_link": null, "repo_tags": [], "category": "dsl" },
{ "github_repo_link": "https://github.com/jax-ml/jax", "repo_name": "jax", "repo_description": "Composable transformations of Python+NumPy programs: differentiate, vectorize, JIT to GPU/TPU, and more", "homepage_link": "https://docs.jax.dev", "repo_tags": [ "jax" ] },
{ "github_repo_link": "https://github.com/tensorflow/tensorflow", "repo_name": "tensorflow", "repo_description": "An Open Source Machine Learning Framework for Everyone", "homepage_link": "https://tensorflow.org", "repo_tags": [ "deep-learning", "deep-neural-networks", "distributed", "machine-learning", "ml", "neural-network", "python", "tensorflow" ], "category": "machine learning framework" },
{ "github_repo_link": "https://github.com/deepspeedai/DeepSpeed", "repo_name": "DeepSpeed", "repo_description": "DeepSpeed is a deep learning optimization library that makes distributed training and inference easy, efficient, and effective.", "homepage_link": "https://www.deepspeed.ai/", "repo_tags": [ "billion-parameters", "compression", "data-parallelism", "deep-learning", "gpu", "inference", "machine-learning", "mixture-of-experts", "model-parallelism", "pipeline-parallelism", "pytorch", "trillion-parameters", "zero" ] },
{ "github_repo_link": "https://github.com/triton-inference-server/server", "repo_name": "server", "repo_description": "The Triton Inference Server provides an optimized cloud and edge inferencing solution. ", "homepage_link": "https://docs.nvidia.com/deeplearning/triton-inference-server/user-guide/docs/index.html", "repo_tags": [ "cloud", "datacenter", "deep-learning", "edge", "gpu", "inference", "machine-learning" ] },
{ "github_repo_link": "https://github.com/ROCm/ROCm", "repo_name": "ROCm", "repo_description": "AMD ROCm™ Software - GitHub Home", "homepage_link": "https://rocm.docs.amd.com", "repo_tags": [ "documentation" ] },
{ "github_repo_link": "https://github.com/llvm/llvm-project", "repo_name": "llvm-project", "repo_description": "The LLVM Project is a collection of modular and reusable compiler and toolchain technologies.", "homepage_link": "http://llvm.org", "repo_tags": [], "category": "compiler" },
{ "github_repo_link": "https://github.com/cwpearson/cupti", "repo_name": "cupti", "repo_description": "Profile how CUDA applications create and modify data in memory.", "homepage_link": "", "repo_tags": [], "category": "profiler" },
{ "github_repo_link": "https://github.com/LLNL/hatchet", "repo_name": "hatchet", "repo_description": "Graph-indexed Pandas DataFrames for analyzing hierarchical performance data", "homepage_link": "https://llnl-hatchet.readthedocs.io", "repo_tags": [ "comparative-analysis", "data-analytics", "graphs", "hierarchical-data", "hpc", "performance", "performance-analysis", "python", "radiuss", "trees" ], "category": "profiler" },
{ "github_repo_link": "https://github.com/toyaix/triton-runner", "repo_name": "triton-runner", "repo_description": "Multi-Level Triton Runner supporting Python, IR, PTX, and cubin.", "homepage_link": "https://triton-runner.org", "repo_tags": [ "ai-infra", "cuda", "tools", "triton" ] },
{ "github_repo_link": "https://github.com/ByteDance-Seed/Triton-distributed", "repo_name": "Triton-distributed", "repo_description": "Distributed Compiler based on Triton for Parallel Systems", "homepage_link": "https://triton-distributed.readthedocs.io/en/latest/", "repo_tags": [], "category": "model compiler" },
{
"github_repo_link": "https://github.com/linkedin/Liger-Kernel", "repo_name": "Liger-Kernel", "repo_description": "Efficient Triton Kernels for LLM Training", "homepage_link": "https://openreview.net/pdf?id=36SjAIT42G", "repo_tags": [ "finetuning", "gemma2", "hacktoberfest", "llama", "llama3", "llm-training", "llms", "mistral", "phi3", "triton", "triton-kernels" ], "category": "kernels" }, { "github_repo_link": "https://github.com/thunlp/TritonBench", "repo_name": "TritonBench", "repo_description": "TritonBench: Benchmarking Large Language Model Capabilities for Generating Triton Operators", "homepage_link": "", "repo_tags": [], "category": "benchmark" }, { "github_repo_link": "https://github.com/meta-pytorch/tritonparse", "repo_name": "tritonparse", "repo_description": "TritonParse: A Compiler Tracer, Visualizer, and Reproducer for Triton Kernels", "homepage_link": "https://meta-pytorch.org/tritonparse/", "repo_tags": [ "compiler", "debugging", "gpu-kernel", "interactive-visualization", "ir-analysis", "ir-visualization", "pytorch", "structured-logging", "triton" ] }, { "github_repo_link": "https://github.com/elastic/elasticsearch", "repo_name": "elasticsearch", "repo_description": "Free and Open Source, Distributed, RESTful Search Engine", "homepage_link": "https://www.elastic.co/products/elasticsearch", "repo_tags": [ "elasticsearch", "java", "search-engine" ], "category": "search engine" }, { "github_repo_link": "https://github.com/kubernetes/kubernetes", "repo_name": "kubernetes", "repo_description": "Production-Grade Container Scheduling and Management", "homepage_link": "https://kubernetes.io", "repo_tags": [ "cncf", "containers", "go", "kubernetes" ] }, { "github_repo_link": "https://github.com/modelcontextprotocol/modelcontextprotocol", "repo_name": "modelcontextprotocol", "repo_description": "Specification and documentation for the Model Context Protocol", "homepage_link": "https://modelcontextprotocol.io", "repo_tags": [] }, { "github_repo_link": 
"https://github.com/lastmile-ai/mcp-agent", "repo_name": "mcp-agent", "repo_description": "Build effective agents using Model Context Protocol and simple workflow patterns", "homepage_link": "", "repo_tags": [ "agents", "ai", "ai-agents", "llm", "llms", "mcp", "model-context-protocol", "python" ] },
{ "github_repo_link": "https://github.com/milvus-io/milvus", "repo_name": "milvus", "repo_description": "Milvus is a high-performance, cloud-native vector database built for scalable vector ANN search", "homepage_link": "https://milvus.io", "repo_tags": [ "anns", "cloud-native", "diskann", "distributed", "embedding-database", "embedding-similarity", "embedding-store", "faiss", "golang", "hnsw", "image-search", "llm", "nearest-neighbor-search", "rag", "vector-database", "vector-search", "vector-similarity", "vector-store" ], "category": "vector database" },
{ "github_repo_link": "https://github.com/gaoj0017/RaBitQ", "repo_name": "RaBitQ", "repo_description": "[SIGMOD 2024] RaBitQ: Quantizing High-Dimensional Vectors with a Theoretical Error Bound for Approximate Nearest Neighbor Search", "homepage_link": "https://github.com/VectorDB-NTU/RaBitQ-Library", "repo_tags": [ "high-dimensional-vectors", "nearest-neighbor-search", "quantization" ] },
{ "github_repo_link": "https://github.com/Airtable/airtable.js", "repo_name": "airtable.js", "repo_description": "Airtable javascript client", "homepage_link": null, "repo_tags": [] },
{ "github_repo_link": "https://github.com/mistralai/mistral-inference", "repo_name": "mistral-inference", "repo_description": "Official inference library for Mistral models", "homepage_link": "https://mistral.ai/", "repo_tags": [ "llm", "llm-inference", "mistralai" ], "category": "inference engine" },
{ "github_repo_link": "https://github.com/dstackai/dstack", "repo_name": "dstack", "repo_description": "dstack is an open-source control plane for running development, training, and inference jobs on GPUs—across hyperscalers, neoclouds, or on-prem.",
"homepage_link": "https://dstack.ai", "repo_tags": [ "amd", "cloud", "containers", "docker", "fine-tuning", "gpu", "inference", "k8s", "kubernetes", "llms", "machine-learning", "nvidia", "orchestration", "python", "slurm", "training" ] },
{ "github_repo_link": "https://github.com/numpy/numpy", "repo_name": "numpy", "repo_description": "The fundamental package for scientific computing with Python.", "homepage_link": "https://numpy.org", "repo_tags": [ "numpy", "python" ], "category": "python library" },
{ "github_repo_link": "https://github.com/scipy/scipy", "repo_name": "scipy", "repo_description": "SciPy library main repository", "homepage_link": "https://scipy.org", "repo_tags": [ "algorithms", "closember", "python", "scientific-computing", "scipy" ], "category": "python library" },
{ "github_repo_link": "https://github.com/numba/numba", "repo_name": "numba", "repo_description": "NumPy aware dynamic Python compiler using LLVM", "homepage_link": "https://numba.pydata.org/", "repo_tags": [ "compiler", "cuda", "llvm", "numba", "numpy", "parallel", "python" ] },
{ "github_repo_link": "https://github.com/sandialabs/torchdendrite", "repo_name": "torchdendrite", "repo_description": "Dendrites for PyTorch and SNNTorch neural networks ", "homepage_link": "", "repo_tags": [ "scr-3078" ], "category": "machine learning framework" },
{ "github_repo_link": "https://github.com/Lightning-AI/lightning-thunder", "repo_name": "lightning-thunder", "repo_description": "PyTorch compiler that accelerates training and inference. Get built-in optimizations for performance, memory, parallelism, and easily write your own.", "homepage_link": "", "repo_tags": [] },
{ "github_repo_link": "https://github.com/Ascend/pytorch", "repo_name": "pytorch", "repo_description": "Ascend PyTorch adapter (torch_npu). Mirror of https://gitee.com/ascend/pytorch", "homepage_link": "https://ascend.github.io/docs/", "repo_tags": [ "ascend", "deep-learning", "pytorch" ] },
{ "github_repo_link": "https://github.com/pytorch/torchdynamo", "repo_name": "torchdynamo", "repo_description": "A Python-level JIT compiler designed to make unmodified PyTorch programs faster.", "homepage_link": "", "repo_tags": [] },
{ "github_repo_link": "https://github.com/modular/modular", "repo_name": "modular", "repo_description": "The Modular Platform (includes MAX & Mojo)", "homepage_link": "https://docs.modular.com/", "repo_tags": [ "ai", "language", "machine-learning", "max", "modular", "mojo", "programming-language" ] },
{ "github_repo_link": "https://github.com/microsoft/TileIR", "repo_name": "TileIR", "repo_description": null, "homepage_link": null, "repo_tags": [], "category": "dsl" },
{ "github_repo_link": "https://github.com/pytorch/torchtitan", "repo_name": "torchtitan", "repo_description": "A PyTorch native platform for training generative AI models", "homepage_link": "", "repo_tags": [] },
{ "github_repo_link": "https://github.com/NVIDIA/cudnn-frontend", "repo_name": "cudnn-frontend", "repo_description": "cudnn_frontend provides a c++ wrapper for the cudnn backend API and samples on how to use it", "homepage_link": null, "repo_tags": [] },
{ "github_repo_link": "https://github.com/pytorch/ort", "repo_name": "ort", "repo_description": "Accelerate PyTorch models with ONNX Runtime", "homepage_link": "", "repo_tags": [] },
{ "github_repo_link": "https://github.com/NVIDIA/nccl", "repo_name": "nccl", "repo_description": "Optimized primitives for collective multi-GPU communication", "homepage_link": "https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/index.html", "repo_tags": [ "communications", "cpp", "cuda", "deep-learning", "gpu", "nvidia" ] },
{ "github_repo_link": "https://github.com/sgl-project/ome", "repo_name": "ome", "repo_description": "OME is a Kubernetes operator for enterprise-grade management and serving of Large Language Models (LLMs)", "homepage_link": "http://docs.sglang.ai/ome/", "repo_tags": [ "deepseek", "k8s", "kimi-k2", "llama", "llm", "llm-inference", "model-as-a-service", "model-serving", "multi-node-kubernetes", "oracle-cloud", "sgalng", "sglang" ] },
{ "github_repo_link": "https://github.com/volcengine/verl", "repo_name": "verl", "repo_description": "verl: Volcano Engine Reinforcement Learning for LLMs", "homepage_link": "https://verl.readthedocs.io/en/latest/index.html", "repo_tags": [] },
{ "github_repo_link": "https://github.com/aws-neuron/neuronx-distributed-inference", "repo_name": "neuronx-distributed-inference", "repo_description": null, "homepage_link": null, "repo_tags": [], "category": "inference engine" },
{ "github_repo_link": "https://github.com/meta-pytorch/monarch", "repo_name": "monarch", "repo_description": "PyTorch Single Controller", "homepage_link": "https://meta-pytorch.org/monarch", "repo_tags": [] },
{ "github_repo_link": "https://github.com/ai-dynamo/nixl", "repo_name": "nixl", "repo_description": "NVIDIA Inference Xfer Library (NIXL)", "homepage_link": "", "repo_tags": [] },
{ "github_repo_link": "https://github.com/LMCache/LMCache", "repo_name": "LMCache", "repo_description": "Supercharge Your LLM with the Fastest KV Cache Layer", "homepage_link": "https://lmcache.ai/", "repo_tags": [ "amd", "cuda", "fast", "inference", "kv-cache", "llm", "pytorch", "rocm", "speed", "vllm" ] },
{ "github_repo_link": "https://github.com/linux-rdma/rdma-core", "repo_name": "rdma-core", "repo_description": "RDMA core userspace libraries and daemons", "homepage_link": null, "repo_tags": [ "infiniband", "iwarp", "kernel-rdma-drivers", "linux-kernel", "rdma", "roce", "userspace-libraries" ] },
{ "github_repo_link": "https://github.com/NVIDIA/TensorRT", "repo_name": "TensorRT", "repo_description": "NVIDIA® TensorRT™ is an SDK for high-performance deep learning inference on NVIDIA GPUs. This repository contains the open source components of TensorRT.", "homepage_link": "https://developer.nvidia.com/tensorrt", "repo_tags": [ "deep-learning", "gpu-acceleration", "inference", "nvidia", "tensorrt" ] },
{ "github_repo_link": "https://github.com/Cambridge-ICCS/FTorch", "repo_name": "FTorch", "repo_description": "A library for directly calling PyTorch ML models from Fortran.", "homepage_link": "https://cambridge-iccs.github.io/FTorch/", "repo_tags": [ "deep-learning", "fortran", "hacktoberfest", "interoperability", "libtorch", "machine-learning", "python", "pytorch", "torch" ] },
{ "github_repo_link": "https://github.com/facebook/hhvm", "repo_name": "hhvm", "repo_description": "A virtual machine for executing programs written in Hack.", "homepage_link": "https://hhvm.com", "repo_tags": [ "hack", "hacklang", "hhvm", "php" ] },
{ "github_repo_link": "https://github.com/vosen/ZLUDA", "repo_name": "ZLUDA", "repo_description": "CUDA on non-NVIDIA GPUs", "homepage_link": "https://vosen.github.io/ZLUDA/", "repo_tags": [ "cuda", "rust" ] },
{ "github_repo_link": "https://github.com/vtsynergy/CU2CL", "repo_name": "CU2CL", "repo_description": "A prototype CUDA-to-OpenCL source-to-source translator, built on the Clang compiler framework", "homepage_link": "http://chrec.cs.vt.edu/cu2cl", "repo_tags": [] },
{ "github_repo_link": "https://github.com/pocl/pocl", "repo_name": "pocl", "repo_description": "pocl - Portable Computing Language", "homepage_link": "https://portablecl.org", "repo_tags": [ "heterogeneous-parallel-programming", "opencl" ] },
{ "github_repo_link": "https://github.com/apache/spark", "repo_name": "spark", "repo_description": "Apache Spark - A unified analytics engine for large-scale data processing", "homepage_link": "https://spark.apache.org/", "repo_tags": [ "big-data", "java", "jdbc", "python", "r", "scala", "spark", "sql" ] },
{ "github_repo_link": "https://github.com/codelion/openevolve", "repo_name": "openevolve", "repo_description":
"Open-source implementation of AlphaEvolve", "homepage_link": "", "repo_tags": [ "alpha-evolve", "alphacode", "alphaevolve", "coding-agent", "deepmind", "deepmind-lab", "discovery", "distributed-evolutionary-algorithms", "evolutionary-algorithms", "evolutionary-computation", "genetic-algorithm", "genetic-algorithms", "iterative-methods", "iterative-refinement", "llm-engineering", "llm-ensemble", "llm-inference", "openevolve", "optimize" ] }, { "github_repo_link": "https://github.com/ROCm/hipBLAS", "repo_name": "hipBLAS", "repo_description": "[DEPRECATED] Moved to ROCm/rocm-libraries repo", "homepage_link": "https://github.com/ROCm/rocm-libraries", "repo_tags": [ "blas", "cuda", "hip", "rocm" ] }, { "github_repo_link": "https://github.com/ROCm/roctracer", "repo_name": "roctracer", "repo_description": "[DEPRECATED] Moved to ROCm/rocm-systems repo ", "homepage_link": "https://github.com/ROCm/rocm-systems", "repo_tags": [] }, { "github_repo_link": "https://github.com/huggingface/peft", "repo_name": "peft", "repo_description": "🤗 PEFT: State-of-the-art Parameter-Efficient Fine-Tuning.", "homepage_link": "https://huggingface.co/docs/peft", "repo_tags": [ "adapter", "diffusion", "fine-tuning", "llm", "lora", "parameter-efficient-learning", "peft", "python", "pytorch", "transformers" ] }, { "github_repo_link": "https://github.com/ROCm/hip", "repo_name": "hip", "repo_description": "HIP: C++ Heterogeneous-Compute Interface for Portability", "homepage_link": "https://rocmdocs.amd.com/projects/HIP/", "repo_tags": [ "cuda", "hip", "hip-kernel-language", "hip-portability", "hip-runtime", "hipify" ] }, { "github_repo_link": "https://github.com/ROCm/composable_kernel", "repo_name": "composable_kernel", "repo_description": "Composable Kernel: Performance Portable Programming Model for Machine Learning Tensor Operators", "homepage_link": "https://rocm.docs.amd.com/projects/composable_kernel/en/latest/", "repo_tags": [] }, { "github_repo_link": "https://github.com/ROCm/aiter", 
"repo_name": "aiter", "repo_description": "AI Tensor Engine for ROCm", "homepage_link": null, "repo_tags": [] },
{ "github_repo_link": "https://github.com/AMDResearch/intelliperf", "repo_name": "intelliperf", "repo_description": "Automated bottleneck detection and solution orchestration", "homepage_link": "", "repo_tags": [ "amd", "genai", "gpu", "hip", "instinct", "llm", "performance", "rocm" ] },
{ "github_repo_link": "https://github.com/AMD-AGI/GEAK-agent", "repo_name": "GEAK-agent", "repo_description": "It is an LLM-based AI agent, which can write correct and efficient gpu kernels automatically.", "homepage_link": null, "repo_tags": [] },
{ "github_repo_link": "https://github.com/AMD-AGI/torchtitan", "repo_name": "torchtitan", "repo_description": "A PyTorch native platform for training generative AI models", "homepage_link": "", "repo_tags": [] },
{ "github_repo_link": "https://github.com/AMD-AGI/hipBLASLt", "repo_name": "hipBLASLt", "repo_description": "hipBLASLt is a library that provides general matrix-matrix operations with a flexible API and extends functionalities beyond a traditional BLAS library", "homepage_link": "https://rocm.docs.amd.com/projects/hipBLASLt/en/latest/index.html", "repo_tags": [] },
{ "github_repo_link": "https://github.com/AMD-AGI/rocm-torchtitan", "repo_name": "rocm-torchtitan", "repo_description": null, "homepage_link": null, "repo_tags": [] },
{ "github_repo_link": "https://github.com/HazyResearch/Megakernels", "repo_name": "Megakernels", "repo_description": "kernels, of the mega variety", "homepage_link": null, "repo_tags": [] },
{ "github_repo_link": "https://github.com/huggingface/kernels", "repo_name": "kernels", "repo_description": "Load compute kernels from the Hub", "homepage_link": "", "repo_tags": [], "category": "kernels" },
{ "github_repo_link": "https://github.com/tile-ai/tilelang", "repo_name": "tilelang", "repo_description": " Domain-specific language designed to streamline the development of high-performance GPU/CPU/Accelerators kernels", "homepage_link": "https://tilelang.com/", "repo_tags": [], "category": "dsl" },
{ "github_repo_link": "https://github.com/opencv/opencv", "repo_name": "opencv", "repo_description": "Open Source Computer Vision Library", "homepage_link": "https://opencv.org", "repo_tags": [ "c-plus-plus", "computer-vision", "deep-learning", "image-processing", "opencv" ] },
{ "github_repo_link": "https://github.com/Lightning-AI/lightning-thunder", "repo_name": "lightning-thunder", "repo_description": "PyTorch compiler that accelerates training and inference. Get built-in optimizations for performance, memory, parallelism, and easily write your own.", "homepage_link": "", "repo_tags": [] },
{ "github_repo_link": "https://github.com/tracel-ai/burn", "repo_name": "burn", "repo_description": "Burn is a next generation tensor library and Deep Learning Framework that doesn't compromise on flexibility, efficiency and portability.", "homepage_link": "https://burn.dev", "repo_tags": [ "autodiff", "cross-platform", "cuda", "deep-learning", "kernel-fusion", "machine-learning", "metal", "ndarray", "neural-network", "onnx", "pytorch", "rocm", "rust", "scientific-computing", "tensor", "vulkan", "wasm", "webgpu" ] },
{ "github_repo_link": "https://github.com/huggingface/kernels-community", "repo_name": "kernels-community", "repo_description": "Kernel sources for https://huggingface.co/kernels-community", "homepage_link": null, "repo_tags": [], "category": "kernels" },
{ "github_repo_link": "https://github.com/flashinfer-ai/flashinfer-bench", "repo_name": "flashinfer-bench", "repo_description": "Building the Virtuous Cycle for AI-driven LLM Systems", "homepage_link": "https://bench.flashinfer.ai", "repo_tags": [], "category": "benchmark" },
{ "github_repo_link": "https://github.com/OSC/ondemand", "repo_name": "ondemand", "repo_description": "Supercomputing. Seamlessly. Open, Interactive HPC Via the Web", "homepage_link": "https://openondemand.org/", "repo_tags": [ "gateway", "hacktoberfest", "hpc", "hpc-applications" ] },
{ "github_repo_link": "https://github.com/flashinfer-ai/flashinfer", "repo_name": "flashinfer", "repo_description": "FlashInfer: Kernel Library for LLM Serving", "homepage_link": "https://flashinfer.ai", "repo_tags": [ "attention", "cuda", "distributed-inference", "gpu", "jit", "large-large-models", "llm-inference", "moe", "nvidia", "pytorch" ] },
{ "github_repo_link": "https://github.com/ScalingIntelligence/KernelBench", "repo_name": "KernelBench", "repo_description": "KernelBench: Can LLMs Write GPU Kernels? - Benchmark with Torch -> CUDA problems", "homepage_link": "https://scalingintelligence.stanford.edu/blogs/kernelbench/", "repo_tags": [ "benchmark", "codegen", "evaluation", "gpu" ], "category": "benchmark" },
{ "github_repo_link": "https://github.com/thunlp/TritonBench", "repo_name": "TritonBench", "repo_description": "TritonBench: Benchmarking Large Language Model Capabilities for Generating Triton Operators", "homepage_link": "", "repo_tags": [], "category": "benchmark" },
{ "github_repo_link": "https://github.com/AutomataLab/cuJSON", "repo_name": "cuJSON", "repo_description": "cuJSON: A Highly Parallel JSON Parser for GPUs", "homepage_link": "", "repo_tags": [] },
{ "github_repo_link": "https://github.com/Netflix/metaflow", "repo_name": "metaflow", "repo_description": "Build, Manage and Deploy AI/ML Systems", "homepage_link": "https://metaflow.org", "repo_tags": [ "agents", "ai", "aws", "azure", "cost-optimization", "datascience", "distributed-training", "gcp", "generative-ai", "high-performance-computing", "kubernetes", "llm", "llmops", "machine-learning", "ml", "ml-infrastructure", "ml-platform", "mlops", "model-management", "python" ] },
{ "github_repo_link": "https://github.com/harmonic-ai/IMO2025", "repo_name": "IMO2025", "repo_description": null, "homepage_link": null, "repo_tags": [] },
{
"github_repo_link": "https://github.com/leanprover/lean4", "repo_name": "lean4", "repo_description": "Lean 4 programming language and theorem prover", "homepage_link": "https://lean-lang.org", "repo_tags": [ "lean", "lean4" ] },
{ "github_repo_link": "https://github.com/NVIDIA/warp", "repo_name": "warp", "repo_description": "A Python framework for accelerated simulation, data generation and spatial computing.", "homepage_link": "https://nvidia.github.io/warp/", "repo_tags": [ "cuda", "differentiable-programming", "gpu", "gpu-acceleration", "nvidia", "nvidia-warp", "python" ] },
{ "github_repo_link": "https://github.com/NVIDIA/cuda-python", "repo_name": "cuda-python", "repo_description": "CUDA Python: Performance meets Productivity", "homepage_link": "https://nvidia.github.io/cuda-python/", "repo_tags": [] },
{ "github_repo_link": "https://github.com/basetenlabs/truss", "repo_name": "truss", "repo_description": "The simplest way to serve AI/ML models in production", "homepage_link": "https://truss.baseten.co", "repo_tags": [ "artificial-intelligence", "easy-to-use", "falcon", "inference-api", "inference-server", "machine-learning", "model-serving", "open-source", "packaging", "stable-diffusion", "whisper", "wizardlm" ] },
{ "github_repo_link": "https://github.com/laude-institute/terminal-bench", "repo_name": "terminal-bench", "repo_description": "A benchmark for LLMs on complicated tasks in the terminal", "homepage_link": "https://www.tbench.ai", "repo_tags": [], "category": "benchmark" },
{ "github_repo_link": "https://github.com/block/goose", "repo_name": "goose", "repo_description": "an open source, extensible AI agent that goes beyond code suggestions - install, execute, edit, and test with any LLM", "homepage_link": "https://block.github.io/goose/", "repo_tags": [ "hacktoberfest", "mcp" ], "category": "agent" },
{ "github_repo_link": "https://github.com/kvcache-ai/Mooncake", "repo_name": "Mooncake", "repo_description": "Mooncake is the serving platform for Kimi, a leading LLM service provided by Moonshot AI.", "homepage_link": "https://kvcache-ai.github.io/Mooncake/", "repo_tags": [ "disaggregation", "inference", "kvcache", "llm", "rdma", "sglang", "vllm" ] },
{ "github_repo_link": "https://github.com/SWE-bench/SWE-bench", "repo_name": "SWE-bench", "repo_description": "SWE-bench: Can Language Models Resolve Real-world Github Issues?", "homepage_link": "https://www.swebench.com", "repo_tags": [ "benchmark", "language-model", "software-engineering" ], "category": "benchmark" },
{ "github_repo_link": "https://github.com/Dao-AILab/quack", "repo_name": "quack", "repo_description": "A Quirky Assortment of CuTe Kernels", "homepage_link": "", "repo_tags": [], "category": "kernels" },
{ "github_repo_link": "https://github.com/KhronosGroup/SYCL-Docs", "repo_name": "SYCL-Docs", "repo_description": "SYCL Open Source Specification", "homepage_link": null, "repo_tags": [] },
{ "github_repo_link": "https://github.com/triSYCL/triSYCL", "repo_name": "triSYCL", "repo_description": " Generic system-wide modern C++ for heterogeneous platforms with SYCL from Khronos Group", "homepage_link": "", "repo_tags": [ "cpp", "cpp20", "fpga", "gpu-computing", "heterogeneous-parallel-programming", "opencl", "spir", "sycl", "trisycl" ] },
{ "github_repo_link": "https://github.com/pybind/pybind11", "repo_name": "pybind11", "repo_description": "Seamless operability between C++11 and Python", "homepage_link": "https://pybind11.readthedocs.io/", "repo_tags": [ "bindings", "python" ] },
{ "github_repo_link": "https://github.com/andreinechaev/nvcc4jupyter", "repo_name": "nvcc4jupyter", "repo_description": "A plugin for Jupyter Notebook to run CUDA C/C++ code", "homepage_link": null, "repo_tags": [], "category": "compiler" },
{ "github_repo_link": "https://github.com/ROCm/rocSOLVER", "repo_name": "rocSOLVER", "repo_description": "[DEPRECATED] Moved to ROCm/rocm-libraries repo", "homepage_link": "https://github.com/ROCm/rocm-libraries", "repo_tags": [
"lapack", "linear-algebra", "rocm" ] }, { "github_repo_link": "https://github.com/ROCm/Tensile", "repo_name": "Tensile", "repo_description": "[DEPRECATED] Moved to ROCm/rocm-libraries repo", "homepage_link": "https://github.com/ROCm/rocm-libraries", "repo_tags": [ "amd", "assembly", "auto-tuning", "blas", "dnn", "gemm", "gpu", "gpu-acceleration", "gpu-computing", "hip", "machine-learning", "matrix-multiplication", "neural-networks", "opencl", "python", "radeon", "tensor-contraction", "tensors" ] }, { "github_repo_link": "https://github.com/ROCm/rocPRIM", "repo_name": "rocPRIM", "repo_description": "[DEPRECATED] Moved to ROCm/rocm-libraries repo ", "homepage_link": "https://github.com/ROCm/rocm-libraries", "repo_tags": [ "amd", "cuda", "gpu", "hip", "parallel", "primitive", "rocm" ] }, { "github_repo_link": "https://github.com/ROCm/hipCUB", "repo_name": "hipCUB", "repo_description": "[DEPRECATED] Moved to ROCm/rocm-libraries repo ", "homepage_link": "https://github.com/ROCm/rocm-libraries", "repo_tags": [] }, { "github_repo_link": "https://github.com/ROCm/rocFFT", "repo_name": "rocFFT", "repo_description": "[DEPRECATED] Moved to ROCm/rocm-libraries repo", "homepage_link": "https://github.com/ROCm/rocm-libraries", "repo_tags": [ "amd", "fast", "fft", "fourier", "gpu", "hip", "rocm", "transform" ] }, { "github_repo_link": "https://github.com/ROCm/rocSPARSE", "repo_name": "rocSPARSE", "repo_description": "[DEPRECATED] Moved to ROCm/rocm-libraries repo", "homepage_link": "https://github.com/ROCm/rocm-libraries", "repo_tags": [] }, { "github_repo_link": "https://github.com/ROCm/rocRAND", "repo_name": "rocRAND", "repo_description": "[DEPRECATED] Moved to ROCm/rocm-libraries repo ", "homepage_link": "https://github.com/ROCm/rocm-libraries", "repo_tags": [ "cuda", "gpu", "hip", "random", "rng", "rocm" ] }, { "github_repo_link": "https://github.com/ROCm/MIOpen", "repo_name": "MIOpen", "repo_description": "[DEPRECATED] Moved to ROCm/rocm-libraries repo", "homepage_link": 
"https://github.com/ROCm/rocm-libraries", "repo_tags": [] }, { "github_repo_link": "https://github.com/Reference-LAPACK/lapack", "repo_name": "lapack", "repo_description": "LAPACK development repository", "homepage_link": "", "repo_tags": [ "blas", "eigenvalues", "eigenvectors", "lapack", "lapacke", "linear-algebra", "linear-equations", "matrix-factorization", "singular-values", "svd" ] }, { "github_repo_link": "https://github.com/ccache/ccache", "repo_name": "ccache", "repo_description": "ccache – a fast compiler cache", "homepage_link": "https://ccache.dev", "repo_tags": [ "c", "c-plus-plus", "cache", "ccache", "clang", "compiler", "cplusplus", "cpp", "gcc", "msvc" ], "category": "compiler" }, { "github_repo_link": "https://github.com/ROCm/omnitrace", "repo_name": "omnitrace", "repo_description": "Omnitrace: Application Profiling, Tracing, and Analysis", "homepage_link": "https://rocm.docs.amd.com/projects/omnitrace/en/docs-6.2.4/", "repo_tags": [ "binary-instrumentation", "code-coverage", "cpu-profiler", "dynamic-instrumentation", "gpu-profiler", "hardware-counters", "instrumentation-profiler", "linux", "performance-analysis", "performance-metrics", "performance-monitoring", "profiler", "profiling", "python", "python-profiler", "sampling-profiler", "tracing" ] }, { "github_repo_link": "https://github.com/KhronosGroup/OpenCL-SDK", "repo_name": "OpenCL-SDK", "repo_description": "OpenCL SDK", "homepage_link": "", "repo_tags": [] }, { "github_repo_link": "https://github.com/meta-llama/synthetic-data-kit", "repo_name": "synthetic-data-kit", "repo_description": "Tool for generating high quality Synthetic datasets", "homepage_link": "https://pypi.org/project/synthetic-data-kit/", "repo_tags": [ "data", "generation", "llm", "python", "synthetic" ] }, { "github_repo_link": "https://github.com/unslothai/unsloth", "repo_name": "unsloth", "repo_description": "Fine-tuning & Reinforcement Learning for LLMs. 
🦥 Train OpenAI gpt-oss, DeepSeek-R1, Qwen3, Gemma 3, TTS 2x faster with 70% less VRAM.", "homepage_link": "https://docs.unsloth.ai/", "repo_tags": [ "agent", "deepseek", "deepseek-r1", "fine-tuning", "gemma", "gemma3", "gpt-oss", "llama", "llama3", "llm", "llms", "mistral", "openai", "qwen", "qwen3", "reinforcement-learning", "text-to-speech", "tts", "unsloth", "voice-cloning" ] }, { "github_repo_link": "https://github.com/KhronosGroup/Vulkan-Docs", "repo_name": "Vulkan-Docs", "repo_description": "The Vulkan API Specification and related tools", "homepage_link": null, "repo_tags": [] }, { "github_repo_link": "https://github.com/tensorflow/tflite-micro", "repo_name": "tflite-micro", "repo_description": "Infrastructure to enable deployment of ML models to low-power resource-constrained embedded targets (including microcontrollers and digital signal processors).", "homepage_link": "", "repo_tags": [] }, { "github_repo_link": "https://github.com/Wan-Video/Wan2.2", "repo_name": "Wan2.2", "repo_description": "Wan: Open and Advanced Large-Scale Video Generative Models", "homepage_link": "https://wan.video", "repo_tags": [ "aigc", "video-generation" ] }, { "github_repo_link": "https://github.com/AMD-AGI/Primus-Turbo", "repo_name": "Primus-Turbo", "repo_description": null, "homepage_link": null, "repo_tags": [] } ]