diff --git a/analysis/current/analysis-report-hybrid.json b/analysis/current/analysis-report-hybrid.json index 784c9ae85e63ea460b626fcf45b8a6ad857ec99a..69e932252beb6c7eb7d0a4c156c222110fceffa4 100644 --- a/analysis/current/analysis-report-hybrid.json +++ b/analysis/current/analysis-report-hybrid.json @@ -1,8 +1,8 @@ { "schema_version": "1.0", "repo": "huggingface/transformers", - "snapshot_id": "20260430T120024Z", - "generated_at": "2026-04-30T12:09:47Z", + "snapshot_id": "20260501T113108Z", + "generated_at": "2026-05-01T11:39:25Z", "evidence_quality": "full", "llm_enrichment": true, "meta_bugs": [ @@ -1148,6 +1148,204 @@ } ] }, + { + "cluster_id": "cluster-43656-4", + "summary": "Cluster of 1 issues and 3 PRs centered on issue #43824.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 43824, + "canonical_pr_number": 43656, + "issue_numbers": [ + 43824 + ], + "pr_numbers": [ + 43656, + 43836, + 43842 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 43656, + "right_pr_number": 43836, + "code_similarity": 0.36, + "size_similarity": 0.176, + "file_overlap": 1.0, + "area_overlap": 0.051, + "patch_similarity": 0.048, + "shared_filenames": [ + "src/transformers/cli/serve.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/cli/serve.py", + "left_ranges": [ + [ + 11, + 18 + ], + [ + 30, + 36 + ], + [ + 315, + 323 + ], + [ + 665, + 671 + ], + [ + 931, + 937 + ], + [ + 1843, + 1849 + ], + [ + 1868, + 1874 + ] + ], + "right_ranges": [ + [ + 11, + 18 + ], + [ + 359, + 429 + ], + [ + 584, + 590 + ], + [ + 1892, + 1910 + ], + [ + 1917, + 1923 + ] + ] + } + ] + }, + { + "left_pr_number": 43656, + "right_pr_number": 43842, + "code_similarity": 0.405, + "size_similarity": 0.5, + "file_overlap": 1.0, + "area_overlap": 0.0, + "patch_similarity": 0.036, + "shared_filenames": [ + "src/transformers/cli/serve.py" + ], + "shared_file_areas": [ + { + 
"filename": "src/transformers/cli/serve.py", + "left_ranges": [ + [ + 11, + 18 + ], + [ + 30, + 36 + ], + [ + 315, + 323 + ], + [ + 665, + 671 + ], + [ + 931, + 937 + ], + [ + 1843, + 1849 + ], + [ + 1868, + 1874 + ] + ], + "right_ranges": [ + [ + 54, + 61 + ], + [ + 587, + 593 + ] + ] + } + ] + }, + { + "left_pr_number": 43836, + "right_pr_number": 43842, + "code_similarity": 0.332, + "size_similarity": 0.088, + "file_overlap": 1.0, + "area_overlap": 0.033, + "patch_similarity": 0.017, + "shared_filenames": [ + "src/transformers/cli/serve.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/cli/serve.py", + "left_ranges": [ + [ + 11, + 18 + ], + [ + 359, + 429 + ], + [ + 584, + 590 + ], + [ + 1892, + 1910 + ], + [ + 1917, + 1923 + ] + ], + "right_ranges": [ + [ + 54, + 61 + ], + [ + 587, + 593 + ] + ] + } + ] + } + ] + }, { "cluster_id": "cluster-43240-3", "summary": "Cluster of 1 issues and 2 PRs centered on issue #43240.", @@ -1222,7 +1420,7 @@ "code_similarity": 0.805, "size_similarity": 0.885, "file_overlap": 0.75, - "area_overlap": 0.856, + "area_overlap": 0.854, "patch_similarity": 0.693, "shared_filenames": [ "docs/source/en/_toctree.yml", @@ -1288,8 +1486,8 @@ 705 ], [ - 1300, - 1306 + 1301, + 1307 ] ] }, @@ -1372,76 +1570,6 @@ } ] }, - { - "cluster_id": "cluster-43824-3", - "summary": "Cluster of 1 issues and 2 PRs centered on issue #43824.", - "status": "open", - "confidence": 0.75, - "canonical_issue_number": 43824, - "canonical_pr_number": 43836, - "issue_numbers": [ - 43824 - ], - "pr_numbers": [ - 43836, - 43842 - ], - "evidence_types": [ - "closing_reference", - "shared_issue_target" - ], - "pr_comparisons": [ - { - "left_pr_number": 43836, - "right_pr_number": 43842, - "code_similarity": 0.332, - "size_similarity": 0.088, - "file_overlap": 1.0, - "area_overlap": 0.033, - "patch_similarity": 0.017, - "shared_filenames": [ - "src/transformers/cli/serve.py" - ], - "shared_file_areas": [ - { - "filename": 
"src/transformers/cli/serve.py", - "left_ranges": [ - [ - 11, - 18 - ], - [ - 359, - 429 - ], - [ - 584, - 590 - ], - [ - 1892, - 1910 - ], - [ - 1917, - 1923 - ] - ], - "right_ranges": [ - [ - 54, - 61 - ], - [ - 587, - 593 - ] - ] - } - ] - } - ] - }, { "cluster_id": "cluster-45081-3", "summary": "Cluster of 1 issues and 2 PRs centered on issue #45081.", @@ -1939,6 +2067,16 @@ "target_issue_number": 43240, "reason": "PRs in cluster-43240-3 are treated as duplicates because they converge on issue #43240 with closing_reference, shared_issue_target evidence." }, + { + "cluster_id": "cluster-43656-4", + "canonical_pr_number": 43656, + "duplicate_pr_numbers": [ + 43836, + 43842 + ], + "target_issue_number": 43824, + "reason": "PRs in cluster-43656-4 are treated as duplicates because they converge on issue #43824 with closing_reference, shared_issue_target, soft_similarity evidence." + }, { "cluster_id": "cluster-43698-3", "canonical_pr_number": 43779, @@ -1948,15 +2086,6 @@ "target_issue_number": 43698, "reason": "PRs in cluster-43698-3 are treated as duplicates because they converge on issue #43698 with closing_reference, shared_issue_target evidence." }, - { - "cluster_id": "cluster-43824-3", - "canonical_pr_number": 43836, - "duplicate_pr_numbers": [ - 43842 - ], - "target_issue_number": 43824, - "reason": "PRs in cluster-43824-3 are treated as duplicates because they converge on issue #43824 with closing_reference, shared_issue_target evidence." 
- }, { "cluster_id": "cluster-43979-11", "canonical_pr_number": 44007, diff --git a/analysis/current/analysis-report-hybrid.llm-reviews.json b/analysis/current/analysis-report-hybrid.llm-reviews.json index 9ad22a77d0a4776c7db6269a04870048a6def59c..0cc1643dbac0f74eb3c76a621b875ca9b1710b0a 100644 --- a/analysis/current/analysis-report-hybrid.llm-reviews.json +++ b/analysis/current/analysis-report-hybrid.llm-reviews.json @@ -1,8 +1,8 @@ { "schema_version": "1.0", "repo": "huggingface/transformers", - "snapshot_id": "20260430T120024Z", - "generated_at": "2026-04-30T12:09:47Z", + "snapshot_id": "20260501T113108Z", + "generated_at": "2026-05-01T11:39:25Z", "model": "gpt-5.4-mini?service_tier=flex", "reviews": [ { @@ -26,7 +26,7 @@ "issue:13244|issue:16618", "issue:15980|issue:16618" ], - "prepared_review_unit_hash": "aaa9f2a701374a0841e1a6c6df27038619146e8dd77b6d7067c5ba9f52f97ada", + "prepared_review_unit_hash": "b2b4497bbd41fdc39c6dab4d9500b9944ca443294a6d925e643ce74d380dfdb1", "component_budget": { "node_count": 3, "item_count": 3, @@ -48,24 +48,24 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These three issues do not look like a duplicate cluster. Two are model-loading problems, but one is specifically about private-model authentication/error handling and the other is about a generic failure loading a public BERT model; the Tapas tokenization issue is unrelated.", - "confidence": 0.71, - "canonical_issue_reason": "Issue #16618 is the best representative of the cluster because it has the broadest model-loading framing and the most discussion activity, so it best captures the central user-visible failure among the candidates.", + "summary": "The three issues are not true duplicates: one is about Tapas tokenization parity, one about a private-model auth/download error message, and one about failing to load bert-base-uncased. 
The only plausible commonality is model-loading/download friction, but the concrete failures differ.", + "confidence": 0.83, + "canonical_issue_reason": "Issue #16618 is the broadest and most central model-loading failure in this set, with the most discussion, so it is the best representative if one issue must anchor the cluster.", "canonical_pr_reason": null, - "best_issue_reason": "#16618 is the strongest overall issue to keep as the cluster anchor: it is the most general, appears most impactful, and has the highest engagement. #15980 is narrower and likely an authentication/error-message variant, while #13244 is unrelated.", + "best_issue_reason": "#16618 best captures the cluster\u2019s general theme of model loading problems and is the most likely target for cross-references or triage, even though it is not a duplicate of the others.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:13244", "right": "issue:16618", "accept": false, - "reason": "Different problems: Tapas tokenization mismatching TensorFlow code is a tokenizer/parity issue, while #16618 is about failing to load bert-base-uncased. They do not describe the same underlying bug." + "reason": "Tapas tokenization parity with TensorFlow is a different code-path problem from failing to load bert-base-uncased; same library area, but not the same bug." }, { "left": "issue:15980", "right": "issue:16618", "accept": false, - "reason": "Both involve model download/loading, but #15980 is specifically about the error when downloading a private model while logged out, whereas #16618 concerns loading bert-base-uncased. That is not clearly the same concrete bug." + "reason": "One is a bad error message when downloading a private model without login, the other is a model load failure for a specific public model; related subsystem, but not the same underlying issue." 
} ] }, @@ -180,8 +180,8 @@ "issue:41762|issue:43638", "issue:43039|issue:44918", "issue:43827|issue:44512", - "issue:43828|issue:45685", "issue:43716|issue:44560", + "issue:43828|issue:45685", "issue:44918|issue:45685", "issue:43039|issue:43828", "issue:30277|issue:36331", @@ -206,350 +206,386 @@ "issue:44841|issue:45084", "issue:41093|issue:41762" ], - "prepared_review_unit_hash": "9a021d0059cbcd75e44d1d88d77197beffc5100c63f598422d63ba96531571b6", + "prepared_review_unit_hash": "58b88e8edbc20920314b52a9d588e47a1c6da9ebae85ea7ad4c827ee5186d42f", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 67, - "serialized_chars": 47108, - "estimated_input_tokens": 11777, - "estimated_eval_tokens": 23810 + "serialized_chars": 47117, + "estimated_input_tokens": 11780, + "estimated_eval_tokens": 23816 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly a heterogeneous cluster of unrelated model/runtime/docs issues. There is one close BERT/DeepSpeed ZeRO-3 subgroup, but the 43596/43638 pair is similar rather than clearly identical, so it should stay unmerged under a conservative duplicate policy.", - "confidence": 0.87, - "canonical_issue_reason": "Issue 43596 is the clearest representative of the BERT + DeepSpeed ZeRO-3 index-out-of-bounds subgroup, but the nearest match (43638) is not safe to collapse into the same bug because it describes a similar symptom in a non-pretrained Bert model rather than the exact same setup.", + "summary": "Mostly unrelated issue set; the only clear duplicate edge is the ZeRO-3/BertModel index-0 failure pair. 
Most other high-similarity pairs are superficial symptom matches across different models, backends, or code paths.", + "confidence": 0.82, + "canonical_issue_reason": "issue:43596 is the clearest representative of the shared ZeRO-3/BertModel empty-index failure; issue:43638 appears to be the same bug with a slightly different repro.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 43596 is the best representative issue for this cluster\u2019s only closely related subtopic: it is specific, concrete, and more central than the surrounding unrelated reports.", + "best_issue_reason": "issue:43596 is the best canonical issue for this set because it most directly captures the recurring ZeRO-3/BertModel index-0 bug and is the cleanest duplicate anchor.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44322", "right": "issue:45464", "accept": false, - "reason": "Both involve Qwen3.5, but they hit different paths: missing config attribute vs streaming chat/completions failure." + "reason": "Different Qwen3.5 failures: missing config attribute vs streaming chat/completions error." }, { "left": "issue:45237", "right": "issue:45685", "accept": false, - "reason": "Different platforms and failures: AMD GPU runtime issue vs MPS histogram not implemented." + "reason": "Unrelated backend issues: AMD GPU failure vs MPS Int histogram op missing." }, { "left": "issue:43716", "right": "issue:45237", "accept": false, - "reason": "Mistral-3 dtype mismatch and GPT-OSS AMD GPU failure are unrelated model/backend bugs." + "reason": "Different models and failure modes; dtype mismatch vs AMD GPU runtime failure." }, { "left": "issue:43828", "right": "issue:45237", "accept": false, - "reason": "Autocast dtype mismatch on Phi-tiny-MoE is not the same bug as GPT-OSS on AMD GPUs." + "reason": "Autocast dtype mismatch is a different bug than the AMD GPU startup failure." 
}, { "left": "issue:45237", "right": "issue:45507", "accept": false, - "reason": "GPT-OSS AMD GPU failure and GraniteMoEHybrid invalid method are different code-path problems." + "reason": "Different code paths: GPU/backend crash vs invalid method call." }, { "left": "issue:44292", "right": "issue:45237", "accept": false, - "reason": "Qwen-3-8B NVFP4 runtime error is a different backend/model issue from GPT-OSS AMD GPU failure." + "reason": "Different model families and distinct runtime errors." }, { "left": "issue:30277", "right": "issue:35545", "accept": false, - "reason": "DeepSpeed collective mismatch during Jamba training is unrelated to ModernBERT ONNX export failure." + "reason": "DeepSpeed collective mismatch and ONNX export are unrelated." }, { "left": "issue:35545", "right": "issue:42915", "accept": false, - "reason": "ModernBERT ONNX export and Qwen3Moe FP8 failure are different model/export paths." + "reason": "ONNX export error vs FP8 config load failure; different bugs." }, { "left": "issue:35545", "right": "issue:41720", "accept": false, - "reason": "ModernBERT export error and Qwen3 auto device-map cuda assert are unrelated." + "reason": "Different problem areas: export tooling vs auto device-mapping assert." }, { "left": "issue:44509", "right": "issue:44512", "accept": false, - "reason": "Both are v5 docs regressions, but they concern different removed items and fixes." + "reason": "Both are docs regressions, but they refer to different removed v5 references." }, { "left": "issue:35545", "right": "issue:36010", "accept": false, - "reason": "ONNX export failure and GenerationMixin import error are different issues." + "reason": "ONNX export issue is unrelated to GenerationMixin import failure." }, { "left": "issue:43638", "right": "issue:44805", "accept": false, - "reason": "Both are shape/index-style errors, but the reported BERT ZeRO-3 failure and the mask-shape mismatch do not clearly match." 
+ "reason": "Same broad index-error theme, but the specific repros and code paths differ too much." }, { "left": "issue:43596", "right": "issue:44805", "accept": false, - "reason": "Same broad area, but the mask-shape mismatch is not clearly the same as the BERT ZeRO-3 index-0-out-of-bounds bug." + "reason": "ZeRO-3/Bert empty-index failure vs a generic mask/tensor shape mismatch; not clearly the same bug." }, { "left": "issue:44805", "right": "issue:45161", "accept": false, - "reason": "Shape/mask indexing error and GPT-OSS tensor-parallel failure are unrelated." + "reason": "Mask/index shape error is unrelated to GPT-OSS tensor-parallel failure." }, { "left": "issue:44805", "right": "issue:44918", "accept": false, - "reason": "Different models and contexts: mask/tensor shape mismatch vs Qwen3.5 embedding unpacking with TRL." + "reason": "Different training/runtime paths and different underlying failures." }, { "left": "issue:44661", "right": "issue:44805", "accept": false, - "reason": "Tokenizer mapping-name bug is unrelated to a tensor/mask indexing error." + "reason": "Tokenizer-mapping bug is unrelated to the index/mask error." }, { "left": "issue:34634", "right": "issue:35545", "accept": false, - "reason": "BarkProcessor voice_preset behavior and ModernBERT ONNX export are unrelated." + "reason": "BarkProcessor voice preset handling is unrelated to ModernBERT ONNX export." }, { "left": "issue:30277", "right": "issue:43638", "accept": false, - "reason": "Different DeepSpeed problems: collective mismatch for Jamba vs ZeRO-3 BERT loading failure." + "reason": "Both involve DeepSpeed, but one is collective mismatch and the other is a Bert empty-index crash." }, { "left": "issue:43596", "right": "issue:43638", - "accept": false, - "reason": "Highly similar ZeRO-3/BERT index-0 failures, but one is BertModel and the other is a non-pretrained Bert model; same symptom, not clearly the same underlying bug." 
+ "accept": true, + "reason": "Same ZeRO-3/BertModel index-0 empty-tensor failure; 43638 is effectively a rephrased report of the same bug." }, { "left": "issue:43975", "right": "issue:44457", "accept": false, - "reason": "Tokenizer detokenization bug and LoRA save/reload mismatch are distinct issues." + "reason": "Detokenization bug and LoRA save/load mismatch are different issues." }, { "left": "issue:43039", "right": "issue:45161", "accept": false, - "reason": "Liger Kernel cross-entropy invocation and GPT-OSS tensor-parallel failure are unrelated." + "reason": "Liger kernel cross_entropy path is unrelated to GPT-OSS tensor-parallel failure." }, { "left": "issue:44292", "right": "issue:44457", "accept": false, - "reason": "Qwen NVFP4 runtime error and LoRA merge/save inconsistency are different problems." + "reason": "Qwen-3 NVFP4 runtime error and LoRA merge/load inconsistency are unrelated." }, { "left": "issue:34928", "right": "issue:44805", "accept": false, - "reason": "Activation-checkpointing/FSDP recompute mismatch is a different training-stack bug than the BERT index/mask errors." + "reason": "Activation checkpointing/FSDP recompute mismatch is a different failure than the generic mask/index error." }, { "left": "issue:24643", "right": "issue:30277", "accept": false, - "reason": "Both involve DeepSpeed, but a 2-D weight RuntimeError is not the same as a collective mismatch." + "reason": "Both are DeepSpeed-related, but the underlying runtime errors and repros differ." }, { "left": "issue:43638", "right": "issue:45161", "accept": false, - "reason": "ZeRO-3 BERT load failure and GPT-OSS tensor-parallel failure are unrelated." + "reason": "ZeRO-3 Bert index error is unrelated to GPT-OSS MoE tensor-parallel failure." }, { "left": "issue:34928", "right": "issue:36331", "accept": false, - "reason": "FSDP checkpointing shape mismatch and CustomTrainer signature breakage are unrelated." 
+ "reason": "FSDP activation-checkpointing issue is unrelated to the trainer signature change." }, { "left": "issue:43596", "right": "issue:45161", "accept": false, - "reason": "BERT ZeRO-3 init failure and GPT-OSS tensor-parallel issue are different models and paths." + "reason": "Different model families and unrelated failure modes." }, { "left": "issue:43716", "right": "issue:45507", "accept": false, - "reason": "Mistral-3 preprocessing dtype mismatch and GraniteMoEHybrid invalid method are unrelated." + "reason": "Different models and distinct bugs: dtype mismatch vs invalid method call." }, { "left": "issue:39290", "right": "issue:45081", "accept": false, - "reason": "Gemma3 sliding-window attribute error and Mistral tokenizer regex crash are different bugs." + "reason": "Sliding-window config issue in vLLM is unrelated to the tokenizer regex patch crash." }, { "left": "issue:43828", "right": "issue:45161", "accept": false, - "reason": "Phi-tiny-MoE autocast dtype mismatch is unrelated to GPT-OSS tensor-parallel behavior." + "reason": "Autocast dtype mismatch is not the same as GPT-OSS TP failure." + }, + { + "left": "issue:44918", + "right": "issue:45161", + "accept": false, + "reason": "TRL SFT embedding unpacking error is unrelated to GPT-OSS tensor-parallel failure." + }, + { + "left": "issue:34928", + "right": "issue:41720", + "accept": false, + "reason": "FSDP recompute mismatch and Qwen3 auto device-map cuda assert are different bugs." + }, + { + "left": "issue:43716", + "right": "issue:45685", + "accept": false, + "reason": "Different backends and different failures: dtype mismatch vs MPS unsupported op." + }, + { + "left": "issue:43541", + "right": "issue:43716", + "accept": false, + "reason": "Different model families and code paths; tracing grouped_mm vs image-preprocessor dtype mismatch." 
+ }, + { + "left": "issue:43828", + "right": "issue:44292", + "accept": false, + "reason": "Both are Qwen/Phi-family runtime errors, but the concrete bugs are unrelated." + }, + { + "left": "issue:43039", + "right": "issue:44918", + "accept": false, + "reason": "Liger kernel cross_entropy call and TRL embedding unpacking are unrelated." }, { "left": "issue:30277", "right": "issue:36331", "accept": false, - "reason": "DeepSpeed collective mismatch and trainer compute_loss API break are different layers and failures." + "reason": "DeepSpeed collective mismatch vs trainer API signature change are unrelated." }, { "left": "issue:43039", "right": "issue:44805", "accept": false, - "reason": "Liger Kernel cross-entropy call issue is unrelated to the tensor/mask indexing errors." + "reason": "Liger kernel behavior has no clear relation to the mask/index mismatch." }, { "left": "issue:34928", "right": "issue:36010", "accept": false, - "reason": "FSDP checkpointing mismatch and GenerationMixin import error are unrelated." + "reason": "FSDP checkpointing issue is unrelated to GenerationMixin import failure." }, { "left": "issue:41762", "right": "issue:43596", "accept": false, - "reason": "Gemma3 ZeRO-3 loading issue and BERT ZeRO-3 index failure are only broadly similar, not the same bug." + "reason": "Both are ZeRO-3/index errors, but they involve different models and are not the same concrete bug." }, { "left": "issue:43901", "right": "issue:44512", "accept": false, - "reason": "Documentation around return_all_scores and documentation about transformers run are separate regressions." + "reason": "Different docs regressions: return_all_scores wording vs removed run command." }, { "left": "issue:39290", "right": "issue:41720", "accept": false, - "reason": "Gemma3 sliding-window attribute error and Qwen3 auto device-map cuda assert are different model-specific failures." + "reason": "Gemma3 sliding_window_pattern and Qwen3 auto device-map cuda assert are unrelated." 
}, { "left": "issue:43866", "right": "issue:44863", "accept": false, - "reason": "Checkpoint corruption and NemotronH loading failure are not clearly the same underlying issue." + "reason": "Checkpoint corruption is not the same as an implementation failing to load checkpoints." }, { "left": "issue:43039", "right": "issue:44560", "accept": false, - "reason": "Liger Kernel cross-entropy call and Qwen3-vl video StopIteration are unrelated." + "reason": "Liger kernel cross_entropy issue and Qwen3-vl video StopIteration are unrelated." }, { "left": "issue:43854", "right": "issue:43866", "accept": false, - "reason": "Unit-test model-loading failure and corrupted checkpoint report are different problem classes." + "reason": "Unit-test loading failure and corrupted checkpoint are different bugs." }, { "left": "issue:41093", "right": "issue:43638", "accept": false, - "reason": "Both are indexing/shape errors, but the specific mask mismatch and BERT ZeRO-3 failure do not match." + "reason": "Different index/mask shape errors and different repro contexts." }, { "left": "issue:39290", "right": "issue:43531", "accept": false, - "reason": "Gemma3 sliding-window attribute error and Qwen3-MoE sliding_window issue are different model-specific config bugs." + "reason": "Both mention sliding_window, but the model families and concrete failures differ." }, { "left": "issue:43425", "right": "issue:45070", "accept": false, - "reason": "Torch version incompatibility and pydantic PretrainedConfig breakage are unrelated." + "reason": "Torch-version incompatibility and pydantic config-field breakage are unrelated." }, { "left": "issue:45362", "right": "issue:45464", "accept": false, - "reason": "Qwen3.5 chat crash and Qwen3.5 streaming API failure are different code paths and symptoms." + "reason": "Both involve Qwen3.5 chat, but one is a crash in transformers chat and the other is a streaming API failure." 
}, { "left": "issue:43404", "right": "issue:43866", "accept": false, - "reason": "Mistral3 lm_head tie bug and Ovis2 checkpoint corruption are unrelated." + "reason": "Mistral3 weight tying and Ovis2 checkpoint corruption are unrelated." }, { "left": "issue:43792", "right": "issue:45070", "accept": false, - "reason": "Whisper runtime failure and pydantic PretrainedConfig regression are unrelated." + "reason": "Whisper loading failure is unrelated to the pydantic PretrainedConfig regression." }, { "left": "issue:45464", "right": "issue:45685", "accept": false, - "reason": "Qwen3.5 streaming inference failure and MPS histogram error are unrelated." + "reason": "Streaming inference on Qwen3.5 and an MPS Int op limitation are unrelated." }, { "left": "issue:43541", "right": "issue:45685", "accept": false, - "reason": "Mixtral grouped_mm tracing failure and MPS histogram implementation error are different bugs." + "reason": "Torch dynamo tracing error and MPS histogram support issue are unrelated." }, { "left": "issue:43541", "right": "issue:43828", "accept": false, - "reason": "Both are MoE/dtype-adjacent, but grouped_mm tracing and autocast mismatch are not the same code-path problem." + "reason": "Different runtime failures on different models; grouped_mm tracing vs autocast dtype mismatch." }, { "left": "issue:36010", "right": "issue:39290", "accept": false, - "reason": "GenerationMixin import failure and Gemma3 sliding-window attribute error are unrelated." + "reason": "GenerationMixin import failure is unrelated to the Gemma3 sliding_window_pattern regression." }, { "left": "issue:44841", "right": "issue:45084", "accept": false, - "reason": "Voxtral processor failure and non-template-node compilation error are unrelated." + "reason": "Processor failure for Voxtral is unrelated to the template compilation error." 
}, { "left": "issue:41093", "right": "issue:41762", "accept": false, - "reason": "Both are shape/index errors, but they involve different models and different failure contexts." + "reason": "Both are index errors, but they arise in different model/loading paths and are not the same bug." } ] }, "evaluator_result": { - "accept": false, - "feedback": "The cluster summary is broadly grounded, but it overstates the only positive soft-edge: 43596 vs 43638 is similar, yet the packet marks this pair as not deterministically accept-able and the two reports are not fully identical (BertModel vs non-pretrained Bert model). Given the instruction to stay conservative on soft edges, that acceptance is too strong." + "accept": true, + "feedback": "The summary is well grounded in the packet and stays conservative. The canonical choice of issue:43596 over issue:43638 is supported by the near-duplicate DeepSpeed ZeRO-3/Bert index-0 titles, and the rest of the soft-edge rejections are cautious given the clearly different models/backends/code paths in the packet. I did not see any overstated duplicate claims." 
}, "evaluator_used": true, - "retried": true, - "accepted_nontrivial_soft_edge": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, @@ -630,8 +666,8 @@ "issue:43541|issue:44589", "issue:40990|issue:44960", "issue:45632|issue:45698", - "issue:39186|issue:44918", "issue:43716|issue:44918", + "issue:39186|issue:44918", "issue:43404|issue:45005", "issue:43645|issue:45399", "issue:44530|issue:45005", @@ -646,24 +682,24 @@ "issue:43644|issue:43950", "issue:44292|issue:45507", "issue:39692|issue:43054", - "issue:41553|issue:43883", "issue:43828|issue:44928", - "issue:43866|issue:44403" + "issue:43866|issue:44403", + "issue:41553|issue:43883" ], - "prepared_review_unit_hash": "732f6969b04036ee16fb5213ebfa7e5d7f597aab5c6cfe4ca1ae04b3e9c5f3cd", + "prepared_review_unit_hash": "fde77ef4f8b30eb45cf3dcdbe180c100d5b5d6768139f94378d616a0b888d7e4", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 33, - "serialized_chars": 37183, + "serialized_chars": 37182, "estimated_input_tokens": 9296, "estimated_eval_tokens": 18848 }, @@ -672,219 +708,219 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is mixed, but a few duplicate families are solid. 
The strongest one is the non-persistent-buffer regression around v5 loading, while the SigLIP2 and custom-module cache/loading pairs are too weakly supported to merge on this packet alone.", - "confidence": 0.79, - "canonical_issue_reason": "43950 is the best canonical issue: it states the non-persistent-buffer regression clearly and is the most actionable description of that bug family, with 43644 and 44534 looking like close duplicate reports.", + "summary": "Mostly a grab-bag of unrelated Transformers bug reports; the only clear duplicate cluster is the 5.x non-persistent buffer corruption regression.", + "confidence": 0.94, + "canonical_issue_reason": "issue:43950 is the clearest root report for the non-persistent-buffer regression, and it matches the duplicate phrasing in issue:43644 and issue:44534.", "canonical_pr_reason": null, - "best_issue_reason": "43950 is the best issue overall because it is the clearest, highest-signal report of a concrete regression and covers the strongest duplicate family in the set.", + "best_issue_reason": "issue:43950 is the best representative issue because it names the concrete 5.x regression most directly and is the most likely anchor for duplicate consolidation.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43296", "right": "issue:43366", "accept": false, - "reason": "Both are model-loading/support problems, but they involve different architectures and failures; not the same concrete bug." + "reason": "Different problems: vLLM/PaddleOCR-VL load failure vs GGUF architecture support request." }, { "left": "issue:43572", "right": "issue:45070", "accept": false, - "reason": "Both are v5 config regressions, but the missing StableLmConfig field and the pydantic PretrainedConfig break are different bugs." + "reason": "Unrelated config regression vs pydantic-model field breakage." 
}, { "left": "issue:44387", "right": "issue:45005", "accept": false, - "reason": "OOM under int4 quantization and tied-weights regressions are unrelated failure modes." + "reason": "Different failure modes: int4 OOM vs tied-weights handling on translation models." }, { "left": "issue:43541", "right": "issue:45161", "accept": false, - "reason": "Different concrete problems: torch-dynamo grouped_mm tracing versus GPT-OSS tensor-parallel support." + "reason": "Torch-dynamo grouped_mm tracing error is not the same as GPT-OSS tensor-parallel support." }, { "left": "issue:43828", "right": "issue:45464", "accept": false, - "reason": "Different model families and different code paths: autocast dtype mismatch versus streaming chat/completions failure." + "reason": "Both mention dtype/runtime issues, but the models and code paths differ completely." }, { "left": "issue:44291", "right": "issue:45070", "accept": false, - "reason": "Unexpected _is_hf_initialized during init_empty_weights is not the same bug as the pydantic PretrainedConfig regression." + "reason": "init_empty_weights constructor-arg breakage is separate from the PretrainedConfig/pydantic field issue." }, { "left": "issue:43366", "right": "issue:45464", "accept": false, - "reason": "GGUF architecture support and Qwen3.5 streaming inference are separate issues." + "reason": "Architecture support for GPT-OSS GGUF is unrelated to Qwen3.5 streaming inference failures." }, { "left": "issue:43531", "right": "issue:45070", "accept": false, - "reason": "Qwen3-MoE sliding-window handling is unrelated to the PretrainedConfig/pydantic break." + "reason": "Qwen3-MoE sliding_window bug is not the same as the pydantic PretrainedConfig regression." }, { "left": "issue:39290", "right": "issue:43742", "accept": false, - "reason": "Gemma3 sliding_window_pattern/vLLM and MobileLLM key errors are different loading bugs." 
+ "reason": "Gemma3TextConfig sliding_window_pattern error and MobileLLM key error are distinct model-loading bugs." }, { "left": "issue:43054", "right": "issue:43994", "accept": false, - "reason": "This is only the same model family (SigLIP2); the packet does not show the same concrete underlying bug." + "reason": "Both concern SigLIP2 quality, but one is embedding degradation and the other is bad AutoModel/pipeline outputs." }, { "left": "issue:41093", "right": "issue:43596", "accept": false, - "reason": "Different shape-related errors in different contexts; no clear shared root cause." + "reason": "Mask-shape mismatch and zero3 index error are different tensor/indexing failures." }, { "left": "issue:43541", "right": "issue:44589", "accept": false, - "reason": "Grouped_mm tracing failure and missing Float8 storage are unrelated." + "reason": "Different concrete bugs: torch tracing grouped_mm vs missing Float8 storage type." }, { "left": "issue:40990", "right": "issue:44960", "accept": false, - "reason": "Different models and different symptoms; both are quality reports only at a broad level." + "reason": "High perplexity on gpt-oss-20b and GLM5 are unrelated model-quality reports." }, { "left": "issue:45632", "right": "issue:45698", "accept": false, - "reason": "The packet suggests a similar loading/caching theme, but not enough to prove the same underlying bug." + "reason": "Both involve custom-module loading, but the titles point to different root causes; not enough to treat as the same bug." }, { - "left": "issue:39186", + "left": "issue:43716", "right": "issue:44918", "accept": false, - "reason": "FSDP 2-D weight error and Qwen3.5 embedding unpacking are unrelated." + "reason": "Image-preprocessor dtype mismatch is separate from Qwen3.5 embedding unpacking with TRL." 
}, { - "left": "issue:43716", + "left": "issue:39186", "right": "issue:44918", "accept": false, - "reason": "Different model families and failures: image preprocessor dtype mismatch versus TRL embedding unpacking." + "reason": "FSDP 2-D weight assertion and TRL embedding unpacking are unrelated." }, { "left": "issue:43404", "right": "issue:45005", "accept": false, - "reason": "Both mention tied weights, but the concrete bug and affected path differ." + "reason": "LM head tying bug in Mistral3 is not the same as translation-model tied-weights issues in v5." }, { "left": "issue:43645", "right": "issue:45399", "accept": false, - "reason": "Notebook/custom-model initialization breakage is not the same as the flash-attn2 fallback gating issue." + "reason": "Notebook custom-model init regression is unrelated to flash-attn fallback blocking." }, { "left": "issue:44530", "right": "issue:45005", "accept": false, - "reason": "PagedAttentionCache linear_attention crash is unrelated to tied-weights handling." + "reason": "PagedAttentionCache/Qwen3.5. models issue is unrelated to tied-weights handling." }, { "left": "issue:44863", "right": "issue:45005", "accept": false, - "reason": "NemotronH checkpoint loading failure is a different bug from tied-weights regression." + "reason": "NemotronH checkpoint loading failure is a different model-specific bug." }, { "left": "issue:44360", "right": "issue:44512", "accept": false, - "reason": "DSA indexer discussion and docs typo are unrelated documentation issues." + "reason": "DSA indexer behavior and documentation cleanup are unrelated." }, { "left": "issue:42915", "right": "issue:45399", "accept": false, - "reason": "FineGrainedFP8Config on Qwen3Moe and flash-attn2 fallback gating are different code paths." + "reason": "Qwen3Moe FineGrainedFP8 failure is separate from flash-attn fallback selection." 
}, { "left": "issue:43873", "right": "issue:45005", "accept": false, - "reason": "Quantization/offloading behavior is unrelated to tied-weights handling." + "reason": "Quantization/offloading behavior is not the same as tied-weights regressions." }, { "left": "issue:44492", "right": "issue:44512", "accept": false, - "reason": "A cache-strategy typo and outdated docs mention are separate docs issues." + "reason": "A cache-strategy typo and removed CLI docs are unrelated documentation issues." }, { "left": "issue:44918", "right": "issue:45507", "accept": false, - "reason": "TRL embedding unpacking and GraniteMoEHybrid invalid method dispatch are different failures." + "reason": "TRl SFT embedding unpacking and GraniteMoEHybrid invalid method are different model/trainer bugs." }, { "left": "issue:43975", "right": "issue:44292", "accept": false, - "reason": "Different models and symptoms: detokenization regression versus NVFP4 loading error." + "reason": "Detokenization regression and NVFP4 runtime error are unrelated." }, { "left": "issue:43950", "right": "issue:44534", "accept": true, - "reason": "Both report the same v5 regression where non-persistent buffers are corrupted or filled with junk during loading." + "reason": "Same regression: 5.x non-persistent buffers being filled with junk/corrupted." + }, + { + "left": "issue:43644", + "right": "issue:43950", + "accept": true, + "reason": "Same underlying bug: non-persistent buffers are corrupted during from_pretrained in 5.x." }, { "left": "issue:44292", "right": "issue:45507", "accept": false, - "reason": "These are unrelated model/runtime failures with no shared concrete path." + "reason": "NVFP4 model runtime error and GraniteMoEHybrid invalid method are not the same code path." }, { "left": "issue:39692", "right": "issue:43054", "accept": false, - "reason": "SigLIP2 docs-example errors and SigLIP2 embedding-quality regression are related only by model family, not by the same bug." 
- }, - { - "left": "issue:41553", - "right": "issue:43883", - "accept": false, - "reason": "AutoTokenizer error messaging for Voxtral is unrelated to missing all_tied_weights_keys in MolmoForCausalLM." + "reason": "SigLIP2 docs/example breakage is unrelated to SigLIP2 embedding quality complaints." }, { "left": "issue:43828", "right": "issue:44928", "accept": false, - "reason": "Autocast dtype mismatch and RLHF NaN from 3D position_ids are different root causes." + "reason": "Different symptoms and causes: autocast dtype mismatch vs RLHF NaN explosion from 3D position_ids." }, { "left": "issue:43866", "right": "issue:44403", "accept": false, - "reason": "Checkpoint corruption and loading-noise/reporting issues are not the same bug." + "reason": "Checkpoint corruption and loading noise are not the same underlying bug." }, { - "left": "issue:43644", - "right": "issue:43950", - "accept": true, - "reason": "Both are the same non-persistent-buffer regression family in v5 loading/initialization." + "left": "issue:41553", + "right": "issue:43883", + "accept": false, + "reason": "Bad AutoTokenizer error messaging and missing all_tied_weights_keys are unrelated issues." } ] }, "evaluator_result": { - "accept": false, - "feedback": "The overall cluster framing is reasonable, but at least one soft-edge acceptance is too strong for the evidence in the packet. In particular, issue:43054 vs issue:43994 is only clearly the same model family (SigLIP2), not clearly the same concrete bug, and issue:45632 vs issue:45698 is also more speculative than the packet supports. The non-persistent-buffer cluster looks well grounded, but the accepted soft edges are not uniformly conservative enough to accept this summary as-is." 
+ "accept": true, + "feedback": "The summary is well grounded in the packet: the 43950/43644/44534 non-persistent-buffer regression is the only clearly supported duplicate cluster, and the other soft-edge pairs are rejected conservatively based on distinct titles and failure modes. No overstatement stands out." }, "evaluator_used": true, - "retried": true, + "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null @@ -954,23 +990,23 @@ "issue:36331|issue:41762", "issue:43872|issue:44589", "issue:43054|issue:45701", - "issue:41553|issue:43881", "issue:38175|issue:43054", + "issue:41553|issue:43881", "issue:42947|issue:44387", "issue:43054|issue:43493", - "issue:43881|issue:43883", "issue:44403|issue:44451", + "issue:43881|issue:43883", "issue:43975|issue:45520", "issue:43643|issue:45020", - "issue:43975|issue:45237", "issue:44403|issue:44488", + "issue:43975|issue:45237", "issue:42947|issue:43856", - "issue:43782|issue:44960", "issue:42491|issue:44368", "issue:41762|issue:42915", + "issue:43782|issue:44960", "issue:43975|issue:44451", - "issue:44661|issue:45020", "issue:43824|issue:43872", + "issue:44661|issue:45020", "issue:44991|issue:45399", "issue:45020|issue:45356", "issue:43827|issue:44509", @@ -988,256 +1024,256 @@ "issue:42915|issue:43872", "issue:41553|issue:44488" ], - "prepared_review_unit_hash": "a3898b7a3c7e255f84d2b77e73d9a29510f9b18bbb35c2a06cc5f741975d3d9e", + "prepared_review_unit_hash": "11f28d0b049e4573edceb8668e99113b61d89c2d7312deff7110ef4b58f7d619", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 36, - "serialized_chars": 37867, - "estimated_input_tokens": 9467, - "estimated_eval_tokens": 19190 + 
"serialized_chars": 37864, + "estimated_input_tokens": 9466, + "estimated_eval_tokens": 19188 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Most pairs are unrelated model-specific bugs, but there are a few clear duplicate clusters: docs cleanup for removed pipeline tasks, the `_is_hf_initialized` TypeError, SigLIP2 implementation mismatch, remote_code config-loading regression, and Qwen3/Qwen3-Next ZeRO-3 weight-loading failures.", - "confidence": 0.68, - "canonical_issue_reason": "Issue 45020 is the best global anchor: it is the broadest current runtime regression report in this set, and it cleanly covers the remote_code/config-loading symptom family without being as narrow as the model-specific reports.", + "summary": "Mostly false-positive thematic matches: the cluster spans many unrelated model-loading, quantization, tokenizer, and docs issues. Only the v5 docs cleanup pair looks like a true duplicate; the rest are too model- or code-path-specific to merge safely.", + "confidence": 0.79, + "canonical_issue_reason": "issue:45020 is the broadest umbrella report in the set ('recent transformers versions break models using remote_code') and is the best representative of the shared regression theme, though the cluster is still quite heterogeneous.", "canonical_pr_reason": null, - "best_issue_reason": "45020 is the strongest overall issue candidate because it is open, broadly scoped, and more actionable as an umbrella than the narrower model-specific failures or documentation-only regressions.", + "best_issue_reason": "issue:45020 is the most suitable anchor issue because it is open, broad, and centrally covers the main compatibility-regression pattern seen across several items.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:36331", "right": "issue:41762", "accept": false, - "reason": "Different subsystems and failure modes: custom trainer loss 
signature vs Gemma3 ZeRO-3 loading." + "reason": "Different failures: CustomTrainer loss signature mismatch vs Gemma3 ZeRO-3 loading IndexError." }, { "left": "issue:43872", "right": "issue:44589", "accept": false, - "reason": "Both are quantization/loading errors, but the specific errors differ: `_is_hf_initialized` vs missing `Float8_e4m3fnStorage`." + "reason": "Both are low-level dtype/storage errors, but they point to different incompatibilities (_is_hf_initialized vs missing Float8 storage) and different code paths." }, { "left": "issue:43054", "right": "issue:45701", "accept": false, - "reason": "Related tokenizer/model-quality area, but one is a SigLIP2 embedding complaint and the other is a broad tokenization-version regression." + "reason": "SigLIP2 embedding quality complaint vs tokenizer/version-induced tokenization change; not the same concrete bug." }, { - "left": "issue:41553", - "right": "issue:43881", + "left": "issue:38175", + "right": "issue:43054", "accept": false, - "reason": "Different models and different error surfaces; bad AutoTokenizer error message for Voxtral vs a general glm-4v-9b load failure." + "reason": "Same model family, but one is zero probabilities and the other is embedding quality regression; insufficient evidence of one root cause." }, { - "left": "issue:38175", - "right": "issue:43054", + "left": "issue:41553", + "right": "issue:43881", "accept": false, - "reason": "Both involve SigLIP-family models, but one is zero probabilities in siglip2 and the other is a text-embedding quality regression; not clearly the same bug." + "reason": "Different models and failures: Voxtral tokenizer error vs glm-4v-9b load failure." }, { "left": "issue:42947", "right": "issue:44387", "accept": false, - "reason": "Different problems: LoRA gradient checkpointing ineffective vs int4 quantization memory/OOM regression." 
+ "reason": "Both mention memory/perf, but one is gradient checkpointing with LoRA and the other is int4 quantization CUDA reserved memory; different bugs." }, { "left": "issue:43054", "right": "issue:43493", - "accept": true, - "reason": "Both point to the same SigLIP2 implementation issue; the JAX-vs-HF discrepancy is a plausible root cause of the poor text embeddings." - }, - { - "left": "issue:43881", - "right": "issue:43883", "accept": false, - "reason": "Unrelated model-loading failures with different concrete errors and model classes." + "reason": "Both involve SigLIP2, but one is general embedding degradation and the other is HF-vs-JAX implementation discrepancy; not clearly the same fix." }, { "left": "issue:44403", "right": "issue:44451", "accept": false, - "reason": "Broad loading-noise complaint vs a specific inability to load ScandiBERT; not the same bug." + "reason": "Loading noise/logging issue vs a hard model loading failure for ScandiBERT." + }, + { + "left": "issue:43881", + "right": "issue:43883", + "accept": false, + "reason": "Different models and code paths: glm-4v-9b loading failure vs Molmo tied-weights attribute error." }, { "left": "issue:43975", "right": "issue:45520", "accept": false, - "reason": "Different areas: tokenizer detokenization regression vs flash-attn import KeyError on Python 3.13." + "reason": "DeepSeek detokenization regression vs flash_attn import key error on Python 3.13; unrelated." }, { "left": "issue:43643", "right": "issue:45020", - "accept": true, - "reason": "The specific missing-fields behavior with `trust_remote_code=True` looks like a concrete manifestation of the broader remote_code regression." - }, - { - "left": "issue:43975", - "right": "issue:45237", "accept": false, - "reason": "Tokenizer/detokenization bug vs AMD GPU runtime incompatibility; unrelated." 
+ "reason": "Both involve remote_code/config loading, but one is missing fields in AutoConfig and the other is a broad recent-version breakage report; not the same concrete bug." }, { "left": "issue:44403", "right": "issue:44488", "accept": false, - "reason": "Both concern model loading, but one is generic noise and the other is a specific BERT load failure." + "reason": "Both are loading-related, but one is excessive noise/logging and the other is a specific tokenizer/model load failure." }, { - "left": "issue:42947", - "right": "issue:43856", + "left": "issue:43975", + "right": "issue:45237", "accept": false, - "reason": "LoRA checkpointing regression vs Qwen3 MoE training memory usage; different bugs." + "reason": "Tokenizer detokenization bug vs AMD GPU model execution issue; different subsystems." }, { - "left": "issue:43782", - "right": "issue:44960", + "left": "issue:42947", + "right": "issue:43856", "accept": false, - "reason": "Different model families and failure modes; Qwen3VL weight-only error vs GLM5 issue." + "reason": "Gradient checkpointing ineffective with LoRA is unrelated to Qwen3 MoE training memory inefficiency." }, { "left": "issue:42491", "right": "issue:44368", "accept": false, - "reason": "Both are Qwen-related, but one is a LoRA portability regression and the other is a tie_word_embeddings warning; not the same bug." + "reason": "Both mention Qwen3/Qwen3.5 LoRA, but one is a cross-version incompatibility and the other is a warning about tie_word_embeddings; different problems." }, { "left": "issue:41762", "right": "issue:42915", "accept": false, - "reason": "Both mention model loading, but Gemma3 ZeRO-3 missing weights and Qwen3 MoE FineGrainedFP8Config failures are distinct code paths." + "reason": "Gemma3 ZeRO-3 load failure and Qwen3 MoE FP8 load failure are different model/config regressions." 
}, { - "left": "issue:43975", - "right": "issue:44451", + "left": "issue:43782", + "right": "issue:44960", "accept": false, - "reason": "Different tokenizer/loading problems and different models; no clear shared underlying bug." + "reason": "Different models and failures: Qwen3VL weight_only error vs GLM5 issue." }, { - "left": "issue:44661", - "right": "issue:45020", + "left": "issue:43975", + "right": "issue:44451", "accept": false, - "reason": "`add-new-model-like` mapping bug is unrelated to remote_code loading regressions." + "reason": "DeepSeek tokenizer regression vs inability to load ScandiBERT; not the same bug." }, { "left": "issue:43824", "right": "issue:43872", "accept": false, - "reason": "Import error for a missing class vs bitsandbytes `_is_hf_initialized` kwarg incompatibility." + "reason": "Import error for Qwen2_5_VL class vs bitsandbytes Int8Params constructor mismatch; unrelated." + }, + { + "left": "issue:44661", + "right": "issue:45020", + "accept": false, + "reason": "Model-template registration bug in add-new-model-like vs remote_code loading regressions; different code paths." }, { "left": "issue:44991", "right": "issue:45399", "accept": false, - "reason": "Tokenizer loading failure for EMBEDDIA/est-roberta vs flash-attn fallback blocking; unrelated." + "reason": "Tokenizer load failure for EMBEDDIA/est-roberta vs flash-attn fallback gating; unrelated." }, { "left": "issue:45020", "right": "issue:45356", "accept": false, - "reason": "Remote_code breakage is not the same bug as the Kimi-K2.5 tokenizer regression / fix_mistral_regex warning issue." + "reason": "Broad remote_code regression vs Kimi-K2.5 tokenizer codec regression; different concrete issues." }, { "left": "issue:43827", "right": "issue:44509", "accept": true, - "reason": "Same docs cleanup: both report stale pipeline-task references removed in v5." 
+ "reason": "Same docs cleanup after v5 pipeline removals: both report stale documentation still referencing removed pipeline tasks." }, { "left": "issue:45081", "right": "issue:45399", "accept": false, - "reason": "Tokenizer patch crash vs flash-attn fallback gating; different loading problems." + "reason": "Tokenizer regex patch crash vs flash-attn fallback blocked by checks; different bugs." }, { "left": "issue:43792", "right": "issue:44220", "accept": false, - "reason": "Whisper runtime failure vs `_torch_extract_fbank_features()` issue; too little evidence they share the same root cause." + "reason": "Whisper model run failure vs generic fbank feature-extraction issue; possible relation, but not enough evidence of the same root bug." }, { "left": "issue:43404", "right": "issue:43950", "accept": false, - "reason": "Tied lm_head weights vs non-persistent buffer corruption; separate model-state bugs." + "reason": "Tied lm_head weights in Mistral3 vs silent corruption of non-persistent buffers; unrelated loader regressions." }, { "left": "issue:43257", "right": "issue:43866", "accept": false, - "reason": "Qwen3 MoE weight conversion issue vs Ovis2 checkpoint corruption; unrelated." + "reason": "Qwen3 MoE weight conversion issue vs Ovis2 checkpoint corruption; different models and failures." }, { "left": "issue:43742", "right": "issue:45399", "accept": false, - "reason": "MobileLLM key error vs flash-attn fallback gating; different loading paths." + "reason": "MobileLLM key error while loading vs flash-attn fallback gating; unrelated." }, { "left": "issue:42915", "right": "issue:45020", "accept": false, - "reason": "Qwen3 MoE FP8 failure and broad remote_code breakage are different regressions." + "reason": "Qwen3 MoE ZeRO-3/FP8 load failure is a specific model/config issue, not the broader remote_code regression." }, { "left": "issue:41553", "right": "issue:44991", "accept": false, - "reason": "Different model load failures with no clear shared mechanism." 
+ "reason": "Both are tokenizer loading problems, but different models and failure modes." }, { "left": "issue:43742", "right": "issue:45020", "accept": false, - "reason": "Key error for MobileLLM loading is not the same as the remote_code config regression." + "reason": "Model-specific loading key error vs general remote_code regression; different scopes and causes." }, { "left": "issue:43940", "right": "issue:45313", - "accept": true, - "reason": "Both describe the same ZeRO-3 weight-loading failure pattern for Qwen-family conditional generation models." + "accept": false, + "reason": "Qwen3-Next ZeRO-3 missing weights vs Qwen3.5 ZeRO-3 language_model load failure; similar theme but not enough evidence of one bug." }, { "left": "issue:43872", "right": "issue:44291", - "accept": true, - "reason": "Same unexpected `_is_hf_initialized` kwarg incompatibility during model initialization/loading." + "accept": false, + "reason": "bitsandbytes Int8Params constructor mismatch vs init_empty_weights/_is_hf_initialized error; both adapter/loading related but different incompatibilities." }, { "left": "issue:43854", "right": "issue:44863", "accept": false, - "reason": "Different model-loading failures for different models; not the same underlying bug." + "reason": "GLM-4.7-Flash unit-test load problem vs NemotronH checkpoint loading failure; different models and issues." }, { "left": "issue:42915", "right": "issue:43872", "accept": false, - "reason": "FP8/Qwen3 MoE failure is unrelated to the bitsandbytes `_is_hf_initialized` incompatibility." + "reason": "Qwen3 MoE loading issue vs bitsandbytes constructor incompatibility; unrelated." }, { "left": "issue:41553", "right": "issue:44488", "accept": false, - "reason": "Different models and different loading issues; not duplicates." + "reason": "Bad AutoTokenizer error for Voxtral vs failure to load cjvt/sleng-bert; different models and bugs." 
} ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is well grounded in the packet and stays mostly conservative. The accepted soft-edge pairs are plausible from the titles/bodies, especially the docs cleanup, the SigLIP2 discrepancy, the remote_code regression, and the Qwen ZeRO-3 load failures. I did not see an overclaim strong enough to reject." + "feedback": "Grounded and conservative overall. The summary matches the packet\u2019s broad mix of unrelated loading/tokenizer/quantization/docs issues, and the only accepted soft-edge pair (issue:43827 with issue:44509) is well-supported by the titles as docs cleanup around removed pipeline tasks. No obvious overstatement." }, "evaluator_used": true, "retried": false, @@ -1322,8 +1358,8 @@ "issue:43550|issue:44910", "issue:44560|issue:45685", "issue:43645|issue:43646", - "issue:43950|issue:45325", "issue:43824|issue:43957", + "issue:43950|issue:45325", "issue:42915|issue:43957", "issue:43366|issue:44589", "issue:44560|issue:44918", @@ -1340,20 +1376,20 @@ "issue:38175|issue:39692", "issue:42491|issue:43720" ], - "prepared_review_unit_hash": "2df86cea26b766518d7c043834377f230696e4fdcfa5bf5630400a0bc80f670e", + "prepared_review_unit_hash": "95afaa823216ad8348eb215623944f26ff0380adc8dbd3d4b5c75d500639e9e5", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 31, - "serialized_chars": 37221, + "serialized_chars": 37222, "estimated_input_tokens": 9306, "estimated_eval_tokens": 18868 }, @@ -1362,208 +1398,205 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly a loose cluster of unrelated bug reports, with a few genuine duplicate-style pairs around the same regression: 
non-persistent buffer corruption, custom model initialization in v5, SigLIP2 output corruption, and a Qwen video-input regression.", - "confidence": 0.69, - "canonical_issue_reason": "issue:43644 is the clearest broad regression report in the set and has a near-duplicate in issue:44534 with the same non-persistent-buffer corruption symptom.", + "summary": "The cluster is mostly a loose similarity set of unrelated transformer bugs; none of the soft pairs look like the same underlying issue strongly enough to merge. The clearest duplicate-looking subgroup is the Transformers v5 non-persistent-buffer regression, centered around issue 44534.", + "confidence": 0.81, + "canonical_issue_reason": "Issue 44534 is the most canonical representative of the shared regression theme: it is concise, broadly phrased, and matches the closely related non-persistent-buffer loading reports in the cluster.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43644 is the strongest standalone issue to keep because it states the bug crisply, is broadly scoped, and maps well to the duplicate-like follow-up report.", + "best_issue_reason": "Issue 44534 is the best issue anchor because it captures the broadest version of the recurring Transformers v5 buffer-loading regression without overfitting to one model or workaround.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44928", "right": "issue:45588", "accept": false, - "reason": "Both mention Qwen3.5/attention-related failures, but the concrete failures and code paths differ: gradient instability vs a flash_attention.py AttributeError." + "reason": "Different failures: RLHF gradient explosion from 3D position_ids/SDPA fallback vs a flash-attention AttributeError on s_aux=None." }, { "left": "issue:43257", "right": "issue:45305", "accept": false, - "reason": "Both involve DeepSpeed/Qwen3-MoE, but one is a loading/conversion issue and the other is a gradient-averaging bug; different fixes." 
+ "reason": "Both involve DeepSpeed, but one is about MOE weight conversion during loading and the other about gradient averaging with model_accepts_loss_kwargs." }, { "left": "issue:36331", "right": "issue:41093", "accept": false, - "reason": "These are different error classes in different trainer/data paths; shared shape-related wording is not enough to make them the same bug." + "reason": "Unrelated bugs: a trainer API signature change versus a mask/tensor shape mismatch." }, { "left": "issue:41093", "right": "issue:44805", "accept": false, - "reason": "Same error pattern, but no evidence they share the same underlying failing code path or fix." + "reason": "Same error text pattern, but the reports do not establish the same code path or root cause." }, { "left": "issue:43872", "right": "issue:45538", "accept": false, - "reason": "Unrelated subsystems: bitsandbytes initialization vs tokenizer model_max_length." + "reason": "Completely different areas: bitsandbytes init incompatibility vs CLIPTokenizer model_max_length." }, { "left": "issue:44479", "right": "issue:44560", - "accept": true, - "reason": "Both are video-input regressions in the Qwen video stack on 5.3.0 and plausibly stem from the same processor/video handling bug." + "accept": false, + "reason": "Related video/model families, but the concrete failures differ and do not clearly share one fixable code path." }, { "left": "issue:44530", "right": "issue:44863", "accept": false, - "reason": "Different models and failures; one is a Qwen3.5 cache/group-type crash, the other is a NemotronH checkpoint-loading issue." + "reason": "Different model-loading bugs: PagedAttentionCache linear_attention crash vs NemotronH checkpoint loading failure." }, { "left": "issue:24643", "right": "issue:30064", "accept": false, - "reason": "Completely different bugs: DeepSpeed training weight-shape error vs segmentation-map processing failure." 
+ "reason": "No common underlying bug: DeepSpeed training weight shape error vs image processor void-segmentation-map handling." }, { "left": "issue:43366", "right": "issue:43828", "accept": false, - "reason": "No shared concrete bug; one is GGUF architecture support, the other is an autocast dtype mismatch." + "reason": "Unrelated: GGUF architecture support request vs a dtype mismatch in Phi-tiny-MoE under autocast." }, { "left": "issue:44155", "right": "issue:45584", "accept": false, - "reason": "Different audio-generation paths and symptoms; not the same underlying defect." + "reason": "Both mention incorrect inference behavior, but one is an AudioFlamingo3 batching leak and the other is Whisper empty-transcription generation." }, { "left": "issue:43366", "right": "issue:43531", "accept": false, - "reason": "Both mention Qwen-related models only broadly; the actual issues are unrelated." + "reason": "No match: GGUF gpt-oss support request vs a Qwen3-MoE sliding_window bug." }, { "left": "issue:43550", "right": "issue:44910", "accept": false, - "reason": "Different model families and failure mechanisms; torch.compile/SDPA crash is not the same as Qwen3.5 flash-attention memory corruption." + "reason": "Different subsystems and symptoms: torch.compile+SDPA crash in Bamba vs Qwen3.5 flash-attention illegal memory access from position_ids handling." }, { "left": "issue:44560", "right": "issue:45685", "accept": false, - "reason": "One is a Qwen video-processing regression, the other is an MPS integer-kernel limitation; unrelated." + "reason": "Both are model-specific regressions, but one is a video StopIteration issue and the other is an MPS histogram Int implementation error in MOE." }, { "left": "issue:43645", "right": "issue:43646", - "accept": true, - "reason": "Same v5.0.0 custom-model initialization regression; the notebook-specific report is just a narrower manifestation of the generic one." 
- }, - { - "left": "issue:43950", - "right": "issue:45325", "accept": false, - "reason": "Both are transformer-regression reports, but one is buffer corruption and the other is a Qwen2.5-VL rope-index scaling bug." + "reason": "Closely related 5.x custom-model initialization regressions, but not the same concrete failure mode." }, { "left": "issue:43824", "right": "issue:43957", "accept": false, - "reason": "ImportError for a specific class and meta-device loading breakage are different problems." + "reason": "Different load/import failures: missing Qwen2_5_VLForConditionalGeneration export vs meta-device loading breakage." + }, + { + "left": "issue:43950", + "right": "issue:45325", + "accept": false, + "reason": "Shared Transformers v5 context, but one is non-persistent-buffer corruption and the other is a Qwen2.5-VL rope-index scaling bug." }, { "left": "issue:42915", "right": "issue:43957", "accept": false, - "reason": "Different failure modes and code paths: FineGrainedFP8Config vs meta-device loading." + "reason": "Different issues: Qwen3Moe FP8 failure vs meta-device loading regression." }, { "left": "issue:43366", "right": "issue:44589", "accept": false, - "reason": "No shared underlying bug; architecture support issue vs missing Float8 storage class." + "reason": "Unrelated: GGUF support request vs missing Float8 storage type when loading." }, { "left": "issue:44560", "right": "issue:44918", "accept": false, - "reason": "Both involve Qwen3.5-family models, but one is video StopIteration and the other is SFT embedding unpacking; different code paths." + "reason": "Both are video/embedding related, but the failures are different and not clearly the same bug." }, { "left": "issue:42617", "right": "issue:43366", "accept": false, - "reason": "Unrelated issues: 3d_parallel.py execution failure vs GGUF architecture support." + "reason": "No overlap beyond broad model-loading/runtime trouble; not the same bug." 
}, { "left": "issue:38175", "right": "issue:43994", - "accept": true, - "reason": "Same SigLIP2 model/output corruption theme; both describe the model producing invalid or nonsensical probabilities on the same checkpoint." + "accept": false, + "reason": "Same model family, but one report is zero probabilities and the other nonsensical AutoModel/pipeline output." }, { "left": "issue:43646", "right": "issue:43950", "accept": false, - "reason": "Custom-model initialization regression and non-persistent-buffer corruption are separate v5 issues." + "reason": "Related regression family, but one is custom model initialization while the other is non-persistent buffer corruption." }, { "left": "issue:36010", "right": "issue:42915", "accept": false, - "reason": "Importing GenerationMixin and Qwen3Moe FP8 loading are unrelated defects." + "reason": "Different import/runtime problems: GenerationMixin import breakage vs Qwen3Moe FP8 loading failure." }, { "left": "issue:44560", "right": "issue:44805", "accept": false, - "reason": "Both mention an IndexError-like failure elsewhere, but the video regression and mask-shape mismatch do not appear to share a code path." + "reason": "Different video/embedding issue vs generic mask-shape IndexError; no shared concrete code path." }, { "left": "issue:44877", "right": "issue:45030", "accept": false, - "reason": "Different config-validation problems in different models." + "reason": "Different config validation problems for different model families." }, { "left": "issue:43638", "right": "issue:44661", "accept": false, - "reason": "Different root causes: zero3/Bert loading failure vs tokenizer-mapping-name handling." + "reason": "Different failures: zero-sized Bert model indexing under ZeRO-3 vs tokenizer mapping name validation." }, { "left": "issue:43596", "right": "issue:44661", "accept": false, - "reason": "Same as above; the out-of-bounds zero3 error is unrelated to TOKENIZER_MAPPING_NAMES handling." 
+ "reason": "No shared underlying bug: ZeRO-3 index error vs tokenizer mapping validation." }, { "left": "issue:43317", "right": "issue:44756", "accept": false, - "reason": "GPU/CPU offload loading failure is not the same as disabling mmap to avoid OOM." + "reason": "Both are loading/memory related, but one is dequantized model offload and the other is mmap OOM on Strix Halo." }, { "left": "issue:43644", "right": "issue:44534", - "accept": true, - "reason": "These are the same non-persistent-buffer regression with near-identical symptom wording, so they can plausibly be fixed together." + "accept": false, + "reason": "These look related, but the titles are not identical enough to assert the same bug without the underlying traceback; keep separate." }, { "left": "issue:38175", "right": "issue:39692", "accept": false, - "reason": "Related model family, but one is incorrect probabilities and the other is a doc example with model/processor mismatch and quantization errors." + "reason": "Same model family, but one is runtime zero-probability behavior and the other is documentation/example errors." }, { "left": "issue:42491", "right": "issue:43720", "accept": false, - "reason": "Both involve loading/weights, but the actual bugs are different: Qwen3-MoE hf4.x/hf5.x incompatibility vs BitNet packed-weight unpacking." + "reason": "Different compatibility issues: qwen3_moe LoRA loading across versions vs BitNet packed-weight loading with accelerate." } ] }, - "evaluator_result": { - "accept": true, - "feedback": "The summary is well grounded in the packet and stays conservative overall. The accepted soft pairs are plausibly duplicate-style reports with enough symptom overlap to justify a cautious accept, and the rejected pairs are appropriately non-committal where the evidence only suggests loose thematic similarity. The canonical issue choice is also defensible from the provided titles and descriptions." 
- }, - "evaluator_used": true, + "evaluator_result": null, + "evaluator_used": false, "retried": false, - "accepted_nontrivial_soft_edge": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -1630,22 +1663,22 @@ ], "soft_pairs": [ "issue:45313|issue:45357", - "issue:44960|issue:44977", "issue:44451|issue:44488", + "issue:44960|issue:44977", "issue:43854|issue:43931", "issue:43708|issue:44075", "issue:42491|issue:45276", "issue:41720|issue:42915", "issue:43856|issue:43873", "issue:43257|issue:43854", - "issue:44488|issue:44991", - "issue:39692|issue:43994", "issue:42915|issue:44756", + "issue:39692|issue:43994", + "issue:44488|issue:44991", "issue:42491|issue:43931", "issue:44661|issue:44683", - "issue:45362|issue:45406", "issue:43644|issue:43645", "issue:43856|issue:44387", + "issue:45362|issue:45406", "issue:44910|issue:45588", "issue:42491|issue:45292", "issue:43404|issue:43454", @@ -1663,238 +1696,208 @@ "issue:43854|issue:45216", "issue:44291|issue:44756" ], - "prepared_review_unit_hash": "41ce707c5bbf4c558e59b714d3cd611cc0539967d5e870c6e08f90711fc4da5c", + "prepared_review_unit_hash": "748fcd26268cc9a7649eca2f84597e0b5a4c9905ef075ecae50418683ed847bb", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 33, - "serialized_chars": 37004, - "estimated_input_tokens": 9251, - "estimated_eval_tokens": 18758 + "serialized_chars": 37009, + "estimated_input_tokens": 9253, + "estimated_eval_tokens": 18762 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Most pairs are different bugs touching the same broad subsystem. 
The strongest duplicate candidates are the two lm_head weight-tying reports and the two BERT loading reports; the rest should stay separate.", - "confidence": 0.64, - "canonical_issue_reason": "issue:43454 is the clearest root-cause report: it states the missing lm_head tying directly and the symptom is concrete user-visible corruption, making it a strong representative for the weight-tying bug family.", + "summary": "Loose mixed cluster of Transformers issues, mostly about recent model-loading / generation regressions and a few version-specific architecture bugs. Only two duplicate-like pairs look strong enough to merge; the rest are similar only at a broad subsystem level.", + "confidence": 0.59, + "canonical_issue_reason": "issue:43404 is the clearest concrete bug report in the set: a missing lm_head weight-tying regression in Mistral3ForConditionalGeneration, with issue:43454 looking like the same underlying failure on AyaVision.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43454 is the best single issue to keep: it is specific, actionable, and captures the underlying weight-tying defect succinctly.", + "best_issue_reason": "issue:43404 is the best standalone issue here: it has a specific code-path, clear user-visible failure, and a clean fix surface that can plausibly cover the related AyaVision report too.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:45313", "right": "issue:45357", "accept": false, - "reason": "Same model family, but different failures: DeepSpeed weight loading vs save_pretrained visual-encoder key serialization." + "reason": "Same model family, but one is ZeRO-3 load failure and the other is save_pretrained visual-encoder key regression; different code paths." }, { - "left": "issue:44960", - "right": "issue:44977", + "left": "issue:44451", + "right": "issue:44488", "accept": false, - "reason": "Different models and different flash-attention/generation symptoms; no shared concrete bug path." 
+ "reason": "Both are loading failures on related BERT-like models, but the titles don\u2019t \u0581\u0578\u0582\u0575\u0581 a shared concrete bug or fix path." }, { - "left": "issue:44451", - "right": "issue:44488", - "accept": true, - "reason": "Both report the current/latest Transformers version failing to load specific BERT models, consistent with one loading regression affecting multiple checkpoints." + "left": "issue:44960", + "right": "issue:44977", + "accept": false, + "reason": "Unrelated models and failure modes: GLM5 vs Qwen3.5 flash-attention generation bug." }, { "left": "issue:43854", "right": "issue:43931", "accept": false, - "reason": "Both are model-loading failures, but the errors and affected models are unrelated." + "reason": "Different models and different load failures; no evidence of the same underlying issue." }, { "left": "issue:43708", "right": "issue:44075", "accept": false, - "reason": "Trainer resume/max_steps logic is unrelated to optimizer argument handling." + "reason": "Trainer checkpoint step accounting and SGD argument handling are separate bugs." }, { "left": "issue:42491", "right": "issue:45276", "accept": false, - "reason": "Different model families and different bugs: Qwen3 MoE compatibility vs Gemma4 resize_token_embeddings propagation." + "reason": "Both are regressions, but one is Qwen3 MoE version compatibility and the other is Gemma4 embedding resize propagation." }, { "left": "issue:41720", "right": "issue:42915", "accept": false, - "reason": "Both involve Qwen3, but one is auto device mapping/assertion and the other is FineGrainedFP8Config; not the same defect." + "reason": "Both involve Qwen3, but one is a device-map CUDA assert and the other is a FineGrainedFP8Config failure." }, { "left": "issue:43856", "right": "issue:43873", "accept": false, - "reason": "Both concern memory/performance, but one is Qwen3 MoE training memory use and the other is quantization offloading behavior." 
+ "reason": "Both are memory-related, but one is Qwen3 MoE training inefficiency and the other is offloading with quantization." }, { "left": "issue:43257", "right": "issue:43854", "accept": false, - "reason": "Different subsystems and failures: Qwen3 MoE weight conversion vs GLM-4.7-Flash loading in tests." - }, - { - "left": "issue:44488", - "right": "issue:44991", - "accept": false, - "reason": "Both are load failures, but they involve different models and different tokenizer/model-loading regressions." - }, - { - "left": "issue:39692", - "right": "issue:43994", - "accept": false, - "reason": "Same general model family, but doc example errors and nonsensical AutoModel/pipeline outputs are not the same bug." - }, - { - "left": "issue:42915", - "right": "issue:44756", - "accept": false, - "reason": "Qwen3 MoE FP8 failure is unrelated to Strix Halo mmap OOM." - }, - { - "left": "issue:42491", - "right": "issue:43931", - "accept": false, - "reason": "Different model families and failure modes; no evidence of the same bug." + "reason": "Qwen3 MoE weight conversion under accelerate+deepspeed is unrelated to GLM-4.7-Flash loading." }, { "left": "issue:44661", "right": "issue:44683", "accept": false, - "reason": "Tokenizer mapping-name behavior is unrelated to compiled flex_attention on torch>=2.9." - }, - { - "left": "issue:45362", - "right": "issue:45406", - "accept": false, - "reason": "Different crash sites and different models: chat generation vs serve processor initialization." + "reason": "Tokenizer mapping registration bug vs compiled flex_attention / torch>=2.9 failure; different subsystems." }, { "left": "issue:43644", "right": "issue:43645", "accept": false, - "reason": "Both are Transformers 5.0 regressions, but one is non-persistent buffer init and the other is Jupyter custom-model initialization." + "reason": "Both are Transformers 5.0 regressions, but they affect different initialization/save workflows." 
}, { "left": "issue:43856", "right": "issue:44387", "accept": false, - "reason": "Both mention memory, but one is Qwen3 MoE training and the other is int4 quantization reserved-memory growth." + "reason": "Related broadly to memory, but not the same bug: Qwen3 MoE training footprint vs int4 quantization reserved-memory/OOM regression." + }, + { + "left": "issue:45362", + "right": "issue:45406", + "accept": false, + "reason": "Different products and failures: Qwen3.5 chat crash vs Gemma4 processor missing _tokenizer." }, { "left": "issue:44910", "right": "issue:45588", "accept": false, - "reason": "Both hit flash_attention.py, but one is a packed-sequence interpretation bug and the other is an s_aux=None AttributeError." + "reason": "Both touch flash attention, but one is packed-sequence misinterpretation on Qwen3.5 and the other is an s_aux=None AttributeError on sink-less models." }, { "left": "issue:42491", "right": "issue:45292", "accept": false, - "reason": "Different regressions: Qwen3 MoE hf4.x/hf5.x compatibility vs token embedding resizing not updating output embeddings." + "reason": "Qwen3 MoE hf4.x/hf5.x compatibility is not the same as a generic resize_token_embeddings propagation bug." }, { "left": "issue:43404", "right": "issue:43454", "accept": true, - "reason": "Both describe the same concrete defect class: lm_head weights are not tied, causing bad multimodal generation across closely related model implementations." + "reason": "Same underlying bug class: lm_head weights not tied, causing bad generation; 43454 looks like the same issue on a different model." }, { "left": "issue:45276", "right": "issue:45335", - "accept": false, - "reason": "Related API surface, but the affected model internals differ and the titles do not show the same concrete code-path bug." + "accept": true, + "reason": "Both describe the same resize_token_embeddings propagation bug, with model-specific symptoms around additional embedding/output heads." 
}, { "left": "issue:36010", "right": "issue:39404", "accept": false, - "reason": "ImportError for GenerationMixin is unrelated to Whisper pipeline return_language behavior." + "reason": "Importing GenerationMixin and Whisper pipeline return_language are unrelated regressions." }, { "left": "issue:44164", "right": "issue:44291", "accept": false, - "reason": "Save/from_pretrained extra_state handling and init_empty_weights/_is_hf_initialized are different load/save path bugs." + "reason": "save/from_pretrained extra_state handling and init_empty_weights argument mismatch are distinct loader/saver bugs." }, { "left": "issue:44402", "right": "issue:44488", "accept": false, - "reason": "Tokenizer vocab-size mismatch is not the same as the Sleng-BERT loading failure." + "reason": "Both are load problems, but one is a tokenizer vocab mismatch and the other is an unspecified model-loading failure." }, { "left": "issue:41720", "right": "issue:44155", "accept": false, - "reason": "Qwen3 device-mapping assertion is unrelated to AudioFlamingo3 batched inference token leakage." + "reason": "CUDA assert in device mapping is unrelated to AudioFlamingo3 batched inference token/embedding leakage." }, { "left": "issue:39692", "right": "issue:44960", "accept": false, - "reason": "Different model families and failure types; no shared underlying issue." + "reason": "SigLIP2 documentation/example issues are unrelated to GLM5." }, { "left": "issue:43335", "right": "issue:45663", "accept": false, - "reason": "Sparse-layer construction in SwitchTransformers is unrelated to Gemma4 FSDP2 shared_kv_states rebuilding." + "reason": "Sparse-layer config creation and FSDP2 shared_kv_states rebuild are different model bugs." }, { "left": "issue:43425", "right": "issue:43957", "accept": false, - "reason": "Torch version incompatibility is not the same as meta-device model loading breakage." + "reason": "Torch 2.10 compatibility and meta-device loading regressions are separate incompatibilities." 
}, { "left": "issue:43873", "right": "issue:44387", "accept": false, - "reason": "Both are memory-related, but one is offloading+quantization behavior and the other is reserved-memory growth under int4 quantization." - }, - { - "left": "issue:42947", - "right": "issue:45704", - "accept": false, - "reason": "Gradient checkpointing with PEFT LoRA is unrelated to the T5 apex.FusedRMSNorm memory leak." + "reason": "Offloading/quantization behavior and int4 reserved-memory growth are related only at a high level, not the same concrete bug." }, { "left": "issue:44387", "right": "issue:44756", "accept": false, - "reason": "Different OOM causes: quantization reserved-memory regression vs Strix Halo mmap behavior." + "reason": "Different OOM causes: quantization memory regression vs mmap on Strix Halo." }, { "left": "issue:43854", "right": "issue:45216", "accept": false, - "reason": "Different models and different regression points: loading GLM-4.7-Flash in tests vs Qwen3.5 save_pretrained checkpoint correctness." + "reason": "Different models and failures: GLM-4.7-Flash test loading vs Qwen3.5 save_pretrained regression." }, { "left": "issue:44291", "right": "issue:44756", "accept": false, - "reason": "Meta-init TypeError is unrelated to disabling mmap on Strix Halo." + "reason": "init_empty_weights argument mismatch and Strix Halo mmap OOM are unrelated." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is grounded in the packet and stays conservative. The two accepted soft pairs are supported by the titles as the same bug family (lm_head tying; BERT loading regression), and the rejected pairs are mostly properly separated by distinct models or failure modes." + "feedback": "The cluster summary is grounded in the packet and stays conservative. The two accepted pairs are well-supported by the titles alone (43404/43454 on untied lm_head weights; 45276/45335 on resize_token_embeddings propagation). 
The remaining rejections are generally cautious and do not overclaim shared root causes beyond broad subsystem similarity." }, "evaluator_used": true, "retried": false, @@ -1967,17 +1970,17 @@ "soft_pairs": [ "issue:44292|issue:44912", "issue:44743|issue:44960", - "issue:45684|issue:45698", "issue:45276|issue:45292", - "issue:43957|issue:44756", + "issue:45684|issue:45698", "issue:42947|issue:43381", + "issue:43957|issue:44756", "issue:43381|issue:44928", "issue:44164|issue:44756", "issue:43646|issue:43708", "issue:45588|issue:45663", "issue:44479|issue:44877", - "issue:44402|issue:44451", "issue:45292|issue:45335", + "issue:44402|issue:44451", "issue:43645|issue:44164", "issue:43257|issue:44863", "issue:43828|issue:43957", @@ -1990,268 +1993,268 @@ "issue:43611|issue:43645", "issue:43425|issue:43716", "issue:44530|issue:45084", - "issue:43901|issue:44509", "issue:39404|issue:44912", + "issue:43901|issue:44509", "issue:43299|issue:43931", "issue:44863|issue:45663", "issue:44479|issue:44805", - "issue:43257|issue:45663", "issue:44265|issue:44898", + "issue:43257|issue:45663", "issue:43994|issue:44079", "issue:44671|issue:44991", "issue:39404|issue:44671", "issue:43425|issue:44918" ], - "prepared_review_unit_hash": "589688d1cd5100f11d708fdf101bbbcc82be1832efa56b86d9c7256188c3a499", + "prepared_review_unit_hash": "260c9ce7432bbc0d3e3806ec8b515f1ad87e22aa28baf7a7bc7783fafe3b2f30", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 36, - "serialized_chars": 38406, - "estimated_input_tokens": 9602, - "estimated_eval_tokens": 19460 + "serialized_chars": 38409, + "estimated_input_tokens": 9603, + "estimated_eval_tokens": 19462 }, "overflow_policy": "truncate_then_skip", 
"trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is mostly a grab-bag of unrelated Transformers regressions, but there is a tight duplicate family around `resize_token_embeddings` not updating model-specific output/input embeddings. A separate Qwen3-VL loading pair also looks plausibly duplicate, while the rest are better treated as distinct issues.", - "confidence": 0.66, - "canonical_issue_reason": "issue:45292 is the most general member of the tight resize_token_embeddings regression family; the Gemma4 and t5gemma reports look like narrower manifestations of the same missing propagation behavior.", + "summary": "This cluster is mostly heterogeneous, with one clear duplicate subcluster around `resize_token_embeddings` not updating output embeddings. Most other soft pairs are related only at the subsystem level and should stay separate.", + "confidence": 0.94, + "canonical_issue_reason": "`issue:45292` is the most general phrasing of the `resize_token_embeddings` bug and cleanly covers the Gemma4- and t5gemma-specific reports.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45292 is the best representative issue in this cluster because it states the bug in the broadest, least model-specific terms.", + "best_issue_reason": "`issue:45292` is the best representative of the only strong duplicate set in the cluster because it states the shared failure mode without model-specific narrowing.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44292", "right": "issue:44912", "accept": false, - "reason": "Both are quantization/loading failures, but they affect different models and different failure modes (`Qwen-3-8B-NVFP4` vs `git-oss-20b` MXFP4 fallback)." + "reason": "Both are quantization/loading failures, but they involve different models and different failure modes." 
}, { "left": "issue:44743", "right": "issue:44960", "accept": false, - "reason": "Different bugs: recurrent state reset with cache/seq_len>1 versus an unrelated GLM5 issue." - }, - { - "left": "issue:45684", - "right": "issue:45698", - "accept": false, - "reason": "One is a permissions propagation problem in `save_pretrained`; the other is loading the wrong custom module after save. Related workflow, not the same bug." + "reason": "Same broad MoE/cache area, but no evidence they describe the same recurrent-state bug." }, { "left": "issue:45276", "right": "issue:45292", "accept": true, - "reason": "Same underlying bug: `resize_token_embeddings` does not propagate to model-specific embedding/output layers; one report is just Gemma4-specific wording." + "reason": "Same underlying bug: `resize_token_embeddings` does not propagate to the model's output embeddings / tied embedding structures." }, { - "left": "issue:43957", - "right": "issue:44756", + "left": "issue:45684", + "right": "issue:45698", "accept": false, - "reason": "`torch.device(\"meta\")` model-loading breakage and disabling mmap on Strix Halo are different causes and fixes." + "reason": "Both involve custom-model save/load, but one is a permissions problem and the other is wrong module resolution." }, { "left": "issue:42947", "right": "issue:43381", "accept": false, - "reason": "Both mention gradient checkpointing, but one is ineffective with PEFT LoRA and the other forbids checkpointing in eval mode; distinct code paths." + "reason": "Both mention gradient checkpointing, but one is LoRA ineffectiveness and the other is an eval-mode restriction." + }, + { + "left": "issue:43957", + "right": "issue:44756", + "accept": false, + "reason": "Unrelated failures: meta-device loading versus mmap/OOM behavior." }, { "left": "issue:43381", "right": "issue:44928", "accept": false, - "reason": "Eval-mode checkpointing restriction is unrelated to the Qwen3.5 NaN/SDPA fallback regression." 
+ "reason": "Different problems: eval-mode checkpointing versus RLHF NaN explosion from 3D position ids." }, { "left": "issue:44164", "right": "issue:44756", "accept": false, - "reason": "`extra_state` serialization handling is unrelated to the Strix Halo mmap OOM problem." + "reason": "`extra_state` serialization is unrelated to Strix Halo mmap/OOM." }, { "left": "issue:43646", "right": "issue:43708", "accept": false, - "reason": "Custom model initialization in notebooks and `resume_from_checkpoint` max_steps calculation are unrelated." + "reason": "Custom model initialization in notebooks is unrelated to `resume_from_checkpoint` step calculation." }, { "left": "issue:45588", "right": "issue:45663", "accept": false, - "reason": "Flash-attention `s_aux=None` crash and Gemma4 FSDP2 `shared_kv_states` rebuild are different model internals and failures." + "reason": "Different code paths: flash-attention `s_aux=None` crash versus Gemma4 FSDP2 shared-state rebuild." }, { "left": "issue:44479", "right": "issue:44877", "accept": false, - "reason": "Video-input regression for Qwen VL models versus strict config blocking `granite_speech`; not the same issue." - }, - { - "left": "issue:44402", - "right": "issue:44451", - "accept": false, - "reason": "Different tokenizer/vocab-loading failures for different models." + "reason": "Video-input regression and strict-config loading are separate bugs." }, { "left": "issue:45292", "right": "issue:45335", "accept": true, - "reason": "Same resize_token_embeddings regression family: the generic output embedding bug and the t5gemma decoder embedding bug are both missing propagation to custom embedding modules." + "reason": "Same duplicate family: `resize_token_embeddings` failing to update the output/decoder embeddings." + }, + { + "left": "issue:44402", + "right": "issue:44451", + "accept": false, + "reason": "Tokenizer vocab mismatch and model-loading failure are not the same concrete bug." 
}, { "left": "issue:43645", "right": "issue:44164", "accept": false, - "reason": "Notebook custom-model initialization breakage is unrelated to handling `extra_state` in save/load." + "reason": "Jupyter custom-model initialization and `extra_state` save/load handling are distinct issues." }, { "left": "issue:43257", "right": "issue:44863", "accept": false, - "reason": "Both are model-loading problems, but they involve different model families and different failure mechanisms." + "reason": "Different model-loading bugs affecting different architectures and failure points." }, { "left": "issue:43828", "right": "issue:43957", "accept": false, - "reason": "Autocast dtype mismatch for Phi-tiny-MoE is not the same bug as meta-device loading breakage." + "reason": "Autocast dtype mismatch is not the same as meta-device loading breakage." }, { "left": "issue:43425", "right": "issue:43828", "accept": false, - "reason": "Torch 2.10 compatibility and an autocast dtype mismatch are different problems." + "reason": "Torch version incompatibility is too broad; the other is a specific dtype mismatch under autocast." }, { "left": "issue:44589", "right": "issue:44756", "accept": false, - "reason": "Float8 storage lookup failure is unrelated to mmap/OOM behavior." + "reason": "Float8 storage lookup failure is unrelated to mmap/OOM on Strix Halo." }, { "left": "issue:39404", "right": "issue:45701", "accept": false, - "reason": "Whisper pipeline `return_language` regression and tokenization changes across versions are different issues." + "reason": "Whisper pipeline language return and tokenizer-version drift are unrelated." }, { "left": "issue:44479", "right": "issue:45663", "accept": false, - "reason": "Qwen VL video regression versus Gemma4 FSDP2 key error are unrelated." + "reason": "Video-input regression and Gemma4 FSDP2 KeyError are different bugs." 
}, { "left": "issue:43906", "right": "issue:44530", "accept": false, - "reason": "An isolated reproduction of another bug versus a cache crash on Qwen3.5 linear_attention; not the same defect." + "reason": "A reproduction of another issue is not enough to treat this as the same bug as the Qwen3.5 cache crash." }, { "left": "issue:43931", "right": "issue:44863", "accept": false, - "reason": "Both are loading errors, but they concern different model families and distinct shape-mismatch situations." + "reason": "Weight-shape mismatch for Qwen3-VL and NemotronH checkpoint loading are different failures." }, { "left": "issue:43611", "right": "issue:43645", "accept": false, - "reason": "`base_model_prefix` loading regression and Jupyter notebook custom-model initialization are separate problems." + "reason": "Both are Transformer 5 regressions around custom models, but the reported breakages are different." }, { "left": "issue:43425", "right": "issue:43716", "accept": false, - "reason": "Torch version incompatibility and Mistral-3 dtype mismatch are not the same bug." + "reason": "Torch incompatibility and image-preprocessor dtype mismatch are unrelated." }, { "left": "issue:44530", "right": "issue:45084", "accept": false, - "reason": "Cache `linear_attention` crash and template compilation failure are unrelated." + "reason": "Qwen3.5 linear-attention cache crash is unrelated to template-node compilation failure." }, { - "left": "issue:43901", - "right": "issue:44509", + "left": "issue:39404", + "right": "issue:44912", "accept": false, - "reason": "Both are docs-related, but one is about `return_all_scores` text and the other about removed pipeline tasks." + "reason": "Whisper pipeline behavior and MXFP4 quantization fallback are unrelated." }, { - "left": "issue:39404", - "right": "issue:44912", + "left": "issue:43901", + "right": "issue:44509", "accept": false, - "reason": "Whisper pipeline behavior and MXFP4 quantization loading are unrelated." 
+ "reason": "Both are docs-related, but they refer to different deprecated/changed pipeline APIs." }, { "left": "issue:43299", "right": "issue:43931", - "accept": true, - "reason": "Both describe Qwen3-VL/MoE model loading failures with weight-shape mismatch symptoms under Transformers 5.x; they look like the same underlying loader regression." + "accept": false, + "reason": "Different Qwen3-VL loading regressions with different symptoms and likely different causes." }, { "left": "issue:44863", "right": "issue:45663", "accept": false, - "reason": "NemotronH checkpoint loading and Gemma4 FSDP2 `shared_kv_states` rebuilding are unrelated." + "reason": "NemotronH checkpoint loading and Gemma4 FSDP2 attention state handling are unrelated." }, { "left": "issue:44479", "right": "issue:44805", "accept": false, - "reason": "Qwen VL video input regression and a mask/tensor shape mismatch are different errors." + "reason": "Video regression and mask/tensor shape mismatch are not the same bug." }, { - "left": "issue:43257", - "right": "issue:45663", + "left": "issue:44265", + "right": "issue:44898", "accept": false, - "reason": "Qwen3 MOE loading under accelerate/deepspeed and Gemma4 FSDP2 attention state handling are unrelated." + "reason": "`torch.export` failing on `torch_compilable_check` is unrelated to Perceiver interpolation failure." }, { - "left": "issue:44265", - "right": "issue:44898", + "left": "issue:43257", + "right": "issue:45663", "accept": false, - "reason": "`torch.export.export` with `torch_compilable_check` is a different path from Perceiver resize/interpolate failures." + "reason": "Accelerate/deepspeed conversion failure and Gemma4 FSDP2 `KeyError` are different problems." }, { "left": "issue:43994", "right": "issue:44079", "accept": false, - "reason": "Incorrect SigLIP2 outputs and `ModelOutput` key assignment are different bugs." + "reason": "Incorrect predictions in a model/pipeline and `ModelOutput` key assignment are different failure modes." 
}, { "left": "issue:44671", "right": "issue:44991", "accept": false, - "reason": "CamemBERT masked-LM predictions and tokenizer loading for `EMBEDDIA/est-roberta` are unrelated." + "reason": "CamemBERT masked-LM prediction drift and tokenizer loading failure are not the same issue." }, { "left": "issue:39404", "right": "issue:44671", "accept": false, - "reason": "Whisper pipeline language-return regression and CamemBERT prediction regression are unrelated." + "reason": "Whisper pipeline language return and CamemBERT masked-LM predictions are unrelated." }, { "left": "issue:43425", "right": "issue:44918", "accept": false, - "reason": "Torch version incompatibility and Qwen3.5 input-embedding unpacking are different issues." + "reason": "Torch version compatibility and TRL embedding unpacking are unrelated." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is grounded in the packet and stays mostly conservative. The resize_token_embeddings family is the clearest cluster, and the two accepted soft pairs are plausibly same-bug duplicates based on the titles. The rest are rejected without overclaiming. Minor caution: the Qwen3-VL pair is only plausibly duplicate from the available metadata, but the wording does not overstate beyond that." + "feedback": "The cluster summary is grounded in the packet: `issue:45292` is a reasonable canonical representative for the resize_token_embeddings duplicate family, and the two accepted soft edges (`45276`/`45292`, `45292`/`45335`) are conservative and well supported by the titles. The remaining soft-edge rejections stay appropriately cautious rather than overstating similarity." 
}, "evaluator_used": true, "retried": false, @@ -2324,269 +2327,269 @@ "issue:43856|issue:44756", "issue:43596|issue:44683", "issue:44589|issue:45084", - "issue:43425|issue:45685", "issue:40990|issue:42947", + "issue:43425|issue:45685", "issue:36032|issue:43452", - "issue:44991|issue:45081", "issue:32090|issue:39404", + "issue:44991|issue:45081", "issue:43957|issue:44164", - "issue:43582|issue:43632", - "issue:43883|issue:44488", "issue:44164|issue:44479", + "issue:43582|issue:43632", "issue:44488|issue:45081", "issue:40990|issue:43421", + "issue:43883|issue:44488", "issue:43611|issue:43644", "issue:44488|issue:44987", "issue:44206|issue:44220", "issue:44811|issue:45584", - "issue:43883|issue:44991", "issue:43716|issue:45084", "issue:34567|issue:40444", "issue:43643|issue:44530", + "issue:43883|issue:44991", "issue:43644|issue:43646", - "issue:43452|issue:43482", "issue:44451|issue:44987", + "issue:43452|issue:43482", "issue:36032|issue:42915", "issue:43317|issue:43482", "issue:43541|issue:45084", "issue:42915|issue:44530", "issue:44849|issue:45325", "issue:43404|issue:44466", - "issue:44987|issue:44991", - "issue:36032|issue:43645" + "issue:36032|issue:43645", + "issue:44987|issue:44991" ], - "prepared_review_unit_hash": "f3d8285cff194fedcd75f4857fecfcbc87cedd4736398d7b53187aa31e14fd97", + "prepared_review_unit_hash": "3096386bb432f83b1874cdf837b0c91b74b160955fc153c82991144295a23679", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 33, - "serialized_chars": 36986, - "estimated_input_tokens": 9247, - "estimated_eval_tokens": 18750 + "serialized_chars": 36982, + "estimated_input_tokens": 9246, + "estimated_eval_tokens": 18748 }, "overflow_policy": 
"truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly a heterogeneous set of Transformers v5 regressions, especially around model/tokenizer loading and initialization. Only one soft pair looks like a true duplicate; the rest are related-by-theme but not the same bug.", - "confidence": 0.82, - "canonical_issue_reason": "issue:43611 is the broadest and most representative loading-regression report in this cluster, covering a core v5 model-loading breakage that sits near several other init/load complaints.", + "summary": "This cluster is mostly a grab bag of unrelated bug reports, with a small number of Transformers v5 loading regressions. Only one soft pair looks like the same underlying bug: the gguf_file/from_pretrained loading failure.", + "confidence": 0.81, + "canonical_issue_reason": "Issue 43452 is the broadest and clearest description of the gguf_file loading regression, and it naturally covers the model-specific follow-up in 43482.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43611 is the best single issue to anchor the cluster because it describes a central, generic Transformers v5 loading failure rather than a model-specific symptom.", + "best_issue_reason": "43452 is the best representative issue for this cluster because it states the core loading failure in a generic way and can subsume narrower duplicates.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43856", "right": "issue:44756", "accept": false, - "reason": "Both mention memory, but one is Qwen3 MoE training efficiency and the other is Strix Halo mmap OOM; different code paths and fixes." + "reason": "Different problems: Qwen3 MoE training memory use vs Strix Halo mmap OOM avoidance." }, { "left": "issue:43596", "right": "issue:44683", "accept": false, - "reason": "Deepspeed/Zero3 Bert loading is unrelated to compiled flex_attention failing on newer torch." 
+ "reason": "BertModel/zero3 init IndexError is unrelated to torch>=2.9 flex_attention compilation." }, { "left": "issue:44589", "right": "issue:45084", "accept": false, - "reason": "Different failures: a float8 storage lookup error versus a template-node compilation error." - }, - { - "left": "issue:43425", - "right": "issue:45685", - "accept": false, - "reason": "Torch 2.10 incompatibility is a different problem from an MPS int histogram kernel missing on MoE." + "reason": "Float8 storage lookup error and template-node compile error are different failure modes." }, { "left": "issue:40990", "right": "issue:42947", "accept": false, - "reason": "Perplexity on gpt-oss and ineffective gradient checkpointing with LoRA are unrelated bugs." + "reason": "Perplexity regression on gpt-oss is unrelated to gradient checkpointing/LoRA behavior." }, { - "left": "issue:36032", - "right": "issue:43452", + "left": "issue:43425", + "right": "issue:45685", "accept": false, - "reason": "Tokenizer method-name conflict is unrelated to gguf_file loading failures." + "reason": "Torch version incompatibility and an MPS histogram kernel missing for Int are not the same bug." }, { - "left": "issue:44991", - "right": "issue:45081", + "left": "issue:36032", + "right": "issue:43452", "accept": false, - "reason": "Both are tokenizer load regressions, but they involve different models and different failure causes." + "reason": "Tokenizer special-token method conflict vs gguf_file/from_pretrained loading breakage." }, { "left": "issue:32090", "right": "issue:39404", "accept": false, - "reason": "Trainer GPU broadcast NoneType error is unrelated to Whisper return_language pipeline behavior." + "reason": "Trainer GPU broadcast NoneType error is unrelated to Whisper return_language pipeline failure." 
}, { - "left": "issue:43957", - "right": "issue:44164", + "left": "issue:44991", + "right": "issue:45081", "accept": false, - "reason": "Meta-device loading issues and extra_state save/load handling are different regressions." + "reason": "Generic tokenizer load failure and Mistral regex patch crash are different code paths." }, { - "left": "issue:43582", - "right": "issue:43632", + "left": "issue:43957", + "right": "issue:44164", "accept": false, - "reason": "AppleSilicon allocator warmup TypeError is not the same as the _is_hf_initialized flag regression." + "reason": "torch.device(meta) loading problems are not the same as extra_state save/from_pretrained handling." }, { - "left": "issue:43883", - "right": "issue:44488", + "left": "issue:44164", + "right": "issue:44479", "accept": false, - "reason": "Missing all_tied_weights_keys for Molmo is unrelated to loading a specific BERT model." + "reason": "extra_state serialization and Qwen video-input regression are unrelated." }, { - "left": "issue:44164", - "right": "issue:44479", + "left": "issue:43582", + "right": "issue:43632", "accept": false, - "reason": "extra_state serialization and Qwen video-input regressions do not share the same bug." + "reason": "Apple Silicon allocator warmup TypeError is unrelated to the _is_hf_initialized flag regression." }, { "left": "issue:44488", "right": "issue:45081", "accept": false, - "reason": "Both are load-time failures, but one is a BERT model load and the other is a Mistral tokenizer regex patch crash." + "reason": "Both mention loading, but one is a model-load failure and the other is a specific Mistral regex patch crash." }, { "left": "issue:40990", "right": "issue:43421", "accept": false, - "reason": "Perplexity regression and tokenizer backend post-processor updating are unrelated." + "reason": "Perplexity on gpt-oss is unrelated to runtime special-token post-processor updates." 
+ }, + { + "left": "issue:43883", + "right": "issue:44488", + "accept": false, + "reason": "Missing all_tied_weights_keys on Molmo is unrelated to loading cjvt/sleng-bert." }, { "left": "issue:43611", "right": "issue:43644", "accept": false, - "reason": "Both are v5 loading regressions, but one is base_model_prefix loading and the other is non-persistent buffer initialization; too broad to treat as one bug." + "reason": "Both are v5 regressions, but base_model_prefix loading and non-persistent-buffer initialization are different bugs." }, { "left": "issue:44488", "right": "issue:44987", "accept": false, - "reason": "Different models and different loading failures; no clear shared concrete bug." + "reason": "Different model-loading failures on different models; no shared concrete bug." }, { "left": "issue:44206", "right": "issue:44220", - "accept": true, - "reason": "Both point to the same audio feature-extraction path: _torch_extract_fbank_features causing a crash via an unsupported center argument." + "accept": false, + "reason": "Unsupported center arg in feature extractor and _torch_extract_fbank_features() issue are distinct audio regressions." }, { "left": "issue:44811", "right": "issue:45584", "accept": false, - "reason": "Both involve Whisper, but one is batch_decode skip_special_tokens handling and the other is empty-transcription generation after align_special_tokens." - }, - { - "left": "issue:43883", - "right": "issue:44991", - "accept": false, - "reason": "Different model-loading issues with no evidence of the same underlying cause." + "reason": "Both are Whisper-related, but batch_decode skip_special_tokens and empty-transcription generation are separate issues." }, { "left": "issue:43716", "right": "issue:45084", "accept": false, - "reason": "Mistral-3 dtype mismatch and template-node compilation error are unrelated." + "reason": "Mistral-3 dtype mismatch is unrelated to the template-node compilation error." 
}, { "left": "issue:34567", "right": "issue:40444", "accept": false, - "reason": "Trainer token counting and Qwen2.5-VL iterable dataset multi-image failure are separate problems." + "reason": "TrainerState token counting and Qwen2.5-VL multi-image IterableDataset failure are unrelated." }, { "left": "issue:43643", "right": "issue:44530", "accept": false, - "reason": "AutoConfig missing fields with trust_remote_code is unrelated to Qwen3.5 paged attention cache crashes." + "reason": "trust_remote_code field loss is unrelated to PagedAttentionCache's linear_attention group error." }, { - "left": "issue:43644", - "right": "issue:43646", + "left": "issue:43883", + "right": "issue:44991", "accept": false, - "reason": "Both are v5 initialization regressions, but one is junk buffers and the other is custom model init; not the same concrete bug." + "reason": "Molmo tied-weights attribute error and est-roberta tokenizer loading failure do not share a concrete root cause." }, { - "left": "issue:43452", - "right": "issue:43482", + "left": "issue:43644", + "right": "issue:43646", "accept": false, - "reason": "Both concern GGUF loading, but one is a generic gguf_file API break and the other is a specific Qwen2.5-GGUF failure; too speculative to merge." + "reason": "Non-persistent buffer junk and custom model initialization are related only at a high level, not the same bug." }, { "left": "issue:44451", "right": "issue:44987", "accept": false, - "reason": "Different model load failures with no clear shared code-path bug." + "reason": "Two different model-loading failures with different affected models and symptoms." + }, + { + "left": "issue:43452", + "right": "issue:43482", + "accept": true, + "reason": "Both point to the same gguf_file/from_pretrained loading regression; 43482 is the model-specific manifestation of 43452." 
}, { "left": "issue:36032", "right": "issue:42915", "accept": false, - "reason": "Tokenizer method conflict and Qwen3Moe FP8 training failure are unrelated." + "reason": "Tokenizer special-token conflict is unrelated to FineGrainedFP8Config failure in Qwen3Moe." }, { "left": "issue:43317", "right": "issue:43482", "accept": false, - "reason": "Dequantized model offload loading and GGUF loading failure are different issues." + "reason": "device_map=auto dequantized-model offload failure is unrelated to gguf loading breakage." }, { "left": "issue:43541", "right": "issue:45084", "accept": false, - "reason": "Torch dynamo grouped_mm tracing failure is unrelated to template-node compilation." + "reason": "Torch Dynamo grouped_mm tracing error is unrelated to the template-node compile issue." }, { "left": "issue:42915", "right": "issue:44530", "accept": false, - "reason": "Both mention Qwen/MoE, but they fail in different subsystems and with different causes." + "reason": "FineGrainedFP8Config training failure and Qwen3.5 PagedAttentionCache crash are different bugs." }, { "left": "issue:44849", "right": "issue:45325", "accept": false, - "reason": "Qwen3.5 hidden-states output and Qwen2.5-VL rope index scaling are separate regressions." + "reason": "Qwen3.5 hidden-state output bug and Qwen2.5-VL rope-index scaling bug are separate model-specific regressions." }, { "left": "issue:43404", "right": "issue:44466", "accept": false, - "reason": "Mistral3 weight-tying bug is unrelated to inconsistent lm_head.weight serialization." + "reason": "Missing tied weights on Mistral3ForConditionalGeneration is not the same as device-dependent serialization inconsistency." }, { - "left": "issue:44987", - "right": "issue:44991", + "left": "issue:36032", + "right": "issue:43645", "accept": false, - "reason": "Both are loading failures on different models, but there is no evidence they share the same defect." 
+ "reason": "Tokenizer method-name conflict and custom model initialization in notebooks are unrelated." }, { - "left": "issue:36032", - "right": "issue:43645", + "left": "issue:44987", + "right": "issue:44991", "accept": false, - "reason": "Tokenizer method conflict and custom model initialization in notebooks are not the same bug." + "reason": "Both are load failures, but for different models and likely different root causes." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is grounded in the packet and stays conservative about the soft edges. The single accepted pair (44206/44220) is plausibly the same audio feature-extraction regression, and the rejected pairs are generally justified as distinct failure modes or at least not clearly mergeable." + "feedback": "Grounded overall: the cluster is mostly heterogeneous bug reports, and the only clearly same-bug soft pair is 43452/43482 around the gguf_file/from_pretrained loading regression. The other soft-edge rejections are conservative and do not overclaim shared root cause." 
}, "evaluator_used": true, "retried": false, @@ -2658,253 +2661,253 @@ ], "soft_pairs": [ "issue:43262|issue:43377", - "issue:43646|issue:44534", "issue:45084|issue:45507", + "issue:43646|issue:44534", "issue:43994|issue:44610", "issue:43645|issue:44534", "issue:43322|issue:43540", "issue:44403|issue:45081", "issue:45216|issue:45357", - "issue:41553|issue:43582", "issue:43377|issue:43819", - "issue:44220|issue:44683", "issue:44451|issue:45081", - "issue:43404|issue:45127", - "issue:44265|issue:44610", + "issue:44220|issue:44683", "issue:42915|issue:45084", - "issue:43632|issue:44756", + "issue:41553|issue:43582", + "issue:44265|issue:44610", + "issue:43404|issue:45127", "issue:44987|issue:45081", + "issue:43632|issue:44756", "issue:44479|issue:45325", "issue:44062|issue:44987", "issue:42915|issue:44291", "issue:43296|issue:43531", - "issue:43611|issue:44534", "issue:36032|issue:43742", "issue:40444|issue:45325", - "issue:43317|issue:43856", - "issue:44079|issue:45663", + "issue:43611|issue:44534", "issue:43531|issue:44877", "issue:42915|issue:44589", + "issue:43317|issue:43856", + "issue:44079|issue:45663", "issue:40990|issue:44811", "issue:42175|issue:45542" ], - "prepared_review_unit_hash": "0e56d02194b4e730e4837f4af9a6c7ccc555931b8c5587e1c719826c2f8e87e5", + "prepared_review_unit_hash": "dc22133b70acc0017f1619be1140e00d479f719889c090ef1fe4264244108e23", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 30, - "serialized_chars": 36654, - "estimated_input_tokens": 9164, - "estimated_eval_tokens": 18584 + "serialized_chars": 36662, + "estimated_input_tokens": 9166, + "estimated_eval_tokens": 18588 }, "overflow_policy": "truncate_then_skip", "trimmed": false, 
"aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is mostly a loose grouping of Transformers v5 regression reports, but only a couple of pairs look like true duplicates. Most pairs are different bugs in adjacent areas (model loading, audio/video preprocessing, quantization, packaging) and should stay separate.", - "confidence": 0.78, - "canonical_issue_reason": "issue:43646 is the broadest and most representative of the shared theme in this cluster: Transformers 5 breaking custom model initialization/loading. It captures the main regression family better than the narrower model-specific reports.", + "summary": "Mostly heterogeneous issue set. The only duplicate-style pair I\u2019d accept is the Qwen3.5 save_pretrained regression (45216/45357). The Qwen VL/video and Qwen2.5-VL position-id pairs are related by theme but are still distinct bugs, so they should stay separate.", + "confidence": 0.64, + "canonical_issue_reason": "Issue:45216 is the best anchor because it has the only clear duplicate-style companion in the packet (45357) and its wording is the broader checkpoint-regression report; the rest of the set is too mixed to justify a stronger canonical choice.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43646 is the best single issue to anchor this cluster because it is generic, centrally phrased, and aligns with several nearby v5 init/load regressions; it is the most useful umbrella among otherwise mixed reports.", + "best_issue_reason": "Issue:45216 is the most suitable representative issue overall: it matches the single accepted issue-duplicate pair and describes the broadest concrete failure in that mini-cluster. 
The remaining issues are mostly unrelated regressions across different models and subsystems.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43262", "right": "issue:43377", "accept": false, - "reason": "Different audio bugs: one is a 16kHz default in apply_chat_template, the other is batched-vs-single MIMI output mismatch from missing padding-mask support." + "reason": "Different bugs: audio chat-template sampling-rate handling vs MIMI batched-output mismatch from missing padding-mask support." }, { - "left": "issue:43646", - "right": "issue:44534", + "left": "issue:45084", + "right": "issue:45507", "accept": false, - "reason": "Both involve Transformers v5 regressions, but the underlying bugs differ: custom model initialization vs non-persistent buffers being filled with junk." + "reason": "Different code paths and failures; same broad model/workflow theme is not enough to merge." }, { - "left": "issue:45084", - "right": "issue:45507", + "left": "issue:43646", + "right": "issue:44534", "accept": false, - "reason": "Unrelated failures in different code paths; no shared concrete bug or fix surface is evident from the titles." + "reason": "Custom model initialization regression vs non-persistent buffer corruption are distinct bugs." }, { "left": "issue:43994", "right": "issue:44610", "accept": false, - "reason": "Both are vision-related, but they describe different model/processor mismatches and likely different root causes." + "reason": "Unrelated failures: bad inference results vs processor/model image-size mismatch." }, { "left": "issue:43645", "right": "issue:44534", "accept": false, - "reason": "Jupyter/custom-model initialization regression is distinct from the non-persistent-buffer corruption bug." + "reason": "Notebook custom-model init bug and junk-filled non-persistent buffers are not the same defect." 
}, { "left": "issue:43322", "right": "issue:43540", "accept": false, - "reason": "Different multimodal loading/runtime failures on different model families; no common underlying code-path is clear." + "reason": "Different multimodal loading/video crashes with different triggers and code paths." }, { "left": "issue:44403", "right": "issue:45081", "accept": false, - "reason": "A generic loading noise issue and a Mistral regex patch crash are not the same bug." + "reason": "Generic load noise vs tokenizer regex patch crash; no shared concrete bug." }, { "left": "issue:45216", "right": "issue:45357", "accept": true, - "reason": "Both report the same Qwen3.5 save_pretrained regression producing incorrect checkpoints/visual encoder keys across nearby versions." + "reason": "Same Qwen3.5 save_pretrained regression; 45357 looks like the narrower report on incorrect visual encoder keys within the same failure." }, { - "left": "issue:41553", - "right": "issue:43582", + "left": "issue:43377", + "right": "issue:43819", "accept": false, - "reason": "Bad AutoTokenizer error handling for Voxtral and Apple Silicon caching_allocator_warmup TypeError are unrelated." + "reason": "Both touch audio models, but padding-mask mismatch and DAC from_latents/forward mismatch are different bugs." }, { - "left": "issue:43377", - "right": "issue:43819", + "left": "issue:44451", + "right": "issue:45081", "accept": false, - "reason": "Both concern audio models, but one is a padding-mask batching mismatch and the other is a DAC latent/STE inconsistency." + "reason": "Different models and failures; no evidence of the same underlying issue." }, { "left": "issue:44220", "right": "issue:44683", "accept": false, - "reason": "Different subsystems: fbank feature extraction vs compiled flex_attention." + "reason": "Audio feature extraction issue vs compiled flex_attention failure on newer torch are unrelated." 
}, { - "left": "issue:44451", - "right": "issue:45081", + "left": "issue:42915", + "right": "issue:45084", "accept": false, - "reason": "Loading ScandiBERT and Mistral regex patching are different model-specific problems." + "reason": "Both are regressions, but they describe different concrete failures and fixes." }, { - "left": "issue:43404", - "right": "issue:45127", + "left": "issue:41553", + "right": "issue:43582", "accept": false, - "reason": "Weight tying in Mistral3ForConditionalGeneration is not the same as LoRA merge collapse with extended vocab." + "reason": "AutoTokenizer error handling and Apple Silicon allocator TypeError are unrelated." }, { "left": "issue:44265", "right": "issue:44610", "accept": false, - "reason": "torch.export exportability and processor image-size mismatch are unrelated." + "reason": "torch.export/torch_compilable_check failure is unrelated to the OmDet processor image-size regression." }, { - "left": "issue:42915", - "right": "issue:45084", + "left": "issue:43404", + "right": "issue:45127", "accept": false, - "reason": "Quantization/config failure for Qwen3MoE is unrelated to template compilation errors." + "reason": "Both involve embeddings/weights, but they are different concrete bugs on different models and workflows." }, { - "left": "issue:43632", - "right": "issue:44756", + "left": "issue:44987", + "right": "issue:45081", "accept": false, - "reason": "_is_hf_initialized flag regression and Strix Halo mmap OOM are separate issues." + "reason": "Specific model loading failure vs Mistral tokenizer regex patch crash; not the same bug." }, { - "left": "issue:44987", - "right": "issue:45081", + "left": "issue:43632", + "right": "issue:44756", "accept": false, - "reason": "Loading physical-intelligence/fast and patching the Mistral regex are different failures." + "reason": "Transformers v5 flag regression vs mmap/OOM workaround request; no shared code-path bug." 
}, { "left": "issue:44479", "right": "issue:45325", - "accept": true, - "reason": "These point to the same Qwen2.5-VL/Qwen3-VL temporal-position regression: a broad video-input regression and its specific rope-index scaling cause." + "accept": false, + "reason": "Related Qwen-VL/position-id theme, but one is a video-input regression and the other is a still-image temporal position-id scaling bug; too different to accept as a duplicate." }, { "left": "issue:44062", "right": "issue:44987", "accept": false, - "reason": "AddedToken special-argument duplication is unrelated to the physical-intelligence/fast loading failure." + "reason": "Tokenizer AddedToken keyword conflict is unrelated to loading a specific model." }, { "left": "issue:42915", "right": "issue:44291", "accept": false, - "reason": "FineGrainedFP8Config failure and init_empty_weights unexpected _is_hf_initialized are different regressions." + "reason": "Both are v5-era loading problems, but the concrete failures differ." }, { "left": "issue:43296", "right": "issue:43531", "accept": false, - "reason": "PaddleOCR-VL load failure in vLLM and Qwen3-MoE sliding_window behavior are unrelated." - }, - { - "left": "issue:43611", - "right": "issue:44534", - "accept": false, - "reason": "base_model_prefix loading breakage is a different bug from non-persistent buffer corruption." + "reason": "Different models, modalities, and error mechanisms." }, { "left": "issue:36032", "right": "issue:43742", "accept": false, - "reason": "T5Tokenizer add_special_tokens conflict and MobileLLM key error are different tokenizer/model-load failures." + "reason": "Tokenizer special-token conflict vs MobileLLM key error are not the same bug." }, { "left": "issue:40444", "right": "issue:45325", "accept": false, - "reason": "IterableDataset multi-image finetuning failure is not the same as the Qwen2.5-VL rope-index temporal scaling bug." 
- }, - { - "left": "issue:43317", - "right": "issue:43856", - "accept": false, - "reason": "device_map offload/dequantized-model loading and Qwen3 MoE memory usage are distinct performance/loading issues." + "reason": "Multi-image finetuning failure vs still-image temporal position-id scaling bug; only a broad Qwen-VL overlap." }, { - "left": "issue:44079", - "right": "issue:45663", + "left": "issue:43611", + "right": "issue:44534", "accept": false, - "reason": "ModelOutput key assignment logic and Gemma4 shared_kv_states/FSDP2 KeyError are different code paths." + "reason": "Different Transformers v5 regressions affecting different internals." }, { "left": "issue:43531", "right": "issue:44877", "accept": false, - "reason": "Qwen3-MoE sliding_window behavior and granite_speech config strictness are unrelated." - }, - { - "left": "issue:42915", - "right": "issue:44589", - "accept": false, - "reason": "Qwen3MoE FP8 config failure and missing Float8 storage object are not the same concrete bug." + "reason": "Different model/config failures with no shared concrete defect." }, { "left": "issue:40990", "right": "issue:44811", "accept": false, - "reason": "High perplexity on gpt-oss and Whisper batch_decode skip_special_tokens handling are unrelated." + "reason": "Perplexity anomaly and Whisper batch_decode skip_special_tokens handling are unrelated." }, { "left": "issue:42175", "right": "issue:45542", "accept": false, - "reason": "Both touch TensorFlow/package setup, but one is missing tf backend in the torch extra while the other is an undefined tf-backend error when only tensorboard is installed." + "reason": "Both concern TensorFlow backend packaging, but the triggers and failure modes differ." + }, + { + "left": "issue:44589", + "right": "issue:44987", + "accept": false, + "reason": "Missing Float8 storage type error vs model-loading failure are unrelated." 
+ }, + { + "left": "issue:43317", + "right": "issue:43856", + "accept": false, + "reason": "GPU/CPU offload loading failure and MoE memory inefficiency are different bugs." + }, + { + "left": "issue:44079", + "right": "issue:45663", + "accept": false, + "reason": "ModelOutput key-assignment bug vs Gemma4 FSDP2 shared_kv_states KeyError are unrelated." } ] }, "evaluator_result": { - "accept": true, - "feedback": "The summary is well grounded in the packet: issue:43646 is a reasonable umbrella for the Transformers v5 custom-init/load regression family, and the overall assessment stays appropriately cautious. The accepted soft-edge pairs are the only ones with clear title-level overlap, while the rest are conservatively rejected as distinct bugs." + "accept": false, + "feedback": "The write-up is mostly careful, but one soft-edge accept is too strong for the packet: issue:44479 vs issue:45325 only share a broad Qwen-VL / position-id theme from the titles, while one is specifically a video-input regression and the other is a still-image temporal position-id scaling bug. That is enough for a tentative similarity, not a confident duplicate-style accept. Because the evaluation must stay conservative, I can\u2019t accept this clustering as written." 
}, "evaluator_used": true, - "retried": false, + "retried": true, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null @@ -2972,251 +2975,258 @@ ], "soft_pairs": [ "issue:43818|issue:45357", - "issue:43421|issue:44568", - "issue:43632|issue:44534", "issue:43425|issue:44403", + "issue:43632|issue:44534", + "issue:43421|issue:44568", "issue:36032|issue:41720", "issue:44683|issue:45020", - "issue:44829|issue:45479", "issue:33666|issue:40444", + "issue:44829|issue:45479", "issue:43482|issue:45538", "issue:44403|issue:44991", "issue:43792|issue:44589", "issue:44079|issue:44534", + "issue:43482|issue:43525", "issue:35707|issue:40444", "issue:43377|issue:45491", - "issue:43482|issue:43525", - "issue:43749|issue:44079", "issue:44291|issue:44589", + "issue:43749|issue:44079", "issue:43644|issue:44079", "issue:40444|issue:45381", - "issue:43881|issue:44488", "issue:45325|issue:45381", "issue:44265|issue:45072", "issue:41720|issue:45081", "issue:43632|issue:45072", "issue:43531|issue:44291", - "issue:43611|issue:43646", + "issue:43881|issue:44488", "issue:42175|issue:43296", + "issue:43611|issue:43646", "issue:43994|issue:44898", "issue:43454|issue:44466", "issue:42915|issue:43482", - "issue:43828|issue:44683" + "issue:43828|issue:44683", + "issue:43525|issue:44589" ], - "prepared_review_unit_hash": "c6aa746e40892992b5a0de34d2a9fc1f0498aed5421eebf494c928a3201c7fe8", + "prepared_review_unit_hash": "d0928b90e396923581288ed0ef5aa1973c06b17d38113f7f9e82ee4b6c111383", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 47, "item_count": 47, - "soft_pair_count": 31, - "serialized_chars": 36554, - "estimated_input_tokens": 9139, - "estimated_eval_tokens": 18534 + "soft_pair_count": 32, + 
"serialized_chars": 36857, + "estimated_input_tokens": 9215, + "estimated_eval_tokens": 18686 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is mostly a grab-bag of transformers v5 regression reports across different models and subsystems. No soft pair looks like a safe duplicate with the same concrete bug; the closest ones are still too model-specific or symptom-specific to merge.", - "confidence": 0.84, - "canonical_issue_reason": "issue:45020 is the broadest and most representative issue: it frames the cluster as a general regression class affecting recent transformers versions and remote-code models, which matches the cluster\u2019s overall theme better than the narrower model-specific reports.", + "summary": "This is a heterogeneous cluster of mostly unrelated bug reports, with a loose theme around Transformers v5 regressions and model/tokenizer edge cases. I don\u2019t see any soft-edge pair that is safely the same underlying bug or change.", + "confidence": 0.86, + "canonical_issue_reason": "No true duplicate hub stands out; issue 43632 is the closest representative of the broad core-initialization regression theme, but the cluster is too mixed to treat it as a canonical duplicate.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45020 is the best global issue candidate because it is open, high-level, and can plausibly serve as the umbrella for the surrounding v5 compatibility regressions.", + "best_issue_reason": "Issue 43632 is the most representative single report of the cluster\u2019s main Transformers v5 initialization/loading regression pattern, though it still shouldn\u2019t absorb the rest as duplicates.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43818", "right": "issue:45357", "accept": false, - "reason": "Different models and different failure modes: Video-LLaVA missing temporal attention/weight tying vs 
Qwen3.5 visual encoder key serialization." + "reason": "Both involve Qwen multimodal models, but one is a Video-LLaVA tower bug and the other is Qwen3.5 save_pretrained key serialization; different failures." }, { - "left": "issue:43421", - "right": "issue:44568", + "left": "issue:43425", + "right": "issue:44403", "accept": false, - "reason": "Both involve tokenizers, but one is runtime post-processor updates and the other is BOS/EOS insertion for a specific tokenizer; not the same bug." + "reason": "Torch 2.10 compatibility is unrelated to generic loading noise; no shared code-path bug." }, { "left": "issue:43632", "right": "issue:44534", "accept": false, - "reason": "One is about the `_is_hf_initialized` flag, the other about non-persistent buffers being filled with junk; related era, different problem." + "reason": "Both are Transformers v5 regressions, but one is `_is_hf_initialized` handling and the other is non-persistent buffer junk; distinct bugs." }, { - "left": "issue:43425", - "right": "issue:44403", + "left": "issue:43421", + "right": "issue:44568", "accept": false, - "reason": "Torch version incompatibility and loading noise are unrelated." + "reason": "Both touch special-token behavior, but runtime post-processor updates and tokenizer `add_special_tokens` BOS/EOS insertion are different issues." }, { "left": "issue:36032", "right": "issue:41720", "accept": false, - "reason": "Tokenizer method conflict vs Qwen3 device-mapping CUDA assert; no shared code-path." + "reason": "Tokenizer method conflict and Qwen3 auto device mapping assert are unrelated." }, { "left": "issue:44683", "right": "issue:45020", "accept": false, - "reason": "Compiled flex_attention on torch >=2.9 is a torch/attention issue, not the remote_code loading regression in 45020." + "reason": "Compiled flex_attention on torch>=2.9 and remote_code model breakage are separate problems." 
}, { - "left": "issue:44829", - "right": "issue:45479", + "left": "issue:33666", + "right": "issue:40444", "accept": false, - "reason": "Both are training regressions, but one is flash_attention_3 behavior and the other is single-label classification loss handling; different bugs." + "reason": "Both are multimodal training issues, but multi-GPU training and iterable-dataset multi-image failures are different code paths." }, { - "left": "issue:33666", - "right": "issue:40444", + "left": "issue:44829", + "right": "issue:45479", "accept": false, - "reason": "Both are Qwen VL training reports, but one is multi-GPU training and the other is IterableDataset with multiple images per prompt." + "reason": "Both cause bad training behavior, but flash_attention_3 degeneration and single-label zero-loss classification are distinct bugs." }, { "left": "issue:43482", "right": "issue:45538", "accept": false, - "reason": "GGUF loading under transformers v5 vs CLIPTokenizer max length behavior are unrelated." + "reason": "GGUF loading in v5 and CLIPTokenizer max length are unrelated." }, { "left": "issue:44403", "right": "issue:44991", "accept": false, - "reason": "Unnecessary loading noise is not the same as tokenizer loading failure for a specific model." + "reason": "Generic loading noise and tokenizer loading failure for a specific model are not the same bug." }, { "left": "issue:43792", "right": "issue:44589", "accept": false, - "reason": "Whisper loading failure and Float8 storage lookup failure are separate errors." + "reason": "Whisper loading/runtime failure and Float8 storage lookup are different failures." }, { "left": "issue:44079", "right": "issue:44534", "accept": false, - "reason": "ModelOutput key assignment and non-persistent buffer junk are different internal behaviors." + "reason": "ModelOutput key assignment and buffer serialization junk are unrelated." 
+ }, + { + "left": "issue:43482", + "right": "issue:43525", + "accept": false, + "reason": "Qwen2.5-GGUF loading and missing `pad_token_id` on Llama4Config are separate issues." }, { "left": "issue:35707", "right": "issue:40444", "accept": false, - "reason": "Progressive generation with inputs_embeds/past_key_values is unrelated to Qwen2.5-VL multi-image IterableDataset finetuning." + "reason": "Progressive generation with `inputs_embeds`/`past_key_values` is unrelated to iterable-dataset multi-image finetuning." }, { "left": "issue:43377", "right": "issue:45491", "accept": false, - "reason": "Both are batching/padding-related, but the former is encoder padding-mask support and the latter is Gemma3 sliding-window NaNs; not the same bug." + "reason": "Both are batching/padding-related, but MIMI padding-mask mismatch and Gemma3 sliding-window NaNs are different model/path bugs." }, { - "left": "issue:43482", - "right": "issue:43525", + "left": "issue:44291", + "right": "issue:44589", "accept": false, - "reason": "Model loading failure in GGUF and missing `pad_token_id` on Llama4Config are unrelated." + "reason": "Unexpected `_is_hf_initialized` arg during `init_empty_weights` is unrelated to Float8 storage resolution." }, { "left": "issue:43749", "right": "issue:44079", "accept": false, - "reason": "FSDP CPU RAM-efficient loading and ModelOutput key assignment do not share the same bug." - }, - { - "left": "issue:44291", - "right": "issue:44589", - "accept": false, - "reason": "`init_empty_weights` argument handling and Float8 storage resolution are different failure points." + "reason": "FSDP CPU RAM-efficient loading and ModelOutput key bookkeeping are unrelated." }, { "left": "issue:43644", "right": "issue:44079", "accept": false, - "reason": "Non-persistent buffer serialization and ModelOutput key bookkeeping are unrelated." + "reason": "Non-persistent buffer serialization junk and ModelOutput key assignment are different code paths." 
}, { "left": "issue:40444", "right": "issue:45381", "accept": false, - "reason": "Qwen2.5-VL multi-image finetuning failure vs Qwen2.5-VL video vision_position_ids bug are distinct symptoms." - }, - { - "left": "issue:43881", - "right": "issue:44488", - "accept": false, - "reason": "GLM-4V loading failure and sleng-bert tokenizer load failure are unrelated." + "reason": "Multi-image iterable finetuning failure and Qwen2.5-VL video position ids are unrelated." }, { "left": "issue:45325", "right": "issue:45381", "accept": false, - "reason": "Both are Qwen2.5-VL position-id regressions, but one is still-image temporal scaling and the other is video input position_ids; not clearly the same concrete bug." + "reason": "Both are Qwen2.5-VL position-id bugs, but one is still-image temporal scaling and the other is video vision_position_ids; not enough to call the same concrete bug." }, { "left": "issue:44265", "right": "issue:45072", "accept": false, - "reason": "torch.export/torch_compilable_check is unrelated to bfloat16 dtype mismatches in SwitchTransformers/TimmWrapperModel." + "reason": "torch.export/export-check failures and bf16 dtype mismatches are different issues." }, { "left": "issue:41720", "right": "issue:45081", "accept": false, - "reason": "Qwen3 auto device-mapping crash and Mistral regex patch failure are different code paths." + "reason": "Qwen3 device-mapping assert and Mistral tokenizer regex crash are unrelated." }, { "left": "issue:43632", "right": "issue:45072", "accept": false, - "reason": "_is_hf_initialized regressions and bfloat16 dtype mismatches are unrelated." + "reason": "Core v5 init flag regression and bf16 dtype mismatch are unrelated." }, { "left": "issue:43531", "right": "issue:44291", "accept": false, - "reason": "Sliding-window behavior in Qwen3-MoE does not match init_empty_weights/_is_hf_initialized loading issues." 
+ "reason": "Qwen3-MoE sliding-window behavior and init_empty_weights `_is_hf_initialized` handling are not the same bug." }, { - "left": "issue:43611", - "right": "issue:43646", + "left": "issue:43881", + "right": "issue:44488", "accept": false, - "reason": "Both are broad v5 regressions, but one is about `base_model_prefix` loading and the other about custom model initialization; too different to treat as the same bug." + "reason": "GLM-4V loading failure and a specific tokenizer load failure are unrelated." }, { "left": "issue:42175", "right": "issue:43296", "accept": false, - "reason": "Packaging/backend dependency issue vs vLLM model-loading failure are unrelated." + "reason": "Packaging/backend inclusion and vLLM PaddleOCR-VL loading are different issues." + }, + { + "left": "issue:43611", + "right": "issue:43646", + "accept": false, + "reason": "Both are v5 model-init regressions, but base_model_prefix loading failure and custom initialization breakage are not clearly the same concrete bug." }, { "left": "issue:43994", "right": "issue:44898", "accept": false, - "reason": "Different models and different failure mechanisms: SigLIP pipeline outputs vs Perceiver non-default resolution handling." + "reason": "SigLIP2 nonsense outputs and Perceiver non-default resolution failure are unrelated vision-model bugs." }, { "left": "issue:43454", "right": "issue:44466", "accept": false, - "reason": "Both mention tied lm_head weights, but one is a missing tie causing bad generation and the other is inconsistent serialization; not the same concrete bug." + "reason": "lm_head tie failure causing bad generation and device-dependent serialization inconsistency are related in theme but not the same bug." }, { "left": "issue:42915", "right": "issue:43482", "accept": false, - "reason": "Qwen3Moe FP8 config failure and Qwen2.5-GGUF loading failure are unrelated." + "reason": "Qwen3Moe FineGrainedFP8 failure and Qwen2.5-GGUF loading failure are unrelated." 
}, { "left": "issue:43828", "right": "issue:44683", "accept": false, - "reason": "Autocast dtype mismatch in Phi-tiny-MoE is unrelated to compiled flex_attention failures on torch >=2.9." + "reason": "Autocast dtype mismatch on Phi-tiny-MoE and compiled flex_attention on torch>=2.9 are different problems." + }, + { + "left": "issue:43525", + "right": "issue:44589", + "accept": false, + "reason": "Missing `pad_token_id` and Float8 storage lookup errors are unrelated." } ] }, @@ -3251,8 +3261,8 @@ "issue:43317", "issue:43322", "issue:43377", + "issue:43404", "issue:43454", - "issue:43525", "issue:43526", "issue:43550", "issue:43606", @@ -3273,12 +3283,12 @@ "issue:44387", "issue:44466", "issue:44534", - "issue:44589", "issue:44610", "issue:44617", "issue:44683", "issue:44877", "issue:44898", + "issue:44977", "issue:44991", "issue:45072", "issue:45325", @@ -3291,18 +3301,17 @@ ], "soft_pairs": [ "issue:44387|issue:44617", - "issue:43525|issue:44589", "issue:43792|issue:43957", - "issue:43526|issue:43883", "issue:43701|issue:45325", - "issue:43881|issue:44991", + "issue:43526|issue:43883", "issue:43262|issue:43746", - "issue:43317|issue:43873", - "issue:43632|issue:45563", "issue:40990|issue:45584", + "issue:43317|issue:43873", "issue:44291|issue:44877", - "issue:34634|issue:35707", + "issue:43881|issue:44991", + "issue:43632|issue:45563", "issue:43606|issue:44265", + "issue:34634|issue:35707", "issue:43873|issue:45701", "issue:44186|issue:44265", "issue:44610|issue:45325", @@ -3316,226 +3325,241 @@ "issue:43749|issue:44534", "issue:44466|issue:45357", "issue:43792|issue:44683", - "issue:43454|issue:43720", "issue:43296|issue:44991", + "issue:43454|issue:43720", "issue:43296|issue:44877", "issue:41720|issue:43322", - "issue:45632|issue:45684" + "issue:43404|issue:45357", + "issue:45632|issue:45684", + "issue:41720|issue:44977", + "issue:40990|issue:45701" ], - "prepared_review_unit_hash": "c36c4987efbf9e62e534d77c210a2c92ec528c23b67cd7efd0dee4460ed279ac", + 
"prepared_review_unit_hash": "be0f48de431d42728c5c73d5d28b153668c2e0e8d604baac796bb9e876a3c9c1", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 31, - "serialized_chars": 37001, - "estimated_input_tokens": 9251, - "estimated_eval_tokens": 18758 + "soft_pair_count": 33, + "serialized_chars": 37623, + "estimated_input_tokens": 9406, + "estimated_eval_tokens": 19068 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "These items are mostly unrelated single-issue bugs across different models/features. I did not find any soft pair that looks like the same underlying bug/change, so no soft edges are accepted.", - "confidence": 0.86, - "canonical_issue_reason": "issue:43749 is the strongest anchor: it has high discussion activity/inbound references and describes a concrete, impactful loading regression rather than a niche edge case.", + "summary": "This set is mostly a mix of unrelated transformer/model-loading regressions rather than one duplicate cluster. I rejected all soft edges because the paired reports target different models, code paths, or failure modes. 
If an anchor is needed, issue 43873 is the broadest discussion-heavy offloading/quantization report, but it is not a strong duplicate hub.", + "confidence": 0.74, + "canonical_issue_reason": "Issue 43873 is the most discussion-heavy and broadly framed report in the set, centered on offloading/quantization behavior, so it is the least arbitrary issue to anchor the noisy group.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43749 is the best standalone representative because it is clearly scoped, high-impact, and broadly referenced.", + "best_issue_reason": "Issue 43873 is the best representative only in a loose sense because it has the broadest scope and strongest surrounding discussion, but the overall set is not a coherent duplicate cluster.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44387", "right": "issue:44617", "accept": false, - "reason": "Both mention OOM/memory issues, but one is int4 quantization reserved memory and the other is Sam3Video runtime OOM; different code paths and causes." - }, - { - "left": "issue:43525", - "right": "issue:44589", - "accept": false, - "reason": "Pad token ID missing on Llama4Config is unrelated to the Float8 storage lookup error; no shared bug mechanism." + "reason": "Different models and symptoms: int4 quantization OOM vs generic SAM3Video OOM." }, { "left": "issue:43792", "right": "issue:43957", "accept": false, - "reason": "Whisper runtime failure and meta-device loading regression are different failure modes affecting different model families." - }, - { - "left": "issue:43526", - "right": "issue:43883", - "accept": false, - "reason": "BeitImageProcessorFast label reduction bug is unrelated to Molmo weight-tying attribute errors." + "reason": "Both are loading failures, but one is Whisper-specific while the other is a broad meta-device loading regression." 
}, { "left": "issue:43701", "right": "issue:45325", "accept": false, - "reason": "Checkpoint key mismatch is unrelated to Qwen2.5-VL rope/position-id scaling." + "reason": "Checkpoint key mismatch and Qwen2.5-VL rope/indexing are unrelated bugs." }, { - "left": "issue:43881", - "right": "issue:44991", + "left": "issue:43526", + "right": "issue:43883", "accept": false, - "reason": "GLM-4V load failure and tokenizer loading failure for a different model are not the same underlying defect." + "reason": "BeitImageProcessorFast label reduction bug is unrelated to Molmo tied-weights attribute error." }, { "left": "issue:43262", "right": "issue:43746", "accept": false, - "reason": "Both touch audio/speech models, but one is chat-template sampling-rate handling and the other is PEFT checkpoint loading." + "reason": "Audio sampling-rate default mismatch is unrelated to GraniteSpeech PEFT checkpoint loading." }, { - "left": "issue:43317", - "right": "issue:43873", + "left": "issue:40990", + "right": "issue:45584", "accept": false, - "reason": "Both involve offloading/quantization, but the reported failures are different enough that they do not look like one concrete bug." + "reason": "Perplexity regression and Whisper empty-transcription generation are different failures." }, { - "left": "issue:43632", - "right": "issue:45563", + "left": "issue:43317", + "right": "issue:43873", "accept": false, - "reason": "Broken _is_hf_initialized flag handling is unrelated to a stale warning in paged generate()." + "reason": "Both mention offload/quantization, but the concrete failures and affected paths are different." }, { - "left": "issue:40990", - "right": "issue:45584", + "left": "issue:44291", + "right": "issue:44877", "accept": false, - "reason": "High perplexity on gpt-oss-20b is unrelated to Whisper empty-transcription generation failure." + "reason": "Unexpected init_empty_weights argument and strict config loading are distinct issues." 
}, { - "left": "issue:44291", - "right": "issue:44877", + "left": "issue:43881", + "right": "issue:44991", "accept": false, - "reason": "These are both loading-related v5 regressions, but one is an init_empty_weights argument error and the other is strict config loading for granite_speech." + "reason": "GLM-4V loading failure and tokenizer loading failure for est-roberta are unrelated." }, { - "left": "issue:34634", - "right": "issue:35707", + "left": "issue:43632", + "right": "issue:45563", "accept": false, - "reason": "Bark voice_preset and progressive generation with inputs_embeds/past_key_values are different features and failures." + "reason": "_is_hf_initialized regression and a stale generate() warning do not describe the same bug." }, { "left": "issue:43606", "right": "issue:44265", "accept": false, - "reason": "CPU-offload device mismatch for bark-small is unrelated to torch.export failures with torch_compilable_check." + "reason": "CPU-offload device mismatch is unrelated to torch.export failures with torch_compilable_check." + }, + { + "left": "issue:34634", + "right": "issue:35707", + "accept": false, + "reason": "Bark voice_preset bug and progressive generation with inputs_embeds/past_key_values are unrelated." }, { "left": "issue:43873", "right": "issue:45701", "accept": false, - "reason": "Quantization/offloading behavior and tokenization changes across versions are not the same bug." + "reason": "Offloading/quantization behavior and version-dependent tokenization are different problems." }, { "left": "issue:44186", "right": "issue:44265", "accept": false, - "reason": "Tokenizer padding/truncation crash and torch.export/torch_compilable_check failure are unrelated." + "reason": "Tokenizer crash on NER/padding is unrelated to torch.export compatibility." }, { "left": "issue:44610", "right": "issue:45325", "accept": false, - "reason": "Processor image-size mismatch is unrelated to Qwen2.5-VL temporal position-id scaling." 
+ "reason": "Processor output size mismatch and Qwen2.5-VL temporal position scaling are unrelated." }, { "left": "issue:35707", "right": "issue:41720", "accept": false, - "reason": "Inputs_embeds/past_key_values progressive generation and auto device mapping cudaErrorAssert are different problems." + "reason": "Different Qwen issues: generation with inputs_embeds vs CUDA assert under auto device mapping." }, { "left": "issue:43606", "right": "issue:43819", "accept": false, - "reason": "Bark CPU-offload device mismatch is unrelated to DAC.from_latents vs forward-pass mismatch." + "reason": "Bark CPU-offload mismatch and DAC latent mismatch are different code paths." }, { "left": "issue:43454", "right": "issue:43550", "accept": false, - "reason": "AyaVision tied-weights serialization and Bamba torch.compile/SDPA failure are different code paths." + "reason": "lm_head tying bug and Bamba torch.compile SDPA failure are unrelated." }, { "left": "issue:43377", "right": "issue:44610", "accept": false, - "reason": "MIMI batching/padding-mask bug is unrelated to OmDet-Turbo processor size mismatch." + "reason": "MIMI padding-mask inconsistency and OmDet-Turbo input-size mismatch are different bugs." }, { "left": "issue:43377", "right": "issue:44898", "accept": false, - "reason": "MIMI batch-vs-single discrepancy is not the same as Perceiver non-default-resolution failure." + "reason": "MIMI batch-vs-single mismatch and Perceiver resolution handling are unrelated." }, { "left": "issue:43454", "right": "issue:45072", "accept": false, - "reason": "lm_head tying/serialization issues do not match dtype mismatches in SwitchTransformers/TimmWrapperModel." + "reason": "Weight-tying serialization bug and bf16 dtype mismatch are different failures." }, { "left": "issue:39692", "right": "issue:40444", "accept": false, - "reason": "SigLIP2 doc example errors are unrelated to Qwen2.5-VL iterable dataset multi-image finetuning failures." 
+ "reason": "SigLIP2 doc-example errors and Qwen2.5-VL finetuning with multi-image iterable data are unrelated." }, { "left": "issue:43749", "right": "issue:44534", "accept": false, - "reason": "FSDP CPU RAM efficient loading is unrelated to non-persistent buffer initialization junk in v5." + "reason": "FSDP CPU RAM efficient loading and non-persistent buffer junk are not the same bug." }, { "left": "issue:44466", "right": "issue:45357", "accept": false, - "reason": "Both involve serialization, but one is lm_head.weight tied-weight behavior and the other is Qwen3.5 visual encoder keys." + "reason": "Both involve save/serialization, but they affect different models and different saved keys." }, { "left": "issue:43792", "right": "issue:44683", "accept": false, - "reason": "Whisper runtime failure and compiled flex_attention failure are unrelated." + "reason": "Whisper loading/runtime failure and compiled flex_attention torch-version regression are unrelated." }, { - "left": "issue:43454", - "right": "issue:43720", + "left": "issue:43296", + "right": "issue:44991", "accept": false, - "reason": "AyaVision weight tying and BitNet packed-weight unpacking are different bugs." + "reason": "PaddleOCR-VL load failure in vLLM and tokenizer loading failure for est-roberta are different problems." }, { - "left": "issue:43296", - "right": "issue:44991", + "left": "issue:43454", + "right": "issue:43720", "accept": false, - "reason": "PaddleOCR-VL loading in vLLM is unrelated to EMBEDDIA tokenizer loading failure in transformers v5." + "reason": "lm_head tying in Mistral3 and BitNet packed-weight unloading during accelerate loading are unrelated." }, { "left": "issue:43296", "right": "issue:44877", "accept": false, - "reason": "vLLM/PaddleOCR-VL loading failure is unrelated to strict config preventing granite_speech loading." + "reason": "Model loading failure in vLLM and strict granite_speech config rejection are distinct issues." 
}, { "left": "issue:41720", "right": "issue:43322", "accept": false, - "reason": "cudaErrorAssert with Qwen3 auto device mapping is not the same as Llava Next segmentation fault." + "reason": "Qwen3 device-map CUDA assert and Llava Next segmentation fault are different model-loading crashes." + }, + { + "left": "issue:43404", + "right": "issue:45357", + "accept": false, + "reason": "Mistral3 lm_head tying bug and Qwen3.5 visual-encoder save_pretrained regression are unrelated." }, { "left": "issue:45632", "right": "issue:45684", "accept": false, - "reason": "Cache-path collisions for trust_remote_code are unrelated to read-only permission propagation in save_pretrained()." + "reason": "Cache-path collision for trust_remote_code and read-only permission propagation are different save/load issues." + }, + { + "left": "issue:41720", + "right": "issue:44977", + "accept": false, + "reason": "CUDA assert with auto device mapping and flash-attention generation bug are different failures." + }, + { + "left": "issue:40990", + "right": "issue:45701", + "accept": false, + "reason": "High perplexity regression and version-dependent tokenization are unrelated." 
} ] }, @@ -3561,7 +3585,6 @@ "nodes": [ "issue:36010", "issue:36331", - "issue:40990", "issue:41720", "issue:42617", "issue:42915", @@ -3570,7 +3593,7 @@ "issue:43317", "issue:43322", "issue:43388", - "issue:43404", + "issue:43421", "issue:43450", "issue:43526", "issue:43531", @@ -3590,6 +3613,8 @@ "issue:44387", "issue:44423", "issue:44479", + "issue:44492", + "issue:44509", "issue:44610", "issue:44671", "issue:44743", @@ -3598,34 +3623,30 @@ "issue:44898", "issue:44913", "issue:44918", - "issue:44977", "issue:44991", "issue:45059", "issue:45072", "issue:45161", - "issue:45357", "issue:45406", + "issue:45464", "issue:45701", "issue:45704" ], "soft_pairs": [ - "issue:43404|issue:45357", - "issue:41720|issue:44977", - "issue:43526|issue:43881", "issue:44423|issue:45406", - "issue:40990|issue:45701", - "issue:43873|issue:44756", "issue:44186|issue:45072", + "issue:43873|issue:44756", + "issue:43526|issue:43881", "issue:43577|issue:43708", "issue:44387|issue:45704", "issue:42617|issue:42915", "issue:44877|issue:44991", - "issue:44479|issue:44918", "issue:43450|issue:45701", "issue:36331|issue:42915", "issue:43550|issue:43720", - "issue:44610|issue:44898", "issue:36331|issue:42617", + "issue:44610|issue:44898", + "issue:44479|issue:44918", "issue:44186|issue:44610", "issue:43316|issue:44913", "issue:44671|issue:44743", @@ -3640,235 +3661,245 @@ "issue:44186|issue:44361", "issue:43540|issue:43782", "issue:42915|issue:43322", + "issue:36010|issue:42617", "issue:41720|issue:43873", - "issue:36010|issue:42617" + "issue:45406|issue:45464", + "issue:36331|issue:41720", + "issue:43421|issue:45701", + "issue:44492|issue:44509" ], - "prepared_review_unit_hash": "050eb0c97b0b35a0afe07ea0af3254dad1fc48ff7ccd752804fa3f48b829fec5", + "prepared_review_unit_hash": "197fc57a6c795a7bce86e7b53bcc5966e56d6c6093054fe14fe121b84d9fa2b2", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - 
"estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 33, - "serialized_chars": 37478, - "estimated_input_tokens": 9370, - "estimated_eval_tokens": 18996 + "soft_pair_count": 34, + "serialized_chars": 37765, + "estimated_input_tokens": 9442, + "estimated_eval_tokens": 19140 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is largely heterogeneous: most items are model-specific runtime/configuration bugs that only loosely share themes like loading, quantization, offload, tokenization, or multimodal processing. The soft edges are mostly topical rather than true duplicates, so none should be merged.", - "confidence": 0.91, - "canonical_issue_reason": "issue:41720 is the closest hub because it is a concrete runtime failure around Qwen3 auto device mapping, and several soft links are thematically nearby (device mapping, offload, quantization), but it is still not a true duplicate representative of the whole cluster.", + "summary": "The cluster is heterogeneous: most items are unrelated transformer bugs/features across different models and subsystems. 
All soft edges are rejected; none look like the same underlying bug/change.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue fits this cluster; the items span distinct regressions and model-specific failures with only loose thematic overlap.", "canonical_pr_reason": null, - "best_issue_reason": "issue:41720 is the best single issue anchor only in the loose sense of being one of the more central runtime/model-loading bugs with broader discussion and related soft links; however, the cluster is too diverse for any issue to be a strong dedupe canonical.", + "best_issue_reason": "Issue 43873 is the closest broad representative of the offloading/quantization subset, but it is not a true cluster-wide canonical bug.", "best_pr_reason": null, "soft_edge_verdicts": [ - { - "left": "issue:43404", - "right": "issue:45357", - "accept": false, - "reason": "Different bugs: one is tied lm_head weights in Mistral3, the other is incorrect visual encoder keys in Qwen3.5 save_pretrained." - }, - { - "left": "issue:41720", - "right": "issue:44977", - "accept": false, - "reason": "Both involve Qwen3-family models, but one is a device-mapping CUDA assert and the other is a flash-attention generation bug; different code paths and failures." - }, - { - "left": "issue:43526", - "right": "issue:43881", - "accept": false, - "reason": "Unrelated: BeitImageProcessorFast label reduction vs glm-4v-9b loading failure." - }, { "left": "issue:44423", "right": "issue:45406", "accept": false, - "reason": "Both are transformers-serve multimodal crashes, but they hit different processors and different errors (`str.to` vs missing `_tokenizer`)." + "reason": "Both are serve-time multimodal crashes, but they involve different models and different failure modes (Qwen3.5/continuous batching vs Gemma4Processor _tokenizer)." 
}, { - "left": "issue:40990", - "right": "issue:45701", + "left": "issue:44186", + "right": "issue:45072", "accept": false, - "reason": "Perplexity on a specific model/dataset is not the same as a generic tokenization change across versions." + "reason": "One is a tokenizer/NER padding crash; the other is a dtype mismatch in inference. Different components and root causes." }, { "left": "issue:43873", "right": "issue:44756", "accept": false, - "reason": "One is quantization offload behavior, the other is mmap-induced OOM on Strix Halo; same general area, different defect." + "reason": "Both mention offloading/memory, but one is quantization/offload behavior and the other is mmap OOM on Strix Halo. Not the same bug." }, { - "left": "issue:44186", - "right": "issue:45072", + "left": "issue:43526", + "right": "issue:43881", "accept": false, - "reason": "Tokenizer padding/truncation crash and bfloat16 dtype mismatch are unrelated bugs." + "reason": "BeitImageProcessorFast label reduction is unrelated to glm-4v-9b model loading failure." }, { "left": "issue:43577", "right": "issue:43708", "accept": false, - "reason": "Model dtype loading regression and Trainer resume/max_steps calculation are different subsystems." + "reason": "Blip2 dtype persistence and Trainer resume/max_steps are unrelated issues in different code paths." }, { "left": "issue:44387", "right": "issue:45704", "accept": false, - "reason": "Int4 CUDA memory regression and apex RMSNorm memory leak are separate memory issues with different causes." + "reason": "Both are memory-related, but int4 quantization OOM and apex RMSNorm leak are different causes and fixes." }, { "left": "issue:42617", "right": "issue:42915", "accept": false, - "reason": "3d_parallel.py failure and Qwen3Moe FineGrainedFP8Config failure are distinct." + "reason": "A 3d_parallel.py launch problem and a Qwen3Moe FP8 failure are different model/runtime bugs." 
}, { "left": "issue:44877", "right": "issue:44991", "accept": false, - "reason": "Strict config loading and tokenizer loading regressions are not the same bug." - }, - { - "left": "issue:44479", - "right": "issue:44918", - "accept": false, - "reason": "Video input regression and TRL SFT embedding unpacking failure are unrelated multimodal bugs." + "reason": "Strict config loading and tokenizer loading are separate failures affecting different artifacts." }, { "left": "issue:43450", "right": "issue:45701", "accept": false, - "reason": "Batched video shape bug vs tokenizer behavior change; no shared code-path failure." + "reason": "Video processor batch-shape regression and tokenization-version changes are unrelated." }, { "left": "issue:36331", "right": "issue:42915", "accept": false, - "reason": "CustomTrainer compute_loss API breakage is unrelated to Qwen3Moe FP8 loading." + "reason": "CustomTrainer compute_loss signature mismatch is unrelated to Qwen3Moe FP8 loading." }, { "left": "issue:43550", "right": "issue:43720", "accept": false, - "reason": "Torch.compile/SDPA bug and packed-weight accelerate loading bug do not describe the same failure." + "reason": "torch.compile+SDPA failure in Bamba and packed-weights unpacking in BitNet are different code paths." + }, + { + "left": "issue:36331", + "right": "issue:42617", + "accept": false, + "reason": "Trainer compute_loss API break and 3d_parallel.py failure are unrelated." }, { "left": "issue:44610", "right": "issue:44898", "accept": false, - "reason": "Both are image-resolution-related, but one is a processor/model size mismatch and the other is interpolate_pos_encoding behavior." + "reason": "Both concern image sizing, but OmDet-Turbo and Perceiver have different model/processor expectations and distinct bugs." }, { - "left": "issue:36331", - "right": "issue:42617", + "left": "issue:44479", + "right": "issue:44918", "accept": false, - "reason": "Trainer API mismatch and 3d_parallel runtime failure are unrelated." 
+ "reason": "Video-input regression and Qwen3.5 embedding unpacking with TRL SFT are unrelated." }, { "left": "issue:44186", "right": "issue:44610", "accept": false, - "reason": "Tokenizer NER padding crash and OmDet-Turbo image size mismatch are different model/component bugs." + "reason": "Tokenizer NER crash and image processor resolution mismatch are different subsystems and failures." }, { "left": "issue:43316", "right": "issue:44913", "accept": false, - "reason": "Config API discrepancy and GPTNeoX rotary_pct reload regression are different configuration issues." + "reason": "Gemma3TextConfig API discrepancy and GPTNeoX rotary_pct reload issue are unrelated config bugs." }, { "left": "issue:44671", "right": "issue:44743", "accept": false, - "reason": "CamemBERT masked LM predictions and Qwen3.5 recurrent state reset are unrelated model-specific bugs." + "reason": "CamemBERT masked-LM predictions and Qwen3.5 recurrent-state reset are unrelated model behaviors." }, { "left": "issue:42915", "right": "issue:43317", "accept": false, - "reason": "FineGrainedFP8Config failure and dequantized model offload failure are different loading problems." + "reason": "Qwen3Moe FP8 failure and dequantized-model device_map offload failure are different loading problems." }, { "left": "issue:43295", "right": "issue:45701", "accept": false, - "reason": "A specific regression with processor.tokenizer/images is not the same as a broad tokenization-version change." + "reason": "Processor.tokenizer regression and tokenization-version changes are not the same underlying issue." }, { "left": "issue:43550", "right": "issue:43606", "accept": false, - "reason": "SDPA/torch.compile failure and CPU offload device mismatch are different runtime paths." + "reason": "Bamba SDPA/torch.compile bug and Bark CPU-offload device mismatch are unrelated." 
}, { "left": "issue:41720", "right": "issue:42617", "accept": false, - "reason": "Both are runtime failures, but one is Qwen3 device mapping and the other is 3d_parallel execution; not the same bug." + "reason": "Qwen3 auto device mapping cuda assert and 3d_parallel.py launch failure do not share a concrete root cause." }, { "left": "issue:43531", "right": "issue:43572", "accept": false, - "reason": "Sliding window behavior in Qwen3-MoE and a missing pad_token_idx in StableLmConfig are unrelated." + "reason": "Qwen3-MoE sliding_window behavior and StableLmConfig pad_token_idx regression are separate issues." }, { "left": "issue:41720", "right": "issue:45161", "accept": false, - "reason": "Both mention model parallelism, but auto device mapping CUDA assert and TP-only GPT-OSS MoE failure are distinct code paths." + "reason": "Both involve MoE/device parallelism, but they target different models and distinct failures." }, { "left": "issue:44292", "right": "issue:44479", "accept": false, - "reason": "NVFP4 model execution and video-input regression are unrelated." + "reason": "Qwen-3-8B-NVFP4 runtime error and v5.3.0 video regression are unrelated." }, { "left": "issue:43388", "right": "issue:45059", "accept": false, - "reason": "Metric gathering label truncation and SAM3 text/bbox behavior are different bugs." + "reason": "Metric-gathering label truncation and SAM3 text/box odd behavior are different bugs." }, { "left": "issue:44186", "right": "issue:44361", "accept": false, - "reason": "Two tokenizer crashes, but on different tokenizer classes and different failure modes." + "reason": "LayoutLMv2Tokenizer NER/padding crash and MLukeTokenizer task AttributeError are separate tokenizer issues." }, { "left": "issue:43540", "right": "issue:43782", "accept": false, - "reason": "Video processing ValueError and Qwen3VL weight_only loading error are separate failures." 
+ "reason": "Qwen3OmniMoe video-processing ValueError and Qwen3VL weight_only load error are different code paths." }, { "left": "issue:42915", "right": "issue:43322", "accept": false, - "reason": "Qwen3Moe FP8 config failure and Llava Next segmentation fault are not the same bug." + "reason": "Qwen3Moe FP8 config failure and Llava Next segfault are unrelated model-loading bugs." + }, + { + "left": "issue:36010", + "right": "issue:42617", + "accept": false, + "reason": "GenerationMixin import error is unrelated to a 3d_parallel.py execution failure." }, { "left": "issue:41720", "right": "issue:43873", "accept": false, - "reason": "Device-map CUDA assert and quantization offloading misbehavior are related only at a high level, not the same concrete defect." + "reason": "Auto device-map cuda asserts and quantization/offloading issues are only loosely related, not the same bug." }, { - "left": "issue:36010", - "right": "issue:42617", + "left": "issue:45406", + "right": "issue:45464", + "accept": false, + "reason": "Gemma4Processor missing _tokenizer in serve and Qwen3.5 streaming chat/completions failure are different regressions." + }, + { + "left": "issue:36331", + "right": "issue:41720", + "accept": false, + "reason": "Trainer API signature mismatch and Qwen3 auto device mapping failure are unrelated." + }, + { + "left": "issue:43421", + "right": "issue:45701", + "accept": false, + "reason": "Runtime special-token post-processor updates and tokenization-version changes are not the same defect." + }, + { + "left": "issue:44492", + "right": "issue:44509", "accept": false, - "reason": "ImportError for GenerationMixin and 3d_parallel execution failure are entirely unrelated." + "reason": "A cache-strategy typo and outdated pipeline docs are separate documentation/config issues." 
} ] }, @@ -3897,11 +3928,12 @@ "issue:34567", "issue:34634", "issue:35707", - "issue:36331", + "issue:39692", "issue:41720", "issue:42886", "issue:43381", "issue:43421", + "issue:43525", "issue:43531", "issue:43550", "issue:43606", @@ -3918,11 +3950,10 @@ "issue:44186", "issue:44368", "issue:44451", - "issue:44492", - "issue:44509", "issue:44514", "issue:44545", "issue:44589", + "issue:44610", "issue:44625", "issue:44671", "issue:44843", @@ -3933,22 +3964,17 @@ "issue:44991", "issue:45042", "issue:45072", + "issue:45081", + "issue:45356", "issue:45357", "issue:45381", - "issue:45406", - "issue:45464", "issue:45479", - "issue:45701", "issue:45704" ], "soft_pairs": [ - "issue:45406|issue:45464", - "issue:36331|issue:41720", - "issue:43421|issue:45701", - "issue:44492|issue:44509", "issue:33357|issue:35707", - "issue:44514|issue:44545", "issue:30064|issue:34634", + "issue:44514|issue:44545", "issue:41720|issue:43746", "issue:43606|issue:43720", "issue:43873|issue:45704", @@ -3959,228 +3985,231 @@ "issue:43606|issue:43653", "issue:34567|issue:35707", "issue:34634|issue:41720", - "issue:44062|issue:44451", - "issue:42886|issue:44843", "issue:44451|issue:44991", + "issue:44062|issue:44451", "issue:43782|issue:45381", + "issue:42886|issue:44843", "issue:44077|issue:44625", - "issue:43421|issue:43653", "issue:44898|issue:45072", "issue:44987|issue:45042", - "issue:44918|issue:44936", + "issue:43421|issue:43653", "issue:43531|issue:44589", + "issue:39692|issue:44610", + "issue:44918|issue:44936", "issue:43381|issue:43844", + "issue:43421|issue:44186", "issue:43994|issue:44671", - "issue:43421|issue:44186" + "issue:43525|issue:43531", + "issue:45081|issue:45356" ], - "prepared_review_unit_hash": "043b197a451b8ce55b5142672bf4ba372b711ef1da88bc37e83d9fdf2fe04f16", + "prepared_review_unit_hash": "a04e4cd23dbc6a9f969972f173dc21d0c7aeda452d877d823c4450e4e3d8a668", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - 
"estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { - "node_count": 48, - "item_count": 48, - "soft_pair_count": 30, - "serialized_chars": 36787, - "estimated_input_tokens": 9197, - "estimated_eval_tokens": 18650 + "node_count": 47, + "item_count": 47, + "soft_pair_count": 29, + "serialized_chars": 36093, + "estimated_input_tokens": 9024, + "estimated_eval_tokens": 18304 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Most pairs are superficially similar but describe different bugs or features. The only clear duplicate is the Qwen2_5_VLProcessor.apply_chat_template batched-input crash pair.", - "confidence": 0.93, - "canonical_issue_reason": "issue:44514 is the better canonical representative for the only true duplicate pair: it is the earlier report, already has inbound references, and describes the crash clearly enough to anchor the duplicate thread.", + "summary": "This cluster is mostly heterogeneous: many items are unrelated model/tokenizer/training bugs that only share broad subsystem keywords. 
I only see two clear duplicate pairs: the Qwen2.5-VL batched `padding=False` crash, and the HF_HUB_OFFLINE / `_patch_mistral_regex` tokenizer failure.", + "confidence": 0.84, + "canonical_issue_reason": "issue:44843 is the clearest root-cause report for the HF_HUB_OFFLINE tokenizer regression, explicitly naming the failing `_patch_mistral_regex` path; issue:42886 is the broader symptom report for the same bug.", "canonical_pr_reason": null, - "best_issue_reason": "issue:44514 is the strongest issue to keep as the representative record because it is the earliest, has more contextual traffic, and matches issue:44545 exactly on the underlying failure mode.", + "best_issue_reason": "issue:44843 is the best representative issue in this set because it pinpoints the offending code path and environment trigger, making it the most actionable canonical bug report.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:45406", - "right": "issue:45464", - "accept": false, - "reason": "Different models and different failure modes: Gemma4Processor _tokenizer attribute error vs Qwen3.5 streaming chat/completions failure." - }, - { - "left": "issue:36331", - "right": "issue:41720", - "accept": false, - "reason": "Unrelated bugs: trainer API keyword mismatch versus Qwen3 auto device-mapping CUDA assert." - }, - { - "left": "issue:43421", - "right": "issue:45701", - "accept": false, - "reason": "One is a specific runtime post-processor update bug; the other is a broad report that tokenization changed across versions." - }, - { - "left": "issue:44492", - "right": "issue:44509", + "left": "issue:33357", + "right": "issue:35707", "accept": false, - "reason": "A typo/docs issue is not the same underlying bug as removed pipeline-task documentation." + "reason": "Different failures: MacOS bus error with a CLIP model vs progressive generation with `inputs_embeds`/`past_key_values`." 
}, { - "left": "issue:33357", - "right": "issue:35707", + "left": "issue:30064", + "right": "issue:34634", "accept": false, - "reason": "MacOS bus error on a CLIP model is unrelated to progressive generation with inputs_embeds and past_key_values." + "reason": "Unrelated areas: void segmentation-map processing vs BarkProcessor `voice_preset` behavior." }, { "left": "issue:44514", "right": "issue:44545", "accept": true, - "reason": "Same title and same crash scenario for Qwen2_5_VLProcessor.apply_chat_template with batched input and padding=False." - }, - { - "left": "issue:30064", - "right": "issue:34634", - "accept": false, - "reason": "Image segmentation-map processing and BarkProcessor voice_preset are unrelated components and bugs." + "reason": "Same title and same concrete crash: `Qwen2_5_VLProcessor.apply_chat_template` on batched input with `padding=False`." }, { "left": "issue:41720", "right": "issue:43746", "accept": false, - "reason": "CUDA device-mapping assert and PEFT local-checkpoint loading failure are different code paths." + "reason": "Different root causes and code paths: Qwen3 auto device mapping CUDA assert vs GraniteSpeech PEFT local checkpoint loading." }, { "left": "issue:43606", "right": "issue:43720", "accept": false, - "reason": "CPU offload device mismatch is not the same as packed-weight unpacking during accelerate loading." + "reason": "Both are CI/offload-related, but one is a device mismatch under CPU offload and the other is packed-weight unpacking during accelerate loading; not the same bug." }, { "left": "issue:43873", "right": "issue:45704", "accept": false, - "reason": "Quantization/offloading behavior and an apex RMSNorm memory leak are distinct problems." + "reason": "Quantization/offloading behavior vs a T5 apex RMSNorm memory leak are separate issues." 
}, { "left": "issue:43550", "right": "issue:43653", "accept": false, - "reason": "Torch.compile/SDPA failure in Bamba-9B-v2 is unrelated to a tokenizer special-token registration bug." + "reason": "Different failures: torch.compile/SDPA on Bamba vs BigBirdTokenizer special-token registration." }, { "left": "issue:44625", "right": "issue:45479", "accept": false, - "reason": "Num-label propagation into config and degenerate zero-loss classification are different sequence-classification issues." + "reason": "Both touch labels, but one is config propagation and the other is a zero-loss classification bug; not the same concrete problem." }, { "left": "issue:44368", "right": "issue:45357", "accept": false, - "reason": "A LoRA warning about tie_word_embeddings is not the same as saving incorrect visual encoder keys." + "reason": "Different symptoms and likely fixes: tied-embeddings warning vs incorrect visual encoder keys in save/load." }, { "left": "issue:43421", "right": "issue:43927", "accept": false, - "reason": "Runtime special-token post-processing updates and save/load token-id loss are different tokenizer/config bugs." + "reason": "Tokenizer backend post-processor updates and DiaConfig custom token-ID persistence are different bug classes." }, { - "left": "issue:44898", - "right": "issue:45072", + "left": "issue:43606", + "right": "issue:43653", "accept": false, - "reason": "Perceiver image-size handling and bfloat16 dtype mismatches are unrelated." + "reason": "Device mismatch during offload vs tokenizer special-token registration; no shared underlying code path." }, { - "left": "issue:44987", - "right": "issue:45042", + "left": "issue:34567", + "right": "issue:35707", "accept": false, - "reason": "Model-loading failure for a specific repo and PIL image processors requiring torchvision are not the same bug." + "reason": "Trainer token-count tracking vs generation with embeddings/past KV are unrelated." 
}, { - "left": "issue:44918", - "right": "issue:44936", + "left": "issue:34634", + "right": "issue:41720", "accept": false, - "reason": "TRL SFT unpacking with Qwen3.5 embeddings and trainer evaluate-after-train failure do not share a concrete code-path bug." + "reason": "BarkProcessor voice preset bug vs Qwen3 auto device mapping CUDA assert." }, { - "left": "issue:43531", - "right": "issue:44589", + "left": "issue:44451", + "right": "issue:44991", "accept": false, - "reason": "Sliding-window handling for Qwen3-MoE is unrelated to missing Float8 storage object errors." + "reason": "Both are tokenizer-loading regressions, but for different models and different failure modes; too broad to merge." }, { - "left": "issue:43381", - "right": "issue:43844", + "left": "issue:44062", + "right": "issue:44451", "accept": false, - "reason": "Eval-mode gradient checkpointing restriction and ZeRO-3 gradient growth are different training issues." + "reason": "AddedToken constructor argument conflict is not the same bug as failing to load ScandiBERT." }, { - "left": "issue:43994", - "right": "issue:44671", + "left": "issue:43782", + "right": "issue:45381", "accept": false, - "reason": "Two different models both having masked-LM prediction issues is too broad to treat as one underlying bug." + "reason": "Different Qwen multimodal issues: `from_pretrained` weight-only error vs video vision-position-id bug." + }, + { + "left": "issue:42886", + "right": "issue:44843", + "accept": true, + "reason": "Same underlying offline-mode failure in tokenizer loading; 44843 identifies the specific `_patch_mistral_regex`/`model_info()` cause." }, { "left": "issue:44077", "right": "issue:44625", "accept": false, - "reason": "Optional post_init policy and num_labels propagation are unrelated." 
- }, - { - "left": "issue:43421", - "right": "issue:43653", - "accept": false, - "reason": "Both mention tokenizer internals, but one is about runtime special-token updates and the other about mask-token registration/empty decode output." + "reason": "PatchTSMixer post-init policy issue vs Qwen3.5 `num_labels` propagation; unrelated." }, { "left": "issue:44898", "right": "issue:45072", "accept": false, - "reason": "Different failure types: interpolation-related image classification versus dtype mismatch during inference." + "reason": "Different model-specific inference bugs: Perceiver resolution handling vs dtype mismatch in SwitchTransformers/TimmWrapperModel." }, { "left": "issue:44987", "right": "issue:45042", "accept": false, - "reason": "No shared concrete bug path; one is a model loading regression, the other a backend dependency requirement." + "reason": "Model-loading regression vs PIL image processors incorrectly requiring torchvision; separate problems." }, { - "left": "issue:44918", - "right": "issue:44936", + "left": "issue:43421", + "right": "issue:43653", "accept": false, - "reason": "Trainer embedding unpacking and post-training evaluation failure are not the same issue." + "reason": "Special-token post-processor refresh and BigBirdTokenizer mask-token registration are not the same bug." }, { "left": "issue:43531", "right": "issue:44589", "accept": false, - "reason": "Different subsystems and symptoms." + "reason": "Sliding-window handling in Qwen3-MoE is unrelated to missing Float8 storage support." + }, + { + "left": "issue:39692", + "right": "issue:44610", + "accept": false, + "reason": "SigLIP2 docs/model mismatch and OmDet-Turbo processor size mismatch are different issues." + }, + { + "left": "issue:44918", + "right": "issue:44936", + "accept": false, + "reason": "TRL embedding unpacking during SFT is not the same as `trainer.evaluate()` failing after training." 
}, { "left": "issue:43381", "right": "issue:43844", "accept": false, - "reason": "Different training mechanisms and root causes." + "reason": "Gradient checkpointing eval-mode restriction is unrelated to DeepSpeed ZeRO-3 gradient growth." + }, + { + "left": "issue:43421", + "right": "issue:44186", + "accept": false, + "reason": "Runtime post-processor special-token updates vs LayoutLMv2Tokenizer padding/truncation crashes are different bugs." }, { "left": "issue:43994", "right": "issue:44671", "accept": false, - "reason": "Same broad area, but not the same concrete bug or fix." + "reason": "Different models and outputs: SigLIP2 nonsensical results vs CamemBERT masked-LM prediction regression." }, { - "left": "issue:43421", - "right": "issue:44186", + "left": "issue:43525", + "right": "issue:43531", + "accept": false, + "reason": "Missing `pad_token_id` on Llama4Config is unrelated to the Qwen3-MoE `sliding_window` bug." + }, + { + "left": "issue:45081", + "right": "issue:45356", "accept": false, - "reason": "Special-token runtime updates and LayoutLMv2 NER/padding crashes are different tokenizer behaviors." + "reason": "Both mention `fix_mistral_regex`, but the reported failures differ enough that they do not clearly describe the same concrete bug." } ] }, "evaluator_result": { "accept": true, - "feedback": "The analysis is conservative and grounded in the packet. The one accepted pair is an exact-title duplicate with matching crash context, and the rest of the soft-edge rejections stay within the evidence available from titles/body metadata. The canonical issue choice is also reasonable, though the traffic-based phrasing could have been a bit tighter." + "feedback": "The summary is grounded in the packet and stays conservative. The two accepted duplicates are well supported by near-identical titles and matching failure modes, and the other pairwise rejections do not overclaim shared root causes. The heterogeneity summary is also consistent with the listed issues." 
}, "evaluator_used": true, "retried": false, @@ -4210,7 +4239,6 @@ "issue:43335", "issue:43381", "issue:43421", - "issue:43525", "issue:43531", "issue:43540", "issue:43550", @@ -4218,11 +4246,13 @@ "issue:43596", "issue:43618", "issue:43643", + "issue:43644", "issue:43653", "issue:43697", "issue:43716", "issue:43720", "issue:43746", + "issue:43749", "issue:43756", "issue:43818", "issue:43819", @@ -4234,7 +4264,6 @@ "issue:44220", "issue:44448", "issue:44488", - "issue:44610", "issue:44661", "issue:44671", "issue:44811", @@ -4244,30 +4273,26 @@ "issue:45072", "issue:45081", "issue:45183", - "issue:45356", "issue:45381", "issue:45479", "issue:45685", "issue:45704" ], "soft_pairs": [ - "issue:39692|issue:44610", - "issue:43525|issue:43531", - "issue:45081|issue:45356", "issue:43596|issue:44220", - "issue:43618|issue:43697", "issue:41720|issue:45704", + "issue:43618|issue:43697", "issue:43381|issue:45704", - "issue:43746|issue:45381", "issue:39692|issue:43873", - "issue:45183|issue:45479", + "issue:43746|issue:45381", + "issue:40444|issue:43746", "issue:43746|issue:43873", "issue:44220|issue:44661", - "issue:40444|issue:43746", + "issue:45183|issue:45479", "issue:43540|issue:44008", - "issue:43716|issue:44008", "issue:43335|issue:43756", "issue:43720|issue:43819", + "issue:43716|issue:44008", "issue:43653|issue:43720", "issue:30064|issue:36010", "issue:44062|issue:44488", @@ -4281,222 +4306,211 @@ "issue:43421|issue:44811", "issue:43550|issue:43927", "issue:43540|issue:45381", - "issue:44448|issue:44671" + "issue:44448|issue:44671", + "issue:43644|issue:43749" ], - "prepared_review_unit_hash": "0ea498aef87e85ac31c0d62fc738a4ef172f724424619e88ec07c850947a453d", + "prepared_review_unit_hash": "9365df409774735a6d4f6359a5c3ef00189055b778b925ca06e665ff222a4807", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, 
+ "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { - "node_count": 48, - "item_count": 48, - "soft_pair_count": 31, - "serialized_chars": 37415, - "estimated_input_tokens": 9354, - "estimated_eval_tokens": 18964 + "node_count": 47, + "item_count": 47, + "soft_pair_count": 29, + "serialized_chars": 36148, + "estimated_input_tokens": 9037, + "estimated_eval_tokens": 18330 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is highly heterogeneous: the items span unrelated bugs in tokenizers, multimodal processors, configs, quantization/offloading, and model-specific regressions. None of the soft pairs look like the same underlying defect.", - "confidence": 0.92, - "canonical_issue_reason": "No clear canonical issue: the set is not one duplicate family but many unrelated bug reports across different subsystems and models.", + "summary": "This cluster is a loose mix of unrelated Transformers bug reports; the soft-similarity links are mostly false positives. The most representative issue is the highly discussed FSDP loading regression.", + "confidence": 0.78, + "canonical_issue_reason": "issue:43749 has the strongest triage signal (high discussion and inbound references) and is a broad, actionable infrastructure regression compared with the narrower one-off bugs in the set.", "canonical_pr_reason": null, - "best_issue_reason": "No single issue is a strong global representative; the cluster is too broad and inconsistent to treat as one bug thread.", + "best_issue_reason": "issue:43749 is the best single issue to anchor the cluster because it is open, well-discussed, and broadly impactful.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:39692", - "right": "issue:44610", - "accept": false, - "reason": "Different bugs: SigLIP2 doc/example mismatch plus quantization failure vs OmDet processor/model image-size mismatch." 
- }, - { - "left": "issue:43525", - "right": "issue:43531", - "accept": false, - "reason": "Separate config bugs: missing pad_token_id on Llama4Config vs incorrect sparse-layer creation in SwitchTransformers." - }, - { - "left": "issue:45081", - "right": "issue:45356", + "left": "issue:43596", + "right": "issue:44220", "accept": false, - "reason": "Both touch Mistral regex handling, but one is a backend_tokenizer AttributeError and the other is a Kimi-K2.5 codec/regression issue; not the same concrete bug." + "reason": "Different subsystems and failures: DeepSpeed/BertModel init vs audio feature extraction." }, { - "left": "issue:43596", - "right": "issue:44220", + "left": "issue:41720", + "right": "issue:45704", "accept": false, - "reason": "Unrelated failures: ZeRO3/BertModel zero-size indexing vs _torch_extract_fbank_features audio feature extraction." + "reason": "Qwen3 device-mapping assert is unrelated to T5/apex RMSNorm memory leak." }, { "left": "issue:43618", "right": "issue:43697", "accept": false, - "reason": "Different model regressions: CLIPOutput attentions missing vs RTDetrV2 output differences between v4 and v5." + "reason": "CLIP attentions field regression and RTDetr output drift are separate model bugs." }, { - "left": "issue:41720", + "left": "issue:43381", "right": "issue:45704", "accept": false, - "reason": "Qwen3 auto-device-map CUDA assert is unrelated to T5 silently using apex.FusedRMSNorm with a memory leak." + "reason": "Gradient checkpointing eval-mode limitation is unrelated to the T5 apex leak." }, { - "left": "issue:43381", - "right": "issue:45704", + "left": "issue:39692", + "right": "issue:43873", "accept": false, - "reason": "Gradient-checkpointing eval-mode behavior is a different code path from the T5/apex RMSNorm memory leak." + "reason": "Docs example/model mismatch is not the same bug as quantization offloading behavior." 
}, { "left": "issue:43746", "right": "issue:45381", "accept": false, - "reason": "Different model families and symptoms: GraniteSpeech PEFT/local checkpoint loading vs Qwen2.5-VL video position IDs." + "reason": "PEFT checkpoint loading for GraniteSpeech is unrelated to Qwen2.5-VL video position IDs." }, { - "left": "issue:39692", - "right": "issue:43873", - "accept": false, - "reason": "SigLIP2 example/model mismatch is unrelated to the quantization/offloading bug in 43873." - }, - { - "left": "issue:45183", - "right": "issue:45479", + "left": "issue:40444", + "right": "issue:43746", "accept": false, - "reason": "XOR validation error messages across models are separate from the single-label-classification zero-loss issue." + "reason": "Multi-image iterable dataset finetuning failure is a different code path from checkpoint loading." }, { "left": "issue:43746", "right": "issue:43873", "accept": false, - "reason": "GraniteSpeech checkpoint loading is not the same underlying problem as offloading with quantization." + "reason": "PEFT local checkpoint loading and quantization offloading are separate loading problems." }, { "left": "issue:44220", "right": "issue:44661", "accept": false, - "reason": "Audio feature extraction vs tokenizer-mapping codegen failure are distinct issues." + "reason": "Audio fbank extraction and tokenizer-mapping model registration are unrelated." }, { - "left": "issue:40444", - "right": "issue:43746", + "left": "issue:45183", + "right": "issue:45479", "accept": false, - "reason": "Multi-image IterableDataset fine-tuning failure is unrelated to GraniteSpeech local checkpoint loading." + "reason": "Generic input validation messaging is not the same as the single-label-classification zero-loss bug." }, { "left": "issue:43540", "right": "issue:44008", "accept": false, - "reason": "Qwen3OmniMoe video-processing ValueError and Gemma3n variable-name collision are different bugs." 
- }, - { - "left": "issue:43716", - "right": "issue:44008", - "accept": false, - "reason": "Mistral3 dtype mismatch in the image preprocessor is unrelated to Gemma3n's audio_mel_mask AttributeError." + "reason": "Qwen3OmniMoe video processing error is unrelated to Gemma3n variable-name collision." }, { "left": "issue:43335", "right": "issue:43756", "accept": false, - "reason": "Sparse-layer creation in SwitchTransformers and Smollm3 RoPE-layer dropping are different architecture/config issues." + "reason": "SwitchTransformers sparse-layer creation and Smollm3 RoPE-layer drop are unrelated model-setup bugs." }, { "left": "issue:43720", "right": "issue:43819", "accept": false, - "reason": "Packed-weight loading in BitNet is not the same as DAC.from_latents missing STE equivalence." + "reason": "BitNet packed-weight loading and DAC STE mismatch affect different models and mechanics." + }, + { + "left": "issue:43716", + "right": "issue:44008", + "accept": false, + "reason": "Blip2 dtype mismatch is unrelated to Gemma3n's audio mask attribute error." }, { "left": "issue:43653", "right": "issue:43720", "accept": false, - "reason": "BigBirdTokenizer special-token registration bug is unrelated to BitNet accelerate-loading unpacking." + "reason": "Tokenizer special-token registration and BitNet accelerate loading are not the same defect." }, { "left": "issue:30064", "right": "issue:36010", "accept": false, - "reason": "Void segmentation map processing and the GenerationMixin import error are unrelated." + "reason": "Void segmentation-map processing and GenerationMixin import failure are unrelated." }, { "left": "issue:44062", "right": "issue:44488", "accept": false, - "reason": "Duplicate special-token keyword handling is unrelated to loading cjvt/sleng-bert." + "reason": "AddedToken keyword collision is not the same as failing to load a specific model repo." 
}, { "left": "issue:44843", "right": "issue:45081", "accept": false, - "reason": "Both involve tokenizer internals, but one is offline model_info behavior in AutoTokenizer while the other is a Mistral regex/backend_tokenizer crash." + "reason": "Both involve _patch_mistral_regex, but one is offline model_info access and the other is a backend_tokenizer attribute crash; different failures." }, { "left": "issue:43322", "right": "issue:45685", "accept": false, - "reason": "Llava Next segmentation fault and MoE MPS histogram error are different runtime/backend problems." + "reason": "Llava Next segmentation fault and MPS histogram support error are unrelated." }, { "left": "issue:43577", "right": "issue:43818", "accept": false, - "reason": "Blip2 dtype propagation and Video-LLaVA temporal-attention/weight-sharing issues are distinct multimodal bugs." + "reason": "Blip2 dtype propagation and Video-LLaVA temporal-attention/weight-sharing are distinct issues." }, { "left": "issue:44186", "right": "issue:44898", "accept": false, - "reason": "LayoutLMv2Tokenizer NER/padding crash is unrelated to Perceiver non-default-resolution handling." + "reason": "LayoutLMv2 tokenizer crashes and Perceiver interpolation failures are different model/tokenizer paths." }, { "left": "issue:43550", "right": "issue:45072", "accept": false, - "reason": "Bamba torch.compile+SDPA failure and SwitchTransformers/TimmWrapperModel bf16 dtype mismatches are separate issues." + "reason": "torch.compile+SDPA on Bamba and bfloat16 dtype mismatch on other models are unrelated." }, { "left": "issue:43531", "right": "issue:43643", "accept": false, - "reason": "SwitchTransformers sparse-layer config bug is unrelated to AutoConfig trust_remote_code missing fields." + "reason": "Qwen3-MoE sliding-window behavior and AutoConfig strict-field loss are different bugs." 
}, { "left": "issue:43643", "right": "issue:44877", "accept": false, - "reason": "Both are config-loading related, but one is missing fields from trust_remote_code and the other is strict granite_speech config rejection." + "reason": "Both are config-loading issues, but one drops fields under trust_remote_code and the other rejects granite_speech config under strict validation." }, { "left": "issue:43421", "right": "issue:44811", "accept": false, - "reason": "Runtime post-processor updates for special tokens are different from Whisper batch_decode skip_special_tokens behavior." + "reason": "Runtime special-token/post-processor sync and Whisper batch_decode skip_special_tokens are separate tokenizer/processor behaviors." }, { "left": "issue:43550", "right": "issue:43927", "accept": false, - "reason": "Bamba torch.compile/SDPA failure is unrelated to DiaConfig losing custom token IDs after save/load." + "reason": "Bamba torch.compile failure is unrelated to DiaConfig losing custom token IDs on save/load." }, { "left": "issue:43540", "right": "issue:45381", "accept": false, - "reason": "Both are video-related, but they affect different Qwen model families with different failures and likely different fixes." + "reason": "Video input processing failure in Qwen3OmniMoe is unrelated to Qwen2.5-VL vision_position_ids regression." }, { "left": "issue:44448", "right": "issue:44671", "accept": false, - "reason": "Pegasus output regression across versions is unrelated to CamemBERT masked-LM prediction changes." + "reason": "Both are v5 regressions, but Pegasus output drift and CamemBERT masked-LM errors are different model-specific bugs." + }, + { + "left": "issue:43644", + "right": "issue:43749", + "accept": false, + "reason": "Non-persistent buffer corruption and FSDP CPU RAM-efficient loading are different regressions in different loading paths." 
} ] }, @@ -4532,7 +4546,6 @@ "issue:43577", "issue:43582", "issue:43606", - "issue:43644", "issue:43650", "issue:43653", "issue:43697", @@ -4570,7 +4583,6 @@ "issue:45701" ], "soft_pairs": [ - "issue:43644|issue:43749", "issue:43782|issue:44977", "issue:43653|issue:43819", "issue:44561|issue:45020", @@ -4587,12 +4599,12 @@ "issue:44805|issue:44936", "issue:43606|issue:45072", "issue:42175|issue:45081", + "issue:34634|issue:36010", "issue:43650|issue:43906", "issue:43782|issue:45325", - "issue:34634|issue:36010", - "issue:45081|issue:45406", "issue:44060|issue:44368", "issue:44610|issue:45072", + "issue:45081|issue:45406", "issue:43749|issue:44291", "issue:43720|issue:45072", "issue:43526|issue:43697", @@ -4600,208 +4612,202 @@ "issue:43526|issue:44554", "issue:43262|issue:43819" ], - "prepared_review_unit_hash": "15ca884b721cd716b6517b17ddf9f21116be90b63fc066fd62baeacbd79b7140", + "prepared_review_unit_hash": "a16e9142f8b79a6cd75c4c344a5e185149a6168911730f07a7ceca8c8fe3a7fb", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { - "node_count": 48, - "item_count": 48, - "soft_pair_count": 29, - "serialized_chars": 36569, - "estimated_input_tokens": 9143, - "estimated_eval_tokens": 18542 + "node_count": 47, + "item_count": 47, + "soft_pair_count": 28, + "serialized_chars": 35714, + "estimated_input_tokens": 8929, + "estimated_eval_tokens": 18114 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The candidate pairs are mostly unrelated regressions that only share a broad subsystem, version, or model-family theme. 
No soft edge looks like the same underlying bug/change, so this is not a duplicate cluster.", - "confidence": 0.96, - "canonical_issue_reason": "No clear canonical issue: the items span unrelated processor, tokenizer, loading, training, and documentation problems, and none of the candidate pairs share the same concrete failure mode.", + "summary": "The set is highly heterogeneous: most items are unrelated model/processor/trainer regressions rather than true duplicates. No PRs are present. I selected the broad remote-code regression report as the best issue representative, but it is only an umbrella, not a true duplicate of the others.", + "confidence": 0.95, + "canonical_issue_reason": "issue:45020 is the broadest umbrella-style report in the set, covering recent-version breakages for remote_code models.", "canonical_pr_reason": null, - "best_issue_reason": "If a single representative issue is forced, #45020 is the broadest umbrella-style report (remote_code regressions), but it is still too general to serve as a duplicate target for the specific issues here.", + "best_issue_reason": "issue:45020 is the best top-level representative if a single issue must stand in for the cluster, but it should not be treated as a duplicate of the more specific bugs.", "best_pr_reason": null, "soft_edge_verdicts": [ - { - "left": "issue:43644", - "right": "issue:43749", - "accept": false, - "reason": "Different problems: non-persistent buffer corruption vs FSDP CPU RAM-efficient loading." - }, { "left": "issue:43782", "right": "issue:44977", "accept": false, - "reason": "Different model paths and failures: from_pretrained weight_only error vs flash-attention generation issue." + "reason": "Different models and failure modes: from_pretrained weight_only loading error vs flash-attention generation issue." }, { "left": "issue:43653", "right": "issue:43819", "accept": false, - "reason": "Tokenizer special-token registration bug vs DAC latent/STE mismatch are unrelated." 
+ "reason": "Unrelated subsystems: tokenizer special-token registration vs DAC latent decoding/STE mismatch." }, { "left": "issue:44561", "right": "issue:45020", "accept": false, - "reason": "#44561 is a specific is_torch_fx_available breakage; #45020 is a broad remote_code regression report, not the same concrete bug." + "reason": "One is a specific API removal breakage; the other is a broad umbrella report about remote_code regressions." }, { "left": "issue:43825", "right": "issue:43994", "accept": false, - "reason": "One is a v5 pipeline error-message issue; the other is a model output correctness problem." + "reason": "Different problems: v5 error-message wording vs a model producing bad outputs with AutoModel/pipeline." }, { "left": "issue:43577", "right": "issue:43582", "accept": false, - "reason": "Different code paths: dtype propagation in BLIP2 vs Apple Silicon allocator warmup TypeError." + "reason": "Unrelated: BLIP2 dtype propagation vs Apple Silicon caching allocator TypeError." }, { "left": "issue:43550", "right": "issue:43819", "accept": false, - "reason": "Bamba torch.compile/SDPA failure is unrelated to DAC.from_latents missing STE behavior." + "reason": "Different code paths and models: torch.compile SDPA failure vs DAC forward/from_latents mismatch." }, { "left": "issue:43825", "right": "issue:43827", "accept": false, - "reason": "Both mention removed pipeline tasks, but one is runtime messaging and the other is documentation cleanup." + "reason": "Related theme, but one is runtime messaging and the other is docs cleanup; not the same bug/change." }, { "left": "issue:44821", "right": "issue:45084", "accept": false, - "reason": "AutoImageProcessor URL loading and template-node compilation are unrelated failures." + "reason": "Image processor URL loading issue vs template compilation error; no shared underlying defect." 
}, { "left": "issue:44485", "right": "issue:44509", "accept": false, - "reason": "GLM-5 RoPE implementation discussion vs docs still mentioning removed pipelines." + "reason": "GLM-5 RoPE discussion is unrelated to docs still mentioning removed pipeline tasks." }, { "left": "issue:41720", "right": "issue:44991", "accept": false, - "reason": "CUDA assert on Qwen3 auto device mapping vs tokenizer load failure for a different model." + "reason": "Different models and symptoms: auto device mapping CUDA assert vs tokenizer loading failure." }, { "left": "issue:44186", "right": "issue:44442", "accept": false, - "reason": "LayoutLMv2 tokenizer NER/padding crash vs FastSpeech2Conformer tokenizer load failure." + "reason": "Tokenizer crash on NER/padding vs AutoTokenizer failing to load a specific tokenizer class." }, { "left": "issue:30064", "right": "issue:36331", "accept": false, - "reason": "Image processor segmentation-map bug vs CustomTrainer compute_loss signature mismatch." + "reason": "Different layers of the stack: image segmentation-map processing vs trainer loss signature change." }, { "left": "issue:44671", "right": "issue:45701", "accept": false, - "reason": "CamemBERT MLM regression is not the same as a generic tokenization-version change report." + "reason": "CamemBERT masked-LM regression is not the same as a broad tokenization-change report." }, { "left": "issue:44805", "right": "issue:44936", "accept": false, - "reason": "Mask/index shape mismatch is a different failure than trainer.evaluate() after train()." + "reason": "Mask-shape IndexError and trainer evaluate-after-train failure are different bugs." }, { "left": "issue:43606", "right": "issue:45072", "accept": false, - "reason": "CPU offload device mismatch is unrelated to bfloat16 dtype mismatches." + "reason": "CPU offload device mismatch is unrelated to bfloat16 dtype mismatches in other models." 
}, { "left": "issue:42175", "right": "issue:45081", "accept": false, - "reason": "TensorFlow backend installation issue vs Mistral regex patch crash are unrelated." + "reason": "Backend dependency packaging issue vs Mistral tokenizer regex patch crash." + }, + { + "left": "issue:34634", + "right": "issue:36010", + "accept": false, + "reason": "BarkProcessor voice preset bug is unrelated to GenerationMixin import failure." }, { "left": "issue:43650", "right": "issue:43906", "accept": false, - "reason": "Generic data request vs isolated reproduction of another issue; no shared bug." + "reason": "Placeholder/empty issue title is not the same as an isolated reproduction of another bug." }, { "left": "issue:43782", "right": "issue:45325", "accept": false, - "reason": "Different Qwen-VL bugs: weight_only loading error vs RoPE position-id scaling regression." - }, - { - "left": "issue:34634", - "right": "issue:36010", - "accept": false, - "reason": "BarkProcessor voice preset issue and GenerationMixin import error are unrelated." - }, - { - "left": "issue:45081", - "right": "issue:45406", - "accept": false, - "reason": "Mistral tokenizer regex patch crash vs Gemma4Processor missing _tokenizer." + "reason": "Both are Qwen-related, but they describe different models and different rope/loading failures." }, { "left": "issue:44060", "right": "issue:44368", "accept": false, - "reason": "Both involve warnings about tied weights, but they concern different models and different underlying causes." + "reason": "Different warning causes: tied-weights bug across layers vs a LoRA fine-tuning config warning." }, { "left": "issue:44610", "right": "issue:45072", "accept": false, - "reason": "OmDet input-size mismatch is unrelated to dtype mismatches in inference." + "reason": "Processor size mismatch and bfloat16 dtype mismatch are unrelated." 
+ }, + { + "left": "issue:45081", + "right": "issue:45406", + "accept": false, + "reason": "Different tokenizer/processor classes and different AttributeError causes." }, { "left": "issue:43749", "right": "issue:44291", "accept": false, - "reason": "FSDP CPU RAM-efficient loading and init_empty_weights unexpected argument are related only loosely by loading, not the same bug." + "reason": "Both involve loading, but one is FSDP RAM-efficient loading and the other is init_empty_weights argument handling." }, { "left": "issue:43720", "right": "issue:45072", "accept": false, - "reason": "Packed-weight unloading during accelerate loading is unrelated to dtype mismatch warnings." + "reason": "Packed-weight accelerate loading bug is unrelated to dtype mismatch issues." }, { "left": "issue:43526", "right": "issue:43697", "accept": false, - "reason": "Label reduction bug in BeitImageProcessorFast vs RTDetrV2 output changes are different model/processor issues." + "reason": "Label reduction bug in an image processor is not the same as RTDetrV2 output changes." }, { "left": "issue:43582", "right": "issue:44857", "accept": false, - "reason": "Apple Silicon allocator TypeError vs CUDA AMP crash are different platforms and failures." + "reason": "Apple Silicon warmup TypeError vs CUDA AMP crash are different platform-specific failures." }, { "left": "issue:43526", "right": "issue:44554", "accept": false, - "reason": "Label array reduction bug does not match the MPS attention correctness issue." + "reason": "Image label reduction bug is unrelated to an MPS attention correctness issue." }, { "left": "issue:43262", "right": "issue:43819", "accept": false, - "reason": "Audio chat-template sampling-rate default bug is unrelated to DAC latent reconstruction mismatch." + "reason": "Audio sampling-rate default bug is unrelated to DAC latent reconstruction mismatch." 
} ] }, @@ -4880,257 +4886,212 @@ "issue:34634|issue:36331", "issue:43653|issue:44912", "issue:43452|issue:44991", - "issue:43638|issue:44936", "issue:43257|issue:43329", - "issue:44568|issue:45488", - "issue:43454|issue:43577", + "issue:43638|issue:44936", "issue:38175|issue:44610", + "issue:43454|issue:43577", + "issue:44568|issue:45488", + "issue:44589|issue:45042", "issue:43854|issue:44977", "issue:43257|issue:43696", - "issue:44589|issue:45042", "issue:43746|issue:44811", "issue:39692|issue:44912", "issue:36010|issue:43645", "issue:43720|issue:44610", "issue:43819|issue:43927", + "issue:43550|issue:44912", "issue:43749|issue:44756", "issue:44568|issue:45356", - "issue:43550|issue:44912", "issue:43329|issue:44479", "issue:43262|issue:44811", "issue:43531|issue:45042", "issue:36010|issue:36331", "issue:43257|issue:43931", "issue:44849|issue:45701", + "issue:43329|issue:43450", "issue:34567|issue:40990", "issue:43257|issue:43756", - "issue:43329|issue:43450", "issue:43697|issue:45357" ], - "prepared_review_unit_hash": "22c82564074036256695c1b6d124b01a4c513c1946aa2b5c5e920be9891e66bd", + "prepared_review_unit_hash": "ded0bbb03bb2c6d86ec77777345856733e03cecbe02a085192073ef31aeb9de9", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 31, - "serialized_chars": 37133, - "estimated_input_tokens": 9284, - "estimated_eval_tokens": 18824 + "serialized_chars": 37123, + "estimated_input_tokens": 9281, + "estimated_eval_tokens": 18818 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly heterogeneous: trainer, tokenizer, model-loading, vision, audio, and quantization 
regressions that do not collapse into one duplicate. The only plausible duplicate pair is the video-regression reports around multimodal/video processing.", - "confidence": 0.93, - "canonical_issue_reason": "No single canonical for the whole cluster; the closest duplicate subcluster is the video regression, and issue:43329 is the clearest root-cause report because it names the broken helper and undefined video-branch variables.", + "summary": "This cluster is not a duplicate set; the items span unrelated bugs across trainers, tokenizers, vision/audio processors, quantization, loading, and model-specific regressions. All soft edges are false positives and should be rejected.", + "confidence": 0.98, + "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "Issue:43329 is the best representative of the only near-duplicate pair because it is the most specific, actionable description of the underlying bug.", + "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43329", "right": "issue:45600", "accept": false, - "reason": "Different subsystems: multimodal video token counting vs auto_mappings CI/config references." + "reason": "Different subsystems: multimodal token counting vs stale auto-mapping references breaking CI. No shared bug path." }, { "left": "issue:45084", "right": "issue:45538", "accept": false, - "reason": "Unrelated bugs: compile-time template-node error vs CLIP tokenizer max length handling." + "reason": "Unrelated issues: template-node compilation error vs CLIPTokenizer max length constant. Same tokenizer/model area only at a very broad level." }, { "left": "issue:34634", "right": "issue:36331", "accept": false, - "reason": "Different code paths: BarkProcessor voice preset vs Trainer compute_loss signature mismatch." + "reason": "BarkProcessor voice_preset bug and Trainer.compute_loss signature change are entirely different code paths." 
}, { "left": "issue:43653", "right": "issue:44912", "accept": false, - "reason": "Tokenizer special-token registration bug vs MXFP4 quantization load fallback." + "reason": "BigBird special-token registration vs GPT-OSS MXFP4 quantization fallback are unrelated tokenizer/model loading problems." }, { "left": "issue:43452", "right": "issue:44991", "accept": false, - "reason": "Different loading failures: gguf_file handling vs tokenizer loading for est-roberta." - }, - { - "left": "issue:43638", - "right": "issue:44936", - "accept": false, - "reason": "Different trainer failures: ZeRO-3 zero-sized index error vs evaluate-after-train issue." + "reason": "Both involve loading, but one is gguf_file handling for AutoTokenizer/AutoModel and the other is a specific tokenizer regression for est-roberta." }, { "left": "issue:43257", "right": "issue:43329", "accept": false, - "reason": "Different bugs: Qwen3 MoE weight conversion/loading vs video token counting helper failure." - }, - { - "left": "issue:44568", - "right": "issue:45488", - "accept": false, - "reason": "Both are tokenizer regressions, but they affect different models and different mechanisms." + "reason": "Qwen3 MoE weight conversion/loading issue vs multimodal token counting video-branch bug; not the same defect." }, { - "left": "issue:43454", - "right": "issue:43577", + "left": "issue:43638", + "right": "issue:44936", "accept": false, - "reason": "Model-head tying bug vs dtype propagation bug; not the same underlying defect." + "reason": "DeepSpeed ZeRO3 with non-pretrained BERT index error is distinct from trainer.evaluate() failing after train()." }, { "left": "issue:38175", "right": "issue:44610", "accept": false, - "reason": "Different symptoms and components: zero probabilities vs processor output shape mismatch." + "reason": "SigLIP2 zero probabilities and OmDet-Turbo processor/model input-size mismatch are different model-specific bugs." 
}, { - "left": "issue:43854", - "right": "issue:44977", + "left": "issue:43454", + "right": "issue:43577", "accept": false, - "reason": "GLM-4.7 loading test failure and Qwen3.5 flash-attention generation are distinct issues." + "reason": "AyaVision weight-tying bug and BLIP2 dtype propagation bug are separate model initialization issues." }, { - "left": "issue:43257", - "right": "issue:43696", + "left": "issue:44568", + "right": "issue:45488", "accept": false, - "reason": "Loading/conversion bug and CUDA OOM are unrelated." + "reason": "mDeBERTa special-token addition regression is unrelated to LlamaTokenizer pre-tokenizer override affecting DeepSeek." }, { "left": "issue:44589", "right": "issue:45042", "accept": false, - "reason": "Storage-type lookup error vs PIL image processor dependency regression." + "reason": "Float8 storage lookup error and PIL backend requiring torchvision are unrelated runtime/loading problems." + }, + { + "left": "issue:43854", + "right": "issue:44977", + "accept": false, + "reason": "GLM-4.7-Flash test loading failure and Qwen3.5 flash-attention generation regression are different model bugs." }, { "left": "issue:43746", "right": "issue:44811", "accept": false, - "reason": "PEFT checkpoint loading vs Whisper batch_decode skip_special_tokens handling are different bugs." + "reason": "PEFT local checkpoint loading for GraniteSpeech is unrelated to Whisper batch_decode skip_special_tokens handling." }, { "left": "issue:39692", "right": "issue:44912", "accept": false, - "reason": "Documentation example errors are not the same bug as MXFP4 loading fallback." + "reason": "SigLIP2 doc-example/model-processor mismatch and git-oss MXFP4 loading fallback are different problems." }, { "left": "issue:36010", "right": "issue:43645", "accept": false, - "reason": "ImportError for GenerationMixin and Jupyter custom-model initialization are separate issues." 
+ "reason": "GenerationMixin import path breakage and Jupyter custom-model initialization regression are not the same underlying issue." }, { "left": "issue:43720", "right": "issue:44610", "accept": false, - "reason": "Packed-weight unpacking during accelerate loading is unrelated to processor input shape." + "reason": "BitNet packed-weight unpacking during accelerate loading vs OmDet processor image-size mismatch are unrelated." }, { "left": "issue:43819", "right": "issue:43927", "accept": false, - "reason": "DAC forward/from_latents mismatch and DiaConfig save/load token ID loss are different defects." - }, - { - "left": "issue:43749", - "right": "issue:44756", - "accept": false, - "reason": "Different memory/loading problems: FSDP CPU RAM efficient loading vs mmap OOM on Strix Halo." - }, - { - "left": "issue:44568", - "right": "issue:45356", - "accept": false, - "reason": "Both tokenizer regressions, but different models and different failure modes." + "reason": "DAC from_latents/forward mismatch is unrelated to DiaConfig losing custom token IDs after save/load." }, { "left": "issue:43550", "right": "issue:44912", "accept": false, - "reason": "Torch.compile/SDPA issue in Bamba is unrelated to MXFP4 loading fallback." - }, - { - "left": "issue:43329", - "right": "issue:44479", - "accept": true, - "reason": "Likely the same video-input regression in the shared multimodal/video path; 43329 identifies the broken helper/undefined variables, while 44479 reports the broader model-family symptom." - }, - { - "left": "issue:43262", - "right": "issue:44811", - "accept": false, - "reason": "Audio chat-template sampling-rate default and Whisper batch_decode behavior are unrelated." - }, - { - "left": "issue:43531", - "right": "issue:45042", - "accept": false, - "reason": "Qwen3-MoE sliding_window behavior and PIL torchvision dependency regression are different problems." 
+ "reason": "torch.compile/SDPA failure in Bamba-9B-v2 is unrelated to GPT-OSS quantization loading behavior." }, { - "left": "issue:36010", - "right": "issue:36331", + "left": "issue:43749", + "right": "issue:44756", "accept": false, - "reason": "Importing GenerationMixin and custom trainer loss signature errors are distinct." + "reason": "FSDP CPU RAM efficient loading breakage and Strix Halo mmap OOM are different memory/loading scenarios." }, { - "left": "issue:43257", - "right": "issue:43931", + "left": "issue:44568", + "right": "issue:45356", "accept": false, - "reason": "Different model-loading issues: MoE weight conversion vs Qwen3-VL shape mismatch." + "reason": "mDeBERTa BOS/EOS token insertion regression and Kimi-K2.5 codec handling warning are distinct tokenizer bugs." }, { - "left": "issue:44849", - "right": "issue:45701", + "left": "issue:43329", + "right": "issue:43450", "accept": false, - "reason": "A Qwen3.5 hidden-states bug is not the same as a broad tokenization-version regression." + "reason": "Both mention video processing, but one is multimodal token counting internals and the other is batched processor output shape." }, { "left": "issue:34567", "right": "issue:40990", "accept": false, - "reason": "Trainer token counters not updating and high perplexity on gpt-oss are unrelated." + "reason": "TrainerState num_input_tokens_seen not updating is unrelated to GPT-OSS high-perplexity evaluation results." }, { "left": "issue:43257", "right": "issue:43756", "accept": false, - "reason": "Qwen3 MoE loading/conversion and Smollm3 RoPE-layer mismatch are different issues." - }, - { - "left": "issue:43329", - "right": "issue:43450", - "accept": false, - "reason": "Video token-count helper bug vs batched video processor shape bug; related area but not the same underlying defect." + "reason": "Qwen3 MoE weight conversion/loading bug and Smollm3 RoPE-layer omission are unrelated model implementation issues." 
}, { "left": "issue:43697", "right": "issue:45357", "accept": false, - "reason": "RTDetrV2 output differences and Qwen3.5 save_pretrained visual-key regression are separate bugs." + "reason": "RTDetrV2 output regression and Qwen3.5 visual-encoder key saving regression are different code paths and model families." } ] }, - "evaluator_result": { - "accept": true, - "feedback": "The summary is well grounded in the packet: the cluster is genuinely heterogeneous, and the only accepted soft edge is framed cautiously as a likely shared video-input regression rather than a definitive same-bug claim. The rejections also stay conservative and mostly distinguish different subsystems or failure modes." - }, - "evaluator_used": true, + "evaluator_result": null, + "evaluator_used": false, "retried": false, - "accepted_nontrivial_soft_edge": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -5199,44 +5160,44 @@ "soft_pairs": [ "issue:44987|issue:45538", "issue:44479|issue:45381", - "issue:41720|issue:43540", "issue:43577|issue:45072", + "issue:41720|issue:43540", "issue:43452|issue:43582", - "issue:33357|issue:34567", - "issue:43540|issue:44560", - "issue:43317|issue:45538", "issue:42915|issue:43742", - "issue:42175|issue:43824", + "issue:33357|issue:34567", "issue:44291|issue:45042", - "issue:43927|issue:44912", + "issue:42175|issue:43824", + "issue:43317|issue:45538", "issue:42915|issue:44821", + "issue:43927|issue:44912", + "issue:43540|issue:44560", "issue:42915|issue:45538", "issue:43540|issue:44479", - "issue:44062|issue:44991", "issue:42175|issue:44938", + "issue:44062|issue:44991", "issue:42175|issue:44488", "issue:43526|issue:45479", - "issue:43906|issue:45381", "issue:45137|issue:45405", + "issue:43906|issue:45381", "issue:38175|issue:44898", "issue:43749|issue:44164", - "issue:44737|issue:45588", "issue:28282|issue:34689", + "issue:44737|issue:45588", "issue:43756|issue:45325", "issue:36010|issue:42175", 
"issue:28282|issue:29127", - "issue:43493|issue:43994", "issue:43742|issue:44291", + "issue:43493|issue:43994", "issue:43643|issue:45375" ], - "prepared_review_unit_hash": "1b10e3eb6c971bb3ab4ede953ca91ad272f6dd439bd761a9f8e3a72dd197a706", + "prepared_review_unit_hash": "a269bb6b1097d8a956bad6c7812385b5772c0440b800840963ad66331bef79d9", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, @@ -5251,204 +5212,210 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly heterogeneous; only two soft edges look like true duplicates: the SigLIP2 model discrepancy pair and the Qwen2.5-VL video-position-id regression pair.", - "confidence": 0.91, - "canonical_issue_reason": "Issue 44479 is the best canonical issue because it is the broadest, clearest regression report in the cluster: it names the version, the affected video-input code path, and multiple impacted Qwen VL variants, making it the best umbrella for the related video regression reports.", + "summary": "This set is mostly heterogeneous false positives, with only a couple of likely true duplicate pairs around the same concrete bug. 
No PRs are present.", + "confidence": 0.67, + "canonical_issue_reason": "No single canonical issue for the whole set; 44479 is the best anchor for the strongest duplicate pair because it is the broader Qwen2.5-VL video regression report and matches the same position-id bug seen in 45381.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 44479 is the strongest global issue candidate: it is concrete, reproducible, version-scoped, and broad enough to anchor the small video-regression subcluster better than the narrower follow-up report.", + "best_issue_reason": "44479 is the most representative issue among the true duplicates: it describes the broader regression and has the clearest overlap with the matching Qwen2.5-VL report.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44987", "right": "issue:45538", "accept": false, - "reason": "Both are loading-related, but one is a generic model load failure and the other is a tokenizer max-length bug; different code paths and symptoms." + "reason": "Both are loading-related, but one is a model-loading failure and the other is a CLIPTokenizer max-length issue; not the same bug." }, { "left": "issue:44479", "right": "issue:45381", "accept": true, - "reason": "These describe the same underlying Qwen2.5-VL video-input regression: 45381 is a narrower statement of the wrong vision_position_ids behavior covered by the broader 44479 report." + "reason": "Same Qwen2.5-VL video-input regression: both describe wrong video position IDs / temporal position handling in v5.3.0." }, { - "left": "issue:41720", - "right": "issue:43540", + "left": "issue:43577", + "right": "issue:45072", "accept": false, - "reason": "Different models and failure modes: auto device-map CUDA assert on A800 versus video-input ValueError in Qwen3OmniMoe." + "reason": "Both mention dtype, but they affect different models and different failure modes; too broad to be the same bug." 
}, { - "left": "issue:43577", - "right": "issue:45072", + "left": "issue:41720", + "right": "issue:43540", "accept": false, - "reason": "Both mention dtype issues, but they affect different architectures and distinct inference paths; not the same bug." + "reason": "Different Qwen-family problems: auto device mapping cuda assert versus video-input ValueError." }, { "left": "issue:43452", "right": "issue:43582", "accept": false, - "reason": "One is a gguf/from_pretrained tokenizer/model loading break, the other is an Apple Silicon allocator warmup TypeError; unrelated." + "reason": "Unrelated areas: gguf/AutoTokenizer loading versus Apple Silicon allocator warmup TypeError." + }, + { + "left": "issue:42915", + "right": "issue:43742", + "accept": false, + "reason": "Different concrete bugs: FP8 quantization failure versus MobileLLM key error on load." }, { "left": "issue:33357", "right": "issue:34567", "accept": false, - "reason": "Completely different problem areas: MacOS bus error loading CLIP versus a TrainerState counter not updating." + "reason": "CLIP MacOS bus error and TrainerState token-count tracking are unrelated." }, { - "left": "issue:43540", - "right": "issue:44560", + "left": "issue:44291", + "right": "issue:45042", + "accept": false, + "reason": "Init-empty-weights constructor-arg breakage is unrelated to the PIL/torchvision backend regression." + }, + { + "left": "issue:42175", + "right": "issue:43824", "accept": false, - "reason": "Both are video issues, but they concern different Qwen3 model families and different exceptions." + "reason": "Packaging/backend dependency issue versus missing Qwen2_5_VL class import; not the same bug." }, { "left": "issue:43317", "right": "issue:45538", "accept": false, - "reason": "Different topics: dequantized model device_map/offload failure versus CLIP tokenizer max-length handling." + "reason": "Device-map/offload loading failure is unrelated to CLIPTokenizer model_max_length." 
}, { - "left": "issue:42915", - "right": "issue:43742", + "left": "issue:43927", + "right": "issue:44912", "accept": false, - "reason": "Qwen3Moe FP8 config failure and MobileLLM key-error loading are unrelated model-loading bugs." + "reason": "Config save/load token-ID loss is a different bug from MXFP4 quantization falling back to bf16." }, { - "left": "issue:42175", - "right": "issue:43824", + "left": "issue:43540", + "right": "issue:44560", "accept": false, - "reason": "Backend packaging/install issue versus missing Qwen2.5-VL class import; not the same underlying defect." + "reason": "Both involve video, but they are different Qwen3 variants with different error signatures and likely different code paths." }, { - "left": "issue:44291", - "right": "issue:45042", + "left": "issue:43906", + "right": "issue:45381", "accept": false, - "reason": "init_empty_weights argument handling and PIL image processor torchvision dependency are different code paths." + "reason": "One is just an isolated reproduction of another issue; it is not the same as the Qwen2.5-VL video-position regression." }, { - "left": "issue:43927", - "right": "issue:44912", + "left": "issue:38175", + "right": "issue:44898", "accept": false, - "reason": "Config save/load losing token IDs is unrelated to MXFP4 quantization fallback behavior." + "reason": "Different models and different symptoms: SigLIP2 zero probabilities versus Perceiver non-default-resolution failure." }, { - "left": "issue:42915", - "right": "issue:44821", + "left": "issue:43749", + "right": "issue:44164", "accept": false, - "reason": "Different failures: FP8 config loading versus AutoImageProcessor URL loading." + "reason": "Both touch loading/saving, but FSDP CPU RAM efficient loading and extra_state handling are separate concrete problems." 
}, { - "left": "issue:42915", - "right": "issue:45538", + "left": "issue:28282", + "right": "issue:34689", "accept": false, - "reason": "No meaningful overlap beyond both being model-related issues." + "reason": "Missing PyTorch import dependency and Llama 3.2 Vision loading breakage are unrelated." }, { - "left": "issue:43540", - "right": "issue:44479", + "left": "issue:44737", + "right": "issue:45588", "accept": false, - "reason": "Both involve video inputs, but they target different model families and different regression reports; not clearly the same bug." + "reason": "XLNet CPU tensor placement bug and flash-attention s_aux=None crash are unrelated code paths." }, { - "left": "issue:44062", - "right": "issue:44991", + "left": "issue:43756", + "right": "issue:45325", "accept": false, - "reason": "Tokenizer-loading failures are similar in shape, but the titles do not show the same concrete root cause; too speculative to merge." + "reason": "Both mention RoPE, but they affect different models and different position/index logic." }, { - "left": "issue:42175", - "right": "issue:44938", + "left": "issue:36010", + "right": "issue:42175", "accept": false, - "reason": "Package/backend install mismatch versus Python 3.14 load failure; unrelated." + "reason": "GenerationMixin import failure is unrelated to TensorFlow backend packaging." }, { - "left": "issue:42175", - "right": "issue:44488", + "left": "issue:28282", + "right": "issue:29127", "accept": false, - "reason": "Install/backend packaging issue versus a model-specific loading failure; different bugs." + "reason": "Generic import error versus LayoutLMv3 box-content validation messaging; not the same bug." }, { - "left": "issue:43526", - "right": "issue:45479", + "left": "issue:43742", + "right": "issue:44291", "accept": false, - "reason": "Different processor/model semantics: Beit image processor label reduction versus sequence-classification loss degeneracy." 
+ "reason": "Different root causes: MobileLLM key error versus init_empty_weights argument mismatch." }, { - "left": "issue:43906", - "right": "issue:45381", - "accept": false, - "reason": "An isolated reproduction of an older issue and a Qwen2.5-VL video regression are not the same defect." + "left": "issue:43493", + "right": "issue:43994", + "accept": true, + "reason": "Same SigLIP2 model-family regression: both report incorrect/nonsensical outputs from the HF implementation compared with expected behavior." }, { "left": "issue:45137", "right": "issue:45405", "accept": false, - "reason": "DeepSpeed ZeRO3 deque error and a PEFT version bump are unrelated." + "reason": "DeepSpeed ZeRO3 deque error and PEFT version bump are unrelated." }, { - "left": "issue:38175", - "right": "issue:44898", + "left": "issue:43906", + "right": "issue:45381", "accept": false, - "reason": "Both are vision-model bugs, but the reported symptoms and model paths are different." + "reason": "Reproduction issue versus a specific Qwen2.5-VL video-position bug; not the same underlying change." }, { - "left": "issue:43749", - "right": "issue:44164", + "left": "issue:43756", + "right": "issue:45325", "accept": false, - "reason": "Both touch save/load behavior, but one is FSDP CPU RAM efficient loading and the other is extra_state serialization; not the same bug." + "reason": "Repeated pair: different models and distinct RoPE-related bugs." }, { - "left": "issue:44737", - "right": "issue:45588", + "left": "issue:44062", + "right": "issue:44991", "accept": false, - "reason": "XLNet CPU device placement issue and flash-attention s_aux crash are unrelated." + "reason": "Tokenizer loading failures, but the titles do not establish the same concrete root cause." }, { - "left": "issue:28282", - "right": "issue:34689", + "left": "issue:42175", + "right": "issue:44938", "accept": false, - "reason": "Generic AutoModel torch-missing import error versus a specific Llama 3.2 Vision loading regression." 
+ "reason": "TensorFlow backend packaging versus Python 3.14 load failure are unrelated." }, { - "left": "issue:43756", - "right": "issue:45325", + "left": "issue:43526", + "right": "issue:45479", "accept": false, - "reason": "Both mention RoPE/position handling, but they are different models and different defects." + "reason": "Image-processor reduce_labels behavior is unrelated to degenerate zero-loss sequence classification." }, { - "left": "issue:36010", - "right": "issue:42175", + "left": "issue:28282", + "right": "issue:34689", "accept": false, - "reason": "GenerationMixin import error and missing TensorFlow backend are unrelated." + "reason": "Repeated pair: import dependency issue versus model-loading regression." }, { - "left": "issue:28282", - "right": "issue:29127", + "left": "issue:43742", + "right": "issue:44291", "accept": false, - "reason": "Missing PyTorch dependency and LayoutLMv3 box-content error are unrelated." + "reason": "Repeated pair: different loading bugs with different failure mechanisms." }, { "left": "issue:43493", "right": "issue:43994", "accept": true, - "reason": "Both report the same SigLIP2 model discrepancy: HF outputs are nonsensical versus the original JAX implementation, pointing to the same underlying model bug." - }, - { - "left": "issue:43742", - "right": "issue:44291", - "accept": false, - "reason": "KeyError on MobileLLM loading and _is_hf_initialized argument handling are distinct loading failures." + "reason": "Repeated pair: same SigLIP2 implementation/output discrepancy." }, { "left": "issue:43643", "right": "issue:45375", "accept": false, - "reason": "Missing fields under trust_remote_code and a missing config field in Qwen3_5MoeVisionConfig are related only at a very high level; not the same concrete bug." + "reason": "Both involve config fields, but one is generic trust_remote_code config loss and the other is a model-specific field dropped by @strict; not enough to unify them." 
} ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is grounded in the packet and stays conservative overall. The canonical issue choice is reasonable for the video-regression subcluster, and the two accepted soft edges are supported by the titles/body-level evidence without overstating certainty. No mergeability or duplicate claim looks excessive." + "feedback": "The summary is grounded in the packet: all items are issues, no PRs are present, and the accepted duplicate pairs are reasonably supported by the titles (notably the Qwen2.5-VL video regression pair and the SigLIP2 discrepancy pair). The rejected pairs are generally conservative and do not overclaim same-root-cause evidence." }, "evaluator_used": true, "retried": false, @@ -5481,7 +5448,6 @@ "issue:43577", "issue:43582", "issue:43643", - "issue:43653", "issue:43688", "issue:43697", "issue:43720", @@ -5491,11 +5457,11 @@ "issue:43819", "issue:44062", "issue:44291", + "issue:44466", "issue:44484", "issue:44488", "issue:44514", "issue:44561", - "issue:44568", "issue:44589", "issue:44625", "issue:44704", @@ -5507,6 +5473,7 @@ "issue:44898", "issue:44936", "issue:44964", + "issue:45005", "issue:45042", "issue:45081", "issue:45216", @@ -5521,228 +5488,231 @@ "issue:43582|issue:43782", "issue:44062|issue:45081", "issue:34567|issue:36331", - "issue:43452|issue:44488", - "issue:44484|issue:44704", "issue:44291|issue:44821", + "issue:43452|issue:44488", "issue:44792|issue:44964", + "issue:44484|issue:44704", "issue:45405|issue:45600", "issue:45042|issue:45081", "issue:43697|issue:45216", "issue:43643|issue:44561", "issue:43688|issue:43749", - "issue:44514|issue:45381", "issue:43742|issue:44589", + "issue:44514|issue:45381", "issue:43531|issue:44821", "issue:44514|issue:45290", - "issue:43452|issue:43577", "issue:44625|issue:44877", + "issue:43452|issue:43577", + "issue:43720|issue:44898", "issue:43452|issue:44843", "issue:43688|issue:45440", "issue:41762|issue:44936", - 
"issue:43720|issue:44898", - "issue:43295|issue:45356", "issue:43525|issue:43643", + "issue:24643|issue:29127", + "issue:43295|issue:45356", "issue:43819|issue:44811", - "issue:43653|issue:44568", - "issue:24643|issue:29127" + "issue:44466|issue:45005" ], - "prepared_review_unit_hash": "fca4608b906edd341c86ac4974048a5821136eeee289323c5ae0aee4d7e01928", + "prepared_review_unit_hash": "ad7ab0e0c1fa3bbd365925d9aee9a605b67bc33b26d2e858996a1c4b121a270b", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 27, - "serialized_chars": 35513, - "estimated_input_tokens": 8879, - "estimated_eval_tokens": 18014 + "serialized_chars": 35496, + "estimated_input_tokens": 8874, + "estimated_eval_tokens": 18004 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "These items are mostly unrelated bug reports across different code paths; none of the soft pairs look like true duplicates, and there are no PRs to consolidate.", - "confidence": 0.9, - "canonical_issue_reason": "issue:43749 is the strongest representative issue artifact here: it has broad loading-path impact, substantial discussion, and inbound references, making it the most actionable central issue in the set.", + "summary": "This cluster is mostly a grab bag of unrelated Transformers regression/issues, largely around loading, tokenizers, config handling, and training. 
The only strong duplicate-like pair is the tied-weights serialization reports (44466/45005).", + "confidence": 0.58, + "canonical_issue_reason": "issue:44466 is the clearest concrete bug report in the one credible duplicate pair: tied/lm_head weight serialization in v5.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43749 has the clearest regression framing and the most supporting activity, so it is the best single issue to keep as the primary reference point.", + "best_issue_reason": "issue:44466 is the best anchor overall because it is specific, reproducible, and most directly matches the accepted duplicate-style pair.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43582", "right": "issue:43782", "accept": false, - "reason": "Different failures: Apple Silicon allocator warmup TypeError versus Qwen3VL weight_only loading error." + "reason": "Both are TypeErrors, but they describe different code paths: Apple Silicon allocator warmup vs Qwen3VL weight_only loading." }, { "left": "issue:44062", "right": "issue:45081", "accept": false, - "reason": "Both involve tokenizers, but the concrete bugs are different: AddedToken special-arg duplication vs a Mistral regex/backend_tokenizer crash." + "reason": "Tokenizer-related, but one is AddedToken special-arg duplication and the other is a Mistral regex/backend_tokenizer crash." }, { "left": "issue:34567", "right": "issue:36331", "accept": false, - "reason": "TrainerState token counting not updating is unrelated to CustomTrainer.compute_loss receiving an unexpected keyword argument." + "reason": "Different Trainer regressions: token counting not updating vs custom compute_loss signature breaking on num_items_in_batch." }, { - "left": "issue:43452", - "right": "issue:44488", + "left": "issue:44291", + "right": "issue:44821", "accept": false, - "reason": "Both are model-loading regressions, but they affect different models and failure modes; not the same underlying bug." 
+ "reason": "Unrelated loading failures: init_empty_weights/_is_hf_initialized vs AutoImageProcessor URL loading." }, { - "left": "issue:44484", - "right": "issue:44704", + "left": "issue:43452", + "right": "issue:44488", "accept": false, - "reason": "max_shard_size behavior in save_pretrained is unrelated to AutoProcessor kwargs not reaching cached_file." + "reason": "Both are model-loading complaints, but the bugs are different and model-specific; not the same root cause." }, { - "left": "issue:44291", - "right": "issue:44821", + "left": "issue:44792", + "right": "issue:44964", "accept": false, - "reason": "init_empty_weights/_is_hf_initialized argument handling is a different code path from loading AutoImageProcessor from a URL." + "reason": "Both involve multimodal models, but they target different models and likely different failures." }, { - "left": "issue:44792", - "right": "issue:44964", + "left": "issue:44484", + "right": "issue:44704", "accept": false, - "reason": "A failed Janus test and a Phi-4 multimodal load failure are different model-specific issues." + "reason": "save_pretrained shard sizing is unrelated to AutoProcessor forwarding kwargs to cached_file." }, { "left": "issue:45405", "right": "issue:45600", "accept": false, - "reason": "A dependency version bump is unrelated to stale auto_mappings references causing CI breakage." + "reason": "Completely different issues: dependency version bump vs stale auto_mappings config references." }, { "left": "issue:45042", "right": "issue:45081", "accept": false, - "reason": "One is an image-processor torchvision dependency issue; the other is a tokenizer regex/attribute crash." + "reason": "Different bugs in different subsystems: image processor torchvision dependency vs Mistral tokenizer regex handling." }, { "left": "issue:43697", "right": "issue:45216", "accept": false, - "reason": "RTDetrV2 output divergence and Qwen3.5 save_pretrained corruption are unrelated regressions." 
+ "reason": "Different model regressions: RTDetrV2 output differences vs Qwen3.5 checkpoint serialization." }, { "left": "issue:43643", "right": "issue:44561", "accept": false, - "reason": "Both mention trust_remote_code, but one is missing fields in AutoConfig and the other is a v5 API removal breaking remote models." + "reason": "Both mention trust_remote_code, but one is missing config fields and the other is removal of is_torch_fx_available breaking remote code." }, { "left": "issue:43688", "right": "issue:43749", "accept": false, - "reason": "Aux-loss normalization in MoE models is unrelated to FSDP CPU RAM efficient loading." + "reason": "Different failures: MoE auxiliary-loss normalization vs FSDP CPU RAM efficient loading." }, { - "left": "issue:44514", - "right": "issue:45381", + "left": "issue:43742", + "right": "issue:44589", "accept": false, - "reason": "Both are multimodal/chat-template adjacent, but one crashes on batched padding=False and the other has wrong video vision_position_ids." + "reason": "Different load-time errors with different symptoms and code paths." }, { - "left": "issue:43742", - "right": "issue:44589", + "left": "issue:44514", + "right": "issue:45381", "accept": false, - "reason": "MobileLLM key lookup failure is unrelated to a missing Float8 storage object during loading." + "reason": "Both are Qwen2.5-VL related, but one is chat template batching and the other is video position ids." }, { "left": "issue:43531", "right": "issue:44821", "accept": false, - "reason": "Qwen3-MoE sliding_window behavior has nothing to do with loading AutoImageProcessor from a URL." + "reason": "Sliding-window model issue and image-processor URL loading are unrelated." }, { "left": "issue:44514", "right": "issue:45290", "accept": false, - "reason": "Both involve apply_chat_template, but the triggers differ: batched padding=False versus assistant tool-call messages with no content." 
+ "reason": "Same general chat-template area, but different crash conditions and no clear same underlying bug." + }, + { + "left": "issue:44625", + "right": "issue:44877", + "accept": false, + "reason": "Both concern config handling, but one is num_labels propagation and the other is strict config loading for granite_speech." }, { "left": "issue:43452", "right": "issue:43577", "accept": false, - "reason": "gguf_file loading breakage and Blip2 dtype propagation are different bugs in different subsystems." + "reason": "Different bugs: gguf_file/pretrained loading vs dtype not propagating to BLIP2 qformer." }, { - "left": "issue:44625", - "right": "issue:44877", + "left": "issue:43720", + "right": "issue:44898", "accept": false, - "reason": "Qwen3.5 num_labels propagation is unrelated to strict config blocking granite_speech loading." + "reason": "Different model behaviors: packed BitNet weight loading vs Perceiver image classification with interpolation." }, { "left": "issue:43452", "right": "issue:44843", "accept": false, - "reason": "gguf_file loading regressions and offline model_info calls in _patch_mistral_regex are different tokenizer-loading failures." + "reason": "Different tokenizer/loading regressions: gguf_file handling vs offline-model_info call in Mistral regex patching." }, { "left": "issue:43688", "right": "issue:45440", "accept": false, - "reason": "MoE auxiliary-loss normalization and DeepSeekV3MoE divergence are different model-implementation issues." + "reason": "Both are MoE/model-quality issues, but not the same bug or code path." }, { "left": "issue:41762", "right": "issue:44936", "accept": false, - "reason": "DeepSpeed ZeRO-3 Gemma3 loading failure is unrelated to trainer.evaluate failing after train." + "reason": "Different Trainer/DeepSpeed problems: ZeRO-3 model loading vs evaluate failing after train." 
}, { - "left": "issue:43720", - "right": "issue:44898", + "left": "issue:43525", + "right": "issue:43643", "accept": false, - "reason": "BitNet packed-weight unpacking during accelerate loading is unrelated to Perceiver interpolation behavior." + "reason": "Both are config-field problems, but one is a missing pad_token_id attribute and the other is remote-code config field loss." }, { - "left": "issue:43295", - "right": "issue:45356", + "left": "issue:24643", + "right": "issue:29127", "accept": false, - "reason": "Processor.tokenizer regression and Kimi-K2.5 codec/fix_mistral_regex problems are different tokenizer regressions." + "reason": "Entirely unrelated: DeepSpeed weight-dimension runtime error vs LayoutLMV3 error-message clarity." }, { - "left": "issue:43525", - "right": "issue:43643", + "left": "issue:43295", + "right": "issue:45356", "accept": false, - "reason": "Missing pad_token_id in Llama4Config is not the same bug as trust_remote_code returning incomplete config fields." + "reason": "Both are tokenizer regressions, but they are different models and different failure modes." }, { "left": "issue:43819", "right": "issue:44811", "accept": false, - "reason": "DAC latent decoding mismatch is unrelated to Whisper batch_decode ignoring skip_special_tokens." + "reason": "Different functionality: DAC latent reconstruction vs Whisper batch_decode special-token handling." }, { - "left": "issue:43653", - "right": "issue:44568", - "accept": false, - "reason": "BigBird special-token registration and mdeberta BOS/EOS insertion are distinct tokenizer-special-token bugs." - }, - { - "left": "issue:24643", - "right": "issue:29127", - "accept": false, - "reason": "DeepSpeed training RuntimeError about 2-D weights is unrelated to LayoutLMv3 box-validation error messaging." + "left": "issue:44466", + "right": "issue:45005", + "accept": true, + "reason": "Both report v5 tied-weight/lm_head serialization problems and plausibly share the same save_pretrained code path." 
} ] }, - "evaluator_result": null, - "evaluator_used": false, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays conservative on the soft-edge pairs. The only accepted duplicate-style pair, issue:44466 and issue:45005, is plausibly the same tied-weights/save_pretrained bug class, while the other rejected pairs are not overstated and the reasons match the issue titles. Confidence is appropriately moderate." + }, + "evaluator_used": true, "retried": false, - "accepted_nontrivial_soft_edge": false, + "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, @@ -5774,6 +5744,7 @@ "issue:43334", "issue:43452", "issue:43504", + "issue:43525", "issue:43531", "issue:43550", "issue:43572", @@ -5791,9 +5762,9 @@ "issue:44442", "issue:44451", "issue:44464", - "issue:44466", "issue:44479", "issue:44554", + "issue:44568", "issue:44610", "issue:44625", "issue:44743", @@ -5801,7 +5772,6 @@ "issue:44936", "issue:44977", "issue:44991", - "issue:45005", "issue:45325", "issue:45356", "issue:45588", @@ -5809,19 +5779,19 @@ "issue:45704" ], "soft_pairs": [ - "issue:44466|issue:45005", + "issue:43653|issue:44568", "issue:44464|issue:44743", + "issue:38175|issue:43295", "issue:43577|issue:43697", "issue:41720|issue:43906", - "issue:38175|issue:43295", "issue:43688|issue:45704", - "issue:43646|issue:43749", "issue:44361|issue:44442", - "issue:44843|issue:44991", - "issue:43653|issue:44361", - "issue:44479|issue:44936", + "issue:43646|issue:43749", "issue:43688|issue:45701", + "issue:43653|issue:44361", "issue:28282|issue:32090", + "issue:44843|issue:44991", + "issue:44479|issue:44936", "issue:30064|issue:33666", "issue:28282|issue:33357", "issue:28282|issue:35141", @@ -5835,256 +5805,260 @@ "issue:44464|issue:44977", "issue:44479|issue:44625", "issue:43550|issue:44361", - "issue:43504|issue:43720", "issue:32090|issue:34567", + "issue:43504|issue:43720", "issue:43504|issue:43550", "issue:43452|issue:44451", + 
"issue:32090|issue:33666", "issue:43504|issue:44610", + "issue:41720|issue:44464", "issue:43334|issue:43572", - "issue:32090|issue:33666", - "issue:41720|issue:44464" + "issue:43525|issue:43572" ], - "prepared_review_unit_hash": "b7a8dcf100286d9add3230d35238b3ef6a015d2b22ec274eb8b61ca82a6bcdf8", + "prepared_review_unit_hash": "cfd7b06dd0bb98b4fd6ec86a9b826b941a7dec30a3f3dc9da28d0628d8e23a2c", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 34, - "serialized_chars": 38051, - "estimated_input_tokens": 9513, - "estimated_eval_tokens": 19282 + "soft_pair_count": 35, + "serialized_chars": 38343, + "estimated_input_tokens": 9586, + "estimated_eval_tokens": 19428 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Conservative triage: none of the proposed soft edges look strong enough to accept from the packet evidence alone. The tied-weights pair is only title-level similar, so issue:44466 remains the best standalone anchor, not a confirmed duplicate of issue:45005. No PRs are present.", - "confidence": 0.84, - "canonical_issue_reason": "issue:44466 is the most concrete standalone report in the tied-weights area, with a specific v5/main, device-dependent `lm_head.weight` serialization symptom. The packet does not provide enough evidence to merge it with issue:45005.", + "summary": "These items are mostly separate bug reports with only superficial overlap (same library/version or same subsystem). 
None of the soft pairs look like the same underlying defect, so I would not merge any of them as duplicates.", + "confidence": 0.93, + "canonical_issue_reason": "issue:43646 is the broadest representative of the cluster: a generic Transformers 5.0 initialization regression that could umbrella several nearby version-breakage reports, unlike the narrower model- or tokenizer-specific issues.", "canonical_pr_reason": null, - "best_issue_reason": "issue:44466 is the best issue anchor here because it is the clearest and most specific report of the tied-weights regression.", + "best_issue_reason": "issue:43646 is the most useful standalone umbrella issue in this set because it describes a widespread initialization regression rather than a single model-specific symptom.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:44466", - "right": "issue:45005", + "left": "issue:43653", + "right": "issue:44568", "accept": false, - "reason": "Title overlap is not enough here; the packet only shows a broad tied-weights similarity and `deterministic_accept` is false." + "reason": "Both are tokenizer bugs, but one is about BigBird mask-token registration/decoding and the other is about BOS/EOS insertion for mdeberta-v3; different failure modes and fixes." }, { "left": "issue:44464", "right": "issue:44743", "accept": false, - "reason": "Different code paths: compiled chunked generation inconsistency vs recurrent-state reset in modular Qwen3.5." + "reason": "One is about chunked generation with compiled forward, the other about recurrent state reset with cache; different code paths." + }, + { + "left": "issue:38175", + "right": "issue:43295", + "accept": false, + "reason": "Siglip2 zero-probabilities and processor.tokenizer/image-passing regression are unrelated symptoms and components." }, { "left": "issue:43577", "right": "issue:43697", "accept": false, - "reason": "Different bugs: Blip2 dtype propagation vs RTDetrV2 output drift." 
+ "reason": "One is a dtype propagation issue in Blip2 loading, the other is a model output mismatch in RTDetrV2; not the same bug." }, { "left": "issue:41720", "right": "issue:43906", "accept": false, - "reason": "No clear shared underlying defect; these are separate Qwen3-related reports." - }, - { - "left": "issue:38175", - "right": "issue:43295", - "accept": false, - "reason": "SigLIP2 zero-probability behavior is not the same as the processor.tokenizer/images regression." + "reason": "The second is only an isolated reproduction of another issue number; there is no evidence it targets the same Qwen3 auto-device-map cudaErrorAssert." }, { "left": "issue:43688", "right": "issue:45704", "accept": false, - "reason": "Unrelated subsystems: MoE aux-loss normalization vs T5/apex RMSNorm memory leak." + "reason": "Aux-loss normalization in MoE models and a T5 RMSNorm memory leak are separate implementation defects." }, { - "left": "issue:43646", - "right": "issue:43749", + "left": "issue:44361", + "right": "issue:44442", "accept": false, - "reason": "Custom model initialization breakage and FSDP CPU RAM-efficient loading are different issues." + "reason": "MLukeTokenizer task AttributeError and FastSpeech2ConformerTokenizer loading failure affect different tokenizers and failure points." }, { - "left": "issue:44361", - "right": "issue:44442", + "left": "issue:43646", + "right": "issue:43749", "accept": false, - "reason": "Different tokenizer failures with different models and symptoms." + "reason": "Generic custom-model initialization breakage is not the same as FSDP CPU RAM-efficient loading being broken." }, { - "left": "issue:44843", - "right": "issue:44991", + "left": "issue:43688", + "right": "issue:45701", "accept": false, - "reason": "Offline `model_info()` access in patch logic is not the same bug as tokenizer loading failure." + "reason": "Aux-loss normalization and a generic tokenization-change report do not describe the same underlying defect." 
}, { "left": "issue:43653", "right": "issue:44361", "accept": false, - "reason": "BigBird special-token decoding bug is unrelated to MLukeTokenizer task-time AttributeError." + "reason": "Different tokenizer classes and different symptoms: empty decode output vs task-time AttributeError." }, { - "left": "issue:44479", - "right": "issue:44936", + "left": "issue:28282", + "right": "issue:32090", "accept": false, - "reason": "Video-input regression and trainer.evaluate() failure are different code paths." + "reason": "PyTorch import error and Trainer broadcast TypeError are unrelated environment/runtime problems." }, { - "left": "issue:43688", - "right": "issue:45701", + "left": "issue:44843", + "right": "issue:44991", "accept": false, - "reason": "Aux-loss normalization and tokenization-version changes are unrelated." + "reason": "Offline hub lookup in mistral regex patch is a different code path from tokenizer loading failure for EMBEDDIA/est-roberta." }, { - "left": "issue:28282", - "right": "issue:32090", + "left": "issue:44479", + "right": "issue:44936", "accept": false, - "reason": "Missing-PyTorch ImportError and `_gpu_broadcast_one` NoneType TypeError are unrelated." + "reason": "Video-input regression for several VLMs and trainer.evaluate() failing after train() are unrelated." }, { "left": "issue:30064", "right": "issue:33666", "accept": false, - "reason": "Void segmentation map processing and Qwen2-VL multi-GPU training are different issues." + "reason": "Void segmentation map processing and Qwen2-VL multi-GPU training are different domains and bugs." }, { "left": "issue:28282", "right": "issue:33357", "accept": false, - "reason": "PyTorch import failure is unrelated to the MacOS bus error report." + "reason": "Missing PyTorch import and MacOS bus error in CLIP loading are not the same issue." 
}, { "left": "issue:28282", "right": "issue:35141", "accept": false, - "reason": "Missing-PyTorch ImportError is unrelated to tied-embedding reinitialization during `post_init`." + "reason": "AutoModel import failure and token embedding resize reinitialization are unrelated." }, { "left": "issue:44977", "right": "issue:45356", "accept": false, - "reason": "Flash-attention generation regression and tokenizer codec/regex regression are different problems." + "reason": "Flash-attention generation bug in Qwen3.5 and tokenizer codec/regex regression in Kimi-K2.5 are separate." }, { "left": "issue:43577", "right": "issue:45588", "accept": false, - "reason": "Blip2 dtype handling is unrelated to the `s_aux=None` crash in flash_attention." + "reason": "Blip2 dtype handling and flash_attention.py crashing on s_aux=None affect different subsystems." }, { "left": "issue:43122", "right": "issue:43906", "accept": false, - "reason": "Different issues: tokenization-version differences vs an isolated reproduction of another bug." + "reason": "Tokenizer output changes across versions and an isolated reproduction of another issue are not enough to conclude the same bug." }, { "left": "issue:43577", "right": "issue:44554", "accept": false, - "reason": "Blip2 dtype propagation and MPS attention correctness are different code paths." + "reason": "Blip2 dtype propagation and an MPS attention correctness issue are distinct." }, { "left": "issue:43531", "right": "issue:43742", "accept": false, - "reason": "Sliding-window behavior in Qwen3-MoE is unrelated to loading MobileLLM-125M." + "reason": "Qwen3-MoE sliding_window behavior and a MobileLLM-125M load key error are unrelated." }, { "left": "issue:43653", "right": "issue:43927", "accept": false, - "reason": "BigBird mask-token registration/decoding bug is unrelated to DiaConfig losing custom token IDs." + "reason": "Tokenizer special-token registration and DiaConfig custom token IDs lost on save/load are different defects." 
}, { "left": "issue:43329", "right": "issue:45325", "accept": false, - "reason": "Different multimodal/video bugs: undefined helpers in token counting vs rope-index temporal scaling." + "reason": "Undefined video-path helper variables in multimodal token counting and Qwen2.5-VL rope index scaling are different bugs." }, { "left": "issue:44464", "right": "issue:44977", "accept": false, - "reason": "Compiled-forward chunked generation inconsistency is not the same as the flash-attention generation regression." + "reason": "Compiled-forward generation inconsistency and flash-attention generation failure are separate generation-path issues." }, { "left": "issue:44479", "right": "issue:44625", "accept": false, - "reason": "Video-input regression and Qwen3.5 `num_labels` propagation are unrelated." + "reason": "Video-input regression and num_labels propagation into text config are unrelated Qwen3.5 problems." }, { "left": "issue:43550", "right": "issue:44361", "accept": false, - "reason": "Bamba torch.compile/SDPA bug is unrelated to MLukeTokenizer AttributeError." + "reason": "torch.compile/SDPA failure in Bamba-9B-v2 is unrelated to MLukeTokenizer task AttributeError." }, { - "left": "issue:43504", - "right": "issue:43720", + "left": "issue:32090", + "right": "issue:34567", "accept": false, - "reason": "Legacy-field BEiT load failure and BitNet packed-weight unpacking are different bugs." + "reason": "NoneType broadcast TypeError in Trainer and num_input_tokens_seen not updating are different Trainer bugs." }, { - "left": "issue:32090", - "right": "issue:34567", + "left": "issue:43504", + "right": "issue:43720", "accept": false, - "reason": "Trainer broadcast TypeError and `num_input_tokens_seen` not updating are different trainer issues." + "reason": "Legacy field loading failure in BeitForSemanticSegmentation and packed-weight unpacking in BitNet loading are different model-load paths." 
}, { "left": "issue:43504", "right": "issue:43550", "accept": false, - "reason": "Different model families and failure modes." + "reason": "Legacy-field pretrained loading and Bamba torch.compile SDPA failure are unrelated." }, { "left": "issue:43452", "right": "issue:44451", "accept": false, - "reason": "gguf_file loading behavior is not the same as failing to load a specific model." + "reason": "gguf_file handling for AutoTokenizer/AutoModelForCausalLM and inability to load a specific tokenizer are different loading issues." + }, + { + "left": "issue:32090", + "right": "issue:33666", + "accept": false, + "reason": "Trainer _gpu_broadcast_one NoneType failure and Qwen2-VL multi-GPU training are not the same problem." }, { "left": "issue:43504", "right": "issue:44610", "accept": false, - "reason": "Legacy preset loading bug is unrelated to processor output-size mismatch." + "reason": "Model preset loading with a legacy field and OmDet-Turbo processor/model input-size mismatch are separate." }, { - "left": "issue:43334", - "right": "issue:43572", + "left": "issue:41720", + "right": "issue:44464", "accept": false, - "reason": "Different config fields (`pad_token_id` vs `pad_token_idx`) and different models." + "reason": "CUDA assert on auto device mapping and compiled-forward generation inconsistency are different runtime failures." }, { - "left": "issue:32090", - "right": "issue:33666", + "left": "issue:43334", + "right": "issue:43572", "accept": false, - "reason": "Trainer NoneType broadcast error is unrelated to Qwen2-VL multi-GPU training." + "reason": "Qwen3-VL missing pad_token_id and StableLmConfig missing pad_token_idx are similar naming issues but on different configs and code paths." }, { - "left": "issue:41720", - "right": "issue:44464", + "left": "issue:43525", + "right": "issue:43572", "accept": false, - "reason": "CUDA assert in auto device mapping is unrelated to chunked generation inconsistency." 
+ "reason": "Llama4Config missing pad_token_id and StableLmConfig missing pad_token_idx are separate model config regressions, not one duplicate bug." } ] }, - "evaluator_result": { - "accept": false, - "feedback": "The output is mostly conservative on the false pairs, but it overstates the tied-weights link: the packet only shows title-level similarity for issue:44466 vs issue:45005, and `deterministic_accept` is false. Phrases like \"strong duplicate pair\" and \"likely root code path\" are stronger than the evidence in the packet. No PR pairs are present, so that part is fine." - }, - "evaluator_used": true, - "retried": true, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null @@ -6114,9 +6088,9 @@ "issue:43299", "issue:43493", "issue:43504", - "issue:43525", "issue:43540", "issue:43572", + "issue:43575", "issue:43606", "issue:43646", "issue:43688", @@ -6152,252 +6126,259 @@ "issue:45584" ], "soft_pairs": [ - "issue:43525|issue:43572", - "issue:44849|issue:45357", "issue:43278|issue:43688", + "issue:44849|issue:45357", "issue:43756|issue:44913", "issue:43493|issue:43825", "issue:43742|issue:44821", "issue:43262|issue:45084", - "issue:43540|issue:43716", "issue:33666|issue:36010", + "issue:43540|issue:43716", "issue:43720|issue:43927", "issue:43606|issue:43927", "issue:43257|issue:43299", "issue:43278|issue:43844", - "issue:43493|issue:43844", "issue:39692|issue:43493", - "issue:44964|issue:45405", + "issue:43493|issue:43844", "issue:43720|issue:44361", - "issue:44488|issue:44843", + "issue:44964|issue:45405", "issue:33666|issue:36331", + "issue:44488|issue:44843", "issue:43540|issue:44977", "issue:43756|issue:45083", "issue:45200|issue:45229", "issue:44625|issue:45584", "issue:43824|issue:44589", "issue:43646|issue:44466", - "issue:44423|issue:44734", "issue:44589|issue:45520", "issue:39692|issue:43295", + "issue:44423|issue:44734", 
"issue:33357|issue:33666", "issue:43572|issue:44291", - "issue:43504|issue:43606" + "issue:43504|issue:43606", + "issue:43575|issue:45405", + "issue:43927|issue:44361" ], - "prepared_review_unit_hash": "a6feab50eceda342c1281fa904a6372d78a36384ddc63716fde7281b606e283a", + "prepared_review_unit_hash": "c53fb539f95a512eb6a0c34f804b1cd37c43bb67b92baaef1b65c2c3348f09ea", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 31, - "serialized_chars": 37124, - "estimated_input_tokens": 9281, - "estimated_eval_tokens": 18818 + "soft_pair_count": 32, + "serialized_chars": 37401, + "estimated_input_tokens": 9351, + "estimated_eval_tokens": 18958 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is a loose semantic grouping of many unrelated Transformers issues across different models, configs, and code paths; I do not see a true duplicate set or a single canonical issue/PR to merge onto.", - "confidence": 0.97, - "canonical_issue_reason": "No canonical issue: the items describe distinct bugs/regressions in unrelated subsystems (model loading, training, multimodal processing, config serialization, offload, etc.), so consolidating them would be incorrect.", + "summary": "This cluster is heterogeneous: the soft pairs share broad themes like loading, multimodal models, or config regressions, but they describe different concrete bugs and code paths. None of the proposed duplicate pairs should be merged. 
No PRs are present.", + "confidence": 0.98, + "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "No single issue is a good global canonical candidate because the cluster is heterogeneous and the problems are not the same underlying bug.", + "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:43525", - "right": "issue:43572", + "left": "issue:43278", + "right": "issue:43688", "accept": false, - "reason": "Both are missing-token-id/config regressions, but one is Llama4 pad_token_id and the other is StableLm pad_token_idx; different models and failure modes." + "reason": "Different bugs: BF16/FP32 dtype drift between training and eval vs auxiliary-loss normalization in MoE models." }, { "left": "issue:44849", "right": "issue:45357", "accept": false, - "reason": "Both involve Qwen3.5, but one is output_hidden_states behavior and the other is save_pretrained visual encoder key serialization; different code paths." - }, - { - "left": "issue:43278", - "right": "issue:43688", - "accept": false, - "reason": "Both mention loss-related training behavior, but one is dtype drift during evaluate and the other is auxiliary-loss normalization; unrelated bugs." + "reason": "Both mention Qwen3.5, but one is an output_hidden_states bug and the other is incorrect visual-encoder key serialization." }, { "left": "issue:43756", "right": "issue:44913", "accept": false, - "reason": "One is about Smollm3 RoPE layer dropping, the other about GPTNeoX rotary_pct reload defaults; both touch rotary settings but are different models and issues." + "reason": "Different models and failures: Smollm3 RoPE-layer dropping vs GPTNeoX rotary_pct not persisting on reload." }, { "left": "issue:43493", "right": "issue:43825", "accept": false, - "reason": "SigLIP2 implementation discrepancy and a pipeline translation-task error are unrelated; no shared code path." 
+ "reason": "SigLIP2 implementation discrepancy is unrelated to the pipeline() translation-task error message regression." }, { "left": "issue:43742", "right": "issue:44821", "accept": false, - "reason": "Both are loading-related, but one is a specific model key error and the other is AutoImageProcessor URL loading; different failures." + "reason": "Different loading problems: MobileLLM key error vs AutoImageProcessor URL loading." }, { "left": "issue:43262", "right": "issue:45084", "accept": false, - "reason": "Audio processor chat-template sampling-rate bug is unrelated to a template compilation error." + "reason": "Audio chat-template sampling-rate default bug is unrelated to the non-template-node compile error." }, { - "left": "issue:43540", - "right": "issue:43716", + "left": "issue:33666", + "right": "issue:36010", "accept": false, - "reason": "Different multimodal/video vs image-preprocessor dtype issues in different Qwen/Mistral model families; not the same bug." + "reason": "Qwen2-VL multi-GPU training is unrelated to the GenerationMixin import failure." }, { - "left": "issue:33666", - "right": "issue:36010", + "left": "issue:43540", + "right": "issue:43716", "accept": false, - "reason": "Multi-GPU training for Qwen2-VL is unrelated to a GenerationMixin import error." + "reason": "Different model families and code paths: Qwen3OmniMoe video processing vs Mistral-3 image-preprocessor dtype mismatch." }, { "left": "issue:43720", "right": "issue:43927", "accept": false, - "reason": "Both are initialization/loading regressions, but one is packed-weight unpacking and the other is custom token ID persistence causing generation failure; different root causes." + "reason": "BitNet packed-weight unpacking during load is unrelated to DiaConfig losing custom token IDs on save/load." }, { "left": "issue:43606", "right": "issue:43927", "accept": false, - "reason": "CPU offload device mismatch is unrelated to DiaConfig token-id loss on save/load." 
+ "reason": "CPU offload device mismatch for bark-small is a different issue than DiaConfig token-ID persistence." }, { "left": "issue:43257", "right": "issue:43299", "accept": false, - "reason": "Qwen3 MOE accelerate/deepspeed loading and Qwen3VL Moe model loading are related by family name only; distinct loading paths and reported symptoms." + "reason": "Both involve Qwen MOE models, but one is an accelerate+deepspeed conversion problem and the other is a Transformers 5.0 load regression." }, { "left": "issue:43278", "right": "issue:43844", "accept": false, - "reason": "Gradient explosion under ZeRO-3 is not the same as embedding dtype changing between train/eval." - }, - { - "left": "issue:43493", - "right": "issue:43844", - "accept": false, - "reason": "SigLIP2 implementation mismatch is unrelated to gradient instability in DeepSpeed training." + "reason": "Embedding dtype mismatch is not the same as abnormal gradient growth under HfDeepSpeedConfig/ZeRO-3." }, { "left": "issue:39692", "right": "issue:43493", "accept": false, - "reason": "Documentation/example errors and a core implementation discrepancy are different classes of issues." + "reason": "Documentation example mistakes are not the same as the SigLIP2 HF-vs-JAX implementation discrepancy." }, { - "left": "issue:44964", - "right": "issue:45405", + "left": "issue:43493", + "right": "issue:43844", "accept": false, - "reason": "Model load failure and a bumped minimum PEFT version are unrelated; one is runtime compatibility, the other is dependency metadata." + "reason": "SigLIP2 fidelity mismatch is unrelated to the ZeRO-3 gradient issue." }, { "left": "issue:43720", "right": "issue:44361", "accept": false, - "reason": "Both are loading failures, but one concerns packed weights in accelerate and the other a tokenizer AttributeError; not the same bug." + "reason": "BitNet accelerate-loading bug is unrelated to MLukeTokenizer AttributeError on tasks." 
}, { - "left": "issue:44488", - "right": "issue:44843", + "left": "issue:44964", + "right": "issue:45405", "accept": false, - "reason": "Loading cjvt/sleng-bert and offline-mode Mistral regex patching are unrelated loader problems." + "reason": "Model-load failure for Phi-4 multimodal is unrelated to the unreleased MIN_PEFT_VERSION bump." }, { "left": "issue:33666", "right": "issue:36331", "accept": false, - "reason": "Multi-GPU training for Qwen2-VL and a custom trainer loss signature mismatch are separate training issues." + "reason": "Qwen2-VL multi-GPU training and CustomTrainer.compute_loss signature break are different problems." + }, + { + "left": "issue:44488", + "right": "issue:44843", + "accept": false, + "reason": "Loading cjvt/sleng-bert is unrelated to the offline-mode failure in _patch_mistral_regex." }, { "left": "issue:43540", "right": "issue:44977", "accept": false, - "reason": "Video input validation in Qwen3OmniMoe and flash-attention generation issues in Qwen3.5 are different runtime paths." + "reason": "Different Qwen3 issues: video-input processing error vs flash-attention generation bug." }, { "left": "issue:43756", "right": "issue:45083", "accept": false, - "reason": "RoPE layer reduction in Smollm3 and feat-extract length helper behavior in qwen3_omni_moe are unrelated implementation bugs." + "reason": "Smollm3 RoPE-layer count bug does not match the qwen3_omni_moe helper-function behavior issue." }, { "left": "issue:45200", "right": "issue:45229", "accept": false, - "reason": "mm_token_type_ids defaulting and Gemma4 multi-GPU CUDA OOM are different problems with different fixes." + "reason": "Gemma 4 mm_token_type_ids defaulting bug is unrelated to multi-GPU inference CUDA OOM." }, { "left": "issue:44625", "right": "issue:45584", "accept": false, - "reason": "Qwen3.5 num_labels propagation and Whisper empty-transcription generation failure are unrelated model-specific bugs." 
+ "reason": "Qwen3.5 num_labels propagation and Whisper empty-transcription handling are distinct bugs." }, { "left": "issue:43824", "right": "issue:44589", "accept": false, - "reason": "Missing Qwen2.5-VL export and missing Float8 storage support are distinct import/runtime compatibility issues." + "reason": "Qwen2.5-VL import error is unrelated to the Float8 storage-object lookup failure." }, { "left": "issue:43646", "right": "issue:44466", "accept": false, - "reason": "Custom model initialization breakage and inconsistent lm_head.weight serialization are different v5 regressions." - }, - { - "left": "issue:44423", - "right": "issue:44734", - "accept": false, - "reason": "Both concern serving, but one crashes on multimodal tensors and the other on KV-cache continuation indexing; not the same code-path bug." + "reason": "Custom model initialization regression is different from inconsistent lm_head.weight serialization." }, { "left": "issue:44589", "right": "issue:45520", "accept": false, - "reason": "Float8 storage resolution and Python 3.13 flash_attn KeyError are unrelated backend/import issues." + "reason": "Float8 storage lookup failure is unrelated to the flash_attn KeyError on Python 3.13." }, { "left": "issue:39692", "right": "issue:43295", "accept": false, - "reason": "A SigLIP2 docs/example bug is not the same as a regression in processor.tokenizer access and image-to-tokenizer handling." + "reason": "Docs example errors are not the same as the regression around processor.tokenizer access and image inputs to tokenizer." + }, + { + "left": "issue:44423", + "right": "issue:44734", + "accept": false, + "reason": "Continuous-batching multimodal crash from str.to() is a different failure than KV-cache continuation tensor indexing." }, { "left": "issue:33357", "right": "issue:33666", "accept": false, - "reason": "MacOS bus error with CLIP model and Qwen2-VL multi-GPU training are unrelated model/runtime problems." 
+ "reason": "MacOS CLIP bus error and Qwen2-VL multi-GPU training are unrelated." }, { "left": "issue:43572", "right": "issue:44291", "accept": false, - "reason": "StableLm missing pad_token_idx after v5 update and init_empty_weights unexpected argument are separate config/init regressions." + "reason": "StableLmConfig pad_token_idx loss is unrelated to init_empty_weights/_is_hf_initialized argument handling." }, { "left": "issue:43504", "right": "issue:43606", "accept": false, - "reason": "BEiT legacy-field loading failure and bark-small CPU offload device mismatch do not share the same underlying bug." + "reason": "BEiT legacy-field load failure is a different bug from bark-small CPU offload device mismatch." + }, + { + "left": "issue:43575", + "right": "issue:45405", + "accept": false, + "reason": "Tensor-parallel OOM for Qwen2-57B is unrelated to the unreleased PEFT version bump." + }, + { + "left": "issue:43927", + "right": "issue:44361", + "accept": false, + "reason": "DiaConfig save/load token-ID corruption is not the same as MLukeTokenizer task-time AttributeError." 
} ] }, @@ -6431,21 +6412,19 @@ "issue:43278", "issue:43295", "issue:43334", + "issue:43493", "issue:43504", "issue:43525", "issue:43540", - "issue:43575", "issue:43582", "issue:43606", "issue:43653", "issue:43701", "issue:43844", - "issue:43927", "issue:43957", "issue:43994", "issue:44112", "issue:44315", - "issue:44361", "issue:44464", "issue:44493", "issue:44521", @@ -6454,6 +6433,7 @@ "issue:44610", "issue:44792", "issue:44849", + "issue:44898", "issue:44964", "issue:44977", "issue:45072", @@ -6464,6 +6444,7 @@ "issue:45325", "issue:45356", "issue:45405", + "issue:45412", "issue:45446", "issue:45520", "issue:45584", @@ -6471,18 +6452,16 @@ ], "soft_pairs": [ "issue:43504|issue:43653", - "issue:43575|issue:45405", - "issue:43927|issue:44361", "issue:44521|issue:45290", - "issue:44792|issue:45405", "issue:44464|issue:45584", "issue:43994|issue:44493", + "issue:44792|issue:45405", "issue:43278|issue:43701", "issue:43606|issue:44112", "issue:29942|issue:30333", "issue:44554|issue:45446", - "issue:44568|issue:45701", "issue:43844|issue:44964", + "issue:44568|issue:45701", "issue:43540|issue:45200", "issue:45084|issue:45520", "issue:43582|issue:44977", @@ -6494,203 +6473,202 @@ "issue:44315|issue:44849", "issue:43334|issue:43525", "issue:44554|issue:45356", + "issue:43295|issue:44610", "issue:45200|issue:45325", - "issue:43295|issue:44610" + "issue:43493|issue:43701", + "issue:44898|issue:45412" ], - "prepared_review_unit_hash": "d380683ccb105932a721cf08d16c16a2bba54cacd3e52b1550efbf83ca7e3941", + "prepared_review_unit_hash": "f6567a3845a819456f775998384c9b61689ac42295ea8e0421a1a8fefdc1ec96", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 26, - "serialized_chars": 
34992, - "estimated_input_tokens": 8748, - "estimated_eval_tokens": 17752 + "serialized_chars": 35029, + "estimated_input_tokens": 8758, + "estimated_eval_tokens": 17772 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly unrelated issue reports with a few superficial keyword overlaps; only the `pad_token_id` config-attribute failures look like a likely duplicate pair.", - "confidence": 0.93, - "canonical_issue_reason": "No strong canonical issue: the set is too heterogeneous, and only one soft pair appears to be a true duplicate.", + "summary": "This set is a collection of mostly unrelated transformer bugs spanning tokenization, multimodal chat templates, generation, dtype/device issues, and CI failures. I don\u2019t see any soft pair that appears to be the same concrete underlying bug/change.", + "confidence": 0.95, + "canonical_issue_reason": "issue:44521 is the most active open issue here and has the strongest cross-reference signal, so it works best as a cluster anchor even though the items are not true duplicates.", "canonical_pr_reason": null, - "best_issue_reason": "If forced, issue:45701 is the broadest umbrella report, but it is still too general to serve as a real canonical for this cluster.", + "best_issue_reason": "issue:44521 is the best representative issue because it has the highest discussion/inbound activity and a concrete reproducible regression report.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43504", "right": "issue:43653", "accept": false, - "reason": "Different models and different bugs: legacy field loading vs tokenizer special-token registration." - }, - { - "left": "issue:43575", - "right": "issue:45405", - "accept": false, - "reason": "OOM during tensor-parallel model load is unrelated to a PEFT version pin." 
- }, - { - "left": "issue:43927", - "right": "issue:44361", - "accept": false, - "reason": "Both are config/tokenizer-related, but the concrete failures are different and not the same code path." + "reason": "Both are loading/token-related, but one is a Beit legacy-field load failure and the other is a BigBirdTokenizer special-token decode bug; different code paths." }, { "left": "issue:44521", "right": "issue:45290", "accept": false, - "reason": "Both involve `apply_chat_template`, but one is bad assistant masks and the other is a crash on tool-call messages." - }, - { - "left": "issue:44792", - "right": "issue:45405", - "accept": false, - "reason": "Janus image-generation test failure is unrelated to the PEFT version bump." + "reason": "Both involve apply_chat_template, but one is multimodal assistant mask generation and the other is a tool-call tokenization crash; not the same bug." }, { "left": "issue:44464", "right": "issue:45584", "accept": false, - "reason": "Compiled-forward chunked generation and Whisper empty-transcription handling are distinct bugs." + "reason": "Different generation failures in different models/features: compiled-forward chunked generation vs Whisper empty-transcription alignment handling." }, { "left": "issue:43994", "right": "issue:44493", "accept": false, - "reason": "Different symptoms and likely different code paths; one is bad model outputs, the other is a position-id key issue." + "reason": "A SigLIP2 output regression is unrelated to the broad 'unexpected key with position id' issue, which points to a different model/state handling problem." + }, + { + "left": "issue:44792", + "right": "issue:45405", + "accept": false, + "reason": "A janus image-generation test failure and a PEFT version-pin issue are unrelated." }, { "left": "issue:43278", "right": "issue:43701", "accept": false, - "reason": "Training/eval dtype drift is unrelated to checkpoint resume key mismatch." 
+ "reason": "Embedding dtype drift in eval and a resume_from_checkpoint key mismatch are separate behaviors with different causes." }, { "left": "issue:43606", "right": "issue:44112", "accept": false, - "reason": "Both mention device handling, but they affect different models and failure modes." + "reason": "Both mention devices, but one is a real CPU-offload mismatch and the other is a stale CI test; not the same underlying defect." }, { "left": "issue:29942", "right": "issue:30333", "accept": false, - "reason": "FlashAttention test failures and MLflow reporting are unrelated." + "reason": "FlashAttention2 test failures and MLflow job-status reporting are unrelated subsystems." }, { "left": "issue:44554", "right": "issue:45446", "accept": false, - "reason": "Different attention-related problems with different triggers and fixes." + "reason": "An MPS attention correctness bug and a PyTorch-version import guard in flex_attention are different issues." }, { - "left": "issue:44568", - "right": "issue:45701", + "left": "issue:43844", + "right": "issue:44964", "accept": false, - "reason": "`add_special_tokens` BOS/EOS omission is a specific tokenizer bug, while the other is only a broad version-regression report." + "reason": "ZeRO-3 gradient growth and a latest-transformers model-load failure are unrelated." }, { - "left": "issue:43844", - "right": "issue:44964", + "left": "issue:44568", + "right": "issue:45701", "accept": false, - "reason": "ZeRO-3 gradient growth and Phi-4 multimodal loading are unrelated." + "reason": "A specific mdeberta BOS/EOS regression is not the same as a vague tokenizer-version-change report." }, { "left": "issue:43540", "right": "issue:45200", "accept": false, - "reason": "Both are multimodal-input issues, but they hit different models and different processing logic." + "reason": "Video-input processing in Qwen3OmniMoe and Gemma 4 mm_token_type_ids defaults are different multimodal bugs." 
}, { "left": "issue:45084", "right": "issue:45520", "accept": false, - "reason": "Template-compilation failure and Python 3.13 flash-attn import error are unrelated." + "reason": "A compile-time template-node error and a Python 3.13 flash_attn import KeyError are unrelated." }, { "left": "issue:43582", "right": "issue:44977", "accept": false, - "reason": "Apple Silicon allocator TypeError is not the same as a Qwen3.5 flash-attention generation bug." + "reason": "Apple Silicon warmup TypeError and Qwen3.5 flash-attention generation problems do not share the same code path." }, { "left": "issue:43232", "right": "issue:44849", "accept": false, - "reason": "Generation kwargs sync-gpu handling and hidden-states output behavior are separate issues." + "reason": "These are different generation-related regressions: sync_gpus kwargs handling vs output_hidden_states behavior." }, { "left": "issue:15354", "right": "issue:38175", "accept": false, - "reason": "TorchScript export failure and zero-probability inference on SigLIP2 are unrelated." + "reason": "TorchScript GeneratorExp export failure is unrelated to SigLIP2 zero-probability outputs." }, { "left": "issue:37428", "right": "issue:43957", "accept": false, - "reason": "Flash-attention import symbol regression is distinct from `torch.device('meta')` model-loading failures." + "reason": "A missing flash attention helper import is unrelated to meta-device model loading regressions." }, { "left": "issue:45072", "right": "issue:45198", "accept": false, - "reason": "bfloat16 dtype mismatches and Wav2Vec2 save/tokenization failures are unrelated." + "reason": "bfloat16 dtype mismatches in inference and Wav2Vec2 save/tokenization failures are different bugs." }, { "left": "issue:39401", "right": "issue:44568", "accept": false, - "reason": "Both are tokenizer bugs, but offset mapping and BOS/EOS insertion are different concrete failures." 
+ "reason": "Wrong offset_mapping in Qwen3 tokenizer and missing BOS/EOS insertion in mdeberta are separate tokenizer defects." }, { "left": "issue:44315", "right": "issue:44849", "accept": false, - "reason": "Liger Kernel not applied via `model_init` is unrelated to Qwen3.5 hidden-states generation." + "reason": "Liger Kernel not applied during model_init is unrelated to the Qwen3.5 hidden-states regression." }, { "left": "issue:43334", "right": "issue:43525", - "accept": true, - "reason": "Both report the same missing-`pad_token_id` AttributeError on config objects during model loading; this looks like one shared config-handling bug." - }, - { - "left": "issue:43295", - "right": "issue:44610", "accept": false, - "reason": "Processor/tokenizer API regression and input-size mismatch are different problems." + "reason": "Both are pad_token_id attribute errors, but they affect different model configs and load paths; not enough to treat as the same bug." }, { "left": "issue:44554", "right": "issue:45356", "accept": false, - "reason": "MPS attention correctness and tokenizer codec/regression issues are unrelated." + "reason": "An MPS attention correctness issue and a Kimi-K2.5 tokenizer codec regression are unrelated." + }, + { + "left": "issue:43295", + "right": "issue:44610", + "accept": false, + "reason": "Processor.tokenizer access breakage and OmDet-Turbo image-size mismatch are different model/processor integration failures." }, { "left": "issue:45200", "right": "issue:45325", "accept": false, - "reason": "Both touch multimodal internals, but one is `mm_token_type_ids` defaults and the other is RoPE position scaling." + "reason": "Gemma 4 text-only mm_token_type_ids handling and Qwen2.5-VL rope-index scaling are separate multimodal position-id issues." + }, + { + "left": "issue:43493", + "right": "issue:43701", + "accept": false, + "reason": "A SigLIP2 implementation discrepancy is not the same as a checkpoint resume key mismatch." 
+ }, + { + "left": "issue:44898", + "right": "issue:45412", + "accept": false, + "reason": "Perceiver interpolation failure and RT-DETR memory reclamation are unrelated." } ] }, - "evaluator_result": { - "accept": true, - "feedback": "The summary is grounded in the packet and stays conservative. The lone accepted soft pair, issue:43334 and issue:43525, is plausibly the same pad_token_id config bug, and the other rejections are appropriately cautious given the title-level evidence. No overstatement stands out." - }, - "evaluator_used": true, + "evaluator_result": null, + "evaluator_used": false, "retried": false, - "accepted_nontrivial_soft_edge": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -6720,7 +6698,6 @@ "issue:43572", "issue:43575", "issue:43653", - "issue:43701", "issue:43819", "issue:43824", "issue:43827", @@ -6741,9 +6718,10 @@ "issue:44514", "issue:44521", "issue:44561", + "issue:44610", "issue:44779", "issue:44792", - "issue:44898", + "issue:44857", "issue:44936", "issue:44987", "issue:45083", @@ -6758,265 +6736,252 @@ ], "soft_pairs": [ "issue:44466|issue:45127", - "issue:44898|issue:45412", - "issue:43493|issue:43701", "issue:44792|issue:45563", "issue:37428|issue:43824", - "issue:43388|issue:43526", "issue:43873|issue:44464", "issue:43572|issue:44987", - "issue:43844|issue:44936", + "issue:43388|issue:43526", "issue:43572|issue:43957", + "issue:43844|issue:44936", "issue:43824|issue:43975", - "issue:44514|issue:44521", "issue:43295|issue:43450", + "issue:44514|issue:44521", "issue:38175|issue:45412", "issue:43295|issue:43873", "issue:42915|issue:43824", - "issue:44360|issue:44561", "issue:43295|issue:43994", + "issue:44360|issue:44561", "issue:44442|issue:45198", "issue:38175|issue:43493", - "issue:43827|issue:43901", "issue:43278|issue:43856", - "issue:44206|issue:45584", + "issue:43827|issue:43901", "issue:43653|issue:44112", + "issue:44206|issue:45584", "issue:43450|issue:43493", 
"issue:45127|issue:45245", + "issue:43450|issue:43575", "issue:43819|issue:45083", "issue:44561|issue:44779", - "issue:43450|issue:43575", - "issue:44462|issue:44561", "issue:45290|issue:45381", "issue:37428|issue:43425", + "issue:44462|issue:44561", + "issue:44610|issue:44857", "issue:43819|issue:44112" ], - "prepared_review_unit_hash": "41eaeea378b8ffebb6c0707cfa5c9e57100a0ba6a539f4a114096d69e3956ad3", + "prepared_review_unit_hash": "8900d69b755feb63dabd49649524c143250a9cc91ada5dd59bf094b15aa70ebe", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 33, - "serialized_chars": 37586, - "estimated_input_tokens": 9397, - "estimated_eval_tokens": 19050 + "soft_pair_count": 32, + "serialized_chars": 37287, + "estimated_input_tokens": 9322, + "estimated_eval_tokens": 18900 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is highly heterogeneous: the items span unrelated regressions, import errors, tokenizer/model-parity bugs, and training/runtime issues. The soft edges mostly reflect superficial similarity (e.g. v5 regressions, tokenizer/processor bugs, or model-specific failures) rather than the same underlying defect.", - "confidence": 0.91, - "canonical_issue_reason": "issue:43824 has the strongest signal and highest engagement, and it is a concrete, well-scoped regression. 
If a single issue must anchor the cluster, this is the most defensible representative, though the set is not a true duplicate cluster.", + "summary": "The cluster is heterogeneous: the soft pairs share broad topical similarity but not the same concrete bug/change, so none should be merged as duplicates.", + "confidence": 0.97, + "canonical_issue_reason": "No canonical issue stands out; the items span unrelated regressions across import errors, tokenizers, chat templates, processors, training, docs, and memory/loading behavior.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43824 is the most actionable standalone issue: clear symptom, broad impact, and the most discussion/inbound references among the candidates.", + "best_issue_reason": "No single issue is a good global representative because the cluster is not a single underlying defect but many distinct problems with only superficial similarity.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44466", "right": "issue:45127", "accept": false, - "reason": "Both involve model output/weight behavior, but one is device-dependent serialization of lm_head.weight and the other is a LoRA-merge collapse with extended vocab. Different failure modes and code paths." - }, - { - "left": "issue:44898", - "right": "issue:45412", - "accept": false, - "reason": "Perceiver interpolation failure and RT-DETR memory not being released are unrelated bugs in different subsystems." - }, - { - "left": "issue:43493", - "right": "issue:43701", - "accept": false, - "reason": "SigLIP2 HF-vs-JAX discrepancy is a model behavior bug; resume_from_checkpoint key mismatch is a training-state loading bug. No same underlying defect." + "reason": "Different bugs: one is about inconsistent lm_head.weight serialization, the other about LoRA merge collapse with extended vocabulary and tied embeddings." 
}, { "left": "issue:44792", "right": "issue:45563", "accept": false, - "reason": "Janus image-generation test failure and a stale paged-generate warning are different issues; one is a functional regression, the other a warning-message mismatch." + "reason": "Different symptoms and code paths: janus image-generation test failure vs a stale num_return_sequences warning in paged generate()." }, { "left": "issue:37428", "right": "issue:43824", "accept": false, - "reason": "Both are import errors, but they involve different missing symbols and different model families. No evidence of the same broken API surface." - }, - { - "left": "issue:43388", - "right": "issue:43526", - "accept": false, - "reason": "Both mention labels, but one is gather_for_metrics dropping tuple elements in the last batch while the other is BeitImageProcessorFast reduce_labels returning only one label. Different components and bugs." + "reason": "Both are import errors, but for different missing symbols and likely different regressions; not the same underlying fix." }, { "left": "issue:43873", "right": "issue:44464", "accept": false, - "reason": "Quantization/offloading behavior and compiled-forward chunked generation inconsistency are distinct code paths with different symptoms." + "reason": "Quantization offloading behavior and chunked generation with compiled forward are unrelated concrete problems." }, { "left": "issue:43572", "right": "issue:44987", "accept": false, - "reason": "Missing pad_token_idx in StableLmConfig and loading physical-intelligence/fast failing on newer transformers are not the same defect." + "reason": "Version-related loading failures, but with different models and failure modes; too broad to treat as one bug." }, { - "left": "issue:43844", - "right": "issue:44936", + "left": "issue:43388", + "right": "issue:43526", "accept": false, - "reason": "DeepSpeed ZeRO-3 gradient growth and trainer.evaluate() failing after trainer.train() are unrelated training/runtime problems." 
+ "reason": "Both involve labels, but one is a gather_for_metrics truncation issue and the other is a BeitImageProcessorFast reduce_labels bug." }, { "left": "issue:43572", "right": "issue:43957", "accept": false, - "reason": "StableLmConfig missing pad_token_idx is unrelated to meta-device loading breakage." + "reason": "Different regressions: missing StableLmConfig field vs meta-device loading failures in some models." }, { - "left": "issue:43824", - "right": "issue:43975", + "left": "issue:43844", + "right": "issue:44936", "accept": false, - "reason": "Qwen2_5_VL import failure and Deepseek incorrect detokenization are different bugs in different model families." + "reason": "Training instability under ZeRO-3 is unrelated to trainer.evaluate() failing after train()." }, { - "left": "issue:44514", - "right": "issue:44521", + "left": "issue:43824", + "right": "issue:43975", "accept": false, - "reason": "Both involve chat templates, but one is a batched padding=False crash and the other is all-zero assistant masks for multimodal inputs. Not the same concrete bug." + "reason": "Qwen2.5-VL import failure and DeepSeek detokenization regression are separate issues." }, { "left": "issue:43295", "right": "issue:43450", "accept": false, - "reason": "Processor.tokenizer regression and batched video shape handling are separate processor issues with different symptoms." + "reason": "Processor/tokenizer regression and batched video processor shape bug are different code paths." + }, + { + "left": "issue:44514", + "right": "issue:44521", + "accept": false, + "reason": "Both are apply_chat_template-related, but one is a batching crash with padding=False and the other is incorrect assistant masks for multimodal inputs." }, { "left": "issue:38175", "right": "issue:45412", "accept": false, - "reason": "SigLIP2 zero probabilities and RT-DETR memory leaks are unrelated." + "reason": "SigLIP2 zero probabilities and RT-DETR memory not releasing are unrelated." 
}, { "left": "issue:43295", "right": "issue:43873", "accept": false, - "reason": "Custom processor/tokenizer regression and quantization/offloading problems are different code paths and fixes." + "reason": "Custom processor/tokenizer regression vs quantization offloading issue; no shared concrete bug." }, { "left": "issue:42915", "right": "issue:43824", "accept": false, - "reason": "Qwen3Moe FineGrainedFP8Config failure and Qwen2_5_VL import error are unrelated model-specific issues." + "reason": "FineGrainedFP8Config failure in Qwen3Moe is unrelated to a missing Qwen2.5-VL export." }, { - "left": "issue:44360", - "right": "issue:44561", + "left": "issue:43295", + "right": "issue:43994", "accept": false, - "reason": "A missing ReLU in the DSA indexer and removal of is_torch_fx_available breaking trust_remote_code are clearly unrelated." + "reason": "Different regressions in different models: processor/tokenizer access vs SigLIP2 inference correctness." }, { - "left": "issue:43295", - "right": "issue:43994", + "left": "issue:44360", + "right": "issue:44561", "accept": false, - "reason": "Processor/tokenizer regression and SigLIP2 nonsensical outputs are both v5-era issues, but not the same bug or code path." + "reason": "DSA indexer activation issue and removal of is_torch_fx_available breaking remote-code models are unrelated." }, { "left": "issue:44442", "right": "issue:45198", "accept": false, - "reason": "FastSpeech2ConformerTokenizer loading and Wav2Vec2 save/tokenization failures affect different tokenizers with different root causes." + "reason": "AutoTokenizer failing for FastSpeech2ConformerTokenizer and Wav2Vec2 save/tokenization failure are distinct tokenizer problems." }, { - "left": "issue:38175", - "right": "issue:43493", + "left": "issue:43278", + "right": "issue:43856", "accept": false, - "reason": "Both touch SigLIP2, but one is zero probabilities in a specific model and the other is a broader HF-vs-JAX parity discrepancy. 
Insufficient evidence they are the same bug." + "reason": "Embedding dtype drift in eval and Qwen3 MoE memory inefficiency are different problems." }, { "left": "issue:43827", "right": "issue:43901", "accept": false, - "reason": "Both are docs issues, but one is about pipeline() references after v5 removals and the other about return_all_scores wording; different documentation problems." + "reason": "Both are docs issues, but they cover different removed/deprecated pipeline APIs and are not the same change." }, { - "left": "issue:43278", - "right": "issue:43856", + "left": "issue:43653", + "right": "issue:44112", "accept": false, - "reason": "Embedding dtype drift in eval and Qwen3 MoE memory inefficiency are unrelated." + "reason": "BigBirdTokenizer special-token registration and a GraniteSpeech CI device-override test are unrelated." }, { "left": "issue:44206", "right": "issue:45584", "accept": false, - "reason": "Unsupported center argument in LasrFeatureExtractor and Whisper failing on empty transcription after align_special_tokens are unrelated." - }, - { - "left": "issue:43653", - "right": "issue:44112", - "accept": false, - "reason": "BigBirdTokenizer special-token registration and a GraniteSpeech CI device-override test failure are different problems." + "reason": "LASR feature extractor center-arg crash and Whisper empty-transcription failure after align_special_tokens are different audio bugs." }, { "left": "issue:43450", "right": "issue:43493", "accept": false, - "reason": "Batched video processor shape issues and SigLIP2 implementation mismatch are different subsystems." + "reason": "Video batch shape bug and SigLIP2 HF-vs-JAX discrepancy are separate issues." }, { "left": "issue:45127", "right": "issue:45245", "accept": false, - "reason": "LoRA merge collapse with extended vocab and a categories>2^24 runtime error are unrelated." + "reason": "LoRA merge collapse with tied embeddings and a categories-count runtime error are unrelated." 
+ }, + { + "left": "issue:43450", + "right": "issue:43575", + "accept": false, + "reason": "Batched video processor shape error and Qwen2-57B-A14B-Instruct TP OOM are unrelated." }, { "left": "issue:43819", "right": "issue:45083", "accept": false, - "reason": "DAC.from_latents mismatch and qwen3_omni_moe feature-extract length helper behavior are different bugs." + "reason": "DAC.from_latents mismatch and a Qwen3 Omni MoE feature-length helper issue are different code paths." }, { "left": "issue:44561", "right": "issue:44779", "accept": false, - "reason": "Removal of is_torch_fx_available breaking remote-code models and Deepseek tokenizer detokenization are unrelated." + "reason": "Removal of is_torch_fx_available and DeepSeek tokenizer regression are unrelated." }, { - "left": "issue:43450", - "right": "issue:43575", + "left": "issue:45290", + "right": "issue:45381", "accept": false, - "reason": "Video processor batched-shape bug and Qwen2-57B-A14B-Instruct OOM on load are different issues." + "reason": "Chat-template tool-call crash and Qwen2.5-VL video vision_position_ids bug are distinct multimodal issues." }, { - "left": "issue:44462", - "right": "issue:44561", + "left": "issue:37428", + "right": "issue:43425", "accept": false, - "reason": "AutoTokenizer ignoring tokenizer.json and removal of is_torch_fx_available are unrelated APIs and fixes." + "reason": "Missing flash-attention helper import and Torch 2.10 incompatibility are not the same bug." }, { - "left": "issue:45290", - "right": "issue:45381", + "left": "issue:44462", + "right": "issue:44561", "accept": false, - "reason": "Chat-template tool-call crash and qwen2.5-vl video vision_position_ids mismatch are different multimodal bugs." + "reason": "Tokenizer.json lookup behavior and trust_remote_code breakage from is_torch_fx_available removal are unrelated." 
}, { - "left": "issue:37428", - "right": "issue:43425", + "left": "issue:44610", + "right": "issue:44857", "accept": false, - "reason": "An import error for a missing flash-attention helper and general Torch 2.10 incompatibility are not the same defect." + "reason": "OmDet-Turbo input-size mismatch and LwDetrImageLoss AMP crash are different failures." }, { "left": "issue:43819", "right": "issue:44112", "accept": false, - "reason": "DAC forward/latents mismatch and a stale CI device-override test are unrelated." + "reason": "DAC latent conversion mismatch and GraniteSpeech CI test failure do not share a concrete underlying defect." } ] }, @@ -7069,7 +7034,6 @@ "issue:44493", "issue:44554", "issue:44568", - "issue:44610", "issue:44743", "issue:44779", "issue:44855", @@ -7090,16 +7054,15 @@ "issue:45712" ], "soft_pairs": [ - "issue:44610|issue:44857", "issue:44857|issue:45593", - "issue:43278|issue:43825", "issue:43262|issue:43450", + "issue:43278|issue:43825", "issue:43606|issue:45083", "issue:45200|issue:45588", - "issue:45310|issue:45478", "issue:44448|issue:44743", - "issue:29942|issue:32090", + "issue:45310|issue:45478", "issue:37428|issue:44351", + "issue:29942|issue:32090", "issue:43335|issue:43550", "issue:43611|issue:43931", "issue:39692|issue:43526", @@ -7116,247 +7079,228 @@ "issue:43550|issue:44112", "issue:38175|issue:44493", "issue:44977|issue:45229", - "issue:45459|issue:45712", - "issue:43723|issue:44554", "issue:43335|issue:45072", + "issue:45459|issue:45712", "issue:40444|issue:43526", - "issue:44568|issue:45491", + "issue:43723|issue:44554", "issue:43335|issue:45593", "issue:43720|issue:44112", - "issue:44448|issue:44977" + "issue:44568|issue:45491" ], - "prepared_review_unit_hash": "4b89467127ff217bcb6338604d1e22cc3bf26461a7d63bde947b19e96f36e98b", + "prepared_review_unit_hash": "4893f886797557ded8acb589c8cb11294498b33c3967d3abc36fb83bb9e3b9d7", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - 
"serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { - "node_count": 48, - "item_count": 48, - "soft_pair_count": 34, - "serialized_chars": 38141, - "estimated_input_tokens": 9536, - "estimated_eval_tokens": 19328 + "node_count": 47, + "item_count": 47, + "soft_pair_count": 32, + "serialized_chars": 36962, + "estimated_input_tokens": 9241, + "estimated_eval_tokens": 18738 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly unrelated issues; only one soft edge is a true duplicate pair around the Qwen3.5 MoE `from_pretrained` loading error. All other candidate links look like false positives across different models, symptoms, or code paths.", - "confidence": 0.86, - "canonical_issue_reason": "Issue 45310 is the earlier report of the exact same Qwen3.5 MoE `from_pretrained` failure and is the best canonical anchor for the duplicate pair with 45478.", + "summary": "This cluster is mostly a grab-bag of unrelated Transformers issues across many model families and subsystems. 
The only clear duplicate is the repeated Qwen3.5 MoE `from_pretrained` regression; the rest are only loosely similar by broad theme and should stay separate.", + "confidence": 0.95, + "canonical_issue_reason": "Issue:45310 is the best canonical issue because it is the cleaner, earlier report of the Qwen3.5 MoE `from_pretrained` regression, and it is an exact duplicate of issue:45478.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 45310 is the clearest representative because it matches 45478 exactly in title and underlying bug, making it the strongest issue to keep as the cluster anchor.", + "best_issue_reason": "Issue:45310 is the strongest representative issue in the cluster since it has a clear, concrete failure mode and matches another report verbatim.", "best_pr_reason": null, "soft_edge_verdicts": [ - { - "left": "issue:44610", - "right": "issue:44857", - "accept": false, - "reason": "Different products and failures: OmDet input-size mismatch vs LwDetrImageLoss AMP/CUDA crash." - }, { "left": "issue:44857", "right": "issue:45593", "accept": false, - "reason": "Unrelated bugs: AMP/CUDA loss crash vs D-FINE missing auxiliary losses." + "reason": "Both are vision/detection losses, but they describe different failures: AMP/CUDA crash vs missing auxiliary losses." }, { - "left": "issue:43278", - "right": "issue:43825", + "left": "issue:43262", + "right": "issue:43450", "accept": false, - "reason": "Embedding dtype regression and pipeline translation-task messaging are different code paths." + "reason": "Different media pipelines and different bugs: audio chat-template sampling rate vs batched video output shape." }, { - "left": "issue:43262", - "right": "issue:43450", + "left": "issue:43278", + "right": "issue:43825", "accept": false, - "reason": "Audio chat-template sampling-rate default vs video batch-shape bug; different modalities." 
+ "reason": "One is a dtype regression in evaluation, the other is a v5 pipeline error-message issue; no shared code-path bug." }, { "left": "issue:43606", "right": "issue:45083", "accept": false, - "reason": "Bark CPU offload device mismatch is unrelated to qwen3_omni_moe feature-length helper behavior." + "reason": "Completely different components: CPU offload device mismatch vs a Qwen3 Omni MoE feature-length helper behavior." }, { "left": "issue:45200", "right": "issue:45588", "accept": false, - "reason": "Gemma4 `mm_token_type_ids` defaulting issue is not the same as the flash-attention `s_aux=None` crash." + "reason": "Different models and failures: Gemma4 token-type defaults vs flash-attention crash on sink-less models." + }, + { + "left": "issue:44448", + "right": "issue:44743", + "accept": false, + "reason": "Both affect generation behavior, but the concrete bugs differ: Pegasus output drift vs Qwen3.5 recurrent-state reset." }, { "left": "issue:45310", "right": "issue:45478", "accept": true, - "reason": "Exact same title and same Qwen3.5 MoE `from_pretrained` error; 45478 is a duplicate of 45310." + "reason": "Same exact reported bug: Qwen3.5 MoE `from_pretrained` failure on transformers>=5.4.0." }, { - "left": "issue:44448", - "right": "issue:44743", + "left": "issue:37428", + "right": "issue:44351", "accept": false, - "reason": "Pegasus output mismatch and Qwen3.5 recurrent-state reset bug are different model behaviors." + "reason": "Both are import errors, but they involve different symbols and different failure causes." }, { "left": "issue:29942", "right": "issue:32090", "accept": false, - "reason": "Flash Attention 2 test failures and `_gpu_broadcast_one` NoneType errors are different issues." - }, - { - "left": "issue:37428", - "right": "issue:44351", - "accept": false, - "reason": "Both are import errors, but they involve different missing symbols and different modules/versions." 
+ "reason": "Flash Attention 2 test failures and a Trainer `_gpu_broadcast_one` NoneType error are unrelated." }, { "left": "issue:43335", "right": "issue:43550", "accept": false, - "reason": "SwitchTransformers sparse-layer config bug is unrelated to Bamba torch.compile SDPA failure." + "reason": "Different models and paths: SwitchTransformers sparse-layer config bug vs Bamba torch.compile/SDPA failure." }, { "left": "issue:43611", "right": "issue:43931", "accept": false, - "reason": "Base-model-prefix loading regression and Qwen3-VL weight-shape mismatch are distinct loading problems." + "reason": "Both are loading problems, but one concerns `base_model_prefix` and the other a specific Qwen3-VL weight-shape mismatch." }, { "left": "issue:39692", "right": "issue:43526", "accept": false, - "reason": "SigLIP2 docs example errors do not match the BeitImageProcessorFast `reduce_labels` bug." + "reason": "SigLIP2 doc-example issues and BeitImageProcessorFast label reduction are unrelated processor/model bugs." }, { "left": "issue:43819", "right": "issue:44493", "accept": false, - "reason": "DAC latent/forward mismatch and unexpected position-id key are different symptoms in different models." + "reason": "Different code paths: DAC latent decoding mismatch vs widespread unexpected position-id key warnings." }, { "left": "issue:45200", "right": "issue:45381", "accept": false, - "reason": "Gemma4 text-only fine-tuning defaults and Qwen2.5-VL video position IDs are separate bugs." + "reason": "Both involve multimodal inputs, but one is token-type defaults and the other is incorrect vision position IDs." }, { "left": "issue:43335", "right": "issue:43653", "accept": false, - "reason": "SwitchTransformers config creation and BigBirdTokenizer special-token registration are unrelated." + "reason": "Different tokenizer/model bugs: sparse-layer creation vs BigBird mask token registration." 
}, { "left": "issue:29127", "right": "issue:32090", "accept": false, - "reason": "LayoutLMv3 error-message clarity and trainer broadcast TypeError are not the same bug." + "reason": "A model-specific layoutlmv3 error message issue is not the same bug as Trainer GPU broadcast failing on NoneType." }, { "left": "issue:44292", "right": "issue:45520", "accept": false, - "reason": "Qwen-3-8B-NVFP4 runtime failure and Python 3.13 flash_attn import KeyError are different failures." + "reason": "Different runtime failures: Qwen-3-8B-NVFP4 execution vs a Python 3.13 `flash_attn` import key error." }, { "left": "issue:44855", "right": "issue:45083", "accept": false, - "reason": "Python 3.13 IndentationError in DebertaV2 import is unrelated to the qwen3_omni_moe helper behavior." - }, - { - "left": "issue:43335", - "right": "issue:45083", - "accept": false, - "reason": "SwitchTransformers sparse-layer config and qwen3_omni_moe feature-length helper issues do not share the same code path." - }, - { - "left": "issue:44448", - "right": "issue:44779", - "accept": false, - "reason": "Pegasus v4/v5 output difference and Deepseek tokenizer regression are different model-specific bugs." + "reason": "DebertaV2 Python 3.13 parsing/import issue is unrelated to the Qwen3 Omni MoE helper function behavior." }, { "left": "issue:43335", "right": "issue:43504", "accept": false, - "reason": "SwitchTransformers config bug and Beit semantic-segmentation legacy-field load failure are unrelated." + "reason": "Both touch model setup, but one is SwitchTransformers config creation and the other is BEiT pretrained loading legacy-field handling." }, { "left": "issue:39692", "right": "issue:44493", "accept": false, - "reason": "SigLIP2 documentation/model mismatch does not match the position-id key regression." + "reason": "A SigLIP2 documentation/example regression is not the same as a broad position-id key issue across many models." 
}, { "left": "issue:43550", "right": "issue:44112", "accept": false, - "reason": "Bamba SDPA/torch.compile bug and GraniteSpeech stale CI device override test are different issues." + "reason": "A model compilation/SDPA bug and a stale CI device-override test are different problems." }, { "left": "issue:38175", "right": "issue:44493", "accept": false, - "reason": "Zero probabilities in SigLIP2 and unexpected position-id keys are unrelated symptoms." + "reason": "Zero probabilities in SigLIP2 are unrelated to unexpected position-id keys." }, { "left": "issue:44977", "right": "issue:45229", "accept": false, - "reason": "Qwen3.5 flash-attention generation failure and Gemma4 multi-GPU OOM are different problems." + "reason": "Qwen3.5 flash-attention generation failure and Gemma4 multi-GPU OOM are distinct performance/behavior issues." }, { - "left": "issue:45459", - "right": "issue:45712", + "left": "issue:43335", + "right": "issue:45072", "accept": false, - "reason": "Tokenizer protobuf error handling and leftover dummy classes are unrelated maintenance issues." + "reason": "Different model families and symptoms: SwitchTransformers sparse-layer config vs bfloat16 inference dtype mismatches." }, { - "left": "issue:43723", - "right": "issue:44554", + "left": "issue:45459", + "right": "issue:45712", "accept": false, - "reason": "AutoTokenizer v5 loading regression and MPS attention correctness bug are different code paths." + "reason": "Tokenizer protobuf error hiding and dummy-class cleanup are unrelated repository issues." }, { - "left": "issue:43335", - "right": "issue:45072", + "left": "issue:40444", + "right": "issue:43526", "accept": false, - "reason": "SwitchTransformers sparse-layer config and bfloat16 dtype mismatch in inference are not the same bug." + "reason": "Qwen2.5-VL iterable dataset multi-image failure is unrelated to BeitImageProcessorFast label reduction." 
}, { - "left": "issue:40444", - "right": "issue:43526", + "left": "issue:43723", + "right": "issue:44554", "accept": false, - "reason": "Qwen2.5-VL iterable-dataset multi-image finetuning failure is unrelated to BeitImageProcessorFast label reduction." + "reason": "Tokenizer loading in v5 and an MPS attention correctness issue are unrelated." }, { - "left": "issue:44568", - "right": "issue:45491", + "left": "issue:43335", + "right": "issue:45593", "accept": false, - "reason": "Tokenizer special-token regression and Gemma3 NaN embeddings are different failure modes." + "reason": "SwitchTransformers sparse-layer config bug and D-FINE auxiliary-loss behavior do not share the same code-path." }, { "left": "issue:43720", "right": "issue:44112", "accept": false, - "reason": "BitNet packed-weight loading issue and GraniteSpeech CI test failure are unrelated." + "reason": "BitNet packed-weight loading and a GraniteSpeech CI device override test are different failures." }, { - "left": "issue:44448", - "right": "issue:44977", + "left": "issue:44568", + "right": "issue:45491", "accept": false, - "reason": "Pegasus output drift and Qwen3.5 flash-attention generation bug are different model issues." + "reason": "Tokenizer special-token handling and Gemma3 NaN embeddings under sliding-window attention are unrelated." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is well grounded in the packet: the only clearly supported duplicate is issue:45310 vs issue:45478, which have identical titles. The other pair verdicts are conservative and do not overclaim shared root causes beyond what the titles suggest." + "feedback": "Grounded in the packet: the cluster is indeed dominated by disparate issue titles across unrelated subsystems, and the Qwen3.5 MoE `from_pretrained` pair is the only clear duplicate based on identical titles and chronology. The soft-edge rejections are conservative overall and do not overstate linkage." 
}, "evaluator_used": true, "retried": false, @@ -7393,7 +7337,6 @@ "issue:43329", "issue:43381", "issue:43412", - "issue:43421", "issue:43550", "issue:43606", "issue:43653", @@ -7402,6 +7345,7 @@ "issue:43927", "issue:43976", "issue:43994", + "issue:44186", "issue:44448", "issue:44451", "issue:44462", @@ -7413,6 +7357,7 @@ "issue:44987", "issue:45072", "issue:45083", + "issue:45198", "issue:45229", "issue:45245", "issue:45372", @@ -7420,7 +7365,6 @@ "issue:45412", "issue:45440", "issue:45459", - "issue:45491", "issue:45520", "issue:45563", "issue:45588", @@ -7428,21 +7372,22 @@ ], "soft_pairs": [ "issue:34689|issue:35141", - "issue:43066|issue:44462", - "issue:44977|issue:45563", - "issue:43927|issue:45698", + "issue:44448|issue:44977", "issue:43606|issue:44857", + "issue:44857|issue:45072", + "issue:43066|issue:44462", "issue:37428|issue:44987", + "issue:43927|issue:45698", "issue:45372|issue:45588", - "issue:44857|issue:45072", + "issue:44977|issue:45563", "issue:43701|issue:45083", - "issue:43976|issue:45440", + "issue:43873|issue:44493", "issue:43550|issue:45083", + "issue:43976|issue:45440", "issue:43412|issue:45588", - "issue:45245|issue:45563", - "issue:43873|issue:44493", - "issue:43873|issue:45229", "issue:29942|issue:35141", + "issue:43873|issue:45229", + "issue:45245|issue:45563", "issue:43278|issue:43329", "issue:32090|issue:33357", "issue:45372|issue:45406", @@ -7453,233 +7398,239 @@ "issue:44448|issue:44466", "issue:44451|issue:45520", "issue:25251|issue:32090", - "issue:43653|issue:44743", "issue:44857|issue:45588", - "issue:43994|issue:45412", + "issue:43653|issue:44743", "issue:43278|issue:43381", + "issue:43994|issue:45412", "issue:42915|issue:42994", - "issue:43421|issue:45491" + "issue:44186|issue:45198" ], - "prepared_review_unit_hash": "96a750d3b4514a539658e6e99369c44a2e75fa7426dbe81084a8e97749ee4096", + "prepared_review_unit_hash": "6205f2b30e277a15cc26d0234eec7189de6b5085e8cb8bf75bb5b50676e7cd96", "component_budget": { "node_count": 382, 
"item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 32, - "serialized_chars": 37256, - "estimated_input_tokens": 9314, - "estimated_eval_tokens": 18884 + "soft_pair_count": 33, + "serialized_chars": 37489, + "estimated_input_tokens": 9373, + "estimated_eval_tokens": 19002 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is a grab-bag of unrelated Transformers issues across loading, quantization, attention, tokenizers, and multimodal models. None of the soft pairs look like the same underlying bug/change.", - "confidence": 0.95, - "canonical_issue_reason": "No canonical issue stands out: the items in this cluster are not duplicates of one another, but separate bug reports.", + "summary": "These issues are superficially similar in wording but describe different models, features, and failure modes; I found no strong duplicate pairs. 
The most central issue is the open offloading/quantization report.", + "confidence": 0.74, + "canonical_issue_reason": "issue:43873 is the most central/open item in the set, with the most inbound references and discussion; its offloading/quantization scope is broad enough to act as the cluster representative.", "canonical_pr_reason": null, - "best_issue_reason": "No single issue is a good global representative because the cluster spans many unrelated code paths and failure modes.", + "best_issue_reason": "issue:43873 is the best representative issue because it is open, broadly scoped, and appears to be the most referenced problem in the group.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:34689", "right": "issue:35141", "accept": false, - "reason": "Both concern model loading/post-init behavior, but one is a Llama 3.2 vision load regression and the other is lm_head reinitialization when resizing embeddings." + "reason": "Different bug classes: model loading regression for Llama 3.2 vision vs. output embedding reinitialization after resize/tied weights." }, { - "left": "issue:43066", - "right": "issue:44462", + "left": "issue:44448", + "right": "issue:44977", "accept": false, - "reason": "Different tokenizer bugs: wrong decoder type in v5 versus AutoTokenizer ignoring tokenizer.json from the repo." + "reason": "Both mention output differences, but one is Pegasus v4/v5 behavior and the other is Qwen3.5 flash-attention generation; different code paths." }, { - "left": "issue:44977", - "right": "issue:45563", + "left": "issue:43606", + "right": "issue:44857", "accept": false, - "reason": "One is a flash-attention generation failure; the other is a stale warning in paged generate(). Different code paths and symptoms." + "reason": "Device-mismatch offload failure for bark-small vs. AMP float16 crash in LwDetrImageLoss; unrelated symptoms and subsystems." 
}, { - "left": "issue:43927", - "right": "issue:45698", + "left": "issue:44857", + "right": "issue:45072", "accept": false, - "reason": "Both involve save/load, but one loses custom token IDs and the other loads the wrong custom module. Not the same bug." + "reason": "One is a loss/AMP CUDA crash, the other is a dtype mismatch during bfloat16 inference in different models." }, { - "left": "issue:43606", - "right": "issue:44857", + "left": "issue:43066", + "right": "issue:44462", "accept": false, - "reason": "Different failures: CPU offload device mismatch versus float16 AMP crash on CUDA." + "reason": "Tokenizer decoder type mismatch in v5 vs. AutoTokenizer ignoring tokenizer.json; related area, but not the same underlying bug." }, { "left": "issue:37428", "right": "issue:44987", "accept": false, - "reason": "ImportError for a missing flash-attention helper is unrelated to failing to load a specific model." + "reason": "Different import/version issues: missing flash attention symbol vs. failing model load for physical-intelligence/fast." + }, + { + "left": "issue:43927", + "right": "issue:45698", + "accept": false, + "reason": "Both involve save/load and custom module handling, but one is config token IDs causing generation IndexError and the other is loading the wrong custom module after save_pretrained." }, { "left": "issue:45372", "right": "issue:45588", "accept": false, - "reason": "Gemma4 processor import/loading breakage is unrelated to an AttributeError in flash_attention.py for sink-less models." + "reason": "Gemma 4 processor import failure from mistral_common vs. flash_attention.py crashing on s_aux=None; different failure points." }, { - "left": "issue:44857", - "right": "issue:45072", + "left": "issue:44977", + "right": "issue:45563", "accept": false, - "reason": "Both are dtype-related, but one crashes in LwDetrImageLoss under AMP and the other is a bfloat16 mismatch in different models." 
+ "reason": "Qwen3.5 flash-attention generation bug is unrelated to a stale warning in paged generate()." }, { "left": "issue:43701", "right": "issue:45083", "accept": false, - "reason": "resume_from_checkpoint key mismatch is unrelated to qwen3_omni_moe feature-length helper behavior." + "reason": "Checkpoint resume key mismatch vs. unexpected helper output lengths in qwen3_omni_moe; unrelated bugs." }, { - "left": "issue:43976", - "right": "issue:45440", + "left": "issue:43873", + "right": "issue:44493", "accept": false, - "reason": "Python version incompatibility is not the same as DeepseekV3MoE diverging from a remote implementation." + "reason": "Offloading/quantization behavior vs. unexpected position-id key warnings; not the same code-path problem." }, { "left": "issue:43550", "right": "issue:45083", "accept": false, - "reason": "torch.compile + SDPA on Bamba is a different bug from qwen3_omni_moe output-length computation." + "reason": "torch.compile with SDPA in Bamba vs. a feature-extraction length helper bug in qwen3_omni_moe; different models and failures." }, { - "left": "issue:43412", - "right": "issue:45588", + "left": "issue:43976", + "right": "issue:45440", "accept": false, - "reason": "Executorch export guards and unsupported ops are unrelated to an s_aux=None flash_attention crash." + "reason": "Python version compatibility issue vs. DeepseekV3MoE divergence from remote implementation; unrelated." }, { - "left": "issue:45245", - "right": "issue:45563", + "left": "issue:43412", + "right": "issue:45588", "accept": false, - "reason": "Category-count overflow is unrelated to a stale warning in paged generate()." + "reason": "Executorch export issues and flash-attention sinkless-model crash are distinct export/runtime paths." }, { - "left": "issue:43873", - "right": "issue:44493", + "left": "issue:29942", + "right": "issue:35141", "accept": false, - "reason": "Quantization/offloading behavior is unrelated to unexpected position-id key errors." 
+ "reason": "Flash Attention 2 test failures are unrelated to embedding reinitialization after resize." }, { "left": "issue:43873", "right": "issue:45229", "accept": false, - "reason": "Offloading with quantization and Gemma4 multi-GPU OOM are different problems." + "reason": "Quantization/offloading bug vs. multi-GPU CUDA OOM for Gemma4; different resource problems." }, { - "left": "issue:29942", - "right": "issue:35141", + "left": "issue:45245", + "right": "issue:45563", "accept": false, - "reason": "Flash Attention 2 test failures are unrelated to embedding reinitialization after resizing." + "reason": "Category-cardinality runtime error is unrelated to a generate() warning." }, { "left": "issue:43278", "right": "issue:43329", "accept": false, - "reason": "Embedding dtype drift between train/eval is unrelated to the undefined video-branch variables in multimodal token counting." + "reason": "Embedding dtype drift between train/eval vs. multimodal token counting bug in video branch; different components." }, { "left": "issue:32090", "right": "issue:33357", "accept": false, - "reason": "Trainer broadcast NoneType error and MacOS bus error are distinct failures." + "reason": "Trainer broadcast NoneType error vs. MacOS bus error with CLIP; different platforms and failure modes." }, { "left": "issue:45372", "right": "issue:45406", "accept": false, - "reason": "Both mention Gemma4, but one is processor loading import failure and the other is transformers serve missing _tokenizer." + "reason": "Both affect Gemma 4, but one is a processor import dependency issue and the other is a missing _tokenizer attribute in transformers serve." }, { "left": "issue:41720", "right": "issue:45229", "accept": false, - "reason": "Auto device mapping cudaErrorAssert and multi-GPU OOM are different runtime issues." + "reason": "Auto device mapping CUDA assert for Qwen3 vs. Gemma4 multi-GPU OOM; not the same bug." 
}, { "left": "issue:43065", "right": "issue:43994", "accept": false, - "reason": "Sam3PixelDecoder dummy Conv2d is unrelated to SigLIP2 producing nonsensical outputs." + "reason": "Sam3PixelDecoder dummy Conv2d issue vs. SigLIP2 nonsensical AutoModel/pipeline results; different models and causes." }, { "left": "issue:45372", "right": "issue:45459", "accept": false, - "reason": "Gemma4 processor import breakage is unrelated to tokenizer errors being masked when protobuf is absent." + "reason": "Gemma 4 processor import regression vs. protobuf-related tokenizer error masking; unrelated." }, { "left": "issue:43065", "right": "issue:45412", "accept": false, - "reason": "A dummy Conv2d in Sam3PixelDecoder and RT-DETR memory not being released are unrelated bugs." + "reason": "Dummy Conv2d in Sam3PixelDecoder is not the same as RT-DETR memory not being released." }, { "left": "issue:44448", "right": "issue:44466", "accept": false, - "reason": "Different regressions: changed Pegasus generation output versus inconsistent lm_head.weight serialization." + "reason": "Pegasus output changes across versions vs. lm_head.weight serialization inconsistency; both are version-related but different underlying defects." }, { "left": "issue:44451", "right": "issue:45520", "accept": false, - "reason": "Cannot load a specific model is unrelated to a Python 3.13 flash_attn KeyError." + "reason": "ScandiBERT loading problem vs. Python 3.13 flash_attn import KeyError; unrelated loader vs. optional-dependency issue." }, { "left": "issue:25251", "right": "issue:32090", "accept": false, - "reason": "Pipeline top_k nesting change and Trainer _gpu_broadcast_one NoneType error are unrelated." - }, - { - "left": "issue:43653", - "right": "issue:44743", - "accept": false, - "reason": "Tokenizer mask-token registration and qwen3_5 recurrent-state reset are different model/subsystem bugs." + "reason": "Pipeline top_k nesting bug is unrelated to Trainer _gpu_broadcast_one NoneType failures." 
}, { "left": "issue:44857", "right": "issue:45588", "accept": false, - "reason": "AMP crash in LwDetrImageLoss is unrelated to flash_attention.py assuming s_aux is not None." + "reason": "AMP loss crash and flash_attention sinkless-model crash are different runtime failures." }, { - "left": "issue:43994", - "right": "issue:45412", + "left": "issue:43653", + "right": "issue:44743", "accept": false, - "reason": "SigLIP2 bad outputs and RT-DETR memory leaks are unrelated." + "reason": "Tokenizer special-token registration bug vs. recurrent state reset in modular_qwen3_5; different subsystems." }, { "left": "issue:43278", "right": "issue:43381", "accept": false, - "reason": "Embedding dtype mismatch and gradient-checkpointing-in-eval-mode are separate issues." + "reason": "Embedding dtype change in eval vs. gradient checkpointing disallowed in eval mode; not the same issue." + }, + { + "left": "issue:43994", + "right": "issue:45412", + "accept": false, + "reason": "SigLIP2 wrong outputs vs. RT-DETR memory leak; separate problems." }, { "left": "issue:42915", "right": "issue:42994", "accept": false, - "reason": "Both involve quantization, but one is FineGrainedFP8 runtime failure and the other is quantized model saving." + "reason": "Qwen3Moe FP8 config failure vs. quantized model saving failure; both quantization-related but not the same concrete bug." }, { - "left": "issue:43421", - "right": "issue:45491", + "left": "issue:44186", + "right": "issue:45198", "accept": false, - "reason": "Runtime post-processor updates for tokenizer special tokens are unrelated to Gemma3 NaN embeddings." + "reason": "LayoutLMv2Tokenizer NER/padding crash vs. Wav2Vec2 save_pretrained/tokenization failure; unrelated tokenizer behaviors." 
} ] }, @@ -7721,15 +7672,14 @@ "issue:43334", "issue:43381", "issue:43388", + "issue:43421", "issue:43526", "issue:43531", "issue:43653", "issue:43701", "issue:43756", - "issue:43761", "issue:43824", "issue:43931", - "issue:44079", "issue:44186", "issue:44206", "issue:44265", @@ -7739,6 +7689,7 @@ "issue:44464", "issue:44479", "issue:44610", + "issue:44857", "issue:44871", "issue:44977", "issue:45072", @@ -7753,202 +7704,197 @@ "issue:45593" ], "soft_pairs": [ - "issue:44186|issue:45198", + "issue:43421|issue:45491", + "issue:44361|issue:45198", "issue:43756|issue:45440", "issue:44206|issue:44479", - "issue:44361|issue:45198", - "issue:44871|issue:44977", "issue:44265|issue:45198", + "issue:44871|issue:44977", "issue:43257|issue:43278", - "issue:44610|issue:45412", "issue:29942|issue:30064", + "issue:44610|issue:45412", "issue:45440|issue:45593", - "issue:45103|issue:45372", "issue:29127|issue:34567", - "issue:43824|issue:44279", - "issue:33357|issue:39692", + "issue:45103|issue:45372", "issue:32090|issue:33290", - "issue:43334|issue:43531", + "issue:33357|issue:39692", + "issue:43824|issue:44279", + "issue:43381|issue:43701", "issue:45072|issue:45229", - "issue:43526|issue:44186", + "issue:43334|issue:43531", "issue:43653|issue:44442", - "issue:43381|issue:43701", - "issue:44464|issue:45491", - "issue:44977|issue:45245", + "issue:43526|issue:44186", "issue:29942|issue:36010", + "issue:44464|issue:45491", "issue:29127|issue:33357", + "issue:44977|issue:45245", "issue:42915|issue:43931", "issue:29127|issue:35141", "issue:43232|issue:43388", - "issue:43761|issue:44079", - "issue:43262|issue:44206" + "issue:43526|issue:44857", + "issue:43262|issue:44206", + "issue:44442|issue:45072" ], - "prepared_review_unit_hash": "09b4bcdd496f5dc0613c83b014bdd5cdda95a06fe39898804d21e09e6c3eda09", + "prepared_review_unit_hash": "a9a86d85228a5dbe3eeb5a0868ab06d8a24021afbccb6bbf58e9ed6e55838fad", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - 
"serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 29, - "serialized_chars": 36541, - "estimated_input_tokens": 9136, - "estimated_eval_tokens": 18528 + "soft_pair_count": 30, + "serialized_chars": 36814, + "estimated_input_tokens": 9204, + "estimated_eval_tokens": 18664 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is heterogeneous: the items span unrelated bugs in tokenizers, generation, vision/audio processors, training, and model-loading regressions. None of the soft-edge pairs look like the same underlying bug or change, so all are rejected.", - "confidence": 0.91, - "canonical_issue_reason": "issue:45198 is the most concrete, self-contained bug report in the set (a reproducible tokenizer/save_pretrained failure), so it is the least-bad anchor if one must be chosen; however, the cluster is not a true duplicate group.", + "summary": "This cluster is highly heterogeneous: the issues span unrelated tokenizer, generation, vision, audio, training, and dtype/precision bugs. 
I do not see a credible duplicate core, and there are no PRs to consolidate.", + "confidence": 0.94, + "canonical_issue_reason": "No single canonical issue stands out; the items are not the same underlying bug and should not be merged as duplicates.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45198 is the strongest standalone issue to keep as an anchor: it has a clear failure mode and actionable repro, while the rest of the cluster does not cohere around it.", + "best_issue_reason": "No issue is a strong global representative because the cluster is not a coherent duplicate set; the similarity signal appears to be broad Transformers/topic overlap rather than shared root cause.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:44186", + "left": "issue:43421", + "right": "issue:45491", + "accept": false, + "reason": "Different problems: runtime special-token post-processing update vs Gemma3 NaN embeddings from sliding-window/all-padding behavior." + }, + { + "left": "issue:44361", "right": "issue:45198", "accept": false, - "reason": "Different models and failure paths: LayoutLMv2 tokenizer NER/padding crash vs Wav2Vec2 save_pretrained/tokenization failure." + "reason": "Different models and failures: MLukeTokenizer task AttributeError vs Wav2Vec2 save_pretrained/tokenization breakage." }, { "left": "issue:43756", "right": "issue:45440", "accept": false, - "reason": "Different model families and bugs: Smollm3 RoPE-layer selection vs DeepSeekV3MoE implementation divergence." + "reason": "Both are model-specific, but they describe different bugs: Smollm3 RoPE layer dropping vs DeepSeekV3 implementation divergence." }, { "left": "issue:44206", "right": "issue:44479", "accept": false, - "reason": "Different components and symptoms: feature extractor center-arg crash vs Qwen VL video regression." + "reason": "Unsupported feature argument in LASR extractor vs Qwen video-input regression; unrelated code paths." 
}, { - "left": "issue:44361", + "left": "issue:44265", "right": "issue:45198", "accept": false, - "reason": "Both involve tokenizers, but the affected models and root failures differ; not the same bug." + "reason": "torch.export/torch_compilable_check failure is unrelated to Wav2Vec2 tokenizer/save_pretrained issues." }, { "left": "issue:44871", "right": "issue:44977", "accept": false, - "reason": "Unrelated model/config mismatch vs flash-attention generation regression." - }, - { - "left": "issue:44265", - "right": "issue:45198", - "accept": false, - "reason": "Different subsystems: torch.export/torch_compilable_check vs tokenizer save/tokenization failure." + "reason": "Gemma eos_token_id config mismatch is a different bug from Qwen3.5 flash-attention generation failures." }, { "left": "issue:43257", "right": "issue:43278", "accept": false, - "reason": "Both mention model loading/training behavior, but one is Qwen3 MoE weight conversion and the other is evaluation dtype drift." + "reason": "Accelerate/deepspeed weight conversion issue vs embedding dtype drift between train and eval; different symptoms and causes." }, { - "left": "issue:44610", - "right": "issue:45412", + "left": "issue:29942", + "right": "issue:30064", "accept": false, - "reason": "Processor input-size mismatch vs RT-DETR memory release/GC bug; not the same underlying issue." + "reason": "Flash Attention 2 test failures are unrelated to void segmentation-map processing in an image processor." }, { - "left": "issue:29942", - "right": "issue:30064", + "left": "issue:44610", + "right": "issue:45412", "accept": false, - "reason": "Failing Flash Attention 2 tests and void segmentation-map processing are unrelated." + "reason": "Processor size mismatch vs RT-DETR memory not being released; not the same defect." }, { "left": "issue:45440", "right": "issue:45593", "accept": false, - "reason": "DeepSeekV3 implementation divergence and D-FINE auxiliary-loss handling are different model-specific bugs." 
+ "reason": "DeepSeekV3 native-vs-remote divergence and D-FINE auxiliary-loss omission are distinct model bugs." }, { - "left": "issue:45103", - "right": "issue:45372", + "left": "issue:29127", + "right": "issue:34567", "accept": false, - "reason": "auto_docstring annotation handling and mistral_common ReasoningEffort import failure are unrelated." + "reason": "LayoutLMv3 error-message clarity and TrainerState token-count updating are unrelated issues." }, { - "left": "issue:29127", - "right": "issue:34567", + "left": "issue:45103", + "right": "issue:45372", "accept": false, - "reason": "LayoutLMv3 error-message clarity and TrainerState token counting are different code paths." + "reason": "auto_docstring annotation handling vs missing ReasoningEffort import; different subsystems." }, { - "left": "issue:43824", - "right": "issue:44279", + "left": "issue:32090", + "right": "issue:33290", "accept": false, - "reason": "Specific Qwen2.5-VL import error vs generic dependency issue; too broad and not the same bug." + "reason": "NoneType broadcast error in Trainer vs deepspeed/adafactor OOM; no shared root cause." }, { "left": "issue:33357", "right": "issue:39692", "accept": false, - "reason": "MacOS bus error for CLIP model vs SigLIP2 documentation/example errors are different problems." + "reason": "MacOS bus error with CLIP and SigLIP2 doc/example mismatch are different bugs." }, { - "left": "issue:32090", - "right": "issue:33290", + "left": "issue:43824", + "right": "issue:44279", "accept": false, - "reason": "Trainer broadcast NoneType error and Deepspeed Adafactor OOM are unrelated training failures." + "reason": "Specific Qwen2_5_VL import failure is not the same as a generic transformers dependency issue." }, { - "left": "issue:43334", - "right": "issue:43531", + "left": "issue:43381", + "right": "issue:43701", "accept": false, - "reason": "Qwen3-VL pad_token_id loading bug and Qwen3-MoE sliding_window bug are different model issues." 
+ "reason": "Gradient checkpointing in eval mode and resume_from_checkpoint key mismatch are unrelated training bugs." }, { "left": "issue:45072", "right": "issue:45229", "accept": false, - "reason": "bfloat16 dtype mismatch in inference is unrelated to Gemma4 multi-GPU OOM." + "reason": "bf16 dtype mismatch in inference is unrelated to Gemma4 multi-GPU OOM." }, { - "left": "issue:43526", - "right": "issue:44186", + "left": "issue:43334", + "right": "issue:43531", "accept": false, - "reason": "BeitImageProcessorFast label reduction bug is unrelated to LayoutLMv2 tokenizer crashes." + "reason": "Missing pad_token_id on Qwen3-VL differs from Qwen3-MoE sliding_window behavior." }, { "left": "issue:43653", "right": "issue:44442", "accept": false, - "reason": "BigBirdTokenizer special-token registration and AutoTokenizer loading FastSpeech2ConformerTokenizer are different tokenizer bugs." - }, - { - "left": "issue:43381", - "right": "issue:43701", - "accept": false, - "reason": "Gradient checkpointing in eval mode and resume-from-checkpoint key mismatch are not the same issue." + "reason": "BigBirdTokenizer special-token registration bug is unrelated to AutoTokenizer loading FastSpeech2ConformerTokenizer." }, { - "left": "issue:44464", - "right": "issue:45491", - "accept": false, - "reason": "Compiled-forward chunked generation inconsistency vs Gemma3 NaN embeddings from sliding-window padding are distinct failures." - }, - { - "left": "issue:44977", - "right": "issue:45245", + "left": "issue:43526", + "right": "issue:44186", "accept": false, - "reason": "Qwen3.5 flash-attention generation bug and category-count limit error are unrelated." + "reason": "BeitImageProcessorFast label reduction and LayoutLMv2Tokenizer NER/padding crashes affect different components." }, { "left": "issue:29942", "right": "issue:36010", "accept": false, - "reason": "Flash Attention 2 test failures and GenerationMixin import error do not share a concrete bug." 
+ "reason": "Flash Attention 2 test regressions are unrelated to the GenerationMixin import error." + }, + { + "left": "issue:44464", + "right": "issue:45491", + "accept": false, + "reason": "Compiled-forward chunked generation inconsistency is a different bug from Gemma3 sliding-window NaNs." }, { "left": "issue:29127", @@ -7956,35 +7902,47 @@ "accept": false, "reason": "LayoutLMv3 error messaging and CLIP MacOS bus error are unrelated." }, + { + "left": "issue:44977", + "right": "issue:45245", + "accept": false, + "reason": "Flash-attention generation failure is unrelated to the category-count limit runtime error." + }, { "left": "issue:42915", "right": "issue:43931", "accept": false, - "reason": "Qwen3Moe FineGrainedFP8Config failure and Qwen3-VL shape-mismatch loading error are different bugs." + "reason": "Qwen3Moe FP8 config failure and Qwen3-VL weight-shape mismatch are different model-loading issues." }, { "left": "issue:29127", "right": "issue:35141", "accept": false, - "reason": "Different behavior and modules: layoutlmv3 error handling vs embedding reinit in post_init." + "reason": "LayoutLMv3 error clarity and embedding reinitialization on resize are unrelated." }, { "left": "issue:43232", "right": "issue:43388", "accept": false, - "reason": "Generation kwargs sync issue and gather_for_metrics tuple-label truncation are unrelated." + "reason": "Generation kwarg handling after sync_gpus and gather_for_metrics label truncation are different code paths." }, { - "left": "issue:43761", - "right": "issue:44079", + "left": "issue:43526", + "right": "issue:44857", "accept": false, - "reason": "CLIPVisionModel hidden_states regression and ModelOutput key assignment bug are not the same concrete failure." + "reason": "ImageProcessor label reduction bug vs CUDA float16 AMP crash in LwDetrImageLoss; unrelated." 
}, { "left": "issue:43262", "right": "issue:44206", "accept": false, - "reason": "Audio chat-template sampling-rate default and LasrFeatureExtractor center-arg crash are different processor bugs." + "reason": "Audio chat-template sampling-rate default mismatch is unrelated to LASR feature-extractor argument handling." + }, + { + "left": "issue:44442", + "right": "issue:45072", + "accept": false, + "reason": "Tokenizer loading failure and bf16 inference dtype mismatch are not the same underlying bug." } ] }, @@ -8011,6 +7969,7 @@ "issue:33290", "issue:34567", "issue:34689", + "issue:35141", "issue:39692", "issue:41628", "issue:42907", @@ -8021,10 +7980,10 @@ "issue:43493", "issue:43525", "issue:43526", + "issue:43550", "issue:43575", "issue:43611", "issue:43646", - "issue:43673", "issue:43701", "issue:43723", "issue:43761", @@ -8033,19 +7992,17 @@ "issue:43931", "issue:43994", "issue:44077", + "issue:44079", "issue:44188", "issue:44206", "issue:44387", "issue:44442", "issue:44479", - "issue:44610", "issue:44743", "issue:44811", - "issue:44857", "issue:44898", "issue:44977", "issue:45003", - "issue:45072", "issue:45216", "issue:45290", "issue:45372", @@ -8058,245 +8015,245 @@ "issue:45698" ], "soft_pairs": [ - "issue:43526|issue:44857", "issue:43441|issue:45588", + "issue:43761|issue:44079", + "issue:43526|issue:43994", "issue:45003|issue:45440", - "issue:44442|issue:45072", "issue:43723|issue:45479", - "issue:43526|issue:44610", "issue:44743|issue:44811", "issue:43452|issue:45588", "issue:43316|issue:43525", "issue:43646|issue:44077", + "issue:43575|issue:43701", "issue:45588|issue:45600", - "issue:43761|issue:44188", "issue:44479|issue:45290", - "issue:43575|issue:43701", "issue:44977|issue:45584", + "issue:43761|issue:44188", "issue:43931|issue:45216", - "issue:42907|issue:45698", "issue:43575|issue:44387", - "issue:39692|issue:45491", + "issue:42907|issue:45698", "issue:43994|issue:44206", "issue:43873|issue:45588", + "issue:39692|issue:45491", 
"issue:34689|issue:43611", "issue:33290|issue:34567", - "issue:43673|issue:44743", "issue:44442|issue:44898", "issue:43493|issue:43827", + "issue:33290|issue:34689", "issue:43381|issue:43873", + "issue:33290|issue:35141", + "issue:43550|issue:44743", "issue:41628|issue:45372", - "issue:44077|issue:44479", - "issue:33290|issue:34689" + "issue:44077|issue:44479" ], - "prepared_review_unit_hash": "e4ac15b8b090e3bf71cbf61324c2dddc3f07d69a4aa4362cc441f7582c855015", + "prepared_review_unit_hash": "e5d3b97c019d126817440bb3cea49e897166e1ddcd31f80083c7279070405d2f", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { - "node_count": 48, - "item_count": 48, + "node_count": 47, + "item_count": 47, "soft_pair_count": 30, - "serialized_chars": 36704, - "estimated_input_tokens": 9176, - "estimated_eval_tokens": 18608 + "serialized_chars": 36194, + "estimated_input_tokens": 9049, + "estimated_eval_tokens": 18354 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This is a heterogeneous cluster of mostly unrelated Transformers bug/regression reports. None of the soft edges are strong enough to treat as duplicates; the best representative issue is the broad, active quantization/offloading report.", - "confidence": 0.66, - "canonical_issue_reason": "issue:43873 is the broadest and most active open bug in the set, with multiple inbound references and discussion, so it best serves as the umbrella issue for this mixed cluster.", + "summary": "The cluster is mostly a set of superficially similar but distinct Transformers bugs; none of the soft pairs look like true duplicates. 
Several involve loading/saving, attention, tokenizer, or model-output issues, but the concrete failure modes differ.", + "confidence": 0.86, + "canonical_issue_reason": "issue:45588 is the most central concrete bug report in this cluster and appears in multiple soft-similarity links, but it should be treated as an anchor rather than a duplicate umbrella.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43873 is the most representative issue overall: it is open, well-discussed, and spans a core runtime area that can absorb related quantization/offloading follow-ups.", + "best_issue_reason": "issue:45588 is the best representative issue because it is specific, actionable, and the most hub-like item among the candidates, even though it does not unify the others.", "best_pr_reason": null, "soft_edge_verdicts": [ - { - "left": "issue:43526", - "right": "issue:44857", - "accept": false, - "reason": "Different failures in different subsystems: BEiT fast image label reduction vs LwDetr AMP/CUDA crash." - }, { "left": "issue:43441", "right": "issue:45588", "accept": false, - "reason": "Ministral FlashAttention regression and sink-less flash_attention.py AttributeError are unrelated code paths and symptoms." + "reason": "Different bugs: model-specific FlashAttention generation failure vs a flash_attention.py AttributeError on s_aux=None for sink-less models." }, { - "left": "issue:45003", - "right": "issue:45440", + "left": "issue:43761", + "right": "issue:44079", "accept": false, - "reason": "Generic modeling_utils sys.modules safety bug is not the same as DeepseekV3MoE behavioral divergence." + "reason": "Related area, but not the same bug: one is CLIPVisionModel.forward dropping hidden_states, the other is ModelOutput key assignment when a value was previously None." 
}, { - "left": "issue:44442", - "right": "issue:45072", + "left": "issue:43526", + "right": "issue:43994", "accept": false, - "reason": "Tokenizer loading failure and dtype mismatch in bfloat16 inference are different bugs." + "reason": "Different model/processor failures: BeitImageProcessorFast reduce_labels behavior vs SigLIP2 nonsensical outputs with AutoModel/pipeline." }, { - "left": "issue:43723", - "right": "issue:45479", + "left": "issue:45003", + "right": "issue:45440", "accept": false, - "reason": "AutoTokenizer v5 loading issue and zero-loss sequence-classification bug are unrelated." + "reason": "Unrelated issues: unsafe sys.modules access in modeling_utils vs DeepseekV3MoE diverging from remote implementation." }, { - "left": "issue:43526", - "right": "issue:44610", + "left": "issue:43723", + "right": "issue:45479", "accept": false, - "reason": "BEiT label reduction bug and OmDet image-size mismatch affect different models and code paths." + "reason": "Different failure modes: AutoTokenizer loading in v5 vs degenerate zero-loss classification with num_labels=1." }, { "left": "issue:44743", "right": "issue:44811", "accept": false, - "reason": "Qwen recurrent-state reset and Whisper batch_decode token skipping are separate generation/processor bugs." + "reason": "Separate code paths: recurrent state reset with cache/seq_len>1 vs Whisper batch_decode ignoring skip_special_tokens." }, { "left": "issue:43452", "right": "issue:45588", "accept": false, - "reason": "GGUF tokenizer/model loading issue is not the same as the flash_attention.py s_aux=None crash." + "reason": "Tokenizer/model loading with gguf_file is unrelated to the flash_attention integration crash." }, { "left": "issue:43316", "right": "issue:43525", "accept": false, - "reason": "Both are config/API issues, but one is Gemma3TextConfig inconsistency and the other is a missing Llama4Config attribute." 
+ "reason": "Both are config/API inconsistencies, but they concern different models and different missing attributes; not the same underlying bug." }, { "left": "issue:43646", "right": "issue:44077", "accept": false, - "reason": "Custom model initialization regression and patchtsmixer post_init policy change are unrelated." + "reason": "Broadly about initialization, but not the same concrete defect: generic v5 custom model init breakage vs patchtsmixer-specific optional post_init handling." }, { - "left": "issue:45588", - "right": "issue:45600", + "left": "issue:43575", + "right": "issue:43701", "accept": false, - "reason": "flash_attention.py null-aux crash and auto_mappings stale config references are different failures." + "reason": "Different problems: tensor-parallel OOM while loading a model vs resume_from_checkpoint key mismatch." }, { - "left": "issue:43761", - "right": "issue:44188", + "left": "issue:45588", + "right": "issue:45600", "accept": false, - "reason": "CLIPVision hidden_states regression and attention-kernel divergence under torch.compile do not share the same bug." + "reason": "Both involve internals around loading/integration, but one is a flash_attention crash and the other is a stale auto_mappings reference causing CI breakage." }, { "left": "issue:44479", "right": "issue:45290", "accept": false, - "reason": "Qwen video-input regression and chat-template tool-call crash are different parts of the stack." - }, - { - "left": "issue:43575", - "right": "issue:43701", - "accept": false, - "reason": "Tensor-parallel OOM and resume_from_checkpoint key mismatch are unrelated." + "reason": "Different regressions: video input handling for Qwen variants vs apply_chat_template crashing on tool-call assistant messages with no content." }, { "left": "issue:44977", "right": "issue:45584", "accept": false, - "reason": "Qwen3.5 flash-attention generation bug and Whisper empty-transcription bug are unrelated." 
+ "reason": "Both are generation-related, but one is a Qwen3.5 flash-attention issue and the other is Whisper empty-transcription handling after align_special_tokens." }, { - "left": "issue:43931", - "right": "issue:45216", + "left": "issue:43761", + "right": "issue:44188", "accept": false, - "reason": "Qwen3-VL shape-mismatch load failure and Qwen3.5 save_pretrained regression are different lifecycle bugs." + "reason": "Different bugs: hidden_states not returned vs divergent attention kernels under torch.compile branching." }, { - "left": "issue:42907", - "right": "issue:45698", + "left": "issue:43931", + "right": "issue:45216", "accept": false, - "reason": "Both involve save/load, but dequantized save failure and wrong custom module loading are not the same issue." + "reason": "Both are load/save regressions, but one is weight-shape mismatch for a Qwen3-VL checkpoint and the other is incorrect checkpoint saving for Qwen3.5." }, { "left": "issue:43575", "right": "issue:44387", "accept": false, - "reason": "Both mention memory/quantization, but tensor-parallel load OOM and increased reserved memory under int4 quantization are distinct." + "reason": "Both mention OOM, but one is tensor-parallel model loading and the other is increased reserved memory under int4 quantization." }, { - "left": "issue:39692", - "right": "issue:45491", + "left": "issue:42907", + "right": "issue:45698", "accept": false, - "reason": "SigLIP2 docs/example errors and Gemma3 mixed-length NaN embeddings are unrelated." + "reason": "Both mention save/load behavior, but one is saving dequantized models and the other is loading the wrong custom module after save_pretrained." }, { "left": "issue:43994", "right": "issue:44206", "accept": false, - "reason": "SigLIP2 wrong outputs and LasrFeatureExtractor unsupported-arg crash are different models and failure modes." + "reason": "Different concrete regressions: SigLIP2 wrong outputs vs LasrFeatureExtractor crashing on an unsupported center argument." 
}, { "left": "issue:43873", "right": "issue:45588", "accept": false, - "reason": "Quantization/offloading behavior and flash_attention.py s_aux=None crash are not the same underlying bug." + "reason": "Different subsystems and failure modes: quantization/offloading behavior vs flash_attention integration AttributeError." + }, + { + "left": "issue:39692", + "right": "issue:45491", + "accept": false, + "reason": "One is a docs/example problem for SigLIP2, the other is a runtime NaN bug in Gemma3 with sliding-window attention." }, { "left": "issue:34689", "right": "issue:43611", "accept": false, - "reason": "Llama 3.2 Vision load breakage from v4.46.2 and base_model_prefix loading regression in v5.0.0 are separate loading issues." + "reason": "Both are model-loading regressions, but they involve different root causes and different affected paths/models." }, { "left": "issue:33290", "right": "issue:34567", "accept": false, - "reason": "Deepspeed Adafactor OOM and TrainerState token counter not updating are unrelated trainer bugs." - }, - { - "left": "issue:43673", - "right": "issue:44743", - "accept": false, - "reason": "Missing GenerationMixin cache in v5.0.0 and Qwen recurrent-state reset are both cache-adjacent but not the same concrete bug." + "reason": "Unrelated trainer/runtime issues: Adafactor+DeepSpeed OOM vs TrainerState.num_input_tokens_seen not updating." }, { "left": "issue:44442", "right": "issue:44898", "accept": false, - "reason": "FastSpeech2ConformerTokenizer loading failure and Perceiver non-default resolution failure are unrelated." + "reason": "Different tokenizer vs image-classification bugs; no shared concrete code path." }, { "left": "issue:43493", "right": "issue:43827", "accept": false, - "reason": "SigLIP2 implementation divergence and docs still referencing pipeline() are different scope entirely." + "reason": "Different concerns: SigLIP2 implementation mismatch vs docs still referencing removed pipeline() APIs." 
+ }, + { + "left": "issue:33290", + "right": "issue:34689", + "accept": false, + "reason": "OOM during training with DeepSpeed is unrelated to Llama 3.2 Vision model loading breakage." }, { "left": "issue:43381", "right": "issue:43873", "accept": false, - "reason": "Gradient checkpointing in eval mode and quantization/offloading behavior are separate runtime bugs." + "reason": "Eval-mode gradient checkpointing and quantization offloading are separate features with different failure modes." + }, + { + "left": "issue:33290", + "right": "issue:35141", + "accept": false, + "reason": "Training OOM is unrelated to embedding reinitialization after resizing token embeddings." + }, + { + "left": "issue:43550", + "right": "issue:44743", + "accept": false, + "reason": "Different code paths: Bamba SDPA torch.compile failure vs Qwen3.5 recurrent state reset with cache." }, { "left": "issue:41628", "right": "issue:45372", "accept": false, - "reason": "AutoImageProcessor import failure and Gemma 4 processor loading import error involve imports, but not the same missing symbol or path." + "reason": "Both are import-related, but one is a direct AutoImageProcessor import removal issue and the other is a Gemma 4 processor dependency/import failure." }, { "left": "issue:44077", "right": "issue:44479", "accept": false, - "reason": "patchtsmixer post_init policy and Qwen video-input regression are unrelated." - }, - { - "left": "issue:33290", - "right": "issue:34689", - "accept": false, - "reason": "Adafactor/deepspeed OOM and Llama 3.2 Vision model-loading regression are different issues." + "reason": "PatchTSMixer post_init handling is unrelated to Qwen video-input regressions." 
} ] }, @@ -8326,7 +8283,6 @@ "issue:28282", "issue:33290", "issue:34689", - "issue:35141", "issue:36010", "issue:41950", "issue:42175", @@ -8340,6 +8296,7 @@ "issue:43475", "issue:43540", "issue:43643", + "issue:43673", "issue:43688", "issue:43716", "issue:43720", @@ -8357,6 +8314,7 @@ "issue:44496", "issue:44514", "issue:44617", + "issue:44743", "issue:44792", "issue:44855", "issue:44871", @@ -8369,217 +8327,217 @@ "issue:45656" ], "soft_pairs": [ - "issue:33290|issue:35141", "issue:43643|issue:44913", "issue:44190|issue:44496", - "issue:43749|issue:43931", "issue:43720|issue:44442", + "issue:43749|issue:43931", "issue:43873|issue:44871", "issue:33290|issue:45656", "issue:44855|issue:45137", "issue:34689|issue:36010", - "issue:44162|issue:44792", - "issue:43316|issue:44877", + "issue:43673|issue:44743", "issue:43381|issue:43856", + "issue:43316|issue:44877", "issue:43540|issue:44514", + "issue:44162|issue:44792", "issue:42175|issue:43316", "issue:43404|issue:43441", + "issue:43381|issue:43742", "issue:41950|issue:43475", "issue:43316|issue:43716", - "issue:43388|issue:43688", - "issue:43381|issue:43742", - "issue:16998|issue:22355", "issue:25251|issue:28282", + "issue:16998|issue:22355", + "issue:43388|issue:43688", "issue:43295|issue:43825", "issue:43065|issue:44617", - "issue:43761|issue:44479", "issue:44871|issue:45584", + "issue:43761|issue:44479", "issue:45127|issue:45200" ], - "prepared_review_unit_hash": "040d87c8261b79411a46f99eafe83135a022385929f07e0b7ec3031651cccaee", + "prepared_review_unit_hash": "494d070670bd1ce22f40ebf12ef7b4a2150f99566e558c309d20098aaaa03a33", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { - "node_count": 47, - "item_count": 47, + "node_count": 48, + "item_count": 48, 
"soft_pair_count": 26, - "serialized_chars": 34847, - "estimated_input_tokens": 8712, - "estimated_eval_tokens": 17680 + "serialized_chars": 35389, + "estimated_input_tokens": 8848, + "estimated_eval_tokens": 17952 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This set does not form a single duplicate cluster; the soft pairs are mostly broad-subsystem lookalikes rather than the same underlying bug. I found no PRs. If one issue must be kept as the representative item, issue:43749 is the strongest candidate because it is a concrete, high-impact regression with the most discussion/references.", - "confidence": 0.81, - "canonical_issue_reason": "issue:43749 is the best single representative: it describes a concrete broken feature flag/regression, has the highest discussion/inbound signal among the set, and is more actionable than the others.", + "summary": "This cluster is mostly a loose set of Transformers bug reports around model/config loading, processor behavior, and regression issues, but the items are not duplicates of one another. 
The soft edges are similarity-only and should be rejected.", + "confidence": 0.78, + "canonical_issue_reason": "No single issue fully represents the cluster, but issue 43643 is the best anchor because it is a concrete config-loading regression with a broad API surface (`AutoConfig.from_pretrained` returning incomplete objects).", "canonical_pr_reason": null, - "best_issue_reason": "issue:43749 is the most suitable standalone issue to anchor triage because it is specific, clearly reproducible, and has the strongest evidence of impact.", + "best_issue_reason": "Issue 43643 is the most suitable single representative: it is specific, user-facing, and sits in the center of the broader config/load regression theme present in several nearby issues.", "best_pr_reason": null, "soft_edge_verdicts": [ - { - "left": "issue:33290", - "right": "issue:35141", - "accept": false, - "reason": "Different bugs: one is DeepSpeed/Adafactor OOM, the other is embedding reinitialization after resize." - }, { "left": "issue:43643", "right": "issue:44913", "accept": false, - "reason": "Both involve config reload, but one is missing fields with trust_remote_code and the other is GPTNeoX rotary_pct not persisting." + "reason": "Both are config-related, but one is missing fields on load and the other loses `rotary_pct` on reload; different bugs and code paths." }, { "left": "issue:44190", "right": "issue:44496", "accept": false, - "reason": "Local dataset loading in a training script vs unrecognized model/config.json loading error are unrelated." + "reason": "Local dataset loading error vs missing `model_type` in model config; unrelated failure modes." }, { - "left": "issue:43749", - "right": "issue:43931", + "left": "issue:43720", + "right": "issue:44442", "accept": false, - "reason": "FSDP CPU RAM efficient loading failure is not the same as Qwen3-VL shape-mismatch loading." + "reason": "BitNet packed-weight loading bug and AutoTokenizer tokenizer-class loading failure are distinct issues." 
}, { - "left": "issue:43720", - "right": "issue:44442", + "left": "issue:43749", + "right": "issue:43931", "accept": false, - "reason": "Packed-weight accelerate loading bug and AutoTokenizer failing for a specific tokenizer are different code paths." + "reason": "FSDP RAM-efficient loading vs Qwen3-VL shape mismatch are different model-loading problems." }, { "left": "issue:43873", "right": "issue:44871", "accept": false, - "reason": "Quantization/offloading behavior and Gemma eos_token_id inconsistency are separate problems." + "reason": "Quantization/offloading behavior and Gemma eos-token config mismatch are unrelated." }, { "left": "issue:33290", "right": "issue:45656", "accept": false, - "reason": "Both mention DeepSpeed/optimizer, but one is OOM with Adafactor while the other is double step invocation." + "reason": "Both involve DeepSpeed/optimizer behavior, but one is OOM with Adafactor and the other is duplicate optimizer stepping." }, { "left": "issue:44855", "right": "issue:45137", "accept": false, - "reason": "Python 3.13 TorchScript parsing error and DeepSpeed ZeRO3 deque underflow are unrelated." + "reason": "Python 3.13 JIT parsing error in DebertaV2 vs DeepSpeed ZeRO3 deque underflow; unrelated." }, { "left": "issue:34689", "right": "issue:36010", "accept": false, - "reason": "Model loading regression for a specific model family is not the same as a GenerationMixin import error." + "reason": "A model-loading regression for Llama 3.2 Vision is not the same as a missing `GenerationMixin` import path." }, { - "left": "issue:44162", - "right": "issue:44792", + "left": "issue:43673", + "right": "issue:44743", "accept": false, - "reason": "ESM2 breakage and a janus image-generation test failure are unrelated failures." + "reason": "Chunked prefill cache regression and Qwen3_5 recurrent-state reset are different cache/state bugs." 
}, { - "left": "issue:43316", - "right": "issue:44877", + "left": "issue:43381", + "right": "issue:43856", "accept": false, - "reason": "API discrepancy in Gemma3TextConfig differs from strict config loading for granite_speech." + "reason": "Gradient checkpointing in eval mode and MoE training memory usage are separate concerns." }, { - "left": "issue:43381", - "right": "issue:43856", + "left": "issue:43316", + "right": "issue:44877", "accept": false, - "reason": "Gradient checkpointing eval-mode restriction is unrelated to Qwen3 MoE memory usage." + "reason": "Gemma3TextConfig API inconsistency and strict config rejection for granite_speech are not the same bug." }, { "left": "issue:43540", "right": "issue:44514", "accept": false, - "reason": "Both are multimodal/video-related, but one is Qwen3OmniMoe video processing and the other is batched chat template padding behavior." + "reason": "Video-input handling in Qwen3OmniMoe and batched chat-template padding crashes in Qwen2_5_VLProcessor are different code paths." + }, + { + "left": "issue:44162", + "right": "issue:44792", + "accept": false, + "reason": "ESM2 breakage and a janus generate-images test failure are unrelated." }, { "left": "issue:42175", "right": "issue:43316", "accept": false, - "reason": "Backend dependency packaging and config API mismatch are different issues." + "reason": "Backend-install packaging issue vs config API discrepancy; no shared underlying defect." }, { "left": "issue:43404", "right": "issue:43441", "accept": false, - "reason": "lm_head tying bug and FlashAttention failure in Ministral-3 are distinct model issues." + "reason": "Weight tying bug in Mistral3ForConditionalGeneration and FlashAttention failure in Ministral-3 are distinct regressions." + }, + { + "left": "issue:43381", + "right": "issue:43742", + "accept": false, + "reason": "Gradient-checkpointing eval-mode bug is unrelated to key errors while loading MobileLLM-125M." 
}, { "left": "issue:41950", "right": "issue:43475", "accept": false, - "reason": "Video-classification processor selection and Sam3VisionEncoderOutput missing attribute are not the same bug." + "reason": "Video-classification processor selection and missing `fpn_position_embeddings` in Sam3Video are different bugs." }, { "left": "issue:43316", "right": "issue:43716", "accept": false, - "reason": "Gemma3TextConfig API mismatch and Mistral-3 dtype mismatch are unrelated." - }, - { - "left": "issue:43388", - "right": "issue:43688", - "accept": false, - "reason": "Metric-gather label truncation and MoE auxiliary-loss normalization are different training bugs." + "reason": "API discrepancy in a config class and image-preprocessor dtype mismatch are not the same change." }, { - "left": "issue:43381", - "right": "issue:43742", + "left": "issue:25251", + "right": "issue:28282", "accept": false, - "reason": "Eval-mode gradient checkpointing and MobileLLM key error are unrelated." + "reason": "Pipeline output nesting change and missing PyTorch import are unrelated." }, { "left": "issue:16998", "right": "issue:22355", "accept": false, - "reason": "Model_max_length question for DeBERTa-V3 is unrelated to a missing transformers.onnx module." + "reason": "Model max length question and missing `transformers.onnx` module are unrelated." }, { - "left": "issue:25251", - "right": "issue:28282", + "left": "issue:43388", + "right": "issue:43688", "accept": false, - "reason": "Pipeline top_k output shape change is unrelated to missing PyTorch import." + "reason": "Metric gathering truncation and MoE auxiliary-loss normalization are different training/eval bugs." }, { "left": "issue:43295", "right": "issue:43825", "accept": false, - "reason": "Processor.tokenizer regression and translation-task error messaging are different pipeline regressions." + "reason": "Processor/tokenizer regression and pipeline translation-error messaging are unrelated." 
}, { "left": "issue:43065", "right": "issue:44617", "accept": false, - "reason": "Dummy Conv2d in Sam3PixelDecoder and Sam3Video OOM are not the same underlying problem." + "reason": "Sam3PixelDecoder dummy-conv issue and Sam3Video OOM are not the same underlying problem." }, { - "left": "issue:43761", - "right": "issue:44479", + "left": "issue:44871", + "right": "issue:45584", "accept": false, - "reason": "CLIPVisionModel hidden_states regression and Qwen video-input regression are separate issues." + "reason": "Gemma eos-token configuration mismatch and Whisper empty-transcription failure are different generation bugs." }, { - "left": "issue:44871", - "right": "issue:45584", + "left": "issue:43761", + "right": "issue:44479", "accept": false, - "reason": "Gemma eos_token_id mismatch and Whisper empty-transcription generation failure do not share the same code path." + "reason": "CLIPVisionModel hidden-state regression and Qwen video-input regression are unrelated model families and code paths." }, { "left": "issue:45127", "right": "issue:45200", "accept": false, - "reason": "LoRA merge collapse with extended vocab is unrelated to Gemma 4 mm_token_type_ids defaults." + "reason": "LoRA merge with extended vocabulary collapse and Gemma 4 token-type defaults are different model-specific issues." 
} ] }, @@ -8654,244 +8612,244 @@ ], "soft_pairs": [ "issue:44077|issue:44534", - "issue:43295|issue:44871", "issue:43550|issue:43701", - "issue:44734|issue:45406", + "issue:43295|issue:44871", "issue:43606|issue:43701", + "issue:44734|issue:45406", "issue:44936|issue:45698", "issue:43646|issue:44913", "issue:43452|issue:44496", "issue:43931|issue:44060", - "issue:43761|issue:44242", "issue:43575|issue:43742", + "issue:43761|issue:44242", "issue:42617|issue:43575", "issue:44617|issue:45584", "issue:45563|issue:45584", + "issue:41720|issue:43575", + "issue:32090|issue:35141", "issue:44521|issue:45381", - "issue:44479|issue:44734", "issue:43906|issue:45290", - "issue:32090|issue:35141", - "issue:41720|issue:43575", "issue:44933|issue:45362", + "issue:44479|issue:44734", "issue:43065|issue:44610", "issue:44464|issue:44871", - "issue:44464|issue:45563", - "issue:43827|issue:43994", "issue:44792|issue:44977", + "issue:43827|issue:43994", + "issue:44464|issue:45563", "issue:44534|issue:44913", - "issue:41720|issue:45245", "issue:44190|issue:44964", + "issue:41720|issue:45245", "issue:44792|issue:45600", "issue:45127|issue:45357" ], - "prepared_review_unit_hash": "4146e420e269dd8577aa6b69dd5cc721c93b21471d8932362a9d7b398118dec3", + "prepared_review_unit_hash": "6b6046e19c9d641f2e527e8d9bda6b188dc13c1305a269a483804b5093aaa304", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 30, - "serialized_chars": 36571, - "estimated_input_tokens": 9143, - "estimated_eval_tokens": 18542 + "serialized_chars": 36573, + "estimated_input_tokens": 9144, + "estimated_eval_tokens": 18544 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, 
"analyst_result": { - "summary": "This cluster is mostly a grab-bag of unrelated Transformers regressions and model-specific bugs; none of the soft pairs look like true duplicates or mergeable PR-equivalents.", - "confidence": 0.38, - "canonical_issue_reason": "Issue 43646 is the broadest and most representative bug report in the set: a generic Transformers 5.0.0 custom-model initialization breakage that could plausibly encompass nearby init/load regressions better than the narrower model-specific reports.", + "summary": "The cluster is very heterogeneous: most items are superficially similar only because they are Transformers regressions, loading failures, or multimodal/model-specific bugs. I do not see any true duplicate pairs among the soft candidates; they should stay separate.", + "confidence": 0.96, + "canonical_issue_reason": "issue:43646 is the broadest and most representative framework-level regression in the set: custom model initialization breaking under Transformers 5.0.0. It best fits the recurring init/load regression theme, even though the cluster as a whole is not a duplicate group.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 43646 is the best cluster anchor because it is phrased as a general framework regression rather than a single-model symptom, making it the most reusable representative issue in an otherwise heterogeneous cluster.", + "best_issue_reason": "issue:43646 is the strongest global representative because it describes a core library regression affecting model initialization, which is more central than the many narrow model- or feature-specific reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44077", "right": "issue:44534", "accept": false, - "reason": "Both mention initialization behavior, but one is a model-specific post_init policy change and the other is a broad non-persistent buffer regression; different bugs and code paths." 
+ "reason": "Both are v5-related, but one is about patchtsmixer/post_init policy and the other about non-persistent buffer serialization; different bugs and fixes." + }, + { + "left": "issue:43550", + "right": "issue:43701", + "accept": false, + "reason": "Torch.compile + SDPA failure for Bamba is unrelated to resume_from_checkpoint key mismatch." }, { "left": "issue:43295", "right": "issue:44871", "accept": false, - "reason": "One is a processor/tokenizer compatibility regression in v4.57.5, the other is an eos_token_id config mismatch for Gemma-3; no shared underlying defect." + "reason": "One is a processor/tokenizer regression in multimodal custom code; the other is an eos_token_id config inconsistency for Gemma-3." }, { - "left": "issue:43550", + "left": "issue:43606", "right": "issue:43701", "accept": false, - "reason": "torch.compile + SDPA failure for Bamba is unrelated to a resume_from_checkpoint key mismatch." + "reason": "CPU offload device mismatch in bark-small is unrelated to checkpoint key mismatch on resume." }, { "left": "issue:44734", "right": "issue:45406", "accept": false, - "reason": "KV-cache continuation indexing crash and missing _tokenizer in serve are separate server bugs." - }, - { - "left": "issue:43606", - "right": "issue:43701", - "accept": false, - "reason": "CPU offload device mismatch in Bark is unrelated to checkpoint resume key handling." + "reason": "Serve KV-cache continuation indexing bug is not the same as Gemma4Processor missing _tokenizer." }, { "left": "issue:44936", "right": "issue:45698", "accept": false, - "reason": "trainer.evaluate() after train() and wrong custom module loaded after save/load are different lifecycle bugs." + "reason": "trainer.evaluate after train and wrong custom module loading after save_pretrained are different code paths and failures." 
}, { "left": "issue:43646", "right": "issue:44913", "accept": false, - "reason": "A broad custom model initialization regression is not the same as GPTNeoXConfig rotary_pct not persisting on reload." + "reason": "Custom model initialization breakage and GPTNeoXConfig rotary_pct reload defaulting are separate config/init issues." }, { "left": "issue:43452", "right": "issue:44496", "accept": false, - "reason": "GGUF loading breakage for AutoTokenizer/AutoModel is unrelated to missing model_type in config.json." + "reason": "gguf_file path regression and unrecognized model_type/config loading error are both loading-related but not the same defect." }, { "left": "issue:43931", "right": "issue:44060", "accept": false, - "reason": "Weight-shape mismatch for Qwen3-VL and a tied-weights warning bug in Qwen3-Next are distinct model-family issues." + "reason": "Qwen3-VL weight shape mismatch and Qwen3-Next tied-weights warning are different model architecture problems." }, { - "left": "issue:43761", - "right": "issue:44242", + "left": "issue:43575", + "right": "issue:43742", "accept": false, - "reason": "CLIPVision hidden_states regression and MoE load-balancing loss omission affect different components and behaviors." + "reason": "Tensor-parallel OOM and MobileLLM key error on load are unrelated failure modes." }, { - "left": "issue:43575", - "right": "issue:43742", + "left": "issue:43761", + "right": "issue:44242", "accept": false, - "reason": "Tensor-parallel OOM on Qwen2-57B is not the same as a key error loading MobileLLM-125M." + "reason": "CLIPVision hidden_states regression and MoE load-balancing loss omission are different model behaviors." }, { "left": "issue:42617", "right": "issue:43575", "accept": false, - "reason": "A 3d_parallel.py execution failure is not the same concrete issue as tp-induced OOM while loading Qwen2-57B." + "reason": "3d_parallel.py runtime failure is not the same issue as Qwen2 large-model TP OOM." 
}, { "left": "issue:44617", "right": "issue:45584", "accept": false, - "reason": "Sam3Video CUDA OOM and Whisper empty-transcription failure are unrelated model/runtime bugs." + "reason": "Sam3Video CUDA OOM and Whisper empty-transcription failure are unrelated." }, { "left": "issue:45563", "right": "issue:45584", "accept": false, - "reason": "A stale warning in paged generate() is unrelated to Whisper failing after align_special_tokens." - }, - { - "left": "issue:44521", - "right": "issue:45381", - "accept": false, - "reason": "All-zero assistant masks and wrong vision_position_ids are both multimodal, but they are different code paths and symptoms." + "reason": "A stale warning for num_return_sequences is not the same bug as empty-transcription failure after align_special_tokens." }, { - "left": "issue:44479", - "right": "issue:44734", + "left": "issue:41720", + "right": "issue:43575", "accept": false, - "reason": "A Qwen2.5/Qwen3 video-input regression is unrelated to serve KV-cache continuation tensor indexing." + "reason": "Qwen3 auto device mapping cuda assert and Qwen2 TP OOM are distinct distributed-loading issues." }, { - "left": "issue:43906", - "right": "issue:45290", + "left": "issue:32090", + "right": "issue:35141", "accept": false, - "reason": "The isolated reproduction of another issue does not match the specific apply_chat_template tool-call crash described here." + "reason": "Trainer gpu_broadcast_one NoneType error and token-embedding resize reinitialization are different training/model-init bugs." }, { - "left": "issue:32090", - "right": "issue:35141", + "left": "issue:44521", + "right": "issue:45381", "accept": false, - "reason": "Trainer GPU broadcast NoneType error and token-embedding resize reinit are unrelated training/model-init bugs." + "reason": "All-zero assistant masks in apply_chat_template and wrong vision_position_ids are both multimodal, but not the same concrete bug." 
}, { - "left": "issue:41720", - "right": "issue:43575", + "left": "issue:43906", + "right": "issue:45290", "accept": false, - "reason": "Auto device mapping cudaErrorAssert on A800 is not the same as OOM during tensor-parallel model load." + "reason": "An isolated reproduction report is not the same as the tool-call/empty-content crash in apply_chat_template(tokenize=True)." }, { "left": "issue:44933", "right": "issue:45362", "accept": false, - "reason": "Missing image_utils import and Qwen3.5 chat crash are unrelated issues." + "reason": "Missing import from image_utils and Qwen3.5 chat crash are unrelated." + }, + { + "left": "issue:44479", + "right": "issue:44734", + "accept": false, + "reason": "Video-input regression in Qwen2.5-VL/Qwen3-VL models and serve KV-cache tensor indexing are different problems." }, { "left": "issue:43065", "right": "issue:44610", "accept": false, - "reason": "A dummy Conv2d in Sam3PixelDecoder and OmDet-Turbo\u2019s processor size mismatch are separate vision-model problems." + "reason": "Dummy Conv2d in Sam3PixelDecoder and wrong processor image size are both SAM3-related but different components and fixes." }, { "left": "issue:44464", "right": "issue:44871", "accept": false, - "reason": "Chunked generation inconsistency with compiled forward and Gemma eos_token_id mismatch do not share the same bug." + "reason": "Chunked generation inconsistency under compile and Gemma-3 eos_token_id mismatch are unrelated." }, { - "left": "issue:44464", - "right": "issue:45563", + "left": "issue:44792", + "right": "issue:44977", "accept": false, - "reason": "Compiled-forward generation inconsistency is unrelated to a stale num_return_sequences warning." + "reason": "A failed Janus image-generation test and Qwen3.5 flash-attention generation bug are different models and code paths." 
}, { "left": "issue:43827", "right": "issue:43994", "accept": false, - "reason": "Docs still referencing pipeline() and SigLIP2 producing nonsense are different issues; one is documentation, the other model behavior." + "reason": "Docs still referencing pipeline() and SigLIP2 nonsensical outputs are not the same issue." }, { - "left": "issue:44792", - "right": "issue:44977", + "left": "issue:44464", + "right": "issue:45563", "accept": false, - "reason": "A janus image-generation test failure is not the same as Qwen3.5 flash-attention generation misbehavior." + "reason": "Compiled-forward generation inconsistency is unrelated to a stale warning about num_return_sequences." }, { "left": "issue:44534", "right": "issue:44913", "accept": false, - "reason": "Non-persistent buffer junk in v5 is a framework-wide bug, not the specific config reload issue in GPTNeoXConfig." + "reason": "Non-persistent buffer junk and rotary_pct reverting on reload are separate serialization/config bugs." }, { - "left": "issue:41720", - "right": "issue:45245", + "left": "issue:44190", + "right": "issue:44964", "accept": false, - "reason": "cudaErrorAssert on A800 and a category-count limit runtime error are unrelated failures." + "reason": "Local dataset loading in a no-trainer script and Phi-4 multimodal loading failure are different loading scenarios." }, { - "left": "issue:44190", - "right": "issue:44964", + "left": "issue:41720", + "right": "issue:45245", "accept": false, - "reason": "Local dataset loading in a training script is not the same as failing to load a specific multimodal model." + "reason": "CUDA assert on Qwen3 device mapping and category-count overflow are unrelated runtime errors." }, { "left": "issue:44792", "right": "issue:45600", "accept": false, - "reason": "Janus generation test failure and stale auto_mappings references to removed Sam3LiteText configs are unrelated." 
+ "reason": "Janus image-generation test failure and broken auto_mappings references for Sam3LiteText are not the same defect." }, { "left": "issue:45127", "right": "issue:45357", "accept": false, - "reason": "LoRA merge collapse with extended vocab and incorrect visual encoder keys on save are different save/load behaviors." + "reason": "LoRA merge collapse with extended vocab and incorrect visual encoder keys on save_pretrained are different save/merge regressions." } ] }, @@ -8931,7 +8889,6 @@ "issue:43479", "issue:43540", "issue:43550", - "issue:43582", "issue:43644", "issue:43650", "issue:43761", @@ -8943,6 +8900,7 @@ "issue:44186", "issue:44190", "issue:44220", + "issue:44246", "issue:44279", "issue:44466", "issue:44561", @@ -8952,11 +8910,11 @@ "issue:44855", "issue:44913", "issue:44938", + "issue:45003", "issue:45216", "issue:45245", "issue:45276", "issue:45375", - "issue:45406", "issue:45446", "issue:45542", "issue:45584", @@ -8965,252 +8923,252 @@ "issue:45632" ], "soft_pairs": [ - "issue:43388|issue:43582", - "issue:44938|issue:45542", + "issue:43388|issue:44079", "issue:44855|issue:45588", + "issue:44938|issue:45542", "issue:43761|issue:44743", "issue:43976|issue:44855", "issue:43335|issue:44855", "issue:44077|issue:44079", - "issue:43540|issue:44734", - "issue:43644|issue:44913", "issue:43550|issue:44190", + "issue:43644|issue:44913", "issue:43650|issue:44220", - "issue:44112|issue:45600", + "issue:43540|issue:44734", + "issue:45276|issue:45584", "issue:43116|issue:44190", + "issue:44112|issue:45600", "issue:41950|issue:44016", - "issue:45276|issue:45584", "issue:39692|issue:44190", + "issue:43295|issue:45276", "issue:43479|issue:45375", "issue:36246|issue:41628", "issue:43404|issue:45216", "issue:45245|issue:45584", - "issue:43295|issue:45276", "issue:44734|issue:45632", "issue:36246|issue:38617", - "issue:44016|issue:44220", "issue:38175|issue:43065", - "issue:44186|issue:44734", + "issue:44016|issue:44220", "issue:44683|issue:45446", 
"issue:44561|issue:45632", + "issue:44186|issue:44734", "issue:42175|issue:44279", "issue:44466|issue:44913", - "issue:45406|issue:45632" + "issue:44246|issue:45003" ], - "prepared_review_unit_hash": "84e26be2c15b66f20daf2ddd567e1c3bb3185c07703114865a9b6cdf963a76cc", + "prepared_review_unit_hash": "07748904f4c63450aca9175962fa6875e6ed158b409d9e004da1f36ef108fc1b", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 31, - "serialized_chars": 37213, - "estimated_input_tokens": 9304, - "estimated_eval_tokens": 18864 + "serialized_chars": 37122, + "estimated_input_tokens": 9281, + "estimated_eval_tokens": 18818 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is highly heterogeneous: most pairs are only superficially similar by broad terms like imports, configs, or model regressions, but they describe different subsystems and concrete failures. I found no soft edge that should be merged.", - "confidence": 0.31, - "canonical_issue_reason": "issue:41628 is the broadest public-facing import regression in the set and the best single representative of the API-breakage theme, though the overall cluster is quite mixed.", + "summary": "This set is mostly a loose similarity cluster rather than true duplicates: it mixes unrelated import/export regressions, model-specific bugs, pipeline issues, and docs/notebook problems. 
I rejected all soft edges because none look like the same concrete underlying bug or change.", + "confidence": 0.93, + "canonical_issue_reason": "issue:41628 is the broadest top-level API regression in the set and the closest thing to a hub for the import/export failures, though it is not a true duplicate center for the rest.", "canonical_pr_reason": null, - "best_issue_reason": "issue:41628 is the most representative and centrally scoped issue among these, because it concerns a top-level `transformers` import surface rather than a narrow model-specific failure.", + "best_issue_reason": "issue:41628 is the most representative issue for the cluster because it describes a generic import failure at the package boundary, making it the closest match to the few other import-related items.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43388", - "right": "issue:43582", + "right": "issue:44079", "accept": false, - "reason": "Tuple-label metric bug vs Apple Silicon allocator warmup TypeError; unrelated code paths." + "reason": "Different failures: batch label truncation in metrics vs ModelOutput key assignment logic." }, { - "left": "issue:44938", - "right": "issue:45542", + "left": "issue:44855", + "right": "issue:45588", "accept": false, - "reason": "Python 3.14 import/load failure vs missing TensorFlow backend when only tensorboard is installed; different failures." + "reason": "Unrelated code paths: Python 3.13 import/parsing issue vs flash_attention crash on sink-less models." }, { - "left": "issue:44855", - "right": "issue:45588", + "left": "issue:44938", + "right": "issue:45542", "accept": false, - "reason": "Python 3.13 Deberta import indentation error vs flex_attention auxiliary-arg crash; unrelated." + "reason": "Different runtime problems: Python 3.14 import/load failure vs missing TensorFlow backend wiring." 
}, { "left": "issue:43761", "right": "issue:44743", "accept": false, - "reason": "CLIP hidden_states regression vs Qwen recurrent-state reset; different models and bugs." + "reason": "Different model bugs: CLIP hidden_states regression vs Qwen recurrent state reset with cache." }, { "left": "issue:43976", "right": "issue:44855", "accept": false, - "reason": "General Python version compatibility complaint vs specific JIT parsing indentation error; not the same bug." + "reason": "Both are Python-version related, but one is a broad 5.1.0 compatibility issue and the other is a specific DebertaV2 parsing failure." }, { "left": "issue:43335", "right": "issue:44855", "accept": false, - "reason": "SwitchTransformers config layer-count bug vs Python 3.13 import syntax error; unrelated." + "reason": "SwitchTransformers config construction bug is unrelated to the DebertaV2 import/parse error." }, { "left": "issue:44077", "right": "issue:44079", "accept": false, - "reason": "Model class optional post_init policy vs ModelOutput key assignment behavior; different subsystems." + "reason": "Different semantics: optional post_init handling vs ModelOutput key assignment for None values." }, { - "left": "issue:43540", - "right": "issue:44734", + "left": "issue:43550", + "right": "issue:44190", "accept": false, - "reason": "Qwen3OmniMoe video input validation vs serve KV-cache indexing crash; unrelated paths." + "reason": "Different subsystems: SDPA/torch.compile failure in Bamba vs dataset loading in an image classification script." }, { "left": "issue:43644", "right": "issue:44913", "accept": false, - "reason": "Non-persistent buffer serialization vs GPTNeoX config reload loss; different bugs." + "reason": "Non-persistent buffer initialization issue is unrelated to GPTNeoX rotary_pct reload behavior." 
}, { - "left": "issue:43550", - "right": "issue:44190", + "left": "issue:43650", + "right": "issue:44220", "accept": false, - "reason": "torch.compile SDPA failure in Bamba vs local dataset loading in image-classification script; unrelated." + "reason": "One is a vague data issue, the other is a specific feature-extraction function bug." }, { - "left": "issue:43650", - "right": "issue:44220", + "left": "issue:43540", + "right": "issue:44734", "accept": false, - "reason": "Generic data request vs fbank feature extraction issue; no shared code path." + "reason": "Different execution paths: Qwen3OmniMoe video input validation vs serve KV-cache continuation indexing." }, { - "left": "issue:44112", - "right": "issue:45600", + "left": "issue:45276", + "right": "issue:45584", "accept": false, - "reason": "GraniteSpeech CI test flake vs stale auto_mappings reference to removed configs; different failures." + "reason": "Embedding resize/tied-weight behavior is unrelated to Whisper empty-transcription generation failure." }, { "left": "issue:43116", "right": "issue:44190", "accept": false, - "reason": "Multi-label classification output bug vs local dataset loading issue; unrelated examples." + "reason": "Different examples and symptoms: multi-label empty outputs vs local dataset loading failure." }, { - "left": "issue:41950", - "right": "issue:44016", + "left": "issue:44112", + "right": "issue:45600", "accept": false, - "reason": "Video pipeline processor selection bug vs notebook syntax error; not the same underlying issue." + "reason": "CI test flakiness in GraniteSpeech is unrelated to stale auto_mappings references for Sam3LiteText." }, { - "left": "issue:45276", - "right": "issue:45584", + "left": "issue:41950", + "right": "issue:44016", "accept": false, - "reason": "Embedding resize propagation bug vs Whisper empty-transcription generation bug; different model code." + "reason": "Pipeline processor selection bug vs notebook syntax error; not the same bug class." 
}, { "left": "issue:39692", "right": "issue:44190", "accept": false, - "reason": "SigLIP2 doc example errors vs local dataset loading in a different script; not a duplicate." + "reason": "SigLIP2 doc example/model mismatch and quantization errors are unrelated to local dataset loading." + }, + { + "left": "issue:43295", + "right": "issue:45276", + "accept": false, + "reason": "Processor/tokenizer regression and resize_token_embeddings weight propagation are different code paths." }, { "left": "issue:43479", "right": "issue:45375", "accept": false, - "reason": "Default config initialization vs missing config field dropped by strict serialization; similar area but different concrete bug." + "reason": "Both are config-related, but one is default multimodal config initialization and the other is a missing field dropped by strict serialization." }, { "left": "issue:36246", "right": "issue:41628", "accept": false, - "reason": "Specific missing Qwen2.5 VL class export vs missing top-level AutoImageProcessor export; different symbols and fixes." + "reason": "Both are import errors, but they involve different missing symbols and modules, not the same export bug." }, { "left": "issue:43404", "right": "issue:45216", "accept": false, - "reason": "lm_head weight tying/serialization bug vs Qwen3.5 checkpoint save_pretrained regression; different model behaviors." + "reason": "Tied lm_head weight bug in Mistral3 is unrelated to Qwen3.5 save_pretrained checkpoint regression." }, { "left": "issue:45245", "right": "issue:45584", "accept": false, - "reason": "Category-cardinality runtime limit vs Whisper empty output generation; unrelated." - }, - { - "left": "issue:43295", - "right": "issue:45276", - "accept": false, - "reason": "Processor/tokenizer API regression vs token embedding resize propagation; different surfaces." + "reason": "Category-count runtime limit has no relation to Whisper generation on empty transcription." 
}, { "left": "issue:44734", "right": "issue:45632", "accept": false, - "reason": "Serve KV-cache tensor indexing crash vs trust_remote_code cache-path collision; unrelated." + "reason": "Serve KV-cache continuation bug and trust_remote_code cache path collision are unrelated." }, { "left": "issue:36246", "right": "issue:38617", "accept": false, - "reason": "Missing Qwen image processor export vs missing layer_type_validation import; both import errors but different removed symbols." - }, - { - "left": "issue:44016", - "right": "issue:44220", - "accept": false, - "reason": "Notebook syntax error vs audio fbank feature issue; no common bug." + "reason": "Different import failures: missing Qwen2_5_VLImageProcessor export vs missing layer_type_validation symbol." }, { "left": "issue:38175", "right": "issue:43065", "accept": false, - "reason": "SigLIP2 zero-probabilities behavior vs Sam3PixelDecoder dummy Conv2d; unrelated models." + "reason": "siglip2 zero probabilities and Sam3PixelDecoder dummy Conv2d are unrelated model-specific issues." }, { - "left": "issue:44186", - "right": "issue:44734", + "left": "issue:44016", + "right": "issue:44220", "accept": false, - "reason": "LayoutLMv2 tokenizer padding/NER crash vs serve KV-cache indexing crash; different components." + "reason": "Notebook syntax error is not the same as the _torch_extract_fbank_features() bug." }, { "left": "issue:44683", "right": "issue:45446", "accept": false, - "reason": "flex_attention compile failure on torch >=2.9 vs incorrect version check for AuxRequest import; same area, different bug." + "reason": "Compiled flex_attention failure on torch>=2.9 is not the same as a bad PyTorch version gate for AuxRequest import." }, { "left": "issue:44561", "right": "issue:45632", "accept": false, - "reason": "Removal of is_torch_fx_available breaking trust_remote_code vs cache-path collision for local models; unrelated." 
+ "reason": "Removal of is_torch_fx_available breaking trust_remote_code is unrelated to cache-path collisions." + }, + { + "left": "issue:44186", + "right": "issue:44734", + "accept": false, + "reason": "Tokenizer padding/truncation crash in LayoutLMv2 is unrelated to serve KV-cache indexing." }, { "left": "issue:42175", "right": "issue:44279", "accept": false, - "reason": "Install-extra backend omission vs generic dependency issue; too vague to be duplicates." + "reason": "Package dependency/install issue vs a generic dependency issue report; not the same concrete bug." }, { "left": "issue:44466", "right": "issue:44913", "accept": false, - "reason": "Device-dependent lm_head serialization vs rotary_pct reload defaulting; different configuration/state bugs." + "reason": "Serialization of tied lm_head weights is unrelated to GPTNeoX rotary_pct resetting on reload." }, { - "left": "issue:45406", - "right": "issue:45632", + "left": "issue:44246", + "right": "issue:45003", "accept": false, - "reason": "Gemma4Processor missing _tokenizer in serve vs trust_remote_code cache collision; unrelated failures." + "reason": "Import-time slowness and unsafe sys.modules access are related thematically, but not the same bug or fix." 
} ] }, @@ -9256,22 +9214,19 @@ "issue:44075", "issue:44112", "issue:44242", - "issue:44246", "issue:44279", "issue:44336", "issue:44351", "issue:44368", - "issue:44462", "issue:44496", "issue:44521", + "issue:44683", "issue:44877", "issue:44933", "issue:44964", "issue:44987", - "issue:45003", "issue:45020", "issue:45042", - "issue:45081", "issue:45200", "issue:45216", "issue:45278", @@ -9279,232 +9234,253 @@ "issue:45335", "issue:45357", "issue:45405", + "issue:45406", + "issue:45542", "issue:45584", "issue:45593", - "issue:45600" + "issue:45600", + "issue:45632" ], "soft_pairs": [ - "issue:44246|issue:45003", + "issue:45406|issue:45632", "issue:43643|issue:44496", "issue:42907|issue:43441", "issue:44016|issue:44062", "issue:44964|issue:45600", + "issue:43295|issue:45335", "issue:43299|issue:45216", "issue:43976|issue:44987", - "issue:43295|issue:45335", - "issue:43673|issue:44336", "issue:44112|issue:44964", - "issue:43824|issue:44933", - "issue:43531|issue:45600", + "issue:43673|issue:44336", "issue:44496|issue:45042", + "issue:43824|issue:44933", "issue:43854|issue:44964", + "issue:43531|issue:45600", "issue:43976|issue:44279", "issue:43526|issue:43901", - "issue:43299|issue:45357", "issue:45335|issue:45584", "issue:44075|issue:44368", - "issue:44521|issue:45325", + "issue:43299|issue:45357", "issue:43381|issue:45593", + "issue:44521|issue:45325", "issue:43577|issue:45216", "issue:44351|issue:45278", + "issue:43901|issue:44496", "issue:43976|issue:45405", "issue:41950|issue:45020", - "issue:43901|issue:44496", "issue:44242|issue:45200", "issue:43479|issue:43525", "issue:44496|issue:44877", - "issue:44462|issue:45081" + "issue:44683|issue:45542" ], - "prepared_review_unit_hash": "5e7cbe907f70754c74f799ff828ed4b5bc68d63f27db8eeff33b092d19a19cc2", + "prepared_review_unit_hash": "85aafd3376af131895d929d0d2ff1b6ad5582026588cdfba8e31a0b1d77a3e0b", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - 
"estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 30, - "serialized_chars": 36634, - "estimated_input_tokens": 9159, - "estimated_eval_tokens": 18574 + "serialized_chars": 36644, + "estimated_input_tokens": 9161, + "estimated_eval_tokens": 18578 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a grab bag of unrelated Transformers regressions, import failures, config issues, and model-specific bugs. None of the soft-edge pairs look like true duplicates on the evidence given. The broadest representative issue is the remote-code regression report.", - "confidence": 0.82, - "canonical_issue_reason": "issue:45020 is the broadest and most central report: it describes recent Transformers versions breaking remote_code-based models, which is the closest umbrella for several of the nearby loading/import/config regressions.", + "summary": "This set is mostly a loose collection of unrelated Transformers regression and model-loading issues, not a tight duplicate cluster. 
If a single umbrella issue must be chosen, the broad open remote-code regression report is the closest anchor; the soft pairs are all too different to merge.", + "confidence": 0.79, + "canonical_issue_reason": "issue:45020 is the broadest open umbrella here, covering recent-version regressions in remote-code model loading, but it is still only a thematic anchor rather than a true duplicate of the others.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45020 best fits as the cluster anchor because it is the most general user-facing failure mode and the least model-specific of the set.", + "best_issue_reason": "issue:45020 is the best global issue anchor because it is open, broad in scope, and plausibly encompasses multiple version-regression reports better than any model-specific bug report.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:44246", - "right": "issue:45003", + "left": "issue:45406", + "right": "issue:45632", "accept": false, - "reason": "One is about slow import time; the other is about unsafe sys.modules access. Related area, but not the same bug or code path." + "reason": "Different failures: Gemma4Processor missing _tokenizer vs trust_remote_code cache-path collision." }, { "left": "issue:43643", "right": "issue:44496", "accept": false, - "reason": "Both involve config/loading, but one is missing fields with trust_remote_code and the other is an unrecognized model due to missing model_type. Different failures." + "reason": "Both involve config/remote-code loading, but one is missing fields in AutoConfig output and the other is invalid model config recognition; not the same bug." }, { "left": "issue:42907", "right": "issue:43441", "accept": false, - "reason": "Same model family, but one is save_pretrained/dequantized export and the other is FlashAttention loading. Not the same concrete issue." + "reason": "Different Ministral issues: save_pretrained/dequantized save failure vs FlashAttention load failure." 
}, { "left": "issue:44016", "right": "issue:44062", "accept": false, - "reason": "A notebook syntax error is unrelated to the AddedToken TypeError; no shared underlying bug." + "reason": "Notebook syntax error and AddedToken keyword conflict are unrelated." }, { "left": "issue:44964", "right": "issue:45600", "accept": false, - "reason": "Different models and different failure modes: Phi-4 multimodal loading versus stale auto_mappings references in CI." + "reason": "Phi-4 multimodal loading failure and stale auto_mappings CI references are different code paths." + }, + { + "left": "issue:43295", + "right": "issue:45335", + "accept": false, + "reason": "Separate regressions: processor.tokenizer/images handling vs t5gemma embedding resize not propagating." }, { "left": "issue:43299", "right": "issue:45216", "accept": false, - "reason": "Both touch Qwen3.5, but one is a model-loading regression and the other is a save_pretrained checkpoint serialization bug." + "reason": "Both are Qwen regressions, but one is model loading and the other is save_pretrained output corruption; not one underlying bug." }, { "left": "issue:43976", "right": "issue:44987", "accept": false, - "reason": "Python version compatibility and a specific model-loading failure are unrelated." + "reason": "A Python-version compatibility complaint and a model-specific load failure are not the same issue." }, { - "left": "issue:43295", - "right": "issue:45335", + "left": "issue:44112", + "right": "issue:44964", "accept": false, - "reason": "Custom processor/tokenizer access regression is unrelated to resize_token_embeddings not updating decoder.embed_tokens." + "reason": "CI stale-device override test failure is unrelated to Phi-4 multimodal loading." }, { "left": "issue:43673", "right": "issue:44336", "accept": false, - "reason": "Generation cache behavior and ANSI output formatting are completely different subsystems." 
+ "reason": "Generation cache/chunked prefill bug and ANSI-code logging bug are unrelated." }, { - "left": "issue:44112", - "right": "issue:44964", + "left": "issue:44496", + "right": "issue:45042", "accept": false, - "reason": "CI device-override test flakiness is not the same as a Phi-4 model-loading bug." + "reason": "Unrecognized model/config.json problem is unrelated to PIL backend torchvision dependency handling." }, { "left": "issue:43824", "right": "issue:44933", "accept": false, - "reason": "Both are import errors, but they concern different symbols and different root causes." - }, - { - "left": "issue:43531", - "right": "issue:45600", - "accept": false, - "reason": "Sliding-window behavior in Qwen3-MoE is unrelated to removed config references in auto_mappings." + "reason": "Both are import errors, but for different symbols and subsystems; not the same underlying breakage." }, { - "left": "issue:44496", - "right": "issue:45042", + "left": "issue:43854", + "right": "issue:44964", "accept": false, - "reason": "Model config resolution failure and PIL processor torchvision dependency enforcement are different bugs." + "reason": "Different models and failure modes: GLM-4.7-Flash unit-test loading vs Phi-4 multimodal loading." }, { - "left": "issue:43854", - "right": "issue:44964", + "left": "issue:43531", + "right": "issue:45600", "accept": false, - "reason": "Both are model-loading failures, but for different models and likely different code paths." + "reason": "Qwen3-MoE sliding_window behavior and removed auto_mappings references are unrelated." }, { "left": "issue:43976", "right": "issue:44279", "accept": false, - "reason": "A Python support regression is unrelated to an unspecified dependency issue." + "reason": "Generic dependency/version issue vs a vague transformers dependency problem; no shared concrete bug." 
}, { "left": "issue:43526", "right": "issue:43901", "accept": false, - "reason": "BeitImageProcessorFast label reduction is unrelated to outdated docs about return_all_scores." - }, - { - "left": "issue:43299", - "right": "issue:45357", - "accept": false, - "reason": "Both involve Qwen3.5, but one is loading and the other is save_pretrained visual-key serialization; not the same bug." + "reason": "BeitImageProcessorFast label reduction bug and TextClassificationPipeline docs mismatch are not the same change." }, { "left": "issue:45335", "right": "issue:45584", "accept": false, - "reason": "Token embedding resizing and Whisper empty-transcription generation are unrelated." + "reason": "t5gemma embedding resize bug and Whisper empty-transcription generation bug are unrelated." }, { "left": "issue:44075", "right": "issue:44368", "accept": false, - "reason": "Optimizer arguments being ignored has nothing to do with a tie_word_embeddings warning." + "reason": "SGD optimizer args not used and tie_word_embeddings warning are different behavior issues." }, { - "left": "issue:44521", - "right": "issue:45325", + "left": "issue:43299", + "right": "issue:45357", "accept": false, - "reason": "Both are multimodal, but assistant_masks computation and RoPE position scaling are different code paths." + "reason": "Both affect Qwen save/load behavior, but one is a loading regression and the other is incorrect visual-encoder keys on save; not mergeable as one fix." }, { "left": "issue:43381", "right": "issue:45593", "accept": false, - "reason": "Gradient checkpointing in eval mode and missing auxiliary losses in D-FINE are unrelated training behaviors." + "reason": "Gradient checkpointing eval-mode failure and D-FINE auxiliary-loss omission are unrelated training bugs." + }, + { + "left": "issue:44521", + "right": "issue:45325", + "accept": false, + "reason": "Multimodal assistant mask generation and still-image temporal position-id scaling are separate multimodal issues." 
}, { "left": "issue:43577", "right": "issue:45216", "accept": false, - "reason": "Blip2 dtype propagation and Qwen3.5 checkpoint saving are unrelated." + "reason": "Blip2 dtype propagation bug and Qwen3.5 save_pretrained regression are unrelated." }, { "left": "issue:44351", "right": "issue:45278", "accept": false, - "reason": "A specific missing HybridCache import is not the same underlying bug as a broad report of many import errors." + "reason": "HybridCache import error may be one symptom of import breakage, but 45278 is a broad version-upgrade report and not the same concrete bug." + }, + { + "left": "issue:43901", + "right": "issue:44496", + "accept": false, + "reason": "Documentation text mismatch is unrelated to model config recognition failure." }, { "left": "issue:43976", "right": "issue:45405", "accept": false, - "reason": "Python compatibility issues and an unreleased PEFT minimum-version bump are unrelated." + "reason": "Python compatibility and MIN_PEFT_VERSION release timing are unrelated." + }, + { + "left": "issue:41950", + "right": "issue:45020", + "accept": false, + "reason": "Video-classification processor lookup bug is unrelated to remote_code model-loading regressions." }, { "left": "issue:44242", "right": "issue:45200", "accept": false, - "reason": "Load-balancing loss gating and mm_token_type_ids defaults are different model-training bugs." + "reason": "Router-logit load-balancing loss and Gemma 4 mm_token_type_ids defaulting are different model-train path bugs." }, { "left": "issue:43479", "right": "issue:43525", "accept": false, - "reason": "Phi4MultimodalConfig default initialization and Llama4Config missing pad_token_id are distinct config problems." + "reason": "Phi4Multimodal default-config initialization and Llama4Config missing pad_token_id are separate config issues." 
}, { - "left": "issue:44462", - "right": "issue:45081", + "left": "issue:44496", + "right": "issue:44877", + "accept": false, + "reason": "Unrecognized model due to missing model_type and strict granite_speech config loading are not the same bug." + }, + { + "left": "issue:44683", + "right": "issue:45542", "accept": false, - "reason": "AutoTokenizer ignoring tokenizer.json and a Mistral regex patch crash are different tokenizer issues." + "reason": "Compiled flex_attention on torch>=2.9 and tensorboard-without-TensorFlow backend error are unrelated." } ] }, @@ -9531,7 +9507,6 @@ "issue:38175", "issue:39692", "issue:41950", - "issue:42175", "issue:42994", "issue:43065", "issue:43122", @@ -9549,7 +9524,6 @@ "issue:43784", "issue:43854", "issue:43901", - "issue:43976", "issue:44016", "issue:44162", "issue:44261", @@ -9568,6 +9542,7 @@ "issue:44779", "issue:44898", "issue:44987", + "issue:45081", "issue:45276", "issue:45290", "issue:45397", @@ -9578,238 +9553,231 @@ "issue:45712" ], "soft_pairs": [ - "issue:44683|issue:45542", - "issue:44297|issue:44462", + "issue:44462|issue:45081", "issue:44279|issue:45405", "issue:43299|issue:43854", "issue:41950|issue:43650", + "issue:44297|issue:44462", "issue:39692|issue:43901", "issue:43295|issue:44779", + "issue:38175|issue:44617", "issue:44462|issue:44779", "issue:45600|issue:45712", - "issue:38175|issue:44617", "issue:44016|issue:44291", "issue:44261|issue:44263", - "issue:43723|issue:44162", "issue:43450|issue:44279", "issue:42994|issue:43317", "issue:43065|issue:44898", + "issue:43723|issue:44162", "issue:43441|issue:44683", - "issue:43650|issue:45290", "issue:44466|issue:45276", + "issue:43650|issue:45290", "issue:44623|issue:45397", - "issue:43262|issue:44521", "issue:44623|issue:44749", + "issue:43262|issue:44521", "issue:43784|issue:45542", "issue:43673|issue:43761", "issue:44671|issue:44779", - "issue:43122|issue:43650", "issue:43653|issue:45593", - "issue:44987|issue:45542", - "issue:42175|issue:43976" + 
"issue:43122|issue:43650", + "issue:44987|issue:45542" ], - "prepared_review_unit_hash": "c4b8e8cf9bd3981d20440fe773cbfa7ed861be2e3ffbf38f6a20e8ed3a57cdda", + "prepared_review_unit_hash": "7a12f72b7bb534663446e9d7591dd8f6d1b3b3fff035daffcb2fb4be28cada09", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { - "node_count": 48, - "item_count": 48, - "soft_pair_count": 29, - "serialized_chars": 36283, - "estimated_input_tokens": 9071, - "estimated_eval_tokens": 18398 + "node_count": 47, + "item_count": 47, + "soft_pair_count": 28, + "serialized_chars": 35529, + "estimated_input_tokens": 8883, + "estimated_eval_tokens": 18022 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This is a loose cluster of Transformer v5 regression reports plus a few docs, backend, and model-specific bugs. The soft pairs are mostly surface-similar but do not describe the same underlying defect, so none should be merged.", - "confidence": 0.83, - "canonical_issue_reason": "issue:43295 is the best representative because it is an explicit regression tied to the v4.57.5/v5 transition and touches the processor/tokenizer boundary, which is the broadest common theme across the cluster. It is still only a weak umbrella since the cluster is heterogeneous.", + "summary": "The cluster is heterogeneous: most items are separate Transformers regressions, docs issues, or model-specific bugs. The soft pairs share broad subsystems at best, but not the same concrete underlying issue or mergeable fix.", + "confidence": 0.94, + "canonical_issue_reason": "No single canonical issue stands out because the items span unrelated bugs. 
If a loose anchor is needed, issue 44462 is the broadest tokenizer-loading report, but it does not actually subsume the rest of the cluster.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43295 is the most central, actionable, and regression-framed issue in the set, making it the least-bad canonical anchor for triage.", + "best_issue_reason": "Issue 44462 is the closest thing to a representative issue because it is a generic tokenizer-loading regression, but it is still too narrow to serve as a true duplicate hub.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:44683", - "right": "issue:45542", - "accept": false, - "reason": "Different problems: flex_attention compilation on torch>=2.9 vs missing TensorFlow backend when only tensorboard is installed." - }, - { - "left": "issue:44297", - "right": "issue:44462", + "left": "issue:44462", + "right": "issue:45081", "accept": false, - "reason": "Both involve tokenizer persistence/loading, but one is about tokenizer_class metadata on save and the other about AutoTokenizer ignoring repo tokenizer.json; different code paths." + "reason": "Both are tokenizer-related, but one is AutoTokenizer ignoring repository tokenizer.json and the other is a Mistral regex patch crash; different code paths and fixes." }, { "left": "issue:44279", "right": "issue:45405", "accept": false, - "reason": "Both mention dependencies, but one is a generic transformers dependency issue and the other is a specific unreleased MIN_PEFT_VERSION bump." + "reason": "These are unrelated version/dependency problems: a generic transformers dependency issue versus an unreleased PEFT version bump." }, { "left": "issue:43299", "right": "issue:43854", "accept": false, - "reason": "Both are model-loading regressions, but they affect different models and likely different loaders; not the same concrete bug." 
+ "reason": "Both concern model loading failures, but for different models and different failure modes; not the same underlying bug." }, { "left": "issue:41950", "right": "issue:43650", "accept": false, - "reason": "Unrelated: video-classification processor bug vs a standalone request with no shared defect." + "reason": "Completely unrelated: a video-classification processor lookup bug versus a trivial placeholder issue." + }, + { + "left": "issue:44297", + "right": "issue:44462", + "accept": false, + "reason": "Both involve tokenizer serialization/loading, but one is wrong tokenizer_class saved on disk and the other is AutoTokenizer ignoring tokenizer.json; distinct fixes." }, { "left": "issue:39692", "right": "issue:43901", "accept": false, - "reason": "Both are docs-related, but they refer to different examples and different mistakes; not the same issue." + "reason": "Both are docs-related, but one is a SigLIP2 example with model/processor mismatch and quantization errors, while the other is a pipeline docs mismatch." }, { "left": "issue:43295", "right": "issue:44779", "accept": false, - "reason": "Different regressions: processor.tokenizer/image handling vs DeepSeek tokenization output mismatch." + "reason": "Both are v5 tokenizer/processor regressions, but one is about processor.tokenizer access and multimodal tokenization, while the other is Deepseek tokenization output changes." + }, + { + "left": "issue:38175", + "right": "issue:44617", + "accept": false, + "reason": "Unrelated model-specific issues: unexpected zero probabilities in SigLIP2 versus Sam3Video CUDA OOM." }, { "left": "issue:44462", "right": "issue:44779", "accept": false, - "reason": "Repo tokenizer.json handling vs DeepSeek tokenization correctness; related area, but distinct bugs." + "reason": "Both mention tokenizers, but the reported failures are different models and different behaviors; not the same bug." 
}, { "left": "issue:45600", "right": "issue:45712", "accept": false, - "reason": "Both are CI/repo hygiene problems, but one is stale auto_mappings references and the other is leftover dummy classes leaking into the package." - }, - { - "left": "issue:38175", - "right": "issue:44617", - "accept": false, - "reason": "Different model failures: unexpected zero probabilities in SigLIP2 vs CUDA OOM in Sam3Video." + "reason": "Both are CI/repo hygiene problems, but they affect different maintenance checks and files." }, { "left": "issue:44016", "right": "issue:44291", "accept": false, - "reason": "Notebook syntax error vs init_empty_weights/_is_hf_initialized TypeError; unrelated failures." + "reason": "A notebook syntax error and an init_empty_weights TypeError are unrelated issues." }, { "left": "issue:44261", - "right": "issue:44263", - "accept": false, - "reason": "Both are GLM-family issues, but one is an rms_norm_eps precision/config bug and the other is a torch.split return-value issue in a different component." - }, - { - "left": "issue:43723", - "right": "issue:44162", + "right": "issue:44263", "accept": false, - "reason": "Different failure modes and targets: AutoTokenizer loading in v5 vs ESM2 model breakage." + "reason": "Different model internals: MLA q_a_layernorm precision/config handling versus torch.split return handling in GlmMoeDsaIndexer." }, { "left": "issue:43450", "right": "issue:44279", "accept": false, - "reason": "Video batch shape bug vs generic dependency issue; no shared underlying defect." + "reason": "Video processor shape handling and a dependency issue are unrelated." }, { "left": "issue:42994", "right": "issue:43317", "accept": false, - "reason": "Both touch quantization/offload, but one is saving a quantized model and the other is loading a dequantized model with device_map=auto; different stages and failure mechanisms." 
+ "reason": "Both touch quantization, but one is about saving a quantized model and the other is about loading a dequantized model with device_map offload." }, { "left": "issue:43065", "right": "issue:44898", "accept": false, - "reason": "Different vision-model bugs on different architectures; one is a dummy Conv2d in Sam3PixelDecoder, the other is Perceiver resolution handling." + "reason": "Different vision-model bugs with different symptoms and code paths; not mergeable as one fix." }, { - "left": "issue:43441", - "right": "issue:44683", + "left": "issue:43723", + "right": "issue:44162", "accept": false, - "reason": "Different attention-stack problems: Ministral-3 FlashAttention failure vs compiled flex_attention failure on newer torch." + "reason": "A generic AutoTokenizer loading regression and an ESM2 breakage are not the same underlying problem." }, { - "left": "issue:43650", - "right": "issue:45290", + "left": "issue:43441", + "right": "issue:44683", "accept": false, - "reason": "Unrelated: a nonspecific request vs a crash in apply_chat_template with tool-call assistant messages." + "reason": "Both mention attention backends, but one is model-specific FlashAttention breakage and the other is compiled flex_attention failing on newer torch." }, { "left": "issue:44466", "right": "issue:45276", "accept": false, - "reason": "Both concern weight/serialization behavior, but one is device-dependent lm_head serialization and the other is resize_token_embeddings not updating Gemma4 embedding/output structures." + "reason": "Related to embeddings/weight tying in Gemma4, but one is device-dependent serialization and the other is resize_token_embeddings not updating per-layer/output embeddings." }, { - "left": "issue:44623", - "right": "issue:45397", + "left": "issue:43650", + "right": "issue:45290", "accept": false, - "reason": "Processor.save_pretrained missing files vs Gemma-4 Zero3 from_pretrained; distinct bugs." 
+ "reason": "Completely different: a placeholder issue versus a crash on assistant messages with tool calls and empty content." }, { - "left": "issue:43262", - "right": "issue:44521", + "left": "issue:44623", + "right": "issue:45397", "accept": false, - "reason": "Both are chat_template/multimodal-adjacent, but one is default audio sampling rate and the other is assistant_masks being all zero for multimodal inputs." + "reason": "Processor save_pretrained file omission and gemma-4 zero3 loading failure are different save/load problems." }, { "left": "issue:44623", "right": "issue:44749", "accept": false, - "reason": "File-saving bug vs performance slowdown after upgrading; no shared underlying defect." + "reason": "Missing processor files and a performance regression in filtering are unrelated." + }, + { + "left": "issue:43262", + "right": "issue:44521", + "accept": false, + "reason": "Both involve apply_chat_template, but one is an audio sampling-rate default bug and the other is multimodal assistant mask generation." }, { "left": "issue:43784", "right": "issue:45542", "accept": false, - "reason": "Both are import/dependency-related, but the missing name is nn in sentence-transformers integration, not TensorFlow backend installation." + "reason": "Both are import/backend errors, but they come from different optional dependencies and different missing symbols." }, { "left": "issue:43673", "right": "issue:43761", "accept": false, - "reason": "Different model/runtime regressions: GenerationMixin cache in chunked_prefill vs CLIPVisionModel hidden_states behavior." + "reason": "A generation cache regression and a CLIPVision hidden_states regression are unrelated." }, { "left": "issue:44671", "right": "issue:44779", "accept": false, - "reason": "Both are output-correctness regressions, but one is CamemBERT masked LM predictions and the other is tokenizer behavior for DeepSeek." 
- }, - { - "left": "issue:43122", - "right": "issue:43650", - "accept": false, - "reason": "Tokenizer behavior regression vs unrelated placeholder request." + "reason": "Different v5 regressions affecting different models and outputs; not the same code-path problem." }, { "left": "issue:43653", "right": "issue:45593", "accept": false, - "reason": "Tokenizer special-token registration bug vs D-FINE auxiliary-loss behavior; unrelated." + "reason": "BigBirdTokenizer special-token registration and D-FINE auxiliary-loss behavior are unrelated." }, { - "left": "issue:44987", - "right": "issue:45542", + "left": "issue:43122", + "right": "issue:43650", "accept": false, - "reason": "Model-loading failure for a specific repo vs TensorFlow backend dependency issue; different causes." + "reason": "Tokenizer behavior change and a placeholder issue have no overlap." }, { - "left": "issue:42175", - "right": "issue:43976", + "left": "issue:44987", + "right": "issue:45542", "accept": false, - "reason": "Different environment/dependency issues: missing TensorFlow backend in torch extra vs Python version incompatibility in 5.1.0." + "reason": "A model-loading failure for physical-intelligence/fast and a TensorFlow backend import issue are unrelated." 
} ] }, @@ -9836,6 +9804,7 @@ "issue:36246", "issue:41628", "issue:41950", + "issue:42175", "issue:42491", "issue:42757", "issue:43295", @@ -9870,7 +9839,6 @@ "issue:44589", "issue:44617", "issue:44661", - "issue:44857", "issue:45003", "issue:45200", "issue:45250", @@ -9883,251 +9851,245 @@ "issue:45593" ], "soft_pairs": [ + "issue:42175|issue:43976", "issue:43901|issue:43994", "issue:44361|issue:45593", "issue:44297|issue:45397", - "issue:43475|issue:44016", "issue:43824|issue:43976", + "issue:43475|issue:44016", "issue:43525|issue:43881", - "issue:43352|issue:45250", - "issue:45561|issue:45563", "issue:44589|issue:45362", + "issue:45561|issue:45563", "issue:44188|issue:44336", + "issue:43352|issue:45250", "issue:41628|issue:43976", "issue:43295|issue:44617", "issue:42491|issue:43299", - "issue:44336|issue:44373", "issue:43901|issue:44246", - "issue:41950|issue:44220", + "issue:44336|issue:44373", "issue:44242|issue:45593", + "issue:41950|issue:44220", "issue:44295|issue:45397", "issue:44661|issue:45542", "issue:44462|issue:45356", - "issue:43531|issue:45362", "issue:44246|issue:44496", - "issue:45003|issue:45561", + "issue:43531|issue:45362", "issue:36246|issue:42757", + "issue:45003|issue:45561", "issue:36246|issue:44351", "issue:44336|issue:44485", - "issue:43673|issue:44561", "issue:43526|issue:45003", - "issue:44162|issue:45200", - "issue:44242|issue:44857" + "issue:43673|issue:44561", + "issue:44162|issue:45200" ], - "prepared_review_unit_hash": "3fbd9fb275bd9eeba3eb8b6186623372774f93b6f7f0765a3359214abe25f765", + "prepared_review_unit_hash": "2bc9024962422aa0f36b3cdcab6dd5a6ad0fb1426f298ebdfcdcd7a7f6491452", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 
30, - "serialized_chars": 36319, - "estimated_input_tokens": 9080, - "estimated_eval_tokens": 18416 + "serialized_chars": 36338, + "estimated_input_tokens": 9085, + "estimated_eval_tokens": 18426 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is a loose mix of unrelated Transformers bug reports, mostly import/regression/model-specific issues. None of the soft pairs look like true duplicates or the same underlying fix.", - "confidence": 0.91, - "canonical_issue_reason": "If an anchor is needed, issue 43824 is the best representative: it is a concrete, reproducible ImportError with a specific missing symbol and substantial discussion/inbound references.", + "summary": "The set is mostly heterogeneous. Several items share broad themes like import errors, model-loading regressions, or tokenizer behavior, but the soft pairs do not describe the same concrete bug/change closely enough to merge under strict duplicate triage.", + "confidence": 0.82, + "canonical_issue_reason": "issue:43824 is the clearest, most referenced, user-facing concrete regression here: a specific Qwen2.5-VL import failure with strong duplicate potential in the same family of export/import problems.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 43824 is the strongest standalone issue in the set because it is specific, actionable, and well-discussed; many others are either broader, more speculative, or clearly about different subsystems.", + "best_issue_reason": "issue:43824 is the best representative issue because it is specific, actionable, and tied to a concrete import/export failure rather than a broad symptom or unrelated regression.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:42175", + "right": "issue:43976", + "accept": false, + "reason": "TensorFlow-backend packaging bug vs Python-version compatibility regression; unrelated failures." 
+ }, { "left": "issue:43901", "right": "issue:43994", "accept": false, - "reason": "Docs/behavior mismatch in text classification is unrelated to a SigLIP2 bad-results bug." + "reason": "Docs/behavior mismatch for a pipeline flag vs incorrect SigLIP2 outputs; different problem classes." }, { "left": "issue:44361", "right": "issue:45593", "accept": false, - "reason": "Different models and failures: MLuke tokenizer task AttributeError vs D-FINE auxiliary-loss handling." + "reason": "Tokenizer AttributeError in tasks vs missing auxiliary losses in D-FINE; different models and code paths." }, { "left": "issue:44297", "right": "issue:45397", "accept": false, - "reason": "Tokenizer config name mismatch is a different path from Gemma-4 zero3 loading." + "reason": "Tokenizer save metadata bug vs Gemma-4 from_pretrained/ZeRO loading issue; not the same bug." }, { - "left": "issue:43475", - "right": "issue:44016", + "left": "issue:43824", + "right": "issue:43976", "accept": false, - "reason": "SAM3 video encoder attribute bug and a notebook syntax error are unrelated." + "reason": "Qwen2.5-VL import/export failure vs Python version support regression; no shared code-path." }, { - "left": "issue:43824", - "right": "issue:43976", + "left": "issue:43475", + "right": "issue:44016", "accept": false, - "reason": "Missing Qwen2_5_VL export is not the same as a Python-version compatibility regression." + "reason": "SAM3 video model attribute error vs notebook syntax error; completely unrelated." }, { "left": "issue:43525", "right": "issue:43881", "accept": false, - "reason": "Llama4 pad_token_id access and GLM-4V loading failure involve different model/config bugs." + "reason": "Missing pad_token_id on Llama4Config vs GLM-4V loading failure; different model families and failure modes." 
}, { - "left": "issue:43352", - "right": "issue:45250", + "left": "issue:44589", + "right": "issue:45362", "accept": false, - "reason": "A specific Nemotron flash-attn support error is not the same as the generic Flash Attention issue." + "reason": "Float8 storage type error vs Qwen3.5 chat crash; unrelated runtime issues." }, { "left": "issue:45561", "right": "issue:45563", "accept": false, - "reason": "Testing xdist file races and a stale generation warning are unrelated." + "reason": "pytest-xdist file race vs stale generate() warning; unrelated." }, { - "left": "issue:44589", - "right": "issue:45362", + "left": "issue:44188", + "right": "issue:44336", "accept": false, - "reason": "Float8 storage lookup failure is a different runtime problem than Qwen3.5 chat crashes." + "reason": "torch.compile attention-kernel divergence vs terminal ANSI output leakage; unrelated subsystems." }, { - "left": "issue:44188", - "right": "issue:44336", + "left": "issue:43352", + "right": "issue:45250", "accept": false, - "reason": "Torch.compile attention-kernel divergence and ANSI-code output cleanup are unrelated." + "reason": "Specific unsupported Flash Attention 2.0 error for Nemotron vs a generic FA2 topic; not the same concrete bug." }, { "left": "issue:41628", "right": "issue:43976", "accept": false, - "reason": "Missing AutoImageProcessor export is not the same as the Python 3.9/3.10 packaging issue." + "reason": "AutoImageProcessor import failure vs Python3.9/3.10 compatibility problem; different causes." }, { "left": "issue:43295", "right": "issue:44617", "accept": false, - "reason": "Processor/tokenizer regression and SAM3Video OOM are different failures." + "reason": "Processor/tokenizer regression in v4.57.5 vs SAM3 video CUDA OOM; unrelated." }, { "left": "issue:42491", "right": "issue:43299", "accept": false, - "reason": "Both mention Qwen3 variants, but they describe different models and different loading problems." 
- }, - { - "left": "issue:44336", - "right": "issue:44373", - "accept": false, - "reason": "ANSI-code logging output and a docstring error are clearly different issues." + "reason": "Qwen3 MoE LoRA compatibility issue vs Qwen3VL MoE loading breakage; same broad family but not the same concrete bug." }, { "left": "issue:43901", "right": "issue:44246", "accept": false, - "reason": "A pipeline documentation mismatch is unrelated to intermittent slow imports." + "reason": "Pipeline docs mismatch vs import performance regression; no common underlying defect." }, { - "left": "issue:41950", - "right": "issue:44220", + "left": "issue:44336", + "right": "issue:44373", "accept": false, - "reason": "Video-classification image-processor lookup and fbank feature extraction are different code paths." + "reason": "ANSI code emission in loading_report vs wrong position_ids docstring; unrelated." }, { "left": "issue:44242", "right": "issue:45593", "accept": false, - "reason": "Load-balancing loss gating and D-FINE auxiliary-loss behavior are not the same bug." + "reason": "MoE load-balancing loss bookkeeping vs D-FINE denoising auxiliary-loss behavior; different models and paths." + }, + { + "left": "issue:41950", + "right": "issue:44220", + "accept": false, + "reason": "Video-classification pipeline selecting image processors vs fbank feature extraction issue; unrelated." }, { "left": "issue:44295", "right": "issue:45397", "accept": false, - "reason": "Position_ids buffer access and Gemma-4 zero3 loading are unrelated." + "reason": "position_ids buffer access bug vs Gemma-4 ZeRO loading bug; not the same issue." }, { "left": "issue:44661", "right": "issue:45542", "accept": false, - "reason": "Tokenizer mapping generation failure and undefined TensorFlow backend are different subsystems." + "reason": "add-new-model-like tokenizer mapping bug vs tf backend installation/availability issue; unrelated." 
}, { "left": "issue:44462", "right": "issue:45356", "accept": false, - "reason": "AutoTokenizer ignoring tokenizer.json is not the same as the Kimi-K2.5 codec regression." - }, - { - "left": "issue:43531", - "right": "issue:45362", - "accept": false, - "reason": "Qwen3-MoE sliding-window behavior and Qwen3.5 chat crashes are different issues." + "reason": "AutoTokenizer ignoring tokenizer.json vs Kimi-K2.5 codec/warning regression; both tokenizer-related but different concrete bugs." }, { "left": "issue:44246", "right": "issue:44496", "accept": false, - "reason": "Import latency is unrelated to unrecognized model/config loading." + "reason": "Import latency sometimes vs unrecognized model/config loading error; unrelated." }, { - "left": "issue:45003", - "right": "issue:45561", + "left": "issue:43531", + "right": "issue:45362", "accept": false, - "reason": "Unsafe sys.modules access and xdist captured_info.txt races are different bugs." + "reason": "Qwen3-MoE sliding_window behavior vs Qwen3.5 chat crash; different failure modes." }, { "left": "issue:36246", "right": "issue:42757", "accept": false, - "reason": "Two different import errors from different packages and symbols." + "reason": "Missing Qwen2_5_VLImageProcessor export vs missing is_offline_mode from huggingface_hub; both import errors but unrelated symbols." + }, + { + "left": "issue:45003", + "right": "issue:45561", + "accept": false, + "reason": "Unsafe sys.modules access in modeling_utils vs xdist test-file race; unrelated." }, { "left": "issue:36246", "right": "issue:44351", "accept": false, - "reason": "Missing Qwen2_5_VLImageProcessor and missing HybridCache are distinct export issues." + "reason": "Different missing exports/symbols in transformers; same pattern of import error but not the same bug." }, { "left": "issue:44336", "right": "issue:44485", "accept": false, - "reason": "ANSI logging noise is unrelated to GLM-5 RoPE implementation." 
- }, - { - "left": "issue:43673", - "right": "issue:44561", - "accept": false, - "reason": "Chunked-prefill cache regression and removal of is_torch_fx_available are different v5 breakages." + "reason": "Terminal ANSI formatting bug vs GLM-5 RoPE implementation discussion; unrelated." }, { "left": "issue:43526", "right": "issue:45003", "accept": false, - "reason": "BeitImageProcessorFast label reduction and sys.modules access are unrelated." - }, - { - "left": "issue:44162", - "right": "issue:45200", - "accept": false, - "reason": "ESM2 breakage and Gemma-4 token-type defaults concern different models and fixes." + "reason": "BeitImageProcessorFast label reduction bug vs sys.modules access bug; different code paths." }, { - "left": "issue:44246", - "right": "issue:44857", + "left": "issue:43673", + "right": "issue:44561", "accept": false, - "reason": "Import slowness is unrelated to the LwDetrImageLoss AMP CUDA crash." + "reason": "Generation cache issue in chunked_prefill vs removal of is_torch_fx_available breaking trust_remote_code; unrelated regressions." }, { - "left": "issue:45561", - "right": "issue:45563", + "left": "issue:44162", + "right": "issue:45200", "accept": false, - "reason": "Testing infrastructure race and a stale generation warning do not describe the same bug." + "reason": "ESM2 breakage vs Gemma 4 mm_token_type_ids defaulting; different model-specific defects." 
} ] }, @@ -10166,20 +10128,20 @@ "issue:43540", "issue:43596", "issue:43653", - "issue:43673", "issue:43901", "issue:44060", "issue:44077", "issue:44162", + "issue:44242", "issue:44246", "issue:44295", "issue:44336", "issue:44368", "issue:44466", "issue:44496", - "issue:44734", "issue:44749", "issue:44779", + "issue:44857", "issue:44871", "issue:44877", "issue:44977", @@ -10201,231 +10163,231 @@ "issue:45701" ], "soft_pairs": [ + "issue:44242|issue:44857", "issue:44246|issue:44871", "issue:36246|issue:43596", "issue:44295|issue:44749", "issue:43441|issue:43540", + "issue:44779|issue:45292", "issue:44877|issue:45375", "issue:43066|issue:44779", "issue:42886|issue:45229", - "issue:44779|issue:45292", - "issue:44987|issue:45459", "issue:41628|issue:41950", "issue:45292|issue:45584", + "issue:44987|issue:45459", "issue:43065|issue:43408", "issue:43901|issue:45003", + "issue:45292|issue:45701", "issue:43232|issue:44336", "issue:44995|issue:45200", - "issue:45292|issue:45701", "issue:43257|issue:45137", "issue:44496|issue:45003", "issue:44060|issue:44077", "issue:43475|issue:43540", - "issue:43653|issue:44162", "issue:43262|issue:44246", - "issue:44995|issue:45588", + "issue:43653|issue:44162", "issue:44368|issue:45563", + "issue:44995|issue:45588", "issue:44466|issue:45335", - "issue:44368|issue:44977", "issue:45397|issue:45656", - "issue:43673|issue:44734" + "issue:44368|issue:44977" ], - "prepared_review_unit_hash": "92ba2f7a0b1dc4dc3c0aee213fb01b8b26b0485d93fd453eb71a5bfb2c856844", + "prepared_review_unit_hash": "f6f6404144b654fe1a3725191ca5147bb3eaecb8f82b9f901c19baee694cf0ab", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 28, - "serialized_chars": 36194, - 
"estimated_input_tokens": 9049, - "estimated_eval_tokens": 18354 + "serialized_chars": 36152, + "estimated_input_tokens": 9038, + "estimated_eval_tokens": 18332 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The set is mostly a grab-bag of unrelated Transformers regressions and docs/config issues; I don\u2019t see any soft pair that is clearly the same underlying bug/change. The broadest issue is 44246, but it is only a weak anchor for this cluster.", - "confidence": 0.72, - "canonical_issue_reason": "issue:44246 is the broadest and most discussed item here, so it is the best available anchor, though the cluster is too heterogeneous for a strong canonical match.", + "summary": "The cluster is thematically noisy: many items mention Transformers loading/tokenizer/modeling regressions, but the paired candidates are not the same concrete bugs. I rejected all soft edges because they differ in model, failure mode, or code path.", + "confidence": 0.33, + "canonical_issue_reason": "issue:45701 is the broadest and most representative open report here: a version-dependent tokenization regression with clear impact and room for follow-up.", "canonical_pr_reason": null, - "best_issue_reason": "issue:44246 is the most generic/high-activity issue in the set, making it the least-bad global representative even though it does not truly unify the rest.", + "best_issue_reason": "issue:45701 is the best single anchor for this cluster because it is general, open, and closest to the recurring tokenizer-regression theme among the listed issues.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:44242", + "right": "issue:44857", + "accept": false, + "reason": "Different bugs: MoE router loss handling vs an AMP/CUDA crash in image loss." 
+ }, { "left": "issue:44246", "right": "issue:44871", "accept": false, - "reason": "Import slowdown vs eos_token_id config mismatch; different bugs and code paths." + "reason": "Different issues: import slowness vs eos_token_id config mismatch." }, { "left": "issue:36246", "right": "issue:43596", "accept": false, - "reason": "Qwen2.5-VL import error vs DeepSpeed ZeRO3 BertModel crash; unrelated failures." + "reason": "Different code paths: missing Qwen2.5-VL image processor export vs DeepSpeed/BertModel zero3 indexing." }, { "left": "issue:44295", "right": "issue:44749", "accept": false, - "reason": "Position_ids buffer access error vs broad tokenizer slowdown after upgrade." + "reason": "Different symptoms: position_ids buffer access error vs a general performance regression." }, { "left": "issue:43441", "right": "issue:43540", "accept": false, - "reason": "FlashAttention bug in Ministral-3 vs Qwen3OmniMoe video input ValueError; not the same issue." + "reason": "Different models and failures: FlashAttention on Ministral-3 vs Qwen3OmniMoe video-input validation." + }, + { + "left": "issue:44779", + "right": "issue:45292", + "accept": false, + "reason": "Tokenizer output regression vs resize_token_embeddings not updating output embeddings." }, { "left": "issue:44877", "right": "issue:45375", "accept": false, - "reason": "Both involve strict config handling, but they target different model configs and missing fields." + "reason": "Both involve config strictness, but they are different model-specific config problems and not the same bug." }, { "left": "issue:43066", "right": "issue:44779", "accept": false, - "reason": "Tokenizer decoder-type warning vs DeepSeek tokenizer regression; related theme but not the same concrete bug." + "reason": "Different concrete failures: wrong tokenizer decoder-type warning vs incorrect DeepSeek tokenization." 
}, { "left": "issue:42886", "right": "issue:45229", "accept": false, - "reason": "Offline cache loading failure vs multi-GPU OOM; clearly distinct." - }, - { - "left": "issue:44779", - "right": "issue:45292", - "accept": false, - "reason": "Tokenizer output regression vs resize_token_embeddings not updating output embeddings." - }, - { - "left": "issue:44987", - "right": "issue:45459", - "accept": false, - "reason": "Model loading failure in v5.1.0 vs protobuf exception masking tokenizer errors; different root causes." + "reason": "Unrelated: offline tokenizer cache loading vs Gemma4 multi-GPU OOM." }, { "left": "issue:41628", "right": "issue:41950", "accept": false, - "reason": "Top-level AutoImageProcessor import error vs video pipeline choosing image processors; same ecosystem area, different bug." + "reason": "Different import/pipeline problems: missing AutoImageProcessor export vs video pipeline image-processor lookup." }, { "left": "issue:45292", "right": "issue:45584", "accept": false, - "reason": "Embedding resize propagation bug vs Whisper empty-transcription failure after align_special_tokens." + "reason": "Different areas: embedding resize propagation vs Whisper empty-transcription generation failure." + }, + { + "left": "issue:44987", + "right": "issue:45459", + "accept": false, + "reason": "Different bugs: model loading failure for a repo vs tokenizer error masking when protobuf is absent." }, { "left": "issue:43065", "right": "issue:43408", "accept": false, - "reason": "SAM3 pixel decoder implementation detail vs sam3_video/sam3_tracker config mismatch warning." + "reason": "Different SAM3 issues: dummy Conv2d in pixel decoder vs model-type warning for sam3_tracker." }, { "left": "issue:43901", "right": "issue:45003", "accept": false, - "reason": "Docs mismatch for return_all_scores vs unsafe sys.modules access in modeling_utils." + "reason": "Docs mismatch vs a modeling_utils sys.modules access bug; not the same underlying issue." 
+ }, + { + "left": "issue:45292", + "right": "issue:45701", + "accept": false, + "reason": "Related to tokenization/embeddings in a broad sense, but one is resizing embeddings and the other is a version-dependent tokenizer regression." }, { "left": "issue:43232", "right": "issue:44336", "accept": false, - "reason": "Generation kwarg handling after sync_gpus vs ANSI code emission in loading_report." + "reason": "Generation kwargs handling vs ANSI codes in loading_report are unrelated." }, { "left": "issue:44995", "right": "issue:45200", "accept": false, - "reason": "GlmMoeDsa stale cache crash vs mm_token_type_ids defaulting issue." - }, - { - "left": "issue:45292", - "right": "issue:45701", - "accept": false, - "reason": "resize_token_embeddings propagation bug vs general tokenization change across versions." + "reason": "Different model failures: stale MoE cache on second forward vs missing mm_token_type_ids default." }, { "left": "issue:43257", "right": "issue:45137", "accept": false, - "reason": "Qwen3 MOE conversion under accelerate+deepspeed vs ZeRO3 deque pop crash; both deepspeed-related but different bugs." + "reason": "Both mention DeepSpeed, but one is Qwen3 MOE weight conversion and the other is a generic deque IndexError." }, { "left": "issue:44496", "right": "issue:45003", "accept": false, - "reason": "Unrecognized model/config loading vs sys.modules access bug." + "reason": "Unrecognized model/config issue vs sys.modules access bug; unrelated code paths." }, { "left": "issue:44060", "right": "issue:44077", "accept": false, - "reason": "Incorrect tied-weights warning vs patchtsmixer post_init policy; unrelated." + "reason": "Different model/config issues: tied-weights warning in Qwen3-Next vs patchtsmixer post_init policy." }, { "left": "issue:43475", "right": "issue:43540", "accept": false, - "reason": "Missing fpn_position_embeddings in SAM 3 Video vs Qwen3OmniMoe video processing ValueError." 
- }, - { - "left": "issue:43653", - "right": "issue:44162", - "accept": false, - "reason": "BigBirdTokenizer special-token registration bug vs broad ESM2 breakage." + "reason": "Different SAM3 vs Qwen3OmniMoe video-processing failures." }, { "left": "issue:43262", "right": "issue:44246", "accept": false, - "reason": "Audio chat-template sample-rate default vs import slowness; unrelated." + "reason": "Audio chat-template sampling-rate bug vs import latency; unrelated." }, { - "left": "issue:44995", - "right": "issue:45588", + "left": "issue:43653", + "right": "issue:44162", "accept": false, - "reason": "GlmMoeDsa cache bug vs flash_attention s_aux=None crash." + "reason": "Different tokenizer/model problems: BigBirdTokenizer special-token registration vs ESM2 breakage." }, { "left": "issue:44368", "right": "issue:45563", "accept": false, - "reason": "Tied-embeddings warning vs stale num_return_sequences warning; both warnings, different code paths." + "reason": "Different warnings in unrelated areas: tied-weights config warning vs stale paged-generate warning." }, { - "left": "issue:44466", - "right": "issue:45335", + "left": "issue:44995", + "right": "issue:45588", "accept": false, - "reason": "Serialization/device-dependent lm_head.weight issue vs resize_token_embeddings not updating decoder.embed_tokens." + "reason": "Different failures: stale indexer cache vs flash-attention s_aux=None crash." }, { - "left": "issue:44368", - "right": "issue:44977", + "left": "issue:44466", + "right": "issue:45335", "accept": false, - "reason": "Tied-embeddings warning during fine-tuning vs Qwen3.5 flash-attention generation failure." + "reason": "Both touch embedding serialization/resizing, but they are not the same concrete bug." }, { "left": "issue:45397", "right": "issue:45656", "accept": false, - "reason": "ZeRO3 from_pretrained failure vs optimizer.step being called twice under deepspeed." 
+ "reason": "Different DeepSpeed issues: gemma-4 zero3 loading vs duplicate optimizer stepping." }, { - "left": "issue:43673", - "right": "issue:44734", + "left": "issue:44368", + "right": "issue:44977", "accept": false, - "reason": "Chunked-prefill cache missing vs serve KV-cache continuation indexing crash; both cache-related but not the same defect." + "reason": "Different bugs: config warning text vs Qwen3.5 flash-attention generation failure." } ] }, @@ -10452,7 +10414,6 @@ "issue:28282", "issue:30990", "issue:41628", - "issue:42222", "issue:42371", "issue:42673", "issue:42831", @@ -10472,10 +10433,12 @@ "issue:44188", "issue:44222", "issue:44297", + "issue:44351", "issue:44361", "issue:44368", "issue:44623", "issue:44661", + "issue:44734", "issue:44749", "issue:44869", "issue:44933", @@ -10484,7 +10447,6 @@ "issue:45092", "issue:45103", "issue:45137", - "issue:45200", "issue:45230", "issue:45278", "issue:45310", @@ -10499,6 +10461,7 @@ "issue:45656" ], "soft_pairs": [ + "issue:43673|issue:44734", "issue:45137|issue:45600", "issue:44933|issue:45459", "issue:43931|issue:44368", @@ -10507,10 +10470,10 @@ "issue:43673|issue:45278", "issue:44297|issue:44623", "issue:43646|issue:44162", - "issue:44361|issue:44749", "issue:44623|issue:45656", - "issue:28282|issue:30990", + "issue:44361|issue:44749", "issue:44749|issue:45310", + "issue:28282|issue:30990", "issue:42994|issue:43482", "issue:42371|issue:45092", "issue:44038|issue:44623", @@ -10527,182 +10490,188 @@ "issue:43475|issue:44661", "issue:45362|issue:45375", "issue:42831|issue:44222", - "issue:43232|issue:44188", "issue:43873|issue:44945", - "issue:42222|issue:45200" + "issue:43232|issue:44188", + "issue:41628|issue:44351" ], - "prepared_review_unit_hash": "e69c041c37ed9d921a3ff0f79f4d5e47a92d8aa49ad81ce89b87b505a596acd0", + "prepared_review_unit_hash": "49c5a7dd951e57194ef922f186e03ffb13169eececfdbd001239f370bf465cde", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - 
"serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 31, - "serialized_chars": 36719, - "estimated_input_tokens": 9180, - "estimated_eval_tokens": 18616 + "soft_pair_count": 32, + "serialized_chars": 37022, + "estimated_input_tokens": 9256, + "estimated_eval_tokens": 18768 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly unrelated Transformers issue cluster with a few clear duplicate pockets. Two exact-title duplicate pairs stand out: issue:42673 / issue:43704 (Qwen3ForCausalLM VRAM leak) and issue:45310 / issue:45478 ([BUG] transformers>=5.4.0, Qwen3.5 Moe from_pretrained error). The remaining pairs generally share a model family or subsystem but describe different failures and should stay separate.", - "confidence": 0.64, - "canonical_issue_reason": "issue:42673 is the best canonical issue because it is a concrete, reproducible bug report and has an exact-title duplicate in issue:43704. The packet also contains another exact-title duplicate pair, issue:45310 and issue:45478, but that is a separate duplicate bug rather than evidence that the whole cluster is one issue.", + "summary": "This cluster is mostly heterogeneous; there are only a couple of clear duplicate pairs (the two identical Qwen3ForCausalLM VRAM-leak reports, and the two identical Qwen3.5 Moe from_pretrained reports). 
Most other soft edges are related only at a broad subsystem level and should stay separate.", + "confidence": 0.93, + "canonical_issue_reason": "issue:45278 is the broadest umbrella-style report in the set (\u201cmany import errors after update from 4.57.0 to 5.5.0\u201d), so it is the closest thing to a cluster anchor even though the cluster is not a single duplicate topic.", "canonical_pr_reason": null, - "best_issue_reason": "issue:42673 is the strongest representative issue in this cluster: specific, actionable, and clearly duplicated by issue:43704. It is a better anchor than the broader upgrade/regression reports elsewhere in the packet, including the separate exact-title duplicate pair issue:45310 / issue:45478.", + "best_issue_reason": "issue:45278 is the most representative standalone issue for the overall cluster because it captures a wide v5 compatibility regression pattern; the other items are mostly narrower, model-specific, or unrelated failures.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:43673", + "right": "issue:44734", + "accept": false, + "reason": "Both are generation/cache-related, but they describe different failing code paths and symptoms." + }, { "left": "issue:45137", "right": "issue:45600", "accept": false, - "reason": "Different bugs: ZeRO3 deque popping vs a removed auto-mapping config reference." + "reason": "DeepSpeed ZeRO3 deque underflow is unrelated to the Sam3LiteText auto-mapping CI break." }, { "left": "issue:44933", "right": "issue:45459", "accept": false, - "reason": "Both involve imports, but one is a nonexistent image_utils import and the other is tokenizer error masking when protobuf is absent." + "reason": "Missing import from image_utils and tokenizer protobuf error are different bugs." }, { "left": "issue:43931", "right": "issue:44368", "accept": false, - "reason": "Different failure modes: Qwen3-VL weight-shape mismatch vs a tie_word_embeddings warning." 
+ "reason": "Weight-shape mismatch for Qwen3-VL is not the same as a tie_word_embeddings warning." }, { "left": "issue:44869", "right": "issue:45356", "accept": false, - "reason": "Both are tokenizer-related, but one is a Whisper timestamp decode crash and the other is a Kimi-K2.5 tokenizer codec regression." + "reason": "Whisper timestamp decode crash and Kimi tokenizer codec regression are distinct issues." }, { "left": "issue:43232", "right": "issue:43673", "accept": false, - "reason": "Both are generation regressions, but the concrete failures differ: sync_gpus kwargs handling vs missing GenerationMixin cache in chunked prefill." + "reason": "Both touch generation internals, but one is sync_gpus kwarg handling and the other is chunked_prefill cache state." }, { "left": "issue:43673", "right": "issue:45278", "accept": false, - "reason": "One is a chunked_prefill cache bug; the other is a broad import-error regression after upgrade." + "reason": "Cache/generation bug is not the same underlying problem as broad import failures after upgrade." }, { "left": "issue:44297", "right": "issue:44623", "accept": false, - "reason": "Different save_pretrained failures: tokenizer_class mismatch vs missing processor files." + "reason": "Tokenizer.save_pretrained metadata mismatch and processor.save_pretrained missing files are different save paths." }, { "left": "issue:43646", "right": "issue:44162", "accept": false, - "reason": "Both are model breakages, but the specific failure modes and affected models differ." - }, - { - "left": "issue:44361", - "right": "issue:44749", - "accept": false, - "reason": "MLukeTokenizer task AttributeError is unrelated to the Chinese performance slowdown regression." + "reason": "Missing fields from trust_remote_code AutoConfig is unrelated to the ESM2 breakage." }, { "left": "issue:44623", "right": "issue:45656", "accept": false, - "reason": "Processor save_pretrained missing files is unrelated to deepspeed optimizer stepping twice." 
+ "reason": "Save-pretrained file emission and double optimizer step under deepspeed are unrelated." }, { - "left": "issue:28282", - "right": "issue:30990", + "left": "issue:44361", + "right": "issue:44749", "accept": false, - "reason": "PyTorch-missing ImportError and Sentence Transformers hanging on load are different issues." + "reason": "MLukeTokenizer task AttributeError and upgrade-related slowdown are different failures." }, { "left": "issue:44749", "right": "issue:45310", "accept": false, - "reason": "Performance slowdown after upgrade is not the same as a Qwen3.5 MoE from_pretrained error." + "reason": "Performance regression is not the same code-path problem as Qwen3.5 Moe from_pretrained failure." + }, + { + "left": "issue:28282", + "right": "issue:30990", + "accept": false, + "reason": "Missing PyTorch import error and Sentence Transformers hanging on load are different problems." }, { "left": "issue:42994", "right": "issue:43482", "accept": false, - "reason": "Quantized save failure and GGUF loading failure are different code paths and different bugs." + "reason": "Quantized save failure and GGUF loading failure are separate bugs." }, { "left": "issue:42371", "right": "issue:45092", "accept": false, - "reason": "TF32 API guidance is unrelated to remote-code meta-initialization incompatibility." + "reason": "TF32 API settings guidance is unrelated to InternVL2 meta-initialization incompatibility." }, { "left": "issue:44038", "right": "issue:44623", "accept": false, - "reason": "Qwen3-VL-Moe v5 bug and processor save_pretrained missing files are different problems." + "reason": "Qwen3-VL-Moe/Transformers v5 breakage is not the same as processor.save_pretrained missing files." }, { "left": "issue:44749", "right": "issue:45656", "accept": false, - "reason": "Performance slowdown and deepspeed double-step are unrelated." + "reason": "Data-path slowdown and deepspeed optimizer double-step are unrelated." 
}, { "left": "issue:45230", "right": "issue:45397", "accept": false, - "reason": "Generic bug report vs a specific Gemma-4 Zero3 from_pretrained issue; no clear duplicate relation." + "reason": "A generic bug report is not the same issue as gemma-4 zero3 from_pretrained failure." }, { "left": "issue:43475", "right": "issue:43643", "accept": false, - "reason": "SAM3 video output attribute missing and AutoConfig missing fields are different bugs." + "reason": "SAM 3 video attribute error and trust_remote_code missing fields are different model/config bugs." }, { "left": "issue:45375", "right": "issue:45478", "accept": false, - "reason": "Same model family, but one is a missing config field and the other is a broader from_pretrained error; not enough to merge." + "reason": "Missing deepstack_visual_indexes in a vision config is not the same as the Qwen3.5 Moe from_pretrained error." }, { "left": "issue:42673", "right": "issue:43704", "accept": true, - "reason": "Exact same-title Qwen3ForCausalLM VRAM leak report; clearly the same underlying bug." + "reason": "Exact same title and same Qwen3ForCausalLM VRAM leak in multiple dataloader threads." }, { "left": "issue:43475", "right": "issue:45381", "accept": false, - "reason": "Different vision/video model regressions with different symptoms and code paths." + "reason": "SAM 3 vision encoder attribute error and qwen2.5-vl video position-id bug are different vision-model failures." }, { "left": "issue:41628", "right": "issue:44933", "accept": false, - "reason": "Both are import-related, but they concern different missing symbols and different subsystems." + "reason": "Both are import errors, but they involve different missing symbols and likely different fixes." }, { "left": "issue:45310", "right": "issue:45375", "accept": false, - "reason": "These are related to Qwen3.5 MoE, but one is a general from_pretrained error and the other is a specific missing vision config field; not enough to merge." 
+ "reason": "One is a Qwen3.5 Moe from_pretrained failure; the other is a missing vision-config field being dropped." }, { "left": "issue:45081", "right": "issue:45103", "accept": false, - "reason": "Tokenizer backend crash and auto-docstring annotation handling are unrelated." + "reason": "Mistral regex patch crash and docstring annotation crash are unrelated." }, { "left": "issue:42994", @@ -10714,46 +10683,46 @@ "left": "issue:43475", "right": "issue:44661", "accept": false, - "reason": "SAM3 output attribute bug is unrelated to add-new-model-like failing in tokenizer mapping logic." + "reason": "SAM 3 encoder output attribute error is unrelated to add-new-model-like failing in TOKENIZER_MAPPING_NAMES." }, { "left": "issue:45362", "right": "issue:45375", "accept": false, - "reason": "Different Qwen3.5 issues: chat crash vs missing deepstack_visual_indexes config field." + "reason": "Qwen3.5 chat crash and missing vision-config field are different problems." }, { "left": "issue:42831", "right": "issue:44222", "accept": false, - "reason": "FP8 accuracy degradation and FP8 save_pretrained are different bugs." + "reason": "FP8 accuracy regression and FP8 save_pretrained bug are not the same code-path issue." }, { - "left": "issue:43232", - "right": "issue:44188", + "left": "issue:43873", + "right": "issue:44945", "accept": false, - "reason": "Generation kwargs update bug and diverging attention kernels under torch.compile are different failure modes." + "reason": "Quantization offloading behavior and incorrect pipeline-parallel output are different failures." }, { - "left": "issue:43873", - "right": "issue:44945", + "left": "issue:43232", + "right": "issue:44188", "accept": false, - "reason": "Quantization offloading behavior and pipeline-parallel wrong output are different issues." + "reason": "Generation kwargs sync_gpus handling is unrelated to torch.compile attention-kernel divergence." 
}, { - "left": "issue:42222", - "right": "issue:45200", + "left": "issue:41628", + "right": "issue:44351", "accept": false, - "reason": "VitPose model breakage and Gemma 4 token-type defaults are unrelated." + "reason": "Both are import errors, but they are separate missing exports rather than one underlying bug." } ] }, "evaluator_result": { - "accept": false, - "feedback": "The summary overstates the evidence and misses another clear duplicate pair in the packet: issue:45310 and issue:45478 have the exact same title (`[BUG] transformers>=5.4.0, Qwen3.5 Moe from_pretrained error`), so the claim that only one clear duplicate pair appears is not grounded. The canonical rationale should also acknowledge this second exact-title duplicate before concluding the cluster is mostly unrelated." + "accept": true, + "feedback": "The summary is grounded in the packet: it correctly treats the cluster as heterogeneous, picks issue:45278 as a reasonable umbrella anchor, and the only accepted soft edge is an exact duplicate pair. The rejected soft edges are conservative and the reasons generally match the issue titles and descriptions. The only minor caution is that the confidence feels a bit high for a heterogeneous cluster, but not enough to reject." 
}, "evaluator_used": true, - "retried": true, + "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null @@ -10778,6 +10747,7 @@ "issue:36010", "issue:41628", "issue:42175", + "issue:42222", "issue:42898", "issue:42994", "issue:43232", @@ -10799,7 +10769,6 @@ "issue:44188", "issue:44222", "issue:44242", - "issue:44351", "issue:44373", "issue:44496", "issue:44568", @@ -10814,259 +10783,252 @@ "issue:45103", "issue:45125", "issue:45137", + "issue:45200", "issue:45310", "issue:45356", - "issue:45405", "issue:45561", "issue:45712" ], "soft_pairs": [ - "issue:41628|issue:44351", + "issue:42222|issue:45200", "issue:44222|issue:45310", - "issue:42994|issue:44164", "issue:44038|issue:45310", + "issue:42994|issue:44164", "issue:42175|issue:45712", "issue:43526|issue:45561", - "issue:43673|issue:44188", - "issue:44496|issue:45561", "issue:43377|issue:44242", + "issue:43673|issue:44188", "issue:36010|issue:41628", - "issue:43504|issue:45561", + "issue:44496|issue:45561", "issue:43232|issue:44792", - "issue:42898|issue:44568", + "issue:43504|issue:45561", "issue:30990|issue:35141", - "issue:43479|issue:44869", - "issue:43479|issue:44079", + "issue:42898|issue:44568", + "issue:44964|issue:45137", "issue:43976|issue:44933", + "issue:43479|issue:44079", + "issue:43479|issue:44869", "issue:42175|issue:44933", - "issue:44964|issue:45137", - "issue:44188|issue:44373", "issue:44623|issue:45310", + "issue:44188|issue:44373", "issue:43784|issue:43824", "issue:43278|issue:45137", "issue:43723|issue:44568", - "issue:44734|issue:45103", "issue:44060|issue:45125", - "issue:42898|issue:45356", "issue:43874|issue:45092", - "issue:45405|issue:45712", + "issue:44734|issue:45103", + "issue:42898|issue:45356", "issue:42898|issue:44779", "issue:30333|issue:33290" ], - "prepared_review_unit_hash": "72f70f489886538b380418ef508ddbf7dbc255a0881a787a40aa15c261e20836", + "prepared_review_unit_hash": 
"8c62155ed164d9133c330c2b4b7266b303c45239d7f5709fb81e717c2cbc0771", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 31, - "serialized_chars": 36979, - "estimated_input_tokens": 9245, - "estimated_eval_tokens": 18746 + "soft_pair_count": 30, + "serialized_chars": 36710, + "estimated_input_tokens": 9178, + "estimated_eval_tokens": 18612 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "These items are a heterogeneous set of unrelated Transformers bugs and regressions. The soft pairs mostly share broad themes (imports, saving/loading, generation, multimodal, tokenization), but not the same concrete defect or fix path, so they should not be deduplicated.", - "confidence": 0.96, - "canonical_issue_reason": null, + "summary": "The cluster is highly heterogeneous: most items are distinct Transformers bug reports with only superficial overlap around imports, tokenizers, or save/load flows. I do not see any true duplicate pair among the soft candidates.", + "confidence": 0.93, + "canonical_issue_reason": "issue:45310 is the best anchor if one must pick a representative issue: it is a concrete, version-specific regression report about Qwen3.5-MoE from_pretrained behavior, and it is the closest match to the strongest soft similarities in the set. 
That said, the cluster is not a real duplicate group.", "canonical_pr_reason": null, - "best_issue_reason": "No single canonical issue fits this cluster: the reports span distinct subsystems and failure modes (model loading, tokenizer behavior, generation, multimodal processors, training/runtime, and packaging/import errors).", + "best_issue_reason": "issue:43824 is the strongest standalone issue in this set: it has high discussion/inbound reference activity and a clear, actionable import regression symptom, making it a good representative issue even though it is not a duplicate of the others.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:41628", - "right": "issue:44351", + "left": "issue:42222", + "right": "issue:45200", "accept": false, - "reason": "Both are import errors, but for different missing symbols (`AutoImageProcessor` vs `HybridCache`) and likely different code paths." + "reason": "Different models and failures: VitPose breakage vs Gemma 4 multimodal token defaults." }, { "left": "issue:44222", "right": "issue:45310", "accept": false, - "reason": "Both mention MoE/FP8/PyTorch saving/loading, but one is FP8 `save_pretrained` and the other is a `from_pretrained` failure on a different model family/version." + "reason": "Same broad MoE area, but different code paths and symptoms: FP8 save_pretrained vs from_pretrained load error." }, { - "left": "issue:42994", - "right": "issue:44164", + "left": "issue:44038", + "right": "issue:45310", "accept": false, - "reason": "Both involve serialization, but one is about quantized model saving and the other about handling `extra_state` in save/from_pretrained; not the same bug." + "reason": "Both mention Qwen MoE models, but they target different model generations and different loading/runtime issues." 
}, { - "left": "issue:44038", - "right": "issue:45310", + "left": "issue:42994", + "right": "issue:44164", "accept": false, - "reason": "Different model families and symptoms: Qwen3-VL-Moe compatibility vs Qwen3.5 MoE `from_pretrained` error." + "reason": "Both involve save/load, but one is quantized model saving and the other is extra_state handling; not the same bug." }, { "left": "issue:42175", "right": "issue:45712", "accept": false, - "reason": "Package/backend install behavior vs leftover dummy classes leaking into `dir(transformers)`/repo checks; unrelated." + "reason": "Unrelated packaging/testing issues: missing TensorFlow backend vs dummy PT objects leaking into repo checks." }, { "left": "issue:43526", "right": "issue:45561", "accept": false, - "reason": "Different components and failures: `reduce_labels` in `BeitImageProcessorFast` vs a pytest-xdist race on a captured-info file." + "reason": "Completely different bugs: BeitImageProcessorFast label reduction vs xdist file race in testing utils." + }, + { + "left": "issue:43377", + "right": "issue:44242", + "accept": false, + "reason": "Different model behaviors: missing padding-mask support in MIMI encoder vs missing load balancing loss when router logits are off." }, { "left": "issue:43673", "right": "issue:44188", "accept": false, - "reason": "Both touch generation internals, but one is missing `GenerationMixin` cache in v5/chunked prefill and the other is divergent attention kernels under `torch.compile`; not the same defect." + "reason": "Both are generation-related, but one is cache availability in chunked prefill and the other is attention-kernel divergence under torch.compile." }, { - "left": "issue:44496", - "right": "issue:45561", + "left": "issue:36010", + "right": "issue:41628", "accept": false, - "reason": "Unrelated: model loading/config recognition error vs test harness file-race issue." + "reason": "Both are import errors, but for different symbols and unrelated modules." 
}, { - "left": "issue:43377", - "right": "issue:44242", + "left": "issue:44496", + "right": "issue:45561", "accept": false, - "reason": "Different bugs: MIMI encoder batching/padding-mask mismatch vs MoE load-balancing loss not being added." + "reason": "Unrelated: model config recognition failure vs pytest-xdist captured-info race." }, { - "left": "issue:36010", - "right": "issue:41628", + "left": "issue:43232", + "right": "issue:44792", "accept": false, - "reason": "Both are import failures, but for different APIs (`GenerationMixin` vs `AutoImageProcessor`) and different packaging causes." + "reason": "Different issues: generation kwargs after sync_gpus vs a janus image-generation test failure." }, { "left": "issue:43504", "right": "issue:45561", "accept": false, - "reason": "Model preset loading with a legacy field vs pytest-xdist file contention; no shared code-path bug." + "reason": "Different subsystems: BEiT preset loading legacy field vs a parallel test-file race." }, { - "left": "issue:43232", - "right": "issue:44792", + "left": "issue:30990", + "right": "issue:35141", "accept": false, - "reason": "Generation cache/update issue vs a failed janus image-generation test; the latter is a test symptom, not the same underlying bug." + "reason": "Sentence-Transformers loading hang and embedding reinitialization are unrelated." }, { "left": "issue:42898", "right": "issue:44568", "accept": false, - "reason": "Both are tokenizer-related regressions, but one is `clean_up_tokenization_spaces` behavior and the other is BOS/EOS insertion for a specific tokenizer." + "reason": "Both are tokenizer regressions, but one is cleanup-space behavior and the other is special-token insertion; not the same bug." }, { - "left": "issue:30990", - "right": "issue:35141", + "left": "issue:44964", + "right": "issue:45137", "accept": false, - "reason": "SentenceTransformer loading hang vs token embedding resizing reinitialization; unrelated subsystems." 
+ "reason": "Different failures: Phi-4 multimodal load error vs DeepSpeed ZeRO3 deque underflow." }, { - "left": "issue:43479", - "right": "issue:44869", + "left": "issue:43976", + "right": "issue:44933", "accept": false, - "reason": "Different multimodal/config bug vs Whisper timestamp decode crash; no shared fix path." + "reason": "Python version compatibility issue vs missing import from image_utils; unrelated." }, { "left": "issue:43479", "right": "issue:44079", "accept": false, - "reason": "Config default initialization bug vs `ModelOutput` key assignment bug; different objects and code paths." + "reason": "Different None-handling bugs in different layers: multimodal config defaults vs ModelOutput key assignment." }, { - "left": "issue:43976", - "right": "issue:44933", + "left": "issue:43479", + "right": "issue:44869", "accept": false, - "reason": "Python version compatibility/package issue vs a missing/nonexistent image_utils import; unrelated." + "reason": "Different subsystems and symptoms: config initialization vs Whisper timestamp decode crash." }, { "left": "issue:42175", "right": "issue:44933", "accept": false, - "reason": "Packaging/backend install issue vs a bad import path in image_utils; different problems." + "reason": "Packaging/backend dependency issue vs nonexistent image_utils import; no shared underlying bug." }, { - "left": "issue:44964", - "right": "issue:45137", + "left": "issue:44623", + "right": "issue:45310", "accept": false, - "reason": "Model loading failure for Phi-4 multimodal vs DeepSpeed ZeRO3 deque pop error; unrelated." + "reason": "Processor save_pretrained missing files is a different path from Qwen3.5-MoE model loading." }, { "left": "issue:44188", "right": "issue:44373", "accept": false, - "reason": "Different areas: attention kernel divergence under compile vs a wrong docstring for `position_ids`." 
- }, - { - "left": "issue:44623", - "right": "issue:45310", - "accept": false, - "reason": "Processor `save_pretrained` missing files vs Qwen3.5 MoE `from_pretrained` error; save/load are different code paths and not the same bug." + "reason": "Attention-kernel runtime divergence and a wrong docstring are not the same issue." }, { "left": "issue:43784", "right": "issue:43824", "accept": false, - "reason": "Both are import errors, but for different missing names (`nn` during import vs `Qwen2_5_VLForConditionalGeneration`)." + "reason": "Both are import failures, but they involve different missing symbols and different packages/code paths." }, { "left": "issue:43278", "right": "issue:45137", "accept": false, - "reason": "Embedding dtype drift during evaluation vs DeepSpeed ZeRO3 deque error; completely different failures." + "reason": "Training/eval dtype drift is unrelated to the ZeRO3 deque error." }, { "left": "issue:43723", "right": "issue:44568", "accept": false, - "reason": "Tokenizer loading issue in v5 vs a tokenizer not adding BOS/EOS for a specific model; related area but not the same bug." - }, - { - "left": "issue:44734", - "right": "issue:45103", - "accept": false, - "reason": "KV cache continuation crash in serving vs auto-docstring crash with future annotations; different subsystems." + "reason": "Tokenizer loading in v5 and missing BOS/EOS addition are separate tokenizer regressions." }, { "left": "issue:44060", "right": "issue:45125", "accept": false, - "reason": "Both concern Qwen3.x MoE internals, but one is a tied-weights warning and the other is missing `_tp_plan` for tensor parallelism." + "reason": "Different Qwen model issues: tied-weights warning logic vs missing tensor-parallel plan." }, { - "left": "issue:42898", - "right": "issue:45356", + "left": "issue:43874", + "right": "issue:45092", "accept": false, - "reason": "Different tokenizer regressions: cleanup space behavior vs Kimi-K2.5 codec handling/regression warning." 
+ "reason": "Both are multimodal/model-init issues, but they affect different models and different failure mechanisms." }, { - "left": "issue:43874", - "right": "issue:45092", + "left": "issue:44734", + "right": "issue:45103", "accept": false, - "reason": "Missing image-patch count method on a processor vs old InternVL2 remote-code checkpoints failing meta init; not the same defect." + "reason": "Serving KV-cache indexing and auto-docstring annotation handling are unrelated." }, { - "left": "issue:45405", - "right": "issue:45712", + "left": "issue:42898", + "right": "issue:45356", "accept": false, - "reason": "Dependency version pinning problem vs leftover dummy classes leaking into the namespace; unrelated." + "reason": "Both are tokenizer-related, but one concerns cleanup spaces and the other codec handling in Kimi-K2.5." }, { "left": "issue:42898", "right": "issue:44779", "accept": false, - "reason": "Both are tokenizer regressions, but for different models and failure modes (`clean_up_tokenization_spaces` vs DeepSeek codec behavior)." + "reason": "Tokenizer regressions, but different models and different underlying tokenization failures." }, { "left": "issue:30333", "right": "issue:33290", "accept": false, - "reason": "Training-job/MLflow reporting vs OOM with Adafactor in DeepSpeed; unrelated runtime issues." + "reason": "MLflow reporting on failed jobs is unrelated to DeepSpeed Adafactor OOM." 
} ] }, @@ -11104,13 +11066,11 @@ "issue:42886", "issue:42898", "issue:42994", - "issue:43097", "issue:43295", "issue:43479", "issue:43644", "issue:43723", "issue:43824", - "issue:44038", "issue:44206", "issue:44222", "issue:44261", @@ -11128,39 +11088,42 @@ "issue:44623", "issue:44871", "issue:44913", + "issue:44933", "issue:44977", "issue:44991", "issue:45092", "issue:45103", "issue:45310", "issue:45357", + "issue:45405", "issue:45406", "issue:45459", "issue:45656", "issue:45712" ], "soft_pairs": [ + "issue:45405|issue:45712", "issue:42898|issue:44448", "issue:44295|issue:45656", - "issue:43097|issue:45712", + "issue:44933|issue:45712", "issue:41628|issue:45459", - "issue:44222|issue:44297", "issue:42898|issue:43295", + "issue:44222|issue:44297", "issue:44297|issue:45656", "issue:43479|issue:43644", "issue:42831|issue:44623", "issue:44222|issue:44623", "issue:44206|issue:45103", - "issue:41628|issue:42757", "issue:22355|issue:28282", + "issue:41628|issue:42757", "issue:44297|issue:45310", "issue:30333|issue:36331", - "issue:44222|issue:44484", "issue:44263|issue:45092", "issue:44263|issue:44360", + "issue:44222|issue:44484", "issue:43723|issue:44977", - "issue:44483|issue:45406", "issue:30333|issue:34689", + "issue:44483|issue:45406", "issue:28282|issue:30333", "issue:30333|issue:35141", "issue:44295|issue:44623", @@ -11168,282 +11131,274 @@ "issue:44261|issue:45092", "issue:44514|issue:45103", "issue:43723|issue:44871", - "issue:43723|issue:44991", "issue:45092|issue:45357", + "issue:43723|issue:44991", "issue:43723|issue:44442", "issue:43479|issue:44913", "issue:30990|issue:38175", "issue:42886|issue:42994", - "issue:42831|issue:45310", "issue:41628|issue:43824", - "issue:42831|issue:44038", - "issue:44263|issue:44373" + "issue:42831|issue:45310" ], - "prepared_review_unit_hash": "54653d9cf9ee56925925083c11e408bee15c44d5dec2ca01cdd518f546ce32c3", + "prepared_review_unit_hash": "29c6c181e4bdaf9f6883a672401afc914351a3ae4fe0f7a4fd0ac74408ec8035", 
"component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 38, - "serialized_chars": 38892, - "estimated_input_tokens": 9723, - "estimated_eval_tokens": 19702 + "soft_pair_count": 37, + "serialized_chars": 38598, + "estimated_input_tokens": 9650, + "estimated_eval_tokens": 19556 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly heterogeneous issue cluster, with one clear tokenizer-loading subgroup centered on v5 regressions. I keep issue 43723 as the canonical umbrella because it most directly matches the repeated AutoTokenizer/tokenizer-loading failures across different models; the rest are mostly separate bugs in different code paths.", - "confidence": 0.69, - "canonical_issue_reason": "Issue 43723 is the broadest and most central report of the v5 tokenizer-loading regression, and it best covers the related model-specific loading failures in the cluster.", + "summary": "This cluster is mostly a grab-bag of unrelated Transformers issues: tokenizer/import regressions, model config bugs, save/load failures, and a few training/runtime problems. Only one soft edge looks like a likely duplicate pair within the tokenizer-loading regression family; the rest are too broad or hit different code paths.", + "confidence": 0.44, + "canonical_issue_reason": "Issue 43723 is the best representative of the only clearly related subset here: v5 tokenizer-loading regressions. 
It is broader than the model-specific reports, so it makes the most plausible anchor among the issues, though the overall cluster is not truly homogeneous.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 43723 is the best global representative because it cleanly captures the shared v5 tokenizer-loading failure pattern without overreaching into unrelated regressions.", + "best_issue_reason": "43723 is the most central issue for the tokenizer-loading problem space and can absorb a specific example like 44991 if a single duplicate anchor is needed.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:45405", + "right": "issue:45712", + "accept": false, + "reason": "Unrelated topics: PEFT version pinning vs leftover dummy classes and repo checks." + }, { "left": "issue:42898", "right": "issue:44448", "accept": false, - "reason": "They may belong to the same v5 tokenizer/output regression family, but the evidence here is too thin to treat them as the same concrete bug; one is about `clean_up_tokenization_spaces`, the other about Pegasus output differences." + "reason": "Both are version-related, but one is tokenization-space behavior and the other is a model output divergence; not the same concrete bug." }, { "left": "issue:44295", "right": "issue:45656", "accept": false, - "reason": "Different code paths: position_ids buffer handling versus deepspeed optimizer-step behavior." + "reason": "Different subsystems: position_ids buffer reading vs optimizer stepping twice under deepspeed." }, { - "left": "issue:43097", + "left": "issue:44933", "right": "issue:45712", "accept": false, - "reason": "Different issues: removed config field versus leftover dummy classes and repo-check failures." + "reason": "Both involve exposed names, but one is an image_utils import issue and the other is dummy class leakage; different fixes." 
}, { "left": "issue:41628", "right": "issue:45459", "accept": false, - "reason": "Both involve import/tokenizer-related behavior, but they point to different missing symbols and different failure mechanisms." + "reason": "Different import failures in different areas: AutoImageProcessor export vs tokenizer error handling with protobuf." }, { - "left": "issue:44222", - "right": "issue:44297", + "left": "issue:42898", + "right": "issue:43295", "accept": false, - "reason": "Different save_pretrained problems: FP8/MoE saving versus tokenizer_class metadata mismatch." + "reason": "Related to v5 behavior changes, but the concrete failures differ: clean_up_tokenization_spaces vs processor.tokenizer/image passing regression." }, { - "left": "issue:42898", - "right": "issue:43295", + "left": "issue:44222", + "right": "issue:44297", "accept": false, - "reason": "Related to v5 behavior changes, but the concrete failures are different APIs and different regressions." + "reason": "Both mention save_pretrained, but one is FP8 MoE model saving and the other is tokenizer metadata mismatch." }, { "left": "issue:44297", "right": "issue:45656", "accept": false, - "reason": "Tokenizer metadata saving and deepspeed optimizer double-stepping are unrelated." + "reason": "Tokenizer save metadata vs optimizer stepping; no shared code-path." }, { "left": "issue:43479", "right": "issue:43644", "accept": false, - "reason": "Both touch model internals, but one is default multimodal config initialization and the other is junk non-persistent buffer filling." + "reason": "Config initialization bug vs non-persistent buffer initialization bug; different objects and failure modes." }, { "left": "issue:42831", "right": "issue:44623", "accept": false, - "reason": "FP8 accuracy issues and processor.save_pretrained missing files are distinct bugs." + "reason": "Different saving problems: FineGrainedFP8 accuracy/saving vs processor.save_pretrained missing files." 
}, { "left": "issue:44222", "right": "issue:44623", "accept": false, - "reason": "Both mention saving, but they affect different objects and failure modes." + "reason": "Both are save-related, but one is quantized model saving and the other is processor artifact completeness." }, { "left": "issue:44206", "right": "issue:45103", "accept": false, - "reason": "Feature extractor center-arg crash and auto_docstring annotation crash are unrelated." + "reason": "Unrelated regressions: feature extractor argument handling vs auto-docstring AttributeError under future annotations." }, { - "left": "issue:41628", - "right": "issue:42757", + "left": "issue:22355", + "right": "issue:28282", "accept": false, - "reason": "Different missing imports from different dependencies and code paths." + "reason": "Both are import errors, but one is transformers.onnx and the other is missing PyTorch for AutoModel; different dependency/path issues." }, { - "left": "issue:22355", - "right": "issue:28282", + "left": "issue:41628", + "right": "issue:42757", "accept": false, - "reason": "Both are import errors, but they concern different optional dependencies and modules." + "reason": "Both are top-level import failures, but for different missing symbols from different packages." }, { "left": "issue:44297", "right": "issue:45310", "accept": false, - "reason": "Tokenizer_class metadata mismatch is unrelated to the Qwen3.5 MoE from_pretrained regression." + "reason": "Tokenizer save metadata mismatch vs a specific Qwen3.5 MoE from_pretrained regression; not the same bug." }, { "left": "issue:30333", "right": "issue:36331", "accept": false, - "reason": "MLflow reporting versus Trainer compute_loss signature are unrelated." - }, - { - "left": "issue:44222", - "right": "issue:44484", - "accept": false, - "reason": "Both involve saving/sharding, but they are different save_pretrained concerns." 
+ "reason": "MLflow reporting on failed jobs vs Trainer compute_loss signature mismatch; unrelated training issues." }, { "left": "issue:44263", "right": "issue:45092", "accept": false, - "reason": "GlmMoeDsaIndexer split handling and InternVL2 meta-init incompatibility are different issues." + "reason": "Different GLM/InternVL problems: torch.split return handling vs remote-code checkpoint/meta-init incompatibility." }, { "left": "issue:44263", "right": "issue:44360", "accept": false, - "reason": "Same component area, but one is split return handling and the other is a missing ReLU; not the same bug." + "reason": "Both are DSA-indexer related, but one is split return handling and the other is missing ReLU; not mergeable as one fix." }, { - "left": "issue:43723", - "right": "issue:44977", + "left": "issue:44222", + "right": "issue:44484", "accept": false, - "reason": "Tokenizer-loading regression versus flash-attention generation failure; different symptoms and code paths." + "reason": "Quantized model save failure vs a question about default shard size; not the same bug." }, { - "left": "issue:44483", - "right": "issue:45406", + "left": "issue:43723", + "right": "issue:44977", "accept": false, - "reason": "Chat completions request handling is unrelated to Gemma4Processor `_tokenizer` attribute errors." + "reason": "Both are generation/loading regressions, but one is tokenizer loading and the other is flash-attention generation behavior for Qwen3.5." }, { "left": "issue:30333", "right": "issue:34689", "accept": false, - "reason": "MLflow failure reporting and Llama model loading regression are unrelated." + "reason": "Both are regressions, but MLflow reporting and Llama model loading are unrelated code paths." + }, + { + "left": "issue:44483", + "right": "issue:45406", + "accept": false, + "reason": "Chat-completions request handling vs Gemma4Processor missing _tokenizer; different API layers." 
}, { "left": "issue:28282", "right": "issue:30333", "accept": false, - "reason": "AutoModel torch import error and MLflow reporting bug are unrelated." + "reason": "Missing PyTorch import dependency vs MLflow reporting after failed training; unrelated." }, { "left": "issue:30333", "right": "issue:35141", "accept": false, - "reason": "Trainer reporting and embedding reinitialization are different bugs." + "reason": "Training-job reporting vs embedding resize/post_init reinitialization; no common bug." }, { "left": "issue:44295", "right": "issue:44623", "accept": false, - "reason": "Position_ids buffer read issue is unrelated to processor.save_pretrained missing files." + "reason": "Position_ids buffer access vs processor save_pretrained missing files; unrelated." }, { "left": "issue:44373", "right": "issue:44485", "accept": false, - "reason": "A wrong docstring and a RoPE implementation discussion are not the same underlying change." + "reason": "Docstring wording vs RoPE implementation; no shared underlying code failure." }, { "left": "issue:44261", "right": "issue:45092", "accept": false, - "reason": "RMS norm epsilon precision bug is unrelated to remote-code checkpoint meta-init incompatibility." + "reason": "Layernorm epsilon/config precision issue vs remote-code meta-init incompatibility; different problems." }, { "left": "issue:44514", "right": "issue:45103", "accept": false, - "reason": "Qwen2_5_VL batched chat-template crash and auto_docstring annotation crash are unrelated." + "reason": "Processor chat-template batching crash vs auto-docstring AttributeError; unrelated." }, { "left": "issue:43723", "right": "issue:44871", "accept": false, - "reason": "Tokenizer-loading regression and eos_token_id config inconsistency are different issues." - }, - { - "left": "issue:43723", - "right": "issue:44991", - "accept": true, - "reason": "Both report the same v5 AutoTokenizer loading regression, just for different tokenizers/models." 
+ "reason": "Tokenizer loading regression vs eos_token_id configuration inconsistency; separate model/tokenizer concerns." }, { "left": "issue:45092", "right": "issue:45357", "accept": false, - "reason": "Both involve multimodal models, but one is meta-init incompatibility and the other is incorrect save_pretrained visual keys." + "reason": "Both concern Qwen/InternVL-era regressions, but one is meta initialization compatibility and the other is save_pretrained visual-key serialization." }, { "left": "issue:43723", - "right": "issue:44442", + "right": "issue:44991", "accept": true, - "reason": "Same underlying v5 tokenizer-loading failure surfaced through AutoTokenizer on a different tokenizer class." + "reason": "Both describe the same tokenizer-loading regression in v5; 44991 is a concrete model-specific instance of the broader AutoTokenizer.from_pretrained failure in 43723, and one fix could plausibly address both." + }, + { + "left": "issue:43723", + "right": "issue:44442", + "accept": false, + "reason": "Both mention tokenizers, but one is AutoTokenizer loading and the other is a specific tokenizer class load failure for FastSpeech2ConformerTokenizer." }, { "left": "issue:43479", "right": "issue:44913", "accept": false, - "reason": "Default multimodal config initialization and rotary_pct reload behavior are unrelated." + "reason": "Config default initialization vs GPTNeoX rotary_pct persistence; distinct config bugs." }, { "left": "issue:30990", "right": "issue:38175", "accept": false, - "reason": "Sentence Transformers loading hang and SigLIP2 zero probabilities are different bugs." + "reason": "Stuck loading Sentence Transformers vs unexpected zero probabilities in SigLIP2; different model/runtime failures." }, { "left": "issue:42886", "right": "issue:42994", "accept": false, - "reason": "Offline tokenizer cache loading and quantized model saving failure are unrelated." 
- }, - { - "left": "issue:42831", - "right": "issue:45310", - "accept": false, - "reason": "FP8 accuracy issue and Qwen3.5 MoE from_pretrained regression are different failures." + "reason": "Offline cache tokenizer loading vs quantized model saving; unrelated." }, { "left": "issue:41628", "right": "issue:43824", "accept": false, - "reason": "Both are import errors, but they concern different missing symbols and model families." + "reason": "Different missing top-level exports: AutoImageProcessor vs Qwen2_5_VLForConditionalGeneration." }, { "left": "issue:42831", - "right": "issue:44038", - "accept": false, - "reason": "FP8 accuracy issue is unrelated to the Qwen3.5-VL-Moe bug report." - }, - { - "left": "issue:44263", - "right": "issue:44373", + "right": "issue:45310", "accept": false, - "reason": "DSA indexer runtime bugs and a position_ids docstring error are not the same change." + "reason": "FP8 accuracy/saving issue vs Qwen3.5 MoE from_pretrained regression; different failures." } ] }, "evaluator_result": { - "accept": false, - "feedback": "The overall summary is mostly grounded, but at least one soft-edge verdict is too strong for the evidence in the packet. In particular, `issue:42898` (`clean_up_tokenization_spaces` behavior changes in v5) and `issue:44448` (different output for Pegasus between v4 and v5) look plausibly like the same regression family from the titles alone, so rejecting them as clearly different bugs is not conservative enough. Because the evaluator should only accept when all soft-edge calls stay conservative, I have to reject." + "accept": true, + "feedback": "The analysis is grounded in the packet and stays conservative overall. The one accepted soft edge (43723/44991) is reasonably supported by the titles as the same tokenizer-loading regression family, and the remaining pairwise rejections are cautious and code-path specific. The summary also appropriately notes that the cluster is mostly heterogeneous rather than a single bug." 
}, "evaluator_used": true, - "retried": true, + "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null @@ -11511,260 +11466,268 @@ "issue:45600" ], "soft_pairs": [ + "issue:42831|issue:44038", + "issue:44263|issue:44373", "issue:42757|issue:45278", "issue:43232|issue:45593", "issue:43756|issue:44060", "issue:43576|issue:43784", - "issue:43122|issue:44521", "issue:42886|issue:43066", + "issue:43122|issue:44521", "issue:42371|issue:44263", "issue:44117|issue:44704", "issue:30990|issue:33357", - "issue:44704|issue:45290", "issue:42907|issue:44945", "issue:43976|issue:44945", "issue:44295|issue:44297", - "issue:43232|issue:44743", + "issue:44704|issue:45290", "issue:43122|issue:44514", + "issue:43232|issue:44743", "issue:39401|issue:44488", "issue:43408|issue:43475", - "issue:43232|issue:45245", "issue:42907|issue:43937", + "issue:43232|issue:45245", "issue:43784|issue:45600", - "issue:44360|issue:44485", "issue:41628|issue:45278", + "issue:44360|issue:44485", "issue:44261|issue:44373", "issue:39401|issue:44521", "issue:44625|issue:44829", "issue:43116|issue:45200", "issue:29127|issue:30990", - "issue:43122|issue:44779", "issue:44038|issue:44297", + "issue:43122|issue:44779", "issue:42371|issue:42831", "issue:42907|issue:44964", "issue:44038|issue:44295", "issue:36296|issue:43116" ], - "prepared_review_unit_hash": "2a7c1c9f98c93521a483fc2638997dc3acd8101200b807562ba029b1be17d6b7", + "prepared_review_unit_hash": "38b3928678c7f5f23a17307ff9f99ee7a160dd634c48dc13df80fdf9b2f30132", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 33, - "serialized_chars": 37177, - "estimated_input_tokens": 9295, - "estimated_eval_tokens": 18846 
+ "soft_pair_count": 35, + "serialized_chars": 37778, + "estimated_input_tokens": 9445, + "estimated_eval_tokens": 19146 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is a loose mix of unrelated Transformers bugs, mostly model-specific regressions and a few generic import/tokenizer issues. None of the soft-edge pairs look like the same underlying bug/change with enough confidence to deduplicate, though a small subset share broad v5 import-regression language.", - "confidence": 0.68, - "canonical_issue_reason": "issue:45278 is the broadest umbrella-style report in the set (many import errors after upgrading to 5.5.0) and is the best single issue to represent the generic import-regression theme, even though the cluster is not truly one duplicate group.", + "summary": "These items are mostly unrelated standalone bugs/feature issues. A few are thematically close around imports, tokenization, or generation, but none look like the same concrete underlying bug. No PRs are present.", + "confidence": 0.72, + "canonical_issue_reason": "issue:45278 is the broadest umbrella regression in the set: multiple import failures after upgrading from 4.57.0 to 5.5.0. 
It is the best representative of the import-regression subset, even though the overall cluster is not a single duplicate group.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45278 is the most suitable global anchor because it is broad, version-centric, and can plausibly collect related import breakage reports; it is still only a loose thematic hub, not a true duplicate canonical.", + "best_issue_reason": "issue:45278 is the best global issue candidate because it covers the widest user-facing failure mode among the issues shown and can reasonably subsume several import-related reports.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:42831", + "right": "issue:44038", + "accept": false, + "reason": "Both are model-specific bugs, but one is an FP8 accuracy issue and the other is a Qwen3-VL-Moe loading/runtime problem. Different root causes." + }, + { + "left": "issue:44263", + "right": "issue:44373", + "accept": false, + "reason": "One is about a torch.split return-value bug in a DSA indexer; the other is a docstring problem for position_ids. Not the same issue." + }, { "left": "issue:42757", "right": "issue:45278", "accept": false, - "reason": "Both are import-related, but one is a specific huggingface_hub symbol failure while the other is a broad v5 upgrade report; not the same underlying bug." + "reason": "Both mention imports, but 42757 is a specific huggingface_hub symbol import failure while 45278 is a broader Transformers upgrade regression. Different bugs." }, { "left": "issue:43232", "right": "issue:45593", "accept": false, - "reason": "Generation sync/update-state bug vs. D-FINE auxiliary-loss behavior; unrelated code paths and symptoms." + "reason": "Generation state handling after sync_gpus is unrelated to D-FINE auxiliary-loss behavior when denoising is disabled." 
}, { "left": "issue:43756", "right": "issue:44060", "accept": false, - "reason": "Both involve model internals, but one is a RoPE-layer drop issue and the other is a tied-weights warning; different defects." + "reason": "Smollm3 RoPE-layer mismatch and Qwen3-Next tied-weights warning affect different models and code paths." }, { "left": "issue:43576", "right": "issue:43784", "accept": false, - "reason": "CLI/env command breakage vs. import-time NameError in sentence-transformers integration; not the same bug." + "reason": "A broken env CLI command is not the same as an import-time NameError in sentence-transformers." }, { - "left": "issue:43122", - "right": "issue:44521", + "left": "issue:42886", + "right": "issue:43066", "accept": false, - "reason": "Tokenizer behavior regression vs. multimodal chat-template assistant-mask bug; different API surfaces and failure modes." + "reason": "Both are tokenizer-related, but one is offline cache loading and the other is decoder-type metadata in v5. Different failure modes." }, { - "left": "issue:42886", - "right": "issue:43066", + "left": "issue:43122", + "right": "issue:44521", "accept": false, - "reason": "Offline cache loading failure vs. tokenizer decoder-type mismatch in v5; both tokenizer-related but distinct issues." + "reason": "Both involve tokenization/chat templates, but one is regression in tokenization output and the other is all-zero assistant masks for multimodal inputs." }, { "left": "issue:42371", "right": "issue:44263", "accept": false, - "reason": "TF32 settings guidance vs. a GlmMoeDsaIndexer torch.split issue; no shared code-path problem." + "reason": "TF32 configuration guidance is unrelated to a torch.split indexing bug." }, { "left": "issue:44117", "right": "issue:44704", "accept": false, - "reason": "Both touch loading helpers, but one is a tokenizer mapping None bug and the other is AutoProcessor kwarg forwarding; different defects." 
+ "reason": "One is a TOKENIZER_MAPPING_NAMES null-handling bug; the other is AutoProcessor not forwarding kwargs to cached_file. Related loading area, but different code-paths." }, { "left": "issue:30990", "right": "issue:33357", "accept": false, - "reason": "Sentence Transformers loading hang vs. MacOS bus error on a CLIP model; different symptoms and likely causes." - }, - { - "left": "issue:44704", - "right": "issue:45290", - "accept": false, - "reason": "AutoProcessor cached-file kwarg handling vs. apply_chat_template crash on tool-call assistant messages; unrelated." + "reason": "Both are loading-related complaints, but stuck loading Sentence Transformers and a MacOS bus error on a CLIP model are different issues." }, { "left": "issue:42907", "right": "issue:44945", "accept": false, - "reason": "Saving dequantized model failure vs. incorrect output under pipeline parallelism; different subsystems." + "reason": "Saving dequantized models and pipeline-parallel incorrect outputs are unrelated bugs." }, { "left": "issue:43976", "right": "issue:44945", "accept": false, - "reason": "Python-version compatibility issue vs. pipeline-parallel output corruption; unrelated." + "reason": "Python-version support breakage is unrelated to pipeline parallelism output corruption." }, { "left": "issue:44295", "right": "issue:44297", "accept": false, - "reason": "position_ids buffer-read problem vs. tokenizer_config save mismatch; both persistence-related but not the same bug." + "reason": "A position_ids buffer read error and a tokenizer_class mismatch on save_pretrained are distinct serialization issues." }, { - "left": "issue:43232", - "right": "issue:44743", + "left": "issue:44704", + "right": "issue:45290", "accept": false, - "reason": "Generation kwargs update timing bug vs. recurrent-state reset in modular_qwen3_5; different generation failures." + "reason": "Passing kwargs to cached_file is unrelated to apply_chat_template crashing on tool-call assistant messages." 
}, { "left": "issue:43122", "right": "issue:44514", "accept": false, - "reason": "Tokenizer output changes across versions vs. batched apply_chat_template padding=False crash; distinct behaviors." + "reason": "Both involve tokenization/chat-template behavior, but one is output regression and the other is a batched-input padding crash. Not the same bug." + }, + { + "left": "issue:43232", + "right": "issue:44743", + "accept": false, + "reason": "Both touch generation/cache logic, but they concern different mechanisms: sync_gpus kwargs vs Qwen3.5 recurrent-state reset." }, { "left": "issue:39401", "right": "issue:44488", "accept": false, - "reason": "Qwen3 tokenizer offset mapping bug vs. model loading failure for a specific sentence-transformer; unrelated." + "reason": "Tokenizer offset_mapping mismatch and failure to load a specific model are unrelated." }, { "left": "issue:43408", "right": "issue:43475", "accept": false, - "reason": "Auto-model class warning vs. missing attribute on Sam3VisionEncoderOutput; both SAM3-related but not the same issue." + "reason": "A model-type warning and a missing attribute on Sam3VisionEncoderOutput are separate SAM3 issues." }, { - "left": "issue:43232", - "right": "issue:45245", + "left": "issue:42907", + "right": "issue:43937", "accept": false, - "reason": "Generation sync bug vs. categories limit runtime error; no shared underlying defect." + "reason": "Dequantized-save failures and invalid GenerationConfig errors are unrelated." }, { - "left": "issue:42907", - "right": "issue:43937", + "left": "issue:43232", + "right": "issue:45245", "accept": false, - "reason": "Dequantized save failure vs. invalid GenerationConfig for GLM-5; unrelated." + "reason": "Generation kwargs handling has nothing to do with the category-count runtime limit." }, { "left": "issue:43784", "right": "issue:45600", "accept": false, - "reason": "Import NameError in sentence-transformers vs. 
auto_mappings referencing removed Sam3LiteText configs; different packages and failures." + "reason": "An import NameError in sentence-transformers is unrelated to stale auto_mappings references breaking CI." }, { - "left": "issue:44360", - "right": "issue:44485", + "left": "issue:41628", + "right": "issue:45278", "accept": false, - "reason": "DSA ReLU concern vs. GLM-5 RoPE implementation; unrelated model math issues." + "reason": "Both are import regressions, but 41628 is a specific AutoImageProcessor export failure while 45278 is a broader post-upgrade import breakage. Not the same concrete bug." }, { - "left": "issue:41628", - "right": "issue:45278", + "left": "issue:44360", + "right": "issue:44485", "accept": false, - "reason": "Specific missing AutoImageProcessor import vs. broad import-error report; too generic to be the same underlying bug." + "reason": "The DSA ReLU discussion and GLM-5 RoPE implementation are different model/backend topics." }, { "left": "issue:44261", "right": "issue:44373", "accept": false, - "reason": "LayerNorm epsilon precision bug vs. a docstring issue for position_ids; not a duplicate." + "reason": "Missing rms_norm_eps in MLA q_a_layernorm is unrelated to a wrong docstring for position_ids." }, { "left": "issue:39401", "right": "issue:44521", "accept": false, - "reason": "Tokenizer offset mapping bug vs. multimodal assistant_masks bug; different tokenizer behaviors." + "reason": "Qwen3 offset_mapping and multimodal assistant_masks are different tokenizer/template issues." }, { "left": "issue:44625", "right": "issue:44829", "accept": false, - "reason": "num_labels propagation in Qwen3.5 vs. flash_attention_3 degenerate training; separate model/training issues." + "reason": "num_labels propagation in Qwen3.5 is unrelated to degenerate training with flash_attention_3." }, { "left": "issue:43116", "right": "issue:45200", "accept": false, - "reason": "Example-script multi-label classification bug vs. 
Gemma 4 mm_token_type_ids defaulting; unrelated." + "reason": "Multi-label classification returning empty results in an example script is unrelated to Gemma 4 mm_token_type_ids defaults." }, { "left": "issue:29127", "right": "issue:30990", "accept": false, - "reason": "LayoutLMv3 box-validation error message vs. Sentence Transformers loading hang; unrelated." + "reason": "LayoutLMv3 error messaging and Sentence Transformers loading hang are not the same bug." }, { - "left": "issue:43122", - "right": "issue:44779", + "left": "issue:44038", + "right": "issue:44297", "accept": false, - "reason": "Both are tokenizer regressions across versions, but they concern different models and symptoms; not clearly the same bug." + "reason": "Qwen3-VL-Moe behavior and tokenizer_class mismatch on save_pretrained are unrelated." }, { - "left": "issue:44038", - "right": "issue:44297", + "left": "issue:43122", + "right": "issue:44779", "accept": false, - "reason": "Qwen3-VL-Moe load bug vs. tokenizer_class mismatch on save; unrelated." + "reason": "Both are tokenizer regressions across versions, but they affect different models and likely different code paths. Too broad to merge." }, { "left": "issue:42371", "right": "issue:42831", "accept": false, - "reason": "TF32 control settings vs. FineGrainedFP8 accuracy issue; both precision-related but not the same code-path problem." - }, - { - "left": "issue:42907", - "right": "issue:44964", - "accept": false, - "reason": "Saving dequantized models vs. loading Phi-4 multimodal instruct; distinct failures." + "reason": "TF32 settings guidance and FineGrainedFP8 accuracy loss are different precision-related problems." }, { "left": "issue:36296", "right": "issue:43116", "accept": false, - "reason": "Tensor-parallel training bug vs. example-script classification output bug; unrelated." + "reason": "Tensor parallel training bugs and a multi-label example script issue are unrelated." 
} ] }, @@ -11819,6 +11782,7 @@ "issue:44075", "issue:44117", "issue:44222", + "issue:44291", "issue:44351", "issue:44485", "issue:44704", @@ -11826,20 +11790,19 @@ "issue:44936", "issue:44945", "issue:44977", + "issue:45071", "issue:45072", "issue:45083", "issue:45216", "issue:45278", - "issue:45341", "issue:45446", "issue:45478", - "issue:45561", "issue:45563", "issue:45588" ], "soft_pairs": [ - "issue:42907|issue:43452", "issue:44038|issue:44222", + "issue:42907|issue:43452", "issue:43824|issue:44351", "issue:45216|issue:45478", "issue:42757|issue:44351", @@ -11848,18 +11811,18 @@ "issue:43278|issue:44945", "issue:43122|issue:44977", "issue:43784|issue:45446", - "issue:42222|issue:43441", - "issue:43937|issue:44485", "issue:38617|issue:45278", + "issue:43937|issue:44485", "issue:42673|issue:45072", - "issue:43064|issue:43673", + "issue:42222|issue:43441", "issue:22355|issue:29127", "issue:42371|issue:43874", + "issue:43064|issue:43673", "issue:43116|issue:43493", - "issue:44075|issue:44936", "issue:43976|issue:45446", "issue:44038|issue:45478", "issue:44222|issue:45478", + "issue:44075|issue:44936", "issue:43116|issue:43825", "issue:43937|issue:45563", "issue:45083|issue:45446", @@ -11869,228 +11832,228 @@ "issue:44117|issue:45588", "issue:43827|issue:45446", "issue:43937|issue:44977", - "issue:45341|issue:45561" + "issue:44291|issue:45071" ], - "prepared_review_unit_hash": "66590a882974b2adcd7acb7f6ea7eab2a835e09c82ddfadc8f74a4019b70aa27", + "prepared_review_unit_hash": "06c8d924556da6a0a0e16ff60dc0ce9f8d224af52d23ab6c04f9a5d23478dc4b", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 32, - "serialized_chars": 36858, - "estimated_input_tokens": 9215, - 
"estimated_eval_tokens": 18686 + "serialized_chars": 36939, + "estimated_input_tokens": 9235, + "estimated_eval_tokens": 18726 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "These items are mostly distinct issue reports. The proposed soft links are broad thematic similarities (imports, tokenization, training, MoE, testing), but they do not look like the same concrete bug or change, so none should be merged as duplicates.", - "confidence": 0.61, - "canonical_issue_reason": "issue:45278 is the broadest umbrella report in the set, covering multiple import failures after an upgrade, so it is the closest thing to a cluster representative.", + "summary": "No soft edge looks like the same underlying bug/change. The set is a heterogeneous mix of import failures, tokenizer regressions, model-specific loading/saving issues, training problems, and docs/error-message fixes.", + "confidence": 0.97, + "canonical_issue_reason": "No canonical issue: the cluster does not converge on one bug theme closely enough to represent as a duplicate set.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45278 is the most generic and reusable issue here; it can plausibly anchor related import-regression reports, even though the rest of the set is not actually duplicate-heavy.", + "best_issue_reason": "No single issue is a good global representative; the broadest candidate is still just an umbrella import-error report, not the same problem as the rest.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:42907", - "right": "issue:43452", + "left": "issue:44038", + "right": "issue:44222", "accept": false, - "reason": "Different bugs: dequantized-save failure vs gguf loading/tokenizer/model-from-pretrained failure." + "reason": "Both mention MoE/Qwen, but one is a model loading bug and the other is FP8 save_pretrained behavior; different code paths." 
}, { - "left": "issue:44038", - "right": "issue:44222", + "left": "issue:42907", + "right": "issue:43452", "accept": false, - "reason": "Both involve MoE models, but one is a Qwen3-VL-Moe bug and the other is an FP8 save_pretrained issue; not the same code path." + "reason": "Different failures: saving dequantized Ministral/Devstral models vs gguf_file breaking AutoTokenizer/AutoModel loading." }, { "left": "issue:43824", "right": "issue:44351", "accept": false, - "reason": "Different missing symbols and different import failures; same broad category, not the same bug." + "reason": "Both are import errors, but for different missing symbols and different causes." }, { "left": "issue:45216", "right": "issue:45478", "accept": false, - "reason": "One is a save_pretrained checkpoint regression, the other a from_pretrained loading error for a different Qwen3.5-Moe path." + "reason": "Same model family/version area, but one is save_pretrained checkpoint regression and the other is from_pretrained load failure." }, { "left": "issue:42757", "right": "issue:44351", "accept": false, - "reason": "Both are import errors, but for different names (`is_offline_mode` vs `HybridCache`) and unrelated breakages." + "reason": "Missing is_offline_mode from huggingface_hub is unrelated to missing HybridCache from transformers." }, { "left": "issue:43992", "right": "issue:44704", "accept": false, - "reason": "UMT5 weight loading and AutoProcessor cached_file kwargs are unrelated API bugs." + "reason": "UMT5Encoder weight-loading issue and AutoProcessor cached_file kwargs forwarding are separate loading bugs." }, { "left": "issue:42907", "right": "issue:44222", "accept": false, - "reason": "Both touch saving, but they concern different models and different save-time failures." + "reason": "Both involve saving, but one is dequantized Ministral/Devstral output and the other is FP8 MoE save_pretrained." 
}, { "left": "issue:43278", "right": "issue:44945", "accept": false, - "reason": "A dtype change between train/eval is not the same bug as incorrect output under pipeline parallelism." + "reason": "Embedding dtype drift during evaluation is not the same as incorrect output from pipeline parallelism." }, { "left": "issue:43122", "right": "issue:44977", "accept": false, - "reason": "Tokenizer behavior regression vs flash-attention generation failure; different subsystems and symptoms." + "reason": "Tokenizer-version behavior change is unrelated to a flash-attention generation failure." }, { "left": "issue:43784", "right": "issue:45446", "accept": false, - "reason": "Unrelated import-time failures in different modules; one is sentence-transformers, the other flex_attention version gating." + "reason": "Import NameError in sentence-transformers setup is unrelated to a PyTorch version check bug in flex_attention." }, { - "left": "issue:42222", - "right": "issue:43441", + "left": "issue:38617", + "right": "issue:45278", "accept": false, - "reason": "Different model families and failures: vitpose breakage vs Ministral flash-attention regression." + "reason": "Generic 'many import errors' is too broad to be the same concrete bug as one specific missing import." }, { "left": "issue:43937", "right": "issue:44485", "accept": false, - "reason": "One is a GenerationConfig validation error; the other is a discussion about GLM-5 RoPE implementation." - }, - { - "left": "issue:38617", - "right": "issue:45278", - "accept": false, - "reason": "A specific missing import is not the same as a broad report of many import errors after upgrade." + "reason": "GLM-5 generation config validation and GLM-5 RoPE implementation are different problems." }, { "left": "issue:42673", "right": "issue:45072", "accept": false, - "reason": "VRAM leak in threaded Qwen3ForCausalLM usage vs CI dtype mismatch in inference; unrelated bugs." 
+ "reason": "VRAM leak in multi-threaded loading and bfloat16 dtype mismatch in inference are unrelated." }, { - "left": "issue:43064", - "right": "issue:43673", + "left": "issue:42222", + "right": "issue:43441", "accept": false, - "reason": "Wrong optimizer state on FSDP2/PEFT vs missing GenerationMixin cache; different training/generation paths." + "reason": "Vitpose breakage and Ministral-3 FlashAttention failure affect different models and different failure points." }, { "left": "issue:22355", "right": "issue:29127", "accept": false, - "reason": "Completely different issues: missing `transformers.onnx` module vs LayoutLMv3 error-message clarity." + "reason": "ONNX import failure and LayoutLMv3 error-message clarity are unrelated issues." }, { "left": "issue:42371", "right": "issue:43874", "accept": false, - "reason": "TF32 settings guidance and a missing image-processor method are unrelated." + "reason": "TF32 API settings guidance is not the same bug as a missing image-processor method." }, { - "left": "issue:43116", - "right": "issue:43493", + "left": "issue:43064", + "right": "issue:43673", "accept": false, - "reason": "Example-script multi-label output bug vs SigLIP2 HF/JAX discrepancy; not the same failure." + "reason": "Wrong optimizer states under FSDP2/PEFT training and missing cache in chunked_prefill are distinct bugs." }, { - "left": "issue:44075", - "right": "issue:44936", + "left": "issue:43116", + "right": "issue:43493", "accept": false, - "reason": "Optimizer SGD args not used is unrelated to trainer.evaluate failing after trainer.train()." + "reason": "Example-script multi-label output bug and SigLIP2 HF-vs-JAX discrepancy are different code paths." }, { "left": "issue:43976", "right": "issue:45446", "accept": false, - "reason": "Python-version compatibility regression is unrelated to a PyTorch version check for flex_attention import." + "reason": "Python version compatibility breakage is unrelated to an AuxRequest import version-check bug." 
}, { "left": "issue:44038", "right": "issue:45478", "accept": false, - "reason": "Different Qwen model variants and different failure modes; not one shared underlying bug." + "reason": "Both are Qwen MoE-related, but one is a VL-Moe loading issue and the other is a Qwen3.5 from_pretrained error." }, { "left": "issue:44222", "right": "issue:45478", "accept": false, - "reason": "Save_pretrained FP8 issue vs from_pretrained error; same family, but not the same concrete bug." + "reason": "FP8 save_pretrained for MoE and Qwen3.5 from_pretrained are different operations and likely different defects." + }, + { + "left": "issue:44075", + "right": "issue:44936", + "accept": false, + "reason": "SGD args not used and trainer.evaluate failing after train are not the same underlying trainer bug." }, { "left": "issue:43116", "right": "issue:43825", "accept": false, - "reason": "Example-script empty results and a pipeline() error message issue are unrelated." + "reason": "A classification example returning empty results is unrelated to a pipeline() deprecation/error-message issue." }, { "left": "issue:43937", "right": "issue:45563", "accept": false, - "reason": "GenerationConfig invalid vs stale warning in paged generate are different generation bugs." + "reason": "Invalid GenerationConfig and a stale warning for num_return_sequences are different warning/validation paths." }, { "left": "issue:45083", "right": "issue:45446", "accept": false, - "reason": "A helper-function behavior issue in qwen3_omni_moe is unrelated to a flex_attention import/version check." + "reason": "Qwen3_omni_moe helper behavior and flex_attention import version checking are unrelated." }, { "left": "issue:33453", "right": "issue:39401", "accept": false, - "reason": "Both are tokenizer regressions, but one is loading regression and the other is incorrect offset_mapping." + "reason": "Tokenizer loading regression is too broad to be the same as a specific Qwen3 offset_mapping bug." 
}, { "left": "issue:39401", "right": "issue:44779", "accept": false, - "reason": "Different tokenizer bugs in different models; similarity is only at the subsystem level." + "reason": "Different tokenizer models and symptoms: Qwen3 offset mapping versus Deepseek tokenization drift." }, { "left": "issue:43931", "right": "issue:44945", "accept": false, - "reason": "Model weight-shape loading mismatch is unrelated to incorrect outputs from pipeline parallelism." + "reason": "Model weight shape mismatch on load is not the same as incorrect outputs under pipeline parallelism." }, { "left": "issue:44117", "right": "issue:45588", "accept": false, - "reason": "Tokenizer mapping fallback bug and flash_attention crash on `s_aux=None` are different code paths." + "reason": "Tokenizer mapping fallback bug and flash_attention crash on s_aux=None are unrelated." }, { "left": "issue:43827", "right": "issue:45446", "accept": false, - "reason": "Docs still referencing pipeline() and a flex_attention import check are unrelated." + "reason": "Docs still referencing pipeline() and a PyTorch version-check bug do not share the same underlying code path." }, { "left": "issue:43937", "right": "issue:44977", "accept": false, - "reason": "GenerationConfig validation error vs flash-attention generation failure for Qwen3.5 are different bugs." + "reason": "GLM-5 GenerationConfig invalidity and Qwen3.5 flash-attention generation failure are different model-specific bugs." }, { - "left": "issue:45341", - "right": "issue:45561", + "left": "issue:44291", + "right": "issue:45071", "accept": false, - "reason": "A generic testing_utils bug and a pytest-xdist race in captured_info.txt are not the same underlying issue." + "reason": "init_empty_weights argument handling and PretrainedConfig type-checking are separate compatibility issues." 
} ] }, @@ -12141,7 +12104,6 @@ "issue:44117", "issue:44220", "issue:44261", - "issue:44291", "issue:44410", "issue:44483", "issue:44484", @@ -12149,7 +12111,6 @@ "issue:44496", "issue:44829", "issue:44933", - "issue:45071", "issue:45081", "issue:45216", "issue:45278", @@ -12161,17 +12122,18 @@ "issue:45464", "issue:45478", "issue:45542", + "issue:45561", "issue:45656" ], "soft_pairs": [ - "issue:44291|issue:45071", "issue:45405|issue:45446", + "issue:45341|issue:45561", "issue:43450|issue:43986", "issue:42673|issue:43906", "issue:44829|issue:45216", - "issue:39401|issue:43421", "issue:44410|issue:45310", "issue:38617|issue:41628", + "issue:39401|issue:43421", "issue:44117|issue:45081", "issue:44484|issue:45310", "issue:42371|issue:44261", @@ -12179,206 +12141,230 @@ "issue:43408|issue:43742", "issue:42673|issue:43582", "issue:43937|issue:44496", - "issue:42673|issue:43577", "issue:42175|issue:42222", + "issue:42673|issue:43577", "issue:38617|issue:44220", "issue:44933|issue:45278", "issue:42617|issue:45341", + "issue:43012|issue:43577", "issue:44075|issue:45656", "issue:44483|issue:45464", - "issue:43012|issue:43577", - "issue:43673|issue:45341", "issue:43066|issue:43122", + "issue:43673|issue:45341", "issue:44410|issue:45478", - "issue:44484|issue:45478", "issue:43874|issue:44261", - "issue:42222|issue:45459", - "issue:43097|issue:44493" + "issue:44484|issue:45478", + "issue:43097|issue:44493", + "issue:42222|issue:45459" ], - "prepared_review_unit_hash": "563912df192ae53e12293361fa08c2986d8dd763b014c7e320569d8350193688", + "prepared_review_unit_hash": "74f5b387c4bfc2e2c47879f5eb8659ef2b2c83beb01616c6aaeeeaf2d6c87b33", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { - "node_count": 48, - "item_count": 48, + 
"node_count": 47, + "item_count": 47, "soft_pair_count": 30, - "serialized_chars": 36526, - "estimated_input_tokens": 9132, - "estimated_eval_tokens": 18520 + "serialized_chars": 35922, + "estimated_input_tokens": 8981, + "estimated_eval_tokens": 18218 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is highly heterogeneous: most items are unrelated regressions, import errors, tokenizer/model bugs, or API-specific failures. I did not find any soft pair that is convincingly the same concrete bug, so no duplicates should be merged here.", - "confidence": 0.9, - "canonical_issue_reason": "Issue 45278 is the closest thing to an umbrella tracker because it is the broadest open report in the set and explicitly covers multiple import errors after an upgrade.", + "summary": "The cluster is mostly a loose collection of unrelated Transformers issues. The soft pairs are thematically similar in a few cases, but none are strong enough to be treated as the same underlying bug/change.", + "confidence": 0.79, + "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "Issue 45278 is the best issue to keep as the cluster\u2019s representative because it is broad, open, and version-regression oriented, but it is still only a weak fit since the rest of the cluster is mostly unrelated.", + "best_issue_reason": "issue:45278 is the broadest representative symptom in this set: a generic post-upgrade import regression affecting many symbols.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:44291", - "right": "issue:45071", + "left": "issue:45405", + "right": "issue:45446", "accept": false, - "reason": "Different failure modes: `init_empty_weights` gets an unexpected internal arg, while the other is a `PretrainedConfig` type-checking regression." 
+ "reason": "Different problems: a PEFT version pin issue vs a PyTorch version check bug in flex_attention." }, { - "left": "issue:45405", - "right": "issue:45446", + "left": "issue:45341", + "right": "issue:45561", "accept": false, - "reason": "One is a dependency version bump problem; the other is a PyTorch version gate for `flex_attention`. Not the same bug." + "reason": "Same file area, but different bugs: a testing_utils bug vs an xdist race on captured_info.txt." }, { "left": "issue:43450", "right": "issue:43986", "accept": false, - "reason": "Batched video processor shape bug vs. AutoProcessor crash without torchvision. Different code paths." + "reason": "One is batched video processor shape handling; the other is an AutoProcessor crash without torchvision." }, { "left": "issue:42673", "right": "issue:43906", "accept": false, - "reason": "VRAM leak in multithreaded dataloaders is not the same concrete issue as an isolated reproduction of another bug." + "reason": "No clear same bug: a Qwen3 VRAM leak vs an isolated reproduction of another issue." }, { "left": "issue:44829", "right": "issue:45216", "accept": false, - "reason": "Flash-attention training degeneration and Qwen3.5 save_pretrained checkpoint corruption are different problems." - }, - { - "left": "issue:39401", - "right": "issue:43421", - "accept": false, - "reason": "Wrong tokenizer offset mapping is unrelated to runtime special-token/post-processor update behavior." + "reason": "Different code paths: flash_attention_3 training degeneration vs a Qwen3.5 save_pretrained checkpoint regression." }, { "left": "issue:44410", "right": "issue:45310", "accept": false, - "reason": "Missing qwen3next projections vs. Qwen3.5 MoE from_pretrained failure; different model and failure point." + "reason": "Different Qwen variants and failures: missing projections in qwen3next vs from_pretrained error in Qwen3.5 MoE." 
}, { "left": "issue:38617", "right": "issue:41628", "accept": false, - "reason": "Both are import errors, but they concern different missing symbols from different modules." + "reason": "Both are import errors, but for different missing symbols and different code paths." + }, + { + "left": "issue:39401", + "right": "issue:43421", + "accept": false, + "reason": "Tokenizer offset mapping bug vs runtime special-token/post-processor sync bug; not the same defect." }, { "left": "issue:44117", "right": "issue:45081", "accept": false, - "reason": "Tokenizer mapping `None` handling and Mistral regex patch crash are separate tokenizer subsystems and bugs." + "reason": "Different tokenizer failures: a None mapping assumption vs a Mistral regex patch crash." }, { "left": "issue:44484", "right": "issue:45310", "accept": false, - "reason": "Chat/completions request handling and Qwen3.5 MoE `from_pretrained` loading are not the same concrete bug." + "reason": "Unrelated: save_pretrained shard-size question vs a Qwen3.5 MoE from_pretrained error." }, { "left": "issue:42371", "right": "issue:44261", "accept": false, - "reason": "TF32 settings warning is unrelated to the MLA `rms_norm_eps` precision issue." + "reason": "TF32 API guidance is unrelated to the MLA q_a_layernorm precision/config issue." }, { "left": "issue:42222", "right": "issue:45542", "accept": false, - "reason": "VitPose model breakage and missing TensorFlow backend from tensorboard-only install are unrelated." + "reason": "Different backend problems: vitpose model breakage vs TensorFlow missing from a torch-only install." }, { "left": "issue:43408", "right": "issue:43742", "accept": false, - "reason": "SAM3 model-type warning vs. MobileLLM key error; different model-loading issues." + "reason": "Different model-loading issues: sam3_video/sam3_tracker mismatch vs MobileLLM key error." 
+ }, + { + "left": "issue:42175", + "right": "issue:42222", + "accept": false, + "reason": "Backend packaging issue vs a broken vitpose model family; not the same bug." + }, + { + "left": "issue:42673", + "right": "issue:43582", + "accept": false, + "reason": "Qwen3 VRAM leak and Apple Silicon TypeError in caching_allocator_warmup are unrelated." + }, + { + "left": "issue:43937", + "right": "issue:44496", + "accept": false, + "reason": "GenerationConfig validation failure vs unrecognized model/config.json error." + }, + { + "left": "issue:42673", + "right": "issue:43577", + "accept": false, + "reason": "VRAM leak in dataloader threads is not the same as dtype remaining float32 on BLIP2 load." }, { "left": "issue:38617", "right": "issue:44220", "accept": false, - "reason": "ImportError for a missing symbol is unrelated to `_torch_extract_fbank_features()` behavior." + "reason": "An import-name regression is unrelated to _torch_extract_fbank_features() behavior." }, { "left": "issue:44933", "right": "issue:45278", "accept": false, - "reason": "45278 is a broad import-error umbrella, but 44933 is a specific missing `image_utils` import; not the same concrete bug." + "reason": "A specific missing import is not enough to merge with a broader many-import-errors regression." }, { "left": "issue:42617", "right": "issue:45341", "accept": false, - "reason": "Failure to run `3d_parallel.py` is unrelated to a bug in `testing_utils.py`." + "reason": "Not the same area or bug: 3d_parallel.py failure vs a small testing_utils issue." + }, + { + "left": "issue:43012", + "right": "issue:43577", + "accept": false, + "reason": "Both touch dtype/precision, but one is a warning during compile and the other is incorrect model dtypes on load." }, { "left": "issue:44075", "right": "issue:45656", "accept": false, - "reason": "SGD args being ignored and optimizer step being called twice under deepspeed are different optimizer bugs." 
+ "reason": "Different optimizer defects: SGD args ignored vs optimizer.step being called twice with deepspeed." }, { "left": "issue:44483", "right": "issue:45464", "accept": false, - "reason": "Both mention chat/completions on Qwen3.5, but one is a general request-acceptance failure and the other is streaming inference; not enough to treat as the same bug." + "reason": "A chat/completions request rejection is not the same as a streaming inference failure on Qwen3.5-0.8B." }, { - "left": "issue:43012", - "right": "issue:43577", + "left": "issue:43066", + "right": "issue:43122", "accept": false, - "reason": "PyTorch warning during bfloat16 compile and BLIP2 dtype propagation remaining float32 are distinct dtype issues." + "reason": "Both tokenizer regressions, but one is a decoder-type mismatch and the other is changed tokenization output." }, { "left": "issue:43673", "right": "issue:45341", "accept": false, - "reason": "Missing GenerationMixin cache in v5.0.0 and a `testing_utils.py` bug are unrelated." + "reason": "GenerationMixin cache regression is unrelated to a testing_utils bug." }, { - "left": "issue:43066", - "right": "issue:43122", + "left": "issue:44410", + "right": "issue:45478", "accept": false, - "reason": "Wrong tokenizer decoder type and changed tokenization across versions are related at a high level, but not clearly the same underlying bug." + "reason": "Different Qwen3.5/Qwen3-next failures; no evidence they share one concrete code path." }, { - "left": "issue:44410", - "right": "issue:45478", + "left": "issue:43874", + "right": "issue:44261", "accept": false, - "reason": "qwen3next missing projections vs. Qwen3.5 MoE `from_pretrained` error are different model-loading problems." + "reason": "Image-patch counting bug vs MLA rms_norm_eps precision issue; unrelated." }, { "left": "issue:44484", "right": "issue:45478", "accept": false, - "reason": "Chat/completions API failure and Qwen3.5 MoE loading error are separate concrete code paths." 
+ "reason": "save_pretrained shard-size discussion is unrelated to a Qwen3.5 MoE loading error." }, { - "left": "issue:43874", - "right": "issue:44261", + "left": "issue:43097", + "right": "issue:44493", "accept": false, - "reason": "Missing image-patch helper and MLA RMS norm epsilon issue are unrelated multimodal/model bugs." + "reason": "Removed tie_embeddings API vs unexpected position-id key warnings are different regressions." }, { "left": "issue:42222", "right": "issue:45459", "accept": false, - "reason": "VitPose breakage is unrelated to protobuf error handling that masks tokenizer exceptions." - }, - { - "left": "issue:43097", - "right": "issue:44493", - "accept": false, - "reason": "Removed `tie_embeddings_and_encoder_decoder` and unexpected position-id keys are distinct regressions." + "reason": "Vitpose model breakage and protobuf-related tokenizer error hiding are unrelated defects." } ] }, @@ -12452,8 +12438,8 @@ "issue:45588" ], "soft_pairs": [ - "issue:43441|issue:44829", "issue:44263|issue:44485", + "issue:43441|issue:44829", "issue:43012|issue:43408", "issue:41628|issue:45071", "issue:45071|issue:45542", @@ -12462,291 +12448,291 @@ "issue:45003|issue:45341", "issue:43931|issue:44038", "issue:43986|issue:45341", - "issue:43097|issue:43452", "issue:43335|issue:45071", "issue:44117|issue:44987", + "issue:43097|issue:43452", "issue:43874|issue:44263", "issue:43408|issue:44368", "issue:43976|issue:45071", "issue:43531|issue:45341", "issue:44829|issue:45245", "issue:43986|issue:45372", - "issue:43064|issue:44315", "issue:38617|issue:42757", "issue:44261|issue:44485", + "issue:43064|issue:44315", "issue:44410|issue:45375", "issue:44038|issue:44410", "issue:38617|issue:44351", "issue:44062|issue:44117", - "issue:44704|issue:45103", "issue:43097|issue:45588", - "issue:43986|issue:44117", + "issue:44704|issue:45103", "issue:43502|issue:43992", + "issue:43986|issue:44117", "issue:43824|issue:43986", "issue:43097|issue:43873", - "issue:43352|issue:44380", 
"issue:43066|issue:43906", + "issue:43352|issue:44380", "issue:43825|issue:44655", "issue:43992|issue:44484", "issue:38617|issue:43097", "issue:43012|issue:44368" ], - "prepared_review_unit_hash": "33384891a8bd5b3839455d199d713674e7dd3d924419e688ef423399c25e1ce3", + "prepared_review_unit_hash": "c5b8db053f72d16ccd60ce79e1893474d2a5304d9ebdca737ac0f5777d35d1fe", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 38, - "serialized_chars": 39093, - "estimated_input_tokens": 9774, - "estimated_eval_tokens": 19804 + "serialized_chars": 39092, + "estimated_input_tokens": 9773, + "estimated_eval_tokens": 19802 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a grab bag of unrelated Transformers regressions: import errors, model-specific loading failures, backend warnings, tokenizer/config quirks, and a few training/runtime issues. 
No soft pair looks like the same concrete bug or change, so I would not merge any of them as duplicates.", - "confidence": 0.57, - "canonical_issue_reason": "No single true duplicate emerges; if forced to pick an anchor, issue 45071 is the broadest core-library regression and the most representative release-level bug in the set.", + "summary": "These items are mostly distinct regressions and import/runtime issues across different models, backends, and utilities; I did not find any soft-edge pair that looks like the same underlying bug or change.", + "confidence": 0.93, + "canonical_issue_reason": "issue:43502 is the most broadly relevant and well-documented regression in the set, with substantial discussion and references around a clear loading behavior problem.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 45071 is the best general triage anchor because it describes a core API regression with broad impact, but it is still not a duplicate target for the rest of the cluster.", + "best_issue_reason": "issue:43502 has the clearest reproduction and strongest user impact, making it the best representative issue despite the cluster being mostly unrelated.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:43441", - "right": "issue:44829", + "left": "issue:44263", + "right": "issue:44485", "accept": false, - "reason": "Both involve FlashAttention, but one is a Ministral-3 support error and the other is a sequence-classification training degeneration; different bugs and code paths." + "reason": "Different GLM-related problems: one is a torch.split return-value issue, the other is a RoPE implementation question." }, { - "left": "issue:44263", - "right": "issue:44485", + "left": "issue:43441", + "right": "issue:44829", "accept": false, - "reason": "GlmMoeDsaIndexer torch.split handling and GLM-5 RoPE implementation are unrelated subsystems." 
+ "reason": "Both involve attention backends, but they affect different models and failure modes; not the same code-path bug." }, { "left": "issue:43012", "right": "issue:43408", "accept": false, - "reason": "A PyTorch bfloat16 warning during compile is unrelated to the sam3_video/sam3_tracker model-type mismatch warning." + "reason": "Unrelated symptoms: a bfloat16 precision warning versus a wrong model-type warning during SAM loading." }, { "left": "issue:41628", "right": "issue:45071", "accept": false, - "reason": "Missing AutoImageProcessor import and PretrainedConfig type-checking breakage are different API surfaces." + "reason": "An import failure for AutoImageProcessor is unrelated to PretrainedConfig type-checking behavior." }, { "left": "issue:45071", "right": "issue:45542", "accept": false, - "reason": "Config type checking in Transformers v5 is unrelated to the undefined tf backend error from tensorboard-only installs." + "reason": "One is a type-checking regression; the other is a TensorFlow/backend detection error." }, { "left": "issue:43931", "right": "issue:44410", "accept": false, - "reason": "Qwen3-VL-30B weight-shape mismatch and qwen3next missing projections are different model-loading failures." + "reason": "Different Qwen model families and different loading failures: shape mismatch versus missing projections." }, { "left": "issue:43986", "right": "issue:44484", "accept": false, - "reason": "AutoProcessor crashing without torchvision is unrelated to the max_shard_size default in save_pretrained()." + "reason": "AutoProcessor/torchvision crash is unrelated to save_pretrained shard-size behavior." }, { "left": "issue:45003", "right": "issue:45341", "accept": false, - "reason": "Unsafe sys.modules access and a testing_utils bug are different code paths with no shared underlying defect." + "reason": "No evidence these are the same bug; one concerns sys.modules access, the other a testing utility issue." 
}, { "left": "issue:43931", "right": "issue:44038", "accept": false, - "reason": "Both are Qwen-related, but one is a VL-30B shape mismatch and the other is a Qwen3-VL-Moe issue; not the same bug." + "reason": "Both are Qwen-related, but the concrete failures and affected model variants differ." }, { "left": "issue:43986", "right": "issue:45341", "accept": false, - "reason": "A video-model AutoProcessor dependency crash and a testing_utils issue do not share the same failure." - }, - { - "left": "issue:43097", - "right": "issue:43452", - "accept": false, - "reason": "Removal of tie_embeddings_and_encoder_decoder is unrelated to gguf_file breaking AutoTokenizer/AutoModelForCausalLM loading." + "reason": "Different subsystems and different failure classes; not a duplicate." }, { "left": "issue:43335", "right": "issue:45071", "accept": false, - "reason": "SwitchTransformers sparse-layer creation and PretrainedConfig type checking are distinct regressions." + "reason": "A SwitchTransformers config bug is unrelated to PretrainedConfig typing changes." }, { "left": "issue:44117", "right": "issue:44987", "accept": false, - "reason": "TOKENIZER_MAPPING_NAMES returning None and loading physical-intelligence/fast are different tokenizer/loading problems." + "reason": "Tokenizer mapping None-handling and a physical-intelligence model load failure are not the same underlying issue." + }, + { + "left": "issue:43097", + "right": "issue:43452", + "accept": false, + "reason": "One is a removed config option warning, the other is a gguf tokenizer/model loading breakage." }, { "left": "issue:43874", "right": "issue:44263", "accept": false, - "reason": "Missing image-processor method in GLM46V and a torch.split return-value issue in GlmMoeDsaIndexer are unrelated." + "reason": "Different code paths: multimodal token counting versus GlmMoeDsaIndexer splitting behavior." 
}, { "left": "issue:43408", "right": "issue:44368", "accept": false, - "reason": "A sam3 model-type warning and a tie_word_embeddings warning in Qwen3.5 are different warnings from different models." + "reason": "Both are warning-related, but they concern different model families and different configuration checks." }, { "left": "issue:43976", "right": "issue:45071", "accept": false, - "reason": "Python version compatibility for Transformers 5.1.0 is not the same as the PretrainedConfig type-check regression." + "reason": "Python-version compatibility and config type-checking are unrelated regressions." }, { "left": "issue:43531", "right": "issue:45341", "accept": false, - "reason": "A Qwen3-MoE sliding_window bug and a testing_utils bug are unrelated." + "reason": "A sliding_window model bug is unrelated to a testing utility issue." }, { "left": "issue:44829", "right": "issue:45245", "accept": false, - "reason": "FlashAttention-3 degenerate training and a category-count RuntimeError are entirely different failures." + "reason": "FlashAttention training degeneration and a category-count runtime limit are unrelated." }, { "left": "issue:43986", "right": "issue:45372", "accept": false, - "reason": "Missing torchvision for AutoProcessor loading and missing ReasoningEffort from mistral_common are different import/dependency issues." - }, - { - "left": "issue:43064", - "right": "issue:44315", - "accept": false, - "reason": "Wrong optimizer state placement under FSDP2/PEFT is unrelated to Liger Kernel not being applied with model_init." + "reason": "Missing torchvision for video models is unrelated to a mistral_common import breakage." }, { "left": "issue:38617", "right": "issue:42757", "accept": false, - "reason": "These are both import errors, but they miss different symbols from different packages." + "reason": "Both are import errors, but they come from different packages and different missing symbols." 
}, { "left": "issue:44261", "right": "issue:44485", "accept": false, - "reason": "MLA RMS-norm epsilon precision discussion and GLM-5 RoPE implementation are different model-design issues." + "reason": "A missing rms_norm_eps config field is not the same as a RoPE implementation discussion." + }, + { + "left": "issue:43064", + "right": "issue:44315", + "accept": false, + "reason": "These are different training-stack issues: FSDP2/optimizer-state corruption versus Liger Kernel application." }, { "left": "issue:44410", "right": "issue:45375", "accept": false, - "reason": "Missing qkv/gate projections in qwen3next and a missing config field in Qwen3_5MoeVisionConfig are different bugs." + "reason": "Different Qwen vision config/model issues with different missing fields and failure modes." }, { "left": "issue:44038", "right": "issue:44410", "accept": false, - "reason": "Qwen3-VL-Moe and qwen3next layer-projection issues are different model families and failure modes." + "reason": "Both mention Qwen3 variants, but one is a general v5/moe bug and the other is missing layer projections." }, { "left": "issue:38617", "right": "issue:44351", "accept": false, - "reason": "layer_type_validation and HybridCache are different missing imports, so these are not duplicates." + "reason": "Different import errors involving different missing names and modules." }, { "left": "issue:44062", "right": "issue:44117", "accept": false, - "reason": "AddedToken special-argument duplication and TOKENIZER_MAPPING_NAMES returning None are unrelated tokenizer bugs." + "reason": "A token construction TypeError is unrelated to tokenizer mapping assumptions." + }, + { + "left": "issue:43097", + "right": "issue:45588", + "accept": false, + "reason": "A removed embedding-tie option warning is unrelated to flash_attention sink handling." 
}, { "left": "issue:44704", "right": "issue:45103", "accept": false, - "reason": "AutoProcessor kwargs propagation and auto_docstring future-annotations crashes are different subsystems." + "reason": "Both touch argument handling, but one is AutoProcessor kwargs propagation and the other is auto_docstring annotation parsing." }, { - "left": "issue:43097", - "right": "issue:43873", + "left": "issue:43502", + "right": "issue:43992", "accept": false, - "reason": "A removed tie_embeddings API and quantization/offloading behavior are not the same underlying issue." + "reason": "Local-files-only network requests and a missing embed_tokens weight are different loading bugs." }, { "left": "issue:43986", "right": "issue:44117", "accept": false, - "reason": "Video-model loading without torchvision and tokenizer mapping returning None are unrelated." - }, - { - "left": "issue:43502", - "right": "issue:43992", - "accept": false, - "reason": "local_files_only still making API requests is unrelated to UMT5Encoder missing embed_tokens.weight." + "reason": "Different loader paths and error causes; not the same bug." }, { "left": "issue:43824", "right": "issue:43986", "accept": false, - "reason": "A missing Qwen2_5_VL import and a video-model torchvision crash are different loading failures." + "reason": "An import error for a model class is not the same as a torchvision-dependent AutoProcessor crash." }, { "left": "issue:43097", - "right": "issue:45588", + "right": "issue:43873", "accept": false, - "reason": "Removal of tie_embeddings_and_encoder_decoder is unrelated to the flash_attention.py s_aux=None crash." + "reason": "A removed config option warning is unrelated to quantization/offloading behavior." }, { - "left": "issue:43352", - "right": "issue:44380", + "left": "issue:43066", + "right": "issue:43906", "accept": false, - "reason": "NemotronHForCausalLM not supporting FlashAttention 2 and GPT2 attention-scaling being ignored are different attention-backend problems." 
+ "reason": "Wrong tokenizer decoder type and an isolated reproduction of another issue are not the same concrete problem." }, { - "left": "issue:43066", - "right": "issue:43906", + "left": "issue:43352", + "right": "issue:44380", "accept": false, - "reason": "Wrong tokenizer decoder type and an isolated reproduction of another issue are not the same concrete bug." + "reason": "FlashAttention support gating for Nemotron and GPT2 attention scaling under SDPA/FlashAttention are different backend bugs." }, { "left": "issue:43825", "right": "issue:44655", "accept": false, - "reason": "A misleading pipeline error message and inability to save Pipeline objects are different pipeline features." + "reason": "Pipeline translation-task messaging and Pipeline save_pretrained support are unrelated." }, { "left": "issue:43992", "right": "issue:44484", "accept": false, - "reason": "Missing embed_tokens weights and save_pretrained shard-size defaults are unrelated issues." + "reason": "A missing encoder weight during load is unrelated to the default shard-size choice in save_pretrained." }, { "left": "issue:38617", "right": "issue:43097", "accept": false, - "reason": "Missing an import from configuration_utils and removal of tie_embeddings are different regressions." + "reason": "A missing configuration import and a removed tie-embeddings option are different issues." }, { "left": "issue:43012", "right": "issue:44368", "accept": false, - "reason": "A bfloat16 precision warning and a tie_word_embeddings warning are not the same bug." + "reason": "A precision warning during compilation is unrelated to a tie_word_embeddings warning during fine-tuning." 
} ] }, @@ -12822,18 +12808,18 @@ "soft_pairs": [ "issue:43012|issue:43550", "issue:42886|issue:43381", + "issue:43208|issue:43550", "issue:42907|issue:44655", "issue:44380|issue:45479", - "issue:43208|issue:43550", "issue:43352|issue:43976", "issue:43064|issue:44380", - "issue:43618|issue:44380", "issue:44483|issue:44514", - "issue:36683|issue:43582", + "issue:43618|issue:44380", "issue:43208|issue:43526", + "issue:36683|issue:43582", "issue:43618|issue:43761", - "issue:43012|issue:44683", "issue:42757|issue:43824", + "issue:43012|issue:44683", "issue:44038|issue:44230", "issue:44483|issue:45290", "issue:43299|issue:43867", @@ -12842,272 +12828,272 @@ "issue:43756|issue:44485", "issue:43901|issue:44655", "issue:36683|issue:43475", - "issue:42994|issue:44655", "issue:43867|issue:45216", - "issue:44220|issue:44655", "issue:43352|issue:43937", + "issue:42994|issue:44655", + "issue:44220|issue:44655", "issue:44077|issue:44380", "issue:36683|issue:44077", - "issue:44263|issue:45440", "issue:43066|issue:45381", + "issue:44263|issue:45440", "issue:43976|issue:44938", - "issue:36683|issue:43525", "issue:36010|issue:42757", + "issue:36683|issue:43525", "issue:43064|issue:44492", - "issue:44188|issue:44380", "issue:43352|issue:45593", - "issue:43618|issue:45216" + "issue:43618|issue:45216", + "issue:44188|issue:44380" ], - "prepared_review_unit_hash": "efd4b65bd8c9cfd55a1862466cf7082bfd37f415370901298cf9a022139a20d4", + "prepared_review_unit_hash": "4a9b8094710edd79e17020882d0edee5a409009903cd0ceeaecebecf7f9d0f66", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 37, - "serialized_chars": 38396, - "estimated_input_tokens": 9599, - "estimated_eval_tokens": 19454 + 
"serialized_chars": 38401, + "estimated_input_tokens": 9601, + "estimated_eval_tokens": 19458 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is highly heterogeneous: the items span unrelated import errors, model/config regressions, serialization bugs, and attention/backends issues. None of the soft-edge pairs look like the same underlying bug or change.", - "confidence": 0.9, - "canonical_issue_reason": "If a single issue must anchor the cluster, issue 36683 has the strongest signal (highest discussion activity and a concrete, reproducible failure), but it still does not subsume the rest of the items.", + "summary": "The cluster is highly heterogeneous: most items are unrelated regressions, import errors, model-specific bugs, and API/documentation issues. The soft-similarity links mostly reflect shared vocabulary rather than the same underlying defect.", + "confidence": 0.93, + "canonical_issue_reason": "No single canonical issue fits this cluster; the items are too diverse to represent one duplicate family.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 36683 is the best representative by engagement and bug clarity, though the cluster is too mixed for it to be a true duplicate canonical.", + "best_issue_reason": "No issue is a strong global representative because there is no single underlying bug or change spanning the cluster.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43012", "right": "issue:43550", "accept": false, - "reason": "Different failures: a PyTorch precision warning versus a torch.compile+SDPA model crash." + "reason": "Different problems: a PyTorch warning during compile vs a Bamba torch.compile+SDPA failure." }, { "left": "issue:42886", "right": "issue:43381", "accept": false, - "reason": "Offline cache loading and gradient checkpointing in eval mode are unrelated code paths." 
+ "reason": "Both mention runtime behavior, but one is offline cache loading and the other is gradient checkpointing in eval mode." + }, + { + "left": "issue:43208", + "right": "issue:43550", + "accept": false, + "reason": "Unrelated model/training bugs; only the broad topic of torch.compile overlaps." }, { "left": "issue:42907", "right": "issue:44655", "accept": false, - "reason": "Saving dequantized quantized models is different from saving Pipeline objects." + "reason": "Both involve saving, but one is dequantized model saving and the other is saving Pipeline objects." }, { "left": "issue:44380", "right": "issue:45479", "accept": false, - "reason": "Attention backend scaling bug is unrelated to single-label classification zero-loss behavior." - }, - { - "left": "issue:43208", - "right": "issue:43550", - "accept": false, - "reason": "xLSTM training bugs and Bamba SDPA/compile failure are different issues." + "reason": "Different code paths: attention scaling backend behavior vs sequence-classification zero-loss bug." }, { "left": "issue:43352", "right": "issue:43976", "accept": false, - "reason": "Flash Attention support for a model is unrelated to Python version compatibility." + "reason": "A model backend support issue is not the same as a Python-version compatibility failure." }, { "left": "issue:43064", "right": "issue:44380", "accept": false, - "reason": "Optimizer-state sharding under FSDP2/PEFT is not the same as GPT-2 attention scaling." - }, - { - "left": "issue:43618", - "right": "issue:44380", - "accept": false, - "reason": "CLIPOutput attentions regression is a different bug than GPT-2 backend scaling." + "reason": "FSDP2 optimizer state corruption is unrelated to GPT2 attention scaling under SDPA/FlashAttention." }, { "left": "issue:44483", "right": "issue:44514", "accept": false, - "reason": "Chat-completions request rejection and chat-template batching crash are different layers and symptoms." 
+ "reason": "Both are chat-related, but one is `/v1/chat/completions` request acceptance and the other is apply_chat_template batching/padding crash." }, { - "left": "issue:36683", - "right": "issue:43582", + "left": "issue:43618", + "right": "issue:44380", "accept": false, - "reason": "Missing Gemma3Config vocab_size is unrelated to Apple Silicon allocator TypeError." + "reason": "CLIP attentions export is unrelated to GPT2 attention scaling behavior." }, { "left": "issue:43208", "right": "issue:43526", "accept": false, - "reason": "xLSTM training failures and BEiT image-processor label reduction are unrelated." + "reason": "Different xLSTM training bugs vs a BEiT image processor label-reduction bug." }, { - "left": "issue:43618", - "right": "issue:43761", + "left": "issue:36683", + "right": "issue:43582", "accept": false, - "reason": "Both involve CLIP, but one is attentions assignment and the other is hidden_states regression; not the same bug." + "reason": "Gemma3Config missing vocab_size is unrelated to an Apple Silicon TypeError in caching_allocator_warmup." }, { - "left": "issue:43012", - "right": "issue:44683", + "left": "issue:43618", + "right": "issue:43761", "accept": false, - "reason": "Precision warning during compile is different from a compiled flex_attention failure." + "reason": "Both touch CLIP, but one is missing attentions assignment and the other is a CLIPVisionModel hidden_states regression." }, { "left": "issue:42757", "right": "issue:43824", "accept": false, - "reason": "Different import errors from different modules and symbols." + "reason": "Both are import errors, but for different missing symbols in different modules." + }, + { + "left": "issue:43012", + "right": "issue:44683", + "accept": false, + "reason": "A warning about bfloat16 precision is not the same as flex_attention compilation failing on torch>=2.9." 
}, { "left": "issue:44038", "right": "issue:44230", "accept": false, - "reason": "Qwen3-VL-Moe loading breakage and fp8 inference support are not the same concrete issue." + "reason": "Same Qwen3-VL family, but one is model loading breakage and the other is fp8 inference support." }, { "left": "issue:44483", "right": "issue:45290", "accept": false, - "reason": "A chat endpoint acceptance problem is distinct from template crashes on assistant tool-call messages." + "reason": "Both involve chat templates/APIs, but they are different failure modes with different inputs." }, { "left": "issue:43299", "right": "issue:43867", "accept": false, - "reason": "Qwen3VL-Moe loading breakage is not the same as a state_dict sorting load error." + "reason": "Qwen3VL loading failure is not the same as a state_dict sorting load error." }, { "left": "issue:44483", "right": "issue:45381", "accept": false, - "reason": "Chat-completions request handling and Qwen2.5-VL video position IDs are unrelated." + "reason": "Chat/completions request handling is unrelated to Qwen2.5-VL video vision_position_ids placement." }, { "left": "issue:38617", "right": "issue:43502", "accept": false, - "reason": "ImportError for layer_type_validation is unrelated to local_files_only still triggering network calls." + "reason": "Importing layer_type_validation and making API requests despite local_files_only are distinct issues." }, { "left": "issue:43756", "right": "issue:44485", "accept": false, - "reason": "Smollm3 RoPE layer omission and GLM-5 RoPE discussion are different bugs/models." + "reason": "Different model families and different RoPE concerns; not the same bug." }, { "left": "issue:43901", "right": "issue:44655", "accept": false, - "reason": "Docs mentioning return_all_scores is unrelated to Pipeline save_pretrained failures." + "reason": "Documentation mismatch for return_all_scores is unrelated to pipeline save_pretrained failure." 
}, { "left": "issue:36683", "right": "issue:43475", "accept": false, - "reason": "Gemma3Config missing vocab_size and SAM3 video encoder missing fpn_position_embeddings are unrelated." + "reason": "Different missing attributes in different model/config paths." }, { - "left": "issue:42994", - "right": "issue:44655", + "left": "issue:43867", + "right": "issue:45216", "accept": false, - "reason": "Quantized model saving failure is not the same as pipeline serialization failure." + "reason": "General load-model error from state_dict ordering is not the same as a Qwen3.5 save_pretrained regression." }, { - "left": "issue:43867", - "right": "issue:45216", + "left": "issue:43352", + "right": "issue:43937", "accept": false, - "reason": "Load failure on sorted state_dict and incorrect saved checkpoint are related broadly, but not the same concrete bug." + "reason": "Model support for Flash Attention 2 and invalid GenerationConfig are unrelated." }, { - "left": "issue:44220", + "left": "issue:42994", "right": "issue:44655", "accept": false, - "reason": "Audio feature extraction bug is unrelated to pipeline save_pretrained." + "reason": "Both are save-related, but one is quantized model saving and the other is pipeline serialization." }, { - "left": "issue:43352", - "right": "issue:43937", + "left": "issue:44220", + "right": "issue:44655", "accept": false, - "reason": "Unsupported Flash Attention and invalid GenerationConfig are different failures." + "reason": "Audio feature extraction bug is unrelated to pipeline save_pretrained." }, { "left": "issue:44077", "right": "issue:44380", "accept": false, - "reason": "post_init validation for patchtsmixer is unrelated to GPT-2 attention scaling." + "reason": "patchtsmixer post_init validation and GPT2 attention scaling are different code paths." }, { "left": "issue:36683", "right": "issue:44077", "accept": false, - "reason": "Missing vocab_size and optional post_init validation are unrelated." 
- }, - { - "left": "issue:44263", - "right": "issue:45440", - "accept": false, - "reason": "torch.split return semantics and DeepSeekV3 implementation divergence are different bugs." + "reason": "Gemma3Config vocab_size and patchtsmixer post_init are unrelated model-specific issues." }, { "left": "issue:43066", "right": "issue:45381", "accept": false, - "reason": "Wrong tokenizer decoder type is unrelated to Qwen2.5-VL vision_position_ids." + "reason": "Tokenizer decoder type regression is not the same as Qwen2.5-VL video position ids." }, { - "left": "issue:43976", - "right": "issue:44938", + "left": "issue:44263", + "right": "issue:45440", "accept": false, - "reason": "Both are Python compatibility reports, but they target different versions and failure modes." + "reason": "Different MoE/architecture bugs; no shared concrete failure." }, { - "left": "issue:36683", - "right": "issue:43525", + "left": "issue:43976", + "right": "issue:44938", "accept": false, - "reason": "Gemma3Config missing vocab_size and Llama4Config missing pad_token_id are different config attributes." + "reason": "Python-version compatibility for Transformers 5.1.0 is unrelated to a Python 3.14 load failure." }, { "left": "issue:36010", "right": "issue:42757", "accept": false, - "reason": "Importing GenerationMixin and importing is_offline_mode are unrelated symbol removals." + "reason": "Both are import errors, but they concern different missing names from different packages." }, { - "left": "issue:43064", - "right": "issue:44492", + "left": "issue:36683", + "right": "issue:43525", "accept": false, - "reason": "Optimizer-state corruption and a cache-strategy typo are unrelated." + "reason": "Different missing-config-attribute bugs in different model families." 
}, { - "left": "issue:44188", - "right": "issue:44380", + "left": "issue:43064", + "right": "issue:44492", "accept": false, - "reason": "Diverging attention kernels under torch.compile is not the same as GPT-2 attention scaling being ignored." + "reason": "FSDP optimizer state corruption has nothing to do with a cache-strategy typo." }, { "left": "issue:43352", "right": "issue:45593", "accept": false, - "reason": "Flash Attention support for Nemotron is unrelated to D-FINE auxiliary-loss behavior." + "reason": "Flash Attention support for Nemotron is unrelated to D-FINE auxiliary-loss handling." }, { "left": "issue:43618", "right": "issue:45216", "accept": false, - "reason": "CLIP attentions regression and Qwen3.5 checkpoint save regression are different code paths and bugs." + "reason": "CLIP attentions regression is unrelated to Qwen3.5 save_pretrained checkpoint correctness." + }, + { + "left": "issue:44188", + "right": "issue:44380", + "accept": false, + "reason": "Both involve attention/backends, but one is kernel divergence under torch.compile and the other is GPT2 scaling being ignored." 
} ] }, @@ -13136,10 +13122,10 @@ "issue:36296", "issue:36683", "issue:41669", + "issue:42548", "issue:42890", "issue:43010", "issue:43064", - "issue:43065", "issue:43352", "issue:43502", "issue:43575", @@ -13156,6 +13142,7 @@ "issue:44222", "issue:44242", "issue:44246", + "issue:44351", "issue:44355", "issue:44360", "issue:44387", @@ -13181,8 +13168,8 @@ ], "soft_pairs": [ "issue:36683|issue:44242", - "issue:43010|issue:45278", "issue:43352|issue:43931", + "issue:43010|issue:45278", "issue:43992|issue:45310", "issue:41669|issue:44246", "issue:43867|issue:45375", @@ -13196,8 +13183,8 @@ "issue:44938|issue:45381", "issue:44360|issue:45440", "issue:43618|issue:44077", - "issue:43867|issue:44484", "issue:36296|issue:43575", + "issue:43867|issue:44484", "issue:43618|issue:44857", "issue:43618|issue:45588", "issue:28282|issue:33453", @@ -13207,225 +13194,225 @@ "issue:43502|issue:44062", "issue:43723|issue:44556", "issue:43010|issue:44075", - "issue:42890|issue:45341", "issue:44222|issue:45125", - "issue:43010|issue:43065", - "issue:44387|issue:44556" + "issue:42890|issue:45341", + "issue:44387|issue:44556", + "issue:42548|issue:44351" ], - "prepared_review_unit_hash": "1cc65b342d64e087b6ab7198276ab56f83f9720b30096ab789b2313708954a2f", + "prepared_review_unit_hash": "ab5815d125c51baa584e114c3312573102be1dd2343d004b954970af066b0244", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { - "node_count": 47, - "item_count": 47, + "node_count": 48, + "item_count": 48, "soft_pair_count": 31, - "serialized_chars": 35954, - "estimated_input_tokens": 8989, - "estimated_eval_tokens": 18234 + "serialized_chars": 36522, + "estimated_input_tokens": 9131, + "estimated_eval_tokens": 18518 }, "overflow_policy": "truncate_then_skip", "trimmed": 
false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a grab-bag of unrelated regressions centered loosely on transformers loading/import behavior, testing flakiness, and model-specific bugs. None of the soft pairs look like the same underlying issue closely enough to merge, so I rejected them all. The broadest representative issue is the upgrade/import-regression report.", - "confidence": 0.58, - "canonical_issue_reason": "issue:45278 is the broadest umbrella in the set, explicitly describing multiple import errors after upgrading to 5.5.0; it best represents the cluster\u2019s dominant loading/import-regression theme.", + "summary": "This looks like a loose cluster of Transformers v5 regression reports, mostly import/loading/config/model-specific failures. The soft pairs share broad themes, but they do not appear to be the same underlying bug, so I would reject them all as duplicates.", + "confidence": 0.9, + "canonical_issue_reason": "issue:45278 is the best umbrella issue: it explicitly frames the problem as many import errors after upgrading from 4.57.0 to 5.5.0, which matches the broad cluster theme better than the narrower, model-specific reports.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45278 is the best global issue because it is the most general and recent report about the shared upgrade/import failure pattern.", + "best_issue_reason": "issue:45278 is the most representative and general of the set; it captures the version-upgrade/import-regression theme without overfitting to one model or subsystem.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:36683", "right": "issue:44242", "accept": false, - "reason": "Gemma3Config missing vocab_size is a model config bug; load-balancing loss omission is an MoE loss computation bug." + "reason": "Gemma3Config vocab_size missing and MoE load-balancing loss omission are unrelated bugs in different code paths." 
}, { - "left": "issue:43010", - "right": "issue:45278", + "left": "issue:43352", + "right": "issue:43931", "accept": false, - "reason": "`update(...)=@torch.no_grad` is a cache/runtime fix; 45278 is a broad import regression after upgrade." + "reason": "Flash Attention 2 support error for NemotronH and Qwen3-VL weight-shape mismatch are distinct model-specific load failures." }, { - "left": "issue:43352", - "right": "issue:43931", + "left": "issue:43010", + "right": "issue:45278", "accept": false, - "reason": "Flash Attention 2 unsupported for NemotronH is unrelated to Qwen3-VL weight-shape mismatch on load." + "reason": "Missing no_grad on cache/layer update is a functional implementation bug, not the same as broad import errors after upgrade." }, { "left": "issue:43992", "right": "issue:45310", "accept": false, - "reason": "UMT5Encoder missing `embed_tokens.weight` and Qwen3.5-Moe from_pretrained errors are different model-specific load failures." + "reason": "UMT5Encoder missing embed_tokens.weight and Qwen3.5 MoE from_pretrained failure are different checkpoint-loading issues." }, { "left": "issue:41669", "right": "issue:44246", "accept": false, - "reason": "Import-star performance cleanup is not the same bug as intermittent slow `import transformers`." + "reason": "Import * slowdown and occasional slow import are both import-performance topics, but the underlying causes are not shown to be the same." }, { "left": "issue:43867", "right": "issue:45375", "accept": false, - "reason": "State-dict sorting/load error and missing Qwen3.5 Vision config field are unrelated." + "reason": "State_dict sorting load error and missing deepstack_visual_indexes are separate config/state serialization problems." }, { "left": "issue:36683", "right": "issue:43064", "accept": false, - "reason": "Gemma3 config attribute missing is unrelated to FSDP2+PEFT optimizer-state corruption." 
+ "reason": "Gemma3Config attribute missing and wrong optimizer states under FSDP2/PEFT are unrelated training/config bugs." }, { "left": "issue:44355", "right": "issue:44855", "accept": false, - "reason": "Compiled Python file errors are not the same as the DebertaV2Model Python 3.13 `torch.jit.script` parsing issue." + "reason": "Compiled Python file errors and PyTorch script parsing on Python 3.13 are different import/runtime failures." }, { "left": "issue:42890", "right": "issue:45561", "accept": false, - "reason": "Missing `set_seed()` flakiness is different from a pytest-xdist race on `captured_info.txt`." + "reason": "Missing set_seed in a flaky model test is unrelated to xdist races on captured_info.txt." }, { "left": "issue:44938", "right": "issue:45071", "accept": false, - "reason": "Python 3.14 import failure and `PretrainedConfig` type-checking breakage are different regression classes." + "reason": "Python 3.14 load failure and PretrainedConfig type-checking regression are both version-related, but not the same defect." }, { "left": "issue:43867", "right": "issue:45357", "accept": false, - "reason": "A generic load error from sorted state_dict is not the same as incorrect Qwen3.5 visual encoder keys on save." + "reason": "State_dict sorting load error and incorrect Qwen3.5 save_pretrained visual keys are separate serialization regressions." }, { "left": "issue:43992", "right": "issue:45478", "accept": false, - "reason": "Different models and different failure modes: UMT5Encoder missing weights vs Qwen3.5-Moe from_pretrained error." + "reason": "Missing embed_tokens.weight in UMT5Encoder and Qwen3.5 MoE from_pretrained errors involve different models and failure modes." }, { "left": "issue:43010", "right": "issue:43976", "accept": false, - "reason": "Cache update decoration and Python 3.9/3.10 compatibility are unrelated." + "reason": "no_grad decoration on cache/layer update and Python 3.9/3.10 support issue are unrelated." 
}, { "left": "issue:44938", "right": "issue:45381", "accept": false, - "reason": "Generic Python 3.14 load failure is not the same as Qwen2.5-VL video `vision_position_ids` being wrong." + "reason": "Python 3.14 import failure and Qwen2.5-VL video vision_position_ids bug are different problems." }, { "left": "issue:44360", "right": "issue:45440", "accept": false, - "reason": "DSA indexer ReLU discussion is unrelated to DeepseekV3MoE divergence from remote implementation." + "reason": "DSA indexer ReLU complaint and DeepSeekV3 implementation divergence are not the same code-path defect." }, { "left": "issue:43618", "right": "issue:44077", "accept": false, - "reason": "CLIPOutput attentions assignment and patchtsmixer post_init allowlist are separate model API issues." + "reason": "CLIPOutput attentions field regression and patchtsmixer post_init policy change are unrelated model API issues." }, { - "left": "issue:43867", - "right": "issue:44484", + "left": "issue:36296", + "right": "issue:43575", "accept": false, - "reason": "Sorted state_dict load failure is not the same as the 50GB shard-size default question." + "reason": "Tensor parallel training bug and Qwen2-57B-A14B-Instruct TP OOM are both TP-related, but not the same concrete failure." }, { - "left": "issue:36296", - "right": "issue:43575", + "left": "issue:43867", + "right": "issue:44484", "accept": false, - "reason": "Tensor-parallel training bug and tensor-parallel load OOM are different code paths and likely different root causes." + "reason": "State_dict sorting load error and max_shard_size default question are different save/load behaviors." }, { "left": "issue:43618", "right": "issue:44857", "accept": false, - "reason": "CLIPOutput attentions regression is unrelated to float16 AMP crash in LwDetrImageLoss." + "reason": "Missing CLIPOutput attentions assignment and LwDetrImageLoss AMP crash are separate bugs." 
}, { "left": "issue:43618", "right": "issue:45588", "accept": false, - "reason": "Attention output assignment bug is unrelated to flash_attention.py crashing on `s_aux=None`." + "reason": "CLIPOutput attentions regression and flash_attention.py crash on s_aux=None are unrelated." }, { "left": "issue:28282", "right": "issue:33453", "accept": false, - "reason": "AutoModel missing PyTorch dependency and tokenizer-loading regression are different import/load problems." + "reason": "PyTorch missing ImportError and tokenizer loading regression are both import/load related only at a high level; the underlying causes differ." }, { "left": "issue:42890", "right": "issue:44964", "accept": false, - "reason": "Test seed flakiness is not the same as failing to load Phi-4 multimodal with latest transformers." + "reason": "Flaky SamHQ test and Phi-4-multimodal-instruct loading error are unrelated." }, { "left": "issue:42890", "right": "issue:44112", "accept": false, - "reason": "A stale device-override CI test and missing `set_seed()` failures are different test bugs." + "reason": "Seed-related test flakiness and GraniteSpeech stale device override failure are distinct CI/test bugs." }, { "left": "issue:42890", "right": "issue:45600", "accept": false, - "reason": "Testing_utils flakiness is unrelated to stale auto_mappings references for removed Sam3LiteText configs." + "reason": "Missing set_seed in tests and removed Sam3LiteText configs breaking auto_mappings are not the same defect." }, { "left": "issue:43502", "right": "issue:44062", "accept": false, - "reason": "Offline API-request leakage and `AddedToken(..., special=...)` argument duplication are unrelated." + "reason": "local_files_only still making API requests and AddedToken duplicate special kwarg are different networking/tokenizer bugs." 
}, { "left": "issue:43723", "right": "issue:44556", "accept": false, - "reason": "Tokenizer loading regression in v5 and checkpoint reload failure after upgrade are related in theme but not the same concrete bug." + "reason": "Tokenizer loading regression in v5 and checkpoint reload failure after upgrading are broader version regressions, but not the same issue." }, { "left": "issue:43010", "right": "issue:44075", "accept": false, - "reason": "`torch.no_grad` decoration for cache updates is unrelated to SGD argument handling." - }, - { - "left": "issue:42890", - "right": "issue:45341", - "accept": false, - "reason": "Missing set_seed flakiness is not the same as the separate `testing_utils.py` bug." + "reason": "Cache/layer update no_grad bug and unused SGD args are separate optimizer/training issues." }, { "left": "issue:44222", "right": "issue:45125", "accept": false, - "reason": "FP8 save_pretrained MoE and missing `_tp_plan` for Qwen3_5Moe are different tensor-parallel/save issues." + "reason": "FP8 save_pretrained MoE issue and missing _tp_plan for Qwen3_5Moe are different model infrastructure problems." }, { - "left": "issue:43010", - "right": "issue:43065", + "left": "issue:42890", + "right": "issue:45341", "accept": false, - "reason": "Cache update decoration bug is unrelated to the dummy `nn.Conv2d` in Sam3PixelDecoder." + "reason": "Flaky missing set_seed tests and a bug in testing_utils.py are related to test infrastructure, but not the same concrete bug." }, { "left": "issue:44387", "right": "issue:44556", "accept": false, - "reason": "Int4 quantization memory growth and v4.57->v5.x checkpoint reload failure are different regression reports." + "reason": "Int4 quantization memory regression and reload failure after upgrading are different runtime regressions." + }, + { + "left": "issue:42548", + "right": "issue:44351", + "accept": false, + "reason": "cannot import PreTrainedModel and cannot import HybridCache are separate symbol/export regressions." 
} ] }, @@ -13461,17 +13448,17 @@ "issue:42757", "issue:42831", "issue:43010", - "issue:43012", + "issue:43065", "issue:43232", "issue:43299", "issue:43502", + "issue:43526", "issue:43576", "issue:43673", "issue:43824", "issue:43827", "issue:43874", "issue:43901", - "issue:44038", "issue:44222", "issue:44230", "issue:44261", @@ -13499,11 +13486,11 @@ "issue:45542" ], "soft_pairs": [ - "issue:42548|issue:44351", - "issue:43010|issue:43232", + "issue:43010|issue:43065", "issue:45125|issue:45478", - "issue:45125|issue:45375", + "issue:43010|issue:43232", "issue:43502|issue:44485", + "issue:45125|issue:45375", "issue:30990|issue:31795", "issue:44355|issue:44749", "issue:30333|issue:33453", @@ -13513,9 +13500,9 @@ "issue:43502|issue:44961", "issue:43299|issue:44556", "issue:44355|issue:44655", + "issue:44222|issue:44230", "issue:42831|issue:43874", "issue:30333|issue:36296", - "issue:44222|issue:44230", "issue:44355|issue:45071", "issue:33453|issue:34689", "issue:42757|issue:43576", @@ -13528,303 +13515,293 @@ "issue:38617|issue:43576", "issue:31795|issue:38617", "issue:43576|issue:44351", - "issue:44908|issue:44961", "issue:41628|issue:42548", + "issue:44908|issue:44961", "issue:44485|issue:44961", "issue:36296|issue:43901", "issue:41628|issue:44355", - "issue:36296|issue:43012", + "issue:36296|issue:43526", "issue:44261|issue:44360", "issue:44273|issue:45003", "issue:44623|issue:45230", "issue:44273|issue:45542", - "issue:43576|issue:44556", - "issue:44038|issue:45125" + "issue:43576|issue:44556" ], - "prepared_review_unit_hash": "e096a11c7f330a4553894fef8730563ff1504caced18b9b1e2b44b01152636cf", + "prepared_review_unit_hash": "bb92b5a8b9dde4bf604b9d99a07d9b185c896de88e8536caa830cbeeb24409d4", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 
369220 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 41, - "serialized_chars": 39276, - "estimated_input_tokens": 9819, - "estimated_eval_tokens": 19894 + "soft_pair_count": 40, + "serialized_chars": 39027, + "estimated_input_tokens": 9757, + "estimated_eval_tokens": 19770 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly a grab-bag of distinct v5 regressions, import/load failures, and model-specific bugs. I only see one soft edge that plausibly reflects the same underlying issue; no PRs are present.", - "confidence": 0.66, - "canonical_issue_reason": "issue:44273 is the broad symptom report for the lazy-loading failure, while issue:45003 looks like a more specific root-cause description of the same code path.", + "summary": "The cluster is broad and mostly consists of unrelated Transformers v5 regressions, import errors, and model-specific bugs. I did not find any soft pair that clearly describes the same underlying bug/change closely enough to merge.", + "confidence": 0.92, + "canonical_issue_reason": "issue:42548 is the broadest, most central user-facing regression here: a core `PreTrainedModel` import failure with active discussion and strong overlap with the cluster\u2019s repeated import/load breakage theme.", "canonical_pr_reason": null, - "best_issue_reason": "issue:44273 is the most general, triage-useful statement of the accepted lazy-loading bug.", + "best_issue_reason": "issue:42548 best represents the cluster because it is a high-impact, general import regression affecting core APIs rather than a narrow model-specific edge case.", "best_pr_reason": null, "soft_edge_verdicts": [ - { - "left": "issue:42548", - "right": "issue:44351", - "accept": false, - "reason": "Both are import errors, but they involve different missing symbols and likely different export paths." 
- }, { "left": "issue:43010", - "right": "issue:43232", + "right": "issue:43065", "accept": false, - "reason": "Different generation/caching bugs; the fix paths would not plausibly collapse into one PR." + "reason": "Different bugs: cache/layer `update()` no_grad decoration vs a dummy `nn.Conv2d` in Sam3PixelDecoder." }, { "left": "issue:45125", "right": "issue:45478", "accept": false, - "reason": "Same model family, but one is missing tensor-parallel metadata and the other is a from_pretrained load failure." + "reason": "Both involve Qwen3.5 MoE, but one is missing `_tp_plan` and the other is a broader `from_pretrained` failure; not clearly the same fix." }, { - "left": "issue:45125", - "right": "issue:45375", + "left": "issue:43010", + "right": "issue:43232", "accept": false, - "reason": "Different configuration issues: tensor-parallel plan vs a missing vision config field." + "reason": "Generation/cache-related, but the concrete failure modes and code paths differ." }, { "left": "issue:43502", "right": "issue:44485", "accept": false, - "reason": "Unrelated problems: local_files_only network leakage versus a RoPE implementation bug." + "reason": "Unrelated: local file access/network requests vs GLM-5 RoPE implementation." + }, + { + "left": "issue:45125", + "right": "issue:45375", + "accept": false, + "reason": "Both are Qwen3.5 MoE-related, but one is tensor-parallel plan support and the other is a config field silently dropped; different underlying issues." }, { "left": "issue:30990", "right": "issue:31795", "accept": false, - "reason": "A runtime loading hang and a documentation issue are not the same bug." + "reason": "Sentence Transformers loading hang is unrelated to a documentation issue about forward arguments." }, { "left": "issue:44355", "right": "issue:44749", "accept": false, - "reason": "Compiled-file errors and a performance regression are distinct symptoms and fixes." 
+ "reason": "Compiled-file errors are unrelated to a performance regression after upgrade." }, { "left": "issue:30333", "right": "issue:33453", "accept": false, - "reason": "MLflow reporting on failed training and tokenizer loading regression are unrelated." + "reason": "MLFlow job-status reporting bug is unrelated to tokenizer loading regression." }, { "left": "issue:44315", "right": "issue:44360", "accept": false, - "reason": "Different subsystems and failure modes; no shared concrete code-path bug is evident." + "reason": "Different subsystems and bugs: Liger kernel application vs DSA indexer/ReLU." }, { "left": "issue:43010", "right": "issue:44368", "accept": false, - "reason": "Both mention warnings, but they concern different behaviors and different model/config paths." + "reason": "Cache/no_grad decoration bug is unrelated to a tied-embeddings warning in Qwen3.5 LoRA." }, { "left": "issue:43502", "right": "issue:44222", "accept": false, - "reason": "Network access despite local_files_only and FP8 save_pretrained are unrelated bugs." + "reason": "Offline-mode API calls vs FP8 `save_pretrained` on MoE are different code paths." }, { "left": "issue:43502", "right": "issue:44961", "accept": false, - "reason": "No meaningful overlap beyond being issues in the same repo." + "reason": "No relation between network requests despite `local_files_only=True` and the generic 'racoon' issue." }, { "left": "issue:43299", "right": "issue:44556", "accept": false, - "reason": "Both are loading regressions, but for different versions and different reported failure modes." + "reason": "Different version windows and different symptoms for model loading; not the same regression." }, { "left": "issue:44355", "right": "issue:44655", "accept": false, - "reason": "Compiled Python execution errors and pipeline save_pretrained problems are different code paths." + "reason": "Compiled Python file errors are unrelated to pipeline `save_pretrained` support." 
+ }, + { + "left": "issue:44222", + "right": "issue:44230", + "accept": false, + "reason": "One is saving FP8 MoE checkpoints; the other is inference support for Qwen3-VL/Qwen3.5 FP8." }, { "left": "issue:42831", "right": "issue:43874", "accept": false, - "reason": "Accuracy loss in FP8 versus a missing image-patch accessor are unrelated." + "reason": "FP8 accuracy issue is unrelated to a missing image-patch helper in GLM46V image processing." }, { "left": "issue:30333", "right": "issue:36296", "accept": false, - "reason": "Training-job MLflow reporting and tensor-parallel training are different bugs." - }, - { - "left": "issue:44222", - "right": "issue:44230", - "accept": false, - "reason": "FP8 save_pretrained and FP8 inference support are related theme-wise but not the same concrete bug." + "reason": "MLFlow reporting failure is unrelated to tensor-parallel training." }, { "left": "issue:44355", "right": "issue:45071", "accept": false, - "reason": "Compiled-file errors and PretrainedConfig type-checking breakage are separate regressions." + "reason": "Compiled-file runtime errors are unrelated to `PretrainedConfig` type-checking regression." }, { "left": "issue:33453", "right": "issue:34689", "accept": false, - "reason": "Different model-loading regressions with different affected models and symptoms." + "reason": "Tokenizer loading regression and Llama 3.2 Vision model loading failure are different concrete problems." }, { "left": "issue:42757", "right": "issue:43576", "accept": false, - "reason": "The env command issue may involve imports, but this is too indirect to treat as the same bug." + "reason": "Both are v5 regressions, but one is a missing `huggingface_hub` import and the other is a broken `transformers env` command." }, { "left": "issue:43827", "right": "issue:44961", "accept": false, - "reason": "Documentation references to pipeline() and an unrelated issue titled racoon are clearly different." 
+ "reason": "Documentation references to `pipeline()` are unrelated to the 'racoon' issue." }, { "left": "issue:33453", "right": "issue:36296", "accept": false, - "reason": "Tokenizer loading regression and tensor-parallel training bug are unrelated." + "reason": "Tokenizer loading vs tensor-parallel training are separate bugs." }, { "left": "issue:43576", "right": "issue:45071", "accept": false, - "reason": "A broken env command and a type-checking regression are different failures." + "reason": "CLI environment command breakage is unrelated to `PretrainedConfig` typing regressions." }, { "left": "issue:43576", "right": "issue:43824", "accept": false, - "reason": "Both are import-related, but they concern different symbols and different failure contexts." + "reason": "Different failures: broken env command vs missing Qwen2.5-VL export." }, { "left": "issue:43673", "right": "issue:44492", "accept": false, - "reason": "A generation cache regression and a cache-strategy typo are not the same underlying bug." + "reason": "Generation cache missing in chunked prefill is unrelated to a cache-strategy typo." }, { "left": "issue:44961", "right": "issue:45468", "accept": false, - "reason": "No substantive overlap beyond generic issue wording." + "reason": "No clear relation between the generic 'racoon' issue and Gemma-4 audio positional encoding." }, { "left": "issue:38617", "right": "issue:43576", "accept": false, - "reason": "Different import failures in different commands/code paths." + "reason": "Importing `layer_type_validation` and a broken env CLI are different issues." }, { "left": "issue:31795", "right": "issue:38617", "accept": false, - "reason": "Documentation confusion and an ImportError are unrelated." + "reason": "Documentation confusion about forward args is unrelated to an import error." 
}, { "left": "issue:43576", "right": "issue:44351", "accept": false, - "reason": "Both are import-related, but they point to different missing exports and different call sites." + "reason": "Broken env command and missing `HybridCache` import are separate regressions." }, { - "left": "issue:44908", - "right": "issue:44961", + "left": "issue:41628", + "right": "issue:42548", "accept": false, - "reason": "Scheduler kwargs handling and an unrelated issue are not duplicates." + "reason": "Both are import errors, but for different symbols and likely different fixes." }, { - "left": "issue:41628", - "right": "issue:42548", + "left": "issue:44908", + "right": "issue:44961", "accept": false, - "reason": "Different missing imports from transformers, with different symbols and likely different fixes." + "reason": "Scheduler kwargs bug is unrelated to the generic 'racoon' issue." }, { "left": "issue:44485", "right": "issue:44961", "accept": false, - "reason": "RoPE implementation bug and unrelated issue title do not match." + "reason": "GLM-5 RoPE implementation and 'racoon' are unrelated." }, { "left": "issue:36296", "right": "issue:43901", "accept": false, - "reason": "Tensor-parallel training and docs about return_all_scores are unrelated." + "reason": "Tensor-parallel training bug is unrelated to docs mentioning `return_all_scores`." }, { "left": "issue:41628", "right": "issue:44355", "accept": false, - "reason": "ImportError for AutoImageProcessor and compiled Python file errors are separate problems." + "reason": "Different failures: missing `AutoImageProcessor` import vs compiled-file runtime errors." }, { "left": "issue:36296", - "right": "issue:43012", + "right": "issue:43526", "accept": false, - "reason": "Tensor-parallel training bug and bfloat16 compile warning are different issues." + "reason": "Tensor-parallel training bug is unrelated to BeitImageProcessorFast label reduction." 
}, { "left": "issue:44261", "right": "issue:44360", "accept": false, - "reason": "Missing rms_norm_eps precision issue and a DSA ReLU complaint are unrelated." + "reason": "MLA RMS norm precision issue is unrelated to a DSA indexer ReLU concern." }, { "left": "issue:44273", "right": "issue:45003", - "accept": true, - "reason": "Both describe the lazy-loading failure: one as the user-visible symptom, the other as a likely root cause in modeling_utils/sys.modules access." + "accept": false, + "reason": "Lazy loading failure and unsafe `sys.modules` access are not the same bug." }, { "left": "issue:44623", "right": "issue:45230", "accept": false, - "reason": "Missing processor.save_pretrained files and a generic bug report are not the same concrete bug." + "reason": "Processor `save_pretrained` missing files is unrelated to a generic bug report." }, { "left": "issue:44273", "right": "issue:45542", "accept": false, - "reason": "Lazy loading and a TensorFlow backend error are unrelated." + "reason": "Lazy loading bug is unrelated to a TensorFlow backend/package-install issue." }, { "left": "issue:43576", "right": "issue:44556", "accept": false, - "reason": "Broken env command and checkpoint reload regression are different issues." - }, - { - "left": "issue:44038", - "right": "issue:45125", - "accept": false, - "reason": "Qwen3-VL-Moe load bug and missing tensor-parallel plan are different concrete failures." + "reason": "Broken env CLI and checkpoint reload failures after v5.2/v5.3 are different problems." } ] }, - "evaluator_result": { - "accept": true, - "feedback": "The summary is conservative and grounded in the packet. The one accepted soft edge, issue:44273 vs issue:45003, is plausibly the same lazy-loading/sys.modules problem based on the titles alone, and the rest are reasonably rejected as distinct bugs. No PRs are present, so the final note is also accurate." 
- }, - "evaluator_used": true, + "evaluator_result": null, + "evaluator_used": false, "retried": false, - "accepted_nontrivial_soft_edge": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -13841,15 +13818,13 @@ "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-382", "nodes": [ + "issue:31515", "issue:31795", "issue:33453", "issue:34689", - "issue:37428", "issue:41669", "issue:42371", "issue:42890", - "issue:42913", - "issue:43066", "issue:43352", "issue:43408", "issue:43502", @@ -13861,6 +13836,7 @@ "issue:43723", "issue:43827", "issue:43976", + "issue:44038", "issue:44077", "issue:44246", "issue:44261", @@ -13884,6 +13860,7 @@ "issue:44945", "issue:45071", "issue:45092", + "issue:45125", "issue:45230", "issue:45310", "issue:45341", @@ -13891,6 +13868,7 @@ "issue:45478" ], "soft_pairs": [ + "issue:44038|issue:45125", "issue:41669|issue:43673", "issue:44393|issue:45071", "issue:44556|issue:45092", @@ -13916,8 +13894,8 @@ "issue:44077|issue:44861", "issue:41669|issue:44908", "issue:43723|issue:44393", - "issue:44655|issue:44861", "issue:43576|issue:44273", + "issue:44655|issue:44861", "issue:44360|issue:45468", "issue:45230|issue:45310", "issue:44273|issue:44908", @@ -13925,7 +13903,7 @@ "issue:41669|issue:43704", "issue:43519|issue:44908", "issue:31795|issue:34689", - "issue:31795|issue:37428", + "issue:31515|issue:31795", "issue:44315|issue:45092", "issue:43704|issue:44485", "issue:44485|issue:45468", @@ -13936,319 +13914,312 @@ "issue:44263|issue:45468", "issue:44261|issue:45468", "issue:43606|issue:45468", - "issue:43704|issue:45341", - "issue:42913|issue:43066" + "issue:43704|issue:45341" ], - "prepared_review_unit_hash": "5b8ab1d9e4046823481b3f3d52a58f322c245da84dfd3959ba2185e4d9904fd6", + "prepared_review_unit_hash": "e33aa2ca2e738778716846bef91e91b05c5c3fc4524790d1a6b27ef07efecce2", "component_budget": { "node_count": 382, "item_count": 382, "soft_pair_count": 1431, - "serialized_chars": 
737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 47, - "serialized_chars": 41174, - "estimated_input_tokens": 10294, - "estimated_eval_tokens": 20844 + "serialized_chars": 41139, + "estimated_input_tokens": 10285, + "estimated_eval_tokens": 20826 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly false-positive similarity pairs across unrelated Transformers issues; the only obvious true duplicate in the item set is the identical Qwen3.5 Moe from_pretrained report (issue:45310 / issue:45478).", - "confidence": 0.93, - "canonical_issue_reason": "issue:45310 is the earliest and more canonical report for the exact same Qwen3.5 Moe from_pretrained error, with issue:45478 duplicating it later.", + "summary": "The set is not a single duplicate cluster; it mixes unrelated loading, tokenizer, config, performance, and model-specific bugs. All soft-edge matches are superficial keyword overlaps, so they should be rejected.", + "confidence": 0.96, + "canonical_issue_reason": "No clear canonical issue: the items are too heterogeneous and do not share one underlying bug or change.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45310 is the strongest representative issue in this cluster because it is a concrete, specific bug report and appears to subsume the later identical duplicate.", + "best_issue_reason": "No global best issue: none of these issues is a representative duplicate anchor for the whole set.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:44038", + "right": "issue:45125", + "accept": false, + "reason": "Same model family, but different failures: Qwen3-VL-Moe vs Qwen3.5 MoE tensor-parallel setup." 
+ }, { "left": "issue:41669", "right": "issue:43673", "accept": false, - "reason": "Import-star import-time/perf problem vs a cache-missing generation bug; different code paths." + "reason": "One is import-time slowdown from broad imports; the other is a GenerationMixin cache/chunked prefill bug." }, { "left": "issue:44393", "right": "issue:45071", "accept": false, - "reason": "Vision 2D bounding-box output issue vs PretrainedConfig type-checking regression; unrelated symptoms." + "reason": "Both are about bad model behavior, but one is Qwen3-VL bbox output and the other is PretrainedConfig type checking." }, { "left": "issue:44556", "right": "issue:45092", "accept": false, - "reason": "Both involve loading old checkpoints under v5, but one is a general reload incompatibility and the other is a specific InternVL2 remote-code/meta-init failure." + "reason": "Both concern reloading old checkpoints after v5, but one is general v4.57 incompatibility and the other is InternVL2 remote-code/meta-init specific." }, { "left": "issue:42371", "right": "issue:44492", "accept": false, - "reason": "TF32 behavior settings vs a typo in cache strategies; unrelated." + "reason": "TF32 behavior settings and cache-strategy typo are unrelated." }, { "left": "issue:42890", "right": "issue:44908", "accept": false, - "reason": "Flaky integration test due to missing seed vs inverse_sqrt scheduler kwargs not forwarded; different bugs." + "reason": "Test flakiness in SamHQ is unrelated to inverse_sqrt scheduler kwargs handling." }, { "left": "issue:44704", "right": "issue:44908", "accept": false, - "reason": "AutoProcessor kwargs forwarding to cached_file vs scheduler kwargs handling; different components." + "reason": "AutoProcessor kwargs propagation and scheduler kwargs are different code paths." }, { "left": "issue:44246", "right": "issue:44273", "accept": false, - "reason": "Import sometimes being slow vs lazy loading not working properly; related theme but not the same concrete bug." 
+ "reason": "Import slowdown and lazy loading failure overlap only loosely; not the same concrete bug." }, { "left": "issue:43408", "right": "issue:44315", "accept": false, - "reason": "Wrong model-type warning vs Liger Kernel not applied with model_init; no shared bug." + "reason": "Model class mismatch warning is unrelated to Liger Kernel not being applied with model_init." }, { "left": "issue:43976", "right": "issue:44393", "accept": false, - "reason": "Python version compatibility issue vs Qwen3-VL bbox output problem; unrelated." + "reason": "Python version compatibility and Qwen3-VL output errors are unrelated." }, { "left": "issue:43352", "right": "issue:44393", "accept": false, - "reason": "Flash Attention 2 support gating vs Qwen3-VL hallucination/error output; different issues." + "reason": "Flash-Attention support error for Nemotron is unrelated to Qwen3-VL bounding-box output issues." }, { "left": "issue:43576", "right": "issue:44908", "accept": false, - "reason": "Broken `transformers env` CLI vs scheduler kwargs ignored; unrelated." + "reason": "Broken env CLI and scheduler kwargs ignore bug are unrelated." }, { "left": "issue:44297", "right": "issue:45230", "accept": false, - "reason": "tokenizer.save_pretrained metadata mismatch vs an unspecific bug report with no evidence of the same root cause." + "reason": "Tokenizer save metadata mismatch is not the same as the generic bug report." }, { "left": "issue:44908", "right": "issue:45071", "accept": false, - "reason": "Scheduler kwargs bug vs PretrainedConfig type-checking regression; different subsystems." + "reason": "Scheduler kwargs handling is unrelated to PretrainedConfig type checking." }, { "left": "issue:44295", "right": "issue:45230", "accept": false, - "reason": "position_ids buffer access error vs generic bug report; not enough to match." + "reason": "position_ids buffer access error is unrelated to the vague bug report." 
}, { "left": "issue:41669", "right": "issue:44351", "accept": false, - "reason": "Import-star performance regression vs HybridCache import failure; unrelated." + "reason": "Import-time performance regression is unrelated to missing HybridCache export." }, { "left": "issue:41669", "right": "issue:45341", "accept": false, - "reason": "Import-star slowdown vs a testing_utils bug; different problems." + "reason": "Import slowdown and a testing_utils bug are unrelated." }, { "left": "issue:44492", "right": "issue:44945", "accept": false, - "reason": "Cache-strategy typo vs incorrect LLM output under pipeline parallelism; unrelated." + "reason": "Cache-strategy typo and pipeline-parallelism output corruption are unrelated." }, { "left": "issue:41669", "right": "issue:44492", "accept": false, - "reason": "Model import-star slowdown vs cache-strategy typo; not the same bug." + "reason": "Import performance regression is unrelated to cache-strategy typo." }, { "left": "issue:44393", "right": "issue:45478", "accept": false, - "reason": "Qwen3-VL 2D bbox hallucination vs Qwen3.5 Moe from_pretrained error; different models and failures." + "reason": "Different Qwen bugs: 2D bbox output error vs Qwen3.5 MoE from_pretrained failure." }, { "left": "issue:44315", "right": "issue:44829", "accept": false, - "reason": "Liger Kernel application issue vs flash_attention_3 training degeneration; different code paths." + "reason": "Liger Kernel application and flash_attention_3 training degeneration are different problems." }, { "left": "issue:31795", "right": "issue:33453", "accept": false, - "reason": "Forward-docs confusion vs tokenizer loading regression; unrelated." + "reason": "Documentation confusion in model.forward is unrelated to tokenizer loading regression." }, { "left": "issue:43827", "right": "issue:44908", "accept": false, - "reason": "Docs still referencing removed pipeline() vs scheduler kwargs bug; not the same change." 
+ "reason": "Docs still referencing pipeline() is unrelated to inverse_sqrt scheduler kwargs." }, { "left": "issue:44077", "right": "issue:44861", "accept": false, - "reason": "patchtsmixer post_init policy vs tied-weight-key AttributeError; different areas." + "reason": "patchtsmixer post_init policy and tied-weight-key attribute crash are unrelated." }, { "left": "issue:41669", "right": "issue:44908", "accept": false, - "reason": "Import-star performance regression vs scheduler kwargs handling; unrelated." + "reason": "Import performance and scheduler kwargs are different issues." }, { "left": "issue:43723", "right": "issue:44393", "accept": false, - "reason": "AutoTokenizer loading issue vs Qwen3-VL bbox output problem; unrelated." + "reason": "Tokenizer loading in v5 is unrelated to Qwen3-VL bbox output failures." }, { - "left": "issue:44655", - "right": "issue:44861", + "left": "issue:43576", + "right": "issue:44273", "accept": false, - "reason": "Pipeline save_pretrained support vs tied-weight-key crash; different bugs." + "reason": "Broken env CLI and lazy loading are not the same bug." }, { - "left": "issue:43576", - "right": "issue:44273", + "left": "issue:44655", + "right": "issue:44861", "accept": false, - "reason": "Broken env command vs lazy loading malfunction; only broad v5 theme shared." + "reason": "Pipeline save_pretrained support and tied-weight-key crash are unrelated." }, { "left": "issue:44360", "right": "issue:45468", "accept": false, - "reason": "DSA indexer missing ReLU vs Gemma4AudioRelPositionalEncoding issue; different code paths." + "reason": "DSA indexer ReLU discussion is unrelated to Gemma-4 audio positional encoding." }, { "left": "issue:45230", "right": "issue:45310", "accept": false, - "reason": "Generic bug report vs a specific Qwen3.5 Moe from_pretrained error; no evidence they are the same issue." + "reason": "Generic bug report is not the same as the specific Qwen3.5 MoE from_pretrained regression." 
}, { "left": "issue:44273", "right": "issue:44908", "accept": false, - "reason": "Lazy loading failure vs scheduler kwargs ignored; unrelated." + "reason": "Lazy loading failure and scheduler kwargs bug are unrelated." }, { "left": "issue:43519", "right": "issue:44485", "accept": false, - "reason": "Incorrect timestamp calculation in Qwen3VL vs GLM-5 RoPE implementation; different models and bugs." - }, - { - "left": "issue:43519", - "right": "issue:44908", - "accept": false, - "reason": "Timestamp calculation bug vs scheduler kwargs issue; unrelated." - }, - { - "left": "issue:41669", - "right": "issue:43704", - "accept": false, - "reason": "Import-star slowdown vs VRAM leak in dataloader threads; different symptoms and code paths." + "reason": "Qwen3VL timestamp calculation and GLM-5 RoPE implementation are different code paths." }, { "left": "issue:43502", "right": "issue:43519", "accept": false, - "reason": "local_files_only still causing API requests vs timestamp calculation in a processor; unrelated." + "reason": "local_files_only network leak and timestamp calculation are unrelated." }, { "left": "issue:31795", "right": "issue:34689", "accept": false, - "reason": "Documentation confusion vs model loading breakage for Llama 3.2 Vision; unrelated." + "reason": "Forward-doc confusion and Llama 3.2 loading regression are different bugs." }, { - "left": "issue:31795", - "right": "issue:37428", + "left": "issue:31515", + "right": "issue:31795", "accept": false, - "reason": "Documentation confusion vs missing flash attention import error; unrelated." + "reason": "Slow from_pretrained checkpoint loading is unrelated to forward-API documentation confusion." }, { "left": "issue:44315", "right": "issue:45092", "accept": false, - "reason": "Liger Kernel/model_init behavior vs old InternVL2 checkpoint/meta-init incompatibility; only broad model-loading context overlaps." 
+ "reason": "Both touch model creation/loading, but one is Liger Kernel application and the other is checkpoint compatibility with meta init." }, { "left": "issue:43704", "right": "issue:44485", "accept": false, - "reason": "VRAM leak across dataloader threads vs RoPE implementation discussion; different bugs." + "reason": "VRAM leak across dataloader threads is unrelated to RoPE implementation." }, { "left": "issue:44485", "right": "issue:45468", "accept": false, - "reason": "GLM-5 RoPE implementation issue vs Gemma4AudioRelPositionalEncoding; different models and code paths." + "reason": "GLM-5 RoPE and Gemma-4 audio positional encoding are different model-specific implementations." }, { "left": "issue:42371", "right": "issue:43704", "accept": false, - "reason": "TF32 control settings vs VRAM leak in dataloader threads; unrelated." + "reason": "TF32 settings and VRAM leak are unrelated." + }, + { + "left": "issue:43502", + "right": "issue:43519", + "accept": false, + "reason": "local_files_only network requests and timestamp calculation are unrelated." }, { "left": "issue:43519", "right": "issue:44623", "accept": false, - "reason": "Timestamp calculation bug vs processor.save_pretrained missing files; different functionality." + "reason": "Timestamp calculation bug and processor.save_pretrained missing files are unrelated." }, { "left": "issue:43519", "right": "issue:43704", "accept": false, - "reason": "Timestamp calculation bug vs VRAM leak in dataloader threads; unrelated." + "reason": "Timestamp calculation bug and VRAM leak are unrelated." }, { "left": "issue:44263", "right": "issue:45468", "accept": false, - "reason": "torch.split return-value issue vs Gemma4AudioRelPositionalEncoding; unrelated." + "reason": "torch.split return values bug and Gemma-4 audio positional encoding are unrelated." 
}, { "left": "issue:44261", "right": "issue:45468", "accept": false, - "reason": "MLA q_a_layernorm epsilon precision issue vs Gemma4AudioRelPositionalEncoding; different problems." + "reason": "MLA q_a_layernorm precision issue and Gemma-4 audio positional encoding are unrelated." }, { "left": "issue:43606", "right": "issue:45468", "accept": false, - "reason": "CPU offload device-mismatch bug vs Gemma4AudioRelPositionalEncoding; unrelated." + "reason": "CPU offload device mismatch and Gemma-4 audio positional encoding are unrelated." }, { "left": "issue:43704", "right": "issue:45341", "accept": false, - "reason": "VRAM leak in dataloader threads vs testing_utils bug; different issues." - }, - { - "left": "issue:42913", - "right": "issue:43066", - "accept": false, - "reason": "Tokenizer behavior difference across v4/v5 vs wrong tokenizer decoder type in v5; related area but not the same underlying bug." + "reason": "VRAM leak in dataloader threads is unrelated to a testing_utils bug." } ] }, @@ -14274,10 +14245,11 @@ "nodes": [ "issue:30333", "issue:31515", - "issue:31795", "issue:33453", "issue:36296", "issue:38617", + "issue:42913", + "issue:43066", "issue:43298", "issue:43525", "issue:43572", @@ -14294,10 +14266,11 @@ "issue:45341" ], "soft_pairs": [ + "issue:42913|issue:43066", "issue:44908|issue:44998", "issue:44998|issue:45341", - "issue:44861|issue:45290", "issue:44492|issue:44998", + "issue:44861|issue:45290", "issue:43618|issue:44998", "issue:44556|issue:44998", "issue:43525|issue:44861", @@ -14306,141 +14279,143 @@ "issue:44230|issue:44623", "issue:31515|issue:36296", "issue:30333|issue:31515", - "issue:31515|issue:31795", "issue:44230|issue:45310", "issue:31515|issue:33453", "issue:43298|issue:44230" ], - "prepared_review_unit_hash": "19564b33abc3d26f63cb89ddc08cdb160c7b35551c3c0139dc32dde8f70c4ee6", + "prepared_review_unit_hash": "3263df6e3d0cd2c544a1d4e3b1281a090277b200d366c9374a61048b5e041367", "component_budget": { "node_count": 382, "item_count": 
382, "soft_pair_count": 1431, - "serialized_chars": 737863, - "estimated_input_tokens": 184466, - "estimated_eval_tokens": 369188 + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 }, "budget": { - "node_count": 20, - "item_count": 20, + "node_count": 21, + "item_count": 21, "soft_pair_count": 16, - "serialized_chars": 15992, - "estimated_input_tokens": 3998, - "estimated_eval_tokens": 8252 + "serialized_chars": 16524, + "estimated_input_tokens": 4131, + "estimated_eval_tokens": 8518 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "No soft edge looks like a true duplicate: the items span unrelated bugs, docs, typos, import errors, and model-specific regressions. The cluster appears heterogeneous rather than one underlying issue.", - "confidence": 0.79, - "canonical_issue_reason": "No single canonical issue stands out because the set is not a duplicate cluster; similarities are mostly superficial (e.g. loading-related wording) across different code paths and subsystems.", - "canonical_pr_reason": "No pull requests are present in this cluster.", - "best_issue_reason": "Issue 45310 is the strongest standalone bug report here: it describes a concrete, reproducible from_pretrained failure in a specific model/version combination, making it the most actionable representative issue among these unrelated reports.", - "best_pr_reason": "No pull requests are present in this cluster.", + "summary": "The cluster is mostly mixed issue noise. 
The tokenizer/v5 items are the only somewhat related theme, but the evidence is too thin to merge 42913 and 43066 as duplicates from titles/metadata alone, so that soft edge should be rejected conservatively.", + "confidence": 0.64, + "canonical_issue_reason": "issue:43066 is the most concrete tokenizer/v5 report, but the packet does not support treating it as a confirmed duplicate of 42913.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43066 is the best single representative for the limited tokenizer-regression theme because its title is specific and actionable.", + "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:42913", + "right": "issue:43066", + "accept": false, + "reason": "Both mention tokenizer/v5 behavior, but the packet only gives titles/metadata and does not establish the same underlying bug or the same decoder regression." + }, { "left": "issue:44908", "right": "issue:44998", "accept": false, - "reason": "Scheduler kwargs bug vs an unrelated placeholder/title issue; no shared code path or underlying defect." + "reason": "Inverse_sqrt scheduler kwargs handling and a vague unrelated title are not the same bug." }, { "left": "issue:44998", "right": "issue:45341", "accept": false, - "reason": "Completely unrelated: one is nonspecific/'Unemployment', the other is a bug in testing_utils.py." + "reason": "No shared code path or concrete bug is visible from the titles." }, { - "left": "issue:44861", - "right": "issue:45290", + "left": "issue:44492", + "right": "issue:44998", "accept": false, - "reason": "Different failures in different areas: tied-weight key handling vs chat template crashes on tool-call messages." + "reason": "A cache-strategy typo is unrelated to the unrelated title." }, { - "left": "issue:44492", - "right": "issue:44998", + "left": "issue:44861", + "right": "issue:45290", "accept": false, - "reason": "A typo in cache strategy docs is unrelated to the other issue." 
+ "reason": "Tied-weight key crashes and chat-template tool-call crashes are different failure modes in different paths." }, { "left": "issue:43618", "right": "issue:44998", "accept": false, - "reason": "CLIPOutput attentions regression is unrelated to the other issue." + "reason": "CLIPOutput attentions missing assignment is unrelated to the unrelated title." }, { "left": "issue:44556", "right": "issue:44998", "accept": false, - "reason": "Checkpoint reload regression after version upgrade is unrelated to the other issue." + "reason": "Checkpoint reload compatibility after v5 upgrade is not the same as the unrelated title." }, { "left": "issue:43525", "right": "issue:44861", "accept": false, - "reason": "Both mention missing attributes, but they affect different configs and different failure modes; not the same bug." + "reason": "Missing pad_token_id in Llama4Config and tied-weight-key list handling are distinct bugs." }, { "left": "issue:43572", "right": "issue:44861", "accept": false, - "reason": "StableLmConfig pad_token_idx missing and tied-weight-key crash are different code paths and symptoms." + "reason": "StableLmConfig pad_token_idx regression is unrelated to tied-weight key attribute handling." }, { "left": "issue:31515", "right": "issue:38617", "accept": false, - "reason": "from_pretrained loading slowness is unrelated to an ImportError for layer_type_validation." + "reason": "Slow checkpoint loading and an ImportError for layer_type_validation are different problems." }, { "left": "issue:44230", "right": "issue:44623", "accept": false, - "reason": "FP8 inference support for Qwen models is unrelated to processor.save_pretrained missing files." + "reason": "FP8 inference support and missing processor.save_pretrained files are not the same issue." }, { "left": "issue:31515", "right": "issue:36296", "accept": false, - "reason": "Checkpoint loading performance and tensor-parallel training behavior are different problems." 
+ "reason": "Checkpoint load slowness and tensor-parallel training bugs are unrelated." }, { "left": "issue:30333", "right": "issue:31515", "accept": false, - "reason": "MLFlow reporting on failed training jobs is unrelated to slow checkpoint loading." - }, - { - "left": "issue:31515", - "right": "issue:31795", - "accept": false, - "reason": "A documentation confusion issue is not the same as a checkpoint loading performance bug." + "reason": "MLflow reporting after failure is unrelated to checkpoint loading performance." }, { "left": "issue:44230", "right": "issue:45310", "accept": false, - "reason": "Both involve Qwen3.x/MoE wording, but one is FP8 inference support while the other is a specific from_pretrained error; not the same underlying fix." + "reason": "Both mention Qwen3.5, but one is fp8 support and the other is a from_pretrained error; too different to merge." }, { "left": "issue:31515", "right": "issue:33453", "accept": false, - "reason": "Tokenizer loading regression and checkpoint loading slowness are distinct bugs." + "reason": "Slow checkpoint loading and tokenizer loading regression are adjacent but not clearly the same bug." }, { "left": "issue:43298", "right": "issue:44230", "accept": false, - "reason": "Continuous batching audio-model support and Qwen FP8 inference support are different feature gaps/issues." + "reason": "Continuous batching for audio models and Qwen fp8 inference support are different feature gaps." } ] }, - "evaluator_result": null, - "evaluator_used": false, - "retried": false, + "evaluator_result": { + "accept": false, + "feedback": "The cluster summary is a bit too strong for the packet. In particular, accepting issue:42913 \u2194 issue:43066 as a duplicate pair overstates the evidence: the packet only gives titles/metadata, and while the titles are similar, it does not establish the same underlying bug or the specific v4\u2192v5 decoder regression described in the reason. 
The rest of the rejections are mostly conservative, but the one accepted soft-edge verdict is not sufficiently grounded." + }, + "evaluator_used": true, + "retried": true, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null @@ -14464,40 +14439,40 @@ "soft_pairs": [ "issue:26350|issue:33260" ], - "prepared_review_unit_hash": "e07539fb78260ca37d25f11817fb66f7b34bc63333eafef7c7e452c090b255fe", + "prepared_review_unit_hash": "f376d05e9a02fb0d34006cb1687481b6600033eacf328ec2134ad349f8b5be6c", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 1587, - "estimated_input_tokens": 397, - "estimated_eval_tokens": 1050 + "serialized_chars": 1590, + "estimated_input_tokens": 398, + "estimated_eval_tokens": 1052 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 1587, - "estimated_input_tokens": 397, - "estimated_eval_tokens": 1050 + "serialized_chars": 1590, + "estimated_input_tokens": 398, + "estimated_eval_tokens": 1052 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are two distinct community contribution requests for different capabilities: Flash Attention 2 support vs GGUF support. They should not be deduplicated or merged.", - "confidence": 0.96, - "canonical_issue_reason": "If a single representative issue is needed, #26350 is the better anchor because it is the more active and older thread, but it covers a different feature request than #33260.", + "summary": "These are two separate feature-request/community-contribution issues in the same general area but for different capabilities: Flash Attention 2 support vs GGUF support. 
They are not duplicates.", + "confidence": 0.97, + "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "#26350 has higher discussion activity and is the more established issue, so it is the better canonical representative for this cluster, though not a duplicate of #33260.", + "best_issue_reason": "Issue #26350 is the better representative of the cluster only because it is older and has substantially more discussion activity, but it is not a duplicate of #33260.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:26350", "right": "issue:33260", "accept": false, - "reason": "Different requested features and code paths: Flash Attention 2 support is unrelated to GGUF support. They are not the same underlying issue." + "reason": "Different requested features and different target problems: Flash Attention 2 architecture support versus GGUF support for more architectures. Shared 'community contribution' framing is not enough to treat them as the same issue." 
} ] }, @@ -14534,77 +14509,80 @@ "issue:43834|issue:44365", "issue:44365|issue:44370" ], - "prepared_review_unit_hash": "974c89f09ffe2fd9eae05daeaba518983802956c2ffae6badec7401e3b9399a5", + "prepared_review_unit_hash": "5b6a86a9415c6eabd8000c4c95e0358dc6f1e7138960cfc5bb27a425dd5f29b8", "component_budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 4452, - "estimated_input_tokens": 1113, - "estimated_eval_tokens": 2482 + "serialized_chars": 4453, + "estimated_input_tokens": 1114, + "estimated_eval_tokens": 2484 }, "budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 4452, - "estimated_input_tokens": 1113, - "estimated_eval_tokens": 2482 + "serialized_chars": 4453, + "estimated_input_tokens": 1114, + "estimated_eval_tokens": 2484 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These issues all follow the same i18n translation template, but they appear to represent different language-specific translation tasks rather than duplicates. The Turkish issue is the only concrete one; the others look like generic placeholder/template instances.", - "confidence": 0.84, - "canonical_issue_reason": "Issue 27088 is the only concrete, language-specific report ([i18n-TR] Turkish) rather than a placeholder template, so it is the clearest real issue in the cluster.", + "summary": "These are all documentation-translation requests, but only one is concrete (Turkish). 
The other three are near-identical placeholder-style issues and look like duplicates of each other, not of the Turkish-specific request.", + "confidence": 0.77, + "canonical_issue_reason": "issue:27088 is the only specific, language-filled request ([i18n-TR] Translating docs to Turkish) and it is the earliest issue in the set, so it is the clearest canonical representative.", "canonical_pr_reason": null, - "best_issue_reason": "27088 is the best representative because it names an actual target language and appears to be a specific actionable translation request, while the others are generic placeholders with no concrete language identity in the title.", + "best_issue_reason": "issue:27088 is the most informative and concrete issue in the cluster; the others are generic placeholder variants, so this is the best global issue anchor.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:27088", "right": "issue:44370", "accept": false, - "reason": "Same translation template, but no evidence they are the same language or same underlying task; likely separate language-specific docs translation requests." + "reason": "Turkish-specific request versus a generic placeholder translation issue; same broad topic, but not the same concrete request." }, { "left": "issue:27088", "right": "issue:44365", "accept": false, - "reason": "This is another generic i18n translation issue, not shown to be the Turkish request or the same concrete translation work." + "reason": "Turkish-specific request versus a generic placeholder translation issue; related, but not a true duplicate." }, { "left": "issue:27088", "right": "issue:43834", "accept": false, - "reason": "Titles indicate different language placeholders / language-specific translation tasks, so they should not be merged as duplicates." + "reason": "Turkish-specific request versus a generic placeholder translation issue; same subsystem, different underlying request." 
}, { "left": "issue:43834", "right": "issue:44370", - "accept": false, - "reason": "Both are generic translation-template issues, but there is no evidence they refer to the same language or same work item." + "accept": true, + "reason": "Both are generic placeholder translation issues with near-identical wording and no visible distinguishing target, so they look like the same underlying issue." }, { "left": "issue:43834", "right": "issue:44365", - "accept": false, - "reason": "Highly similar template text is not enough; these could be different language translation requests and are not the same bug/change." + "accept": true, + "reason": "Near-identical placeholder translation request; no concrete difference is visible, so they appear duplicate." }, { "left": "issue:44365", "right": "issue:44370", - "accept": false, - "reason": "No concrete overlap beyond the shared template; the underlying translation targets are not shown to be identical." + "accept": true, + "reason": "Same placeholder title pattern and very similar body size; they look like duplicate copies of the same request." } ] }, - "evaluator_result": null, - "evaluator_used": false, + "evaluator_result": { + "accept": true, + "feedback": "The summary is well grounded in the packet: issue:27088 is genuinely specific ([i18n-TR] Turkish), while issue:43834, issue:44365, and issue:44370 share the same placeholder title and extremely similar body sizes. The soft-edge verdicts are appropriately conservative: Turkish vs. placeholder issues are rejected, and the placeholder-to-placeholder pairs are accepted based on strong textual similarity. No overstatement is obvious from the available evidence." 
+ }, + "evaluator_used": true, "retried": false, - "accepted_nontrivial_soft_edge": false, + "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, @@ -14666,100 +14644,100 @@ "issue:43837|issue:45027", "issue:43837|issue:45177" ], - "prepared_review_unit_hash": "6e02068c22fb9ee187c34528184deb12ec083c1ffa49ab4de71b97d5fbc68afb", + "prepared_review_unit_hash": "98a69ed920b7bf0130804fc2aefba7bab2bfbc632d256000ca3f046be5e99188", "component_budget": { "node_count": 14, "item_count": 14, "soft_pair_count": 28, - "serialized_chars": 17647, - "estimated_input_tokens": 4412, - "estimated_eval_tokens": 9080 + "serialized_chars": 17649, + "estimated_input_tokens": 4413, + "estimated_eval_tokens": 9082 }, "budget": { "node_count": 14, "item_count": 14, "soft_pair_count": 28, - "serialized_chars": 17647, - "estimated_input_tokens": 4412, - "estimated_eval_tokens": 9080 + "serialized_chars": 17649, + "estimated_input_tokens": 4413, + "estimated_eval_tokens": 9082 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "This cluster is a loose set of model-request issues across different model families and modalities (BERT variants, audio/TTS/ASR, OCR, sparse transformers, vision). They share only a generic 'add support' theme, so none are true duplicates.", + "summary": "These are not duplicates; they\u2019re separate model-request issues spanning different architectures and modalities (BERT variants, audio/TTS/ASR, OCR, perception, sparse transformers). 
The shared wording is mostly from a common \u201cadd model\u201d template, not the same underlying request.", "confidence": 0.97, "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "issue:37015 is the most representative generic model-add request and has the strongest discussion activity, but it still describes a different model family from the others.", + "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:36817", "right": "issue:41211", "accept": false, - "reason": "EuroBert config addition vs DEIMv2 are unrelated model families." + "reason": "Different model families: EuroBERT config support vs DEIMv2 architecture addition." }, { "left": "issue:36817", "right": "issue:43671", "accept": false, - "reason": "EuroBert vs Qwen3-TTS is a different architecture and modality." + "reason": "EuroBERT config support is unrelated to Qwen3-TTS support." }, { "left": "issue:37015", "right": "issue:43837", "accept": false, - "reason": "NeoBERT request and Qwen3-ASR support are separate speech/model additions." + "reason": "NeoBERT and Qwen3-ASR are different models and different modalities." }, { "left": "issue:37015", "right": "issue:44121", "accept": false, - "reason": "NeoBERT and the weight-sparse transformer are different concrete models." + "reason": "NeoBERT request is unrelated to the OpenAI weight-sparse transformer request." }, { "left": "issue:37015", "right": "issue:43909", "accept": false, - "reason": "NeoBERT is unrelated to LFM2.5 Audio." + "reason": "NeoBERT vs LFM2.5 Audio is a different model family and task." }, { "left": "issue:37015", "right": "issue:42503", "accept": false, - "reason": "NeoBERT and ModernVBERT are different BERT variants, not the same request." + "reason": "NeoBERT and ModernVBERT are separate model requests, not the same change." }, { "left": "issue:37015", "right": "issue:45027", "accept": false, - "reason": "NeoBERT vs Voxtral TTS is a different model and task." 
+ "reason": "NeoBERT request is unrelated to Voxtral TTS support." }, { "left": "issue:41115", "right": "issue:43837", "accept": false, - "reason": "MiniCPM3 architecture request and Qwen3-ASR are distinct model integrations." + "reason": "MiniCPM3 architecture support and Qwen3-ASR support target different models." }, { "left": "issue:41115", "right": "issue:44121", "accept": false, - "reason": "MiniCPM3 and the sparse transformer request target different code paths/models." + "reason": "MiniCPM3 architecture addition is not the same as adding a sparse transformer model." }, { "left": "issue:41115", "right": "issue:42503", "accept": false, - "reason": "MiniCPM3 and ModernVBERT are unrelated model families." + "reason": "MiniCPM3 and ModernVBERT are distinct architectures with different implementation needs." }, { "left": "issue:41115", "right": "issue:45027", "accept": false, - "reason": "MiniCPM3 vision/LLM support and Voxtral TTS are different additions." + "reason": "MiniCPM3 support is unrelated to Voxtral TTS support." }, { "left": "issue:41211", @@ -14771,97 +14749,85 @@ "left": "issue:42503", "right": "issue:43837", "accept": false, - "reason": "ModernVBERT models and Qwen3-ASR are different architectures/tasks." + "reason": "ModernVBERT and Qwen3-ASR differ in both model family and modality." }, { "left": "issue:42503", "right": "issue:43671", "accept": false, - "reason": "ModernVBERT is not the same underlying change as Qwen3-TTS support." + "reason": "ModernVBERT is not the same underlying request as Qwen3-TTS support." }, { "left": "issue:42503", "right": "issue:42738", "accept": false, - "reason": "ModernVBERT models vs a general BERT-like RoPE proposal are not the same concrete request." + "reason": "ModernVBERT and BERT-like models with RoPE are only loosely related; not the same concrete change." }, { "left": "issue:42738", "right": "issue:45027", "accept": false, - "reason": "A BERT-like RoPE discussion is unrelated to Voxtral TTS support." 
+ "reason": "BERT-like RoPE support and Voxtral TTS support are different changes." }, { "left": "issue:43671", "right": "issue:43837", "accept": false, - "reason": "Qwen3-TTS and Qwen3-ASR are different capabilities and code paths." + "reason": "TTS support and ASR support are separate model integrations." }, { "left": "issue:43671", "right": "issue:45027", "accept": false, - "reason": "Qwen3-TTS support vs Voxtral TTS support are different model integrations." + "reason": "Qwen3-TTS and Voxtral TTS are different models, so not duplicates." }, { "left": "issue:43671", "right": "issue:45177", "accept": false, - "reason": "Qwen3-TTS and DeepSeek-OCR2 target different modalities." + "reason": "Qwen3-TTS support is unrelated to DeepSeek-OCR2." }, { "left": "issue:43671", "right": "issue:44121", "accept": false, - "reason": "Qwen3-TTS support is unrelated to the sparse transformer request." + "reason": "TTS support is not the same as the weight-sparse transformer request." }, { "left": "issue:43671", "right": "issue:43909", "accept": false, - "reason": "Qwen3-TTS and LFM2.5 Audio are separate audio model requests." + "reason": "Qwen3-TTS and LFM2.5 Audio are different audio model requests." }, { "left": "issue:43837", "right": "issue:43909", "accept": false, - "reason": "Qwen3-ASR and LFM2.5 Audio are distinct speech-model additions." + "reason": "Qwen3-ASR and LFM2.5 Audio request different models and likely different code paths." }, { "left": "issue:43837", - "right": "issue:45322", - "accept": false, - "reason": "Qwen3-ASR is unrelated to EUPE (a perception encoder) addition." - }, - { - "left": "issue:43837", - "right": "issue:44121", - "accept": false, - "reason": "Qwen3-ASR support and weight-sparse transformer support are different concrete changes." - }, - { - "left": "issue:43909", "right": "issue:44205", "accept": false, - "reason": "LFM2.5 Audio and SAM3-LiteText are different model families and tasks." 
+ "reason": "ASR support is unrelated to SAM3-LiteText." }, { "left": "issue:44121", "right": "issue:45322", "accept": false, - "reason": "Weight-sparse transformer support and EUPE are not the same model change." + "reason": "Sparse transformer request and EUPE perception encoder are different model additions." }, { "left": "issue:43837", "right": "issue:45027", "accept": false, - "reason": "Qwen3-ASR and Voxtral TTS are different speech models." + "reason": "Qwen3-ASR and Voxtral TTS are different modalities and models." }, { "left": "issue:43837", "right": "issue:45177", "accept": false, - "reason": "Qwen3-ASR and DeepSeek-OCR2 are different modalities and code paths." + "reason": "ASR support is unrelated to DeepSeek-OCR2." } ] }, @@ -14902,12 +14868,12 @@ "issue:43668|issue:44031", "issue:43668|issue:43813" ], - "prepared_review_unit_hash": "98166addb8cb62f96eb40c4d108e0420e8de09056ab6ca8ab88516a3549f4ece", + "prepared_review_unit_hash": "6658af031150217ced8fe72d37f47c3914a50e95ad48f4f35711c24160143df4", "component_budget": { "node_count": 6, "item_count": 6, "soft_pair_count": 8, - "serialized_chars": 6291, + "serialized_chars": 6292, "estimated_input_tokens": 1573, "estimated_eval_tokens": 3402 }, @@ -14915,7 +14881,7 @@ "node_count": 6, "item_count": 6, "soft_pair_count": 8, - "serialized_chars": 6291, + "serialized_chars": 6292, "estimated_input_tokens": 1573, "estimated_eval_tokens": 3402 }, @@ -14924,60 +14890,60 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "This cluster is not a duplicate set: the issues cover unrelated areas (normalization semantics, Mask2Former loss kwargs, ModernBERT config typing, a typo in a quantization attribute, and a tokenizer regex warning). 
The shared similarity is mostly superficial/model-library phrasing, not the same underlying bug.", - "confidence": 0.93, - "canonical_issue_reason": "If one issue must serve as the anchor, issue 43668 is the clearest single-bug report: a concrete ModernBERTConfig type-hint defect with a narrow, actionable fix. The rest of the items are unrelated enough that there is no natural canonical duplicate.", + "summary": "All items are distinct issue reports touching different components and bug types; none look like duplicates of the same underlying defect.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue stands out: the reports span unrelated areas (Llama4 norm behavior, Mask2Former loss handling, ModernBERT config typing, a quantization typo, and a tokenizer warning regression).", "canonical_pr_reason": null, - "best_issue_reason": "Issue 43668 is the best representative issue because it is precise, self-contained, and clearly describes one code-path problem. It is also less ambiguous than the broader or more speculative reports in the cluster.", + "best_issue_reason": "No issue is a safe cluster representative because there is no shared bug or code path across these reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:37934", "right": "issue:43668", "accept": false, - "reason": "Different models and different defect types: one asks whether Llama4TextL2Norm should be RMSNorm, the other reports an incorrect ModernBERTConfig type hint." + "reason": "Different components and defects: Llama4 norm semantics vs ModernBERT config type-hint correctness." }, { "left": "issue:42936", "right": "issue:43240", "accept": false, - "reason": "Mask2Former ignore_value usage is a model-specific loss-parameter issue, while 43240 is a broader kwargs-not-forwarded bug; not the same underlying code path." + "reason": "Mask2Former ignore_value usage is unrelated to kwargs not being forwarded to a loss function." 
}, { "left": "issue:42936", "right": "issue:44031", "accept": false, - "reason": "Mask2Former loss handling and tokenizer regex warnings are unrelated subsystems and symptoms." + "reason": "A model loss-parameter bug and a tokenizer regex warning regression are separate code paths." }, { "left": "issue:42936", "right": "issue:43813", "accept": false, - "reason": "A missing ignore_value use in Mask2Former is unrelated to a typo in a quantization-operation attribute." + "reason": "Unrelated issues: unused ignore_value in Mask2Former vs a typo in a quantization attribute name." }, { "left": "issue:42936", "right": "issue:43668", "accept": false, - "reason": "These concern different models and different bug classes: unused loss parameter versus config type annotation." + "reason": "Different bug classes and modules: loss handling vs a config annotation issue." }, { "left": "issue:43240", "right": "issue:43668", "accept": false, - "reason": "kwargs not reaching a loss function is not the same problem as an incorrect ModernBERTConfig type hint." + "reason": "Forwarding kwargs to loss code is unrelated to correcting a type hint on norm_eps." }, { "left": "issue:43668", "right": "issue:44031", "accept": false, - "reason": "A config type-hint mistake and tokenizer regex warning behavior do not share a concrete code-path or fix." + "reason": "ModernBERT config typing and tokenizer regex warnings do not share the same underlying bug." }, { "left": "issue:43668", "right": "issue:43813", "accept": false, - "reason": "These are distinct issues: one is a ModernBERT config typing problem, the other a literal typo in a different quantization attribute." + "reason": "A config type-hint mistake is not the same as a typo in a quantization operation name." 
} ] }, @@ -15007,7 +14973,7 @@ "soft_pairs": [ "issue:41084|issue:43037" ], - "prepared_review_unit_hash": "48c3c14d20d2970e6656506d03e9dbb40c57c43de89e9d838d8746aaa8e06bbf", + "prepared_review_unit_hash": "d059631273127e4ccb067ad6053f73242467806506ceaa74dcfb02c3fea8d0ed", "component_budget": { "node_count": 2, "item_count": 2, @@ -15029,18 +14995,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "The two issues are not duplicates: one is a decoding/runtime behavior request, the other is a model support request for DeepSeek v3.2. They share only weak topical similarity.", - "confidence": 0.97, + "summary": "These two issues do not appear to describe the same underlying problem: one is about block decoding behavior, the other is a feature/support request for DeepSeek v3.2. No duplicate relationship is supported by the titles or available metadata.", + "confidence": 0.96, "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "Issue #43037 is the more concrete and actionable request (specific model support) and is the better representative of the cluster if one must be chosen, though it is not a duplicate of #41084.", + "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:41084", "right": "issue:43037", "accept": false, - "reason": "Different underlying problems: 'Set Block Decoding' concerns decoding behavior, while 'DeepSeek v3.2 support' is a model compatibility/support request. No evidence they describe the same bug or change." + "reason": "Different problem domains: decoding behavior vs model support request. The shared evidence is only weak title similarity, not the same bug or change." 
} ] }, @@ -15070,12 +15036,12 @@ "soft_pairs": [ "issue:43598|issue:43600" ], - "prepared_review_unit_hash": "a8d1844885ea6751f9eb77c9d77737177ff561c42062f8495a36f2fc46bd9bf0", + "prepared_review_unit_hash": "b7a0cd17bdc0eca6d60c1fa68df1b187f0c5ee5e9c6acf07c69ddceaffa75237", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 1584, + "serialized_chars": 1583, "estimated_input_tokens": 396, "estimated_eval_tokens": 1048 }, @@ -15083,7 +15049,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 1584, + "serialized_chars": 1583, "estimated_input_tokens": 396, "estimated_eval_tokens": 1048 }, @@ -15092,18 +15058,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are related Trainer-customization issues, but they describe different problems: one is about `remove_unused_column` behavior, the other about FLOPS estimation for custom trainers. They should not be merged as duplicates.", - "confidence": 0.93, - "canonical_issue_reason": "Issue #43598 is a clearer concrete product bug/behavior question centered on a specific Trainer setting (`remove_unused_column`) and customization impact.", + "summary": "The two issues are related to Trainer customization, but they describe different problems: one is about `remove_unused_column` behavior, the other about FLOPs estimation for custom trainers. 
They do not look like duplicates.", + "confidence": 0.96, + "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "#43598 is the better representative because it is narrower and more actionable; #43600 is adjacent but addresses a different Trainer metric path.", + "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43598", "right": "issue:43600", "accept": false, - "reason": "They share the broad theme of custom trainers, but the underlying issues differ: column pruning/customizability versus FLOPS estimation. No evidence they would be fixed by the same change." + "reason": "Different underlying concerns: one targets input-column pruning/customizability in Trainer, the other targets FLOPs counting for custom trainer classes. Shared theme is too broad to treat as the same bug." } ] }, @@ -15133,7 +15099,7 @@ "soft_pairs": [ "issue:43684|issue:44405" ], - "prepared_review_unit_hash": "30fdcb8b399d5b838e8c66c24c6f5657a65aaae4bf4b9a01e65e13971d54706c", + "prepared_review_unit_hash": "73af4585b6689c78e3d0fea10f3248f9f604e095ae640e3e273868974fdd2173", "component_budget": { "node_count": 2, "item_count": 2, @@ -15155,18 +15121,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are related only at a broad Qwen/AutoModel level, but they describe different feature requests for different model/config paths, so they are not duplicates.", + "summary": "These are not duplicates: one requests registration for Qwen3-Omni in AutoModel/AutoModelForConditionalGeneration, while the other requests AutoModelForSequenceClassification support for Qwen3.5 (Qwen3_5Config). 
They concern different model families and different auto-class capabilities.", "confidence": 0.95, - "canonical_issue_reason": null, + "canonical_issue_reason": "No single canonical issue: the two tickets ask for different model support in different auto classes, so they should remain separate.", "canonical_pr_reason": null, - "best_issue_reason": "No strong canonical issue here; if forced to pick a representative, #43684 is the broader AutoModel registration request, while #44405 is a narrower task-specific support request for a different config.", + "best_issue_reason": "Neither issue is a duplicate of the other; if a representative is needed, issue 43684 is a clearer standalone registration request, but it should not absorb 44405.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43684", "right": "issue:44405", "accept": false, - "reason": "Reject: #43684 requests registering Qwen3-Omni with AutoModel/AutoModelForConditionalGeneration, while #44405 requests AutoModelForSequenceClassification support for Qwen3.5 (Qwen3_5Config). Different model families and different heads/code paths, so they are not the same underlying bug or change." + "reason": "Different model families and different API targets (registration for Qwen3-Omni vs sequence classification support for Qwen3.5); not the same underlying bug or change." 
} ] }, @@ -15203,70 +15169,70 @@ "issue:43845|issue:43859", "issue:43846|issue:43859" ], - "prepared_review_unit_hash": "766204b068f433a5383d9a9c73edd38a11261436dc67196c9b84c88261b7847b", + "prepared_review_unit_hash": "c1db08501838b8d1b61ae45ba0fc7cd912b92f4706cb0275ccd9107783903653", "component_budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 4266, - "estimated_input_tokens": 1067, - "estimated_eval_tokens": 2390 + "serialized_chars": 4262, + "estimated_input_tokens": 1066, + "estimated_eval_tokens": 2388 }, "budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 4266, - "estimated_input_tokens": 1067, - "estimated_eval_tokens": 2390 + "serialized_chars": 4262, + "estimated_input_tokens": 1066, + "estimated_eval_tokens": 2388 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "One issue ('Fett') is unrelated; the other three are bare 'huggingface ' issues with almost no detail. They look duplicate-like, but there is not enough concrete information to safely merge any pair.", - "confidence": 0.72, - "canonical_issue_reason": "issue:43845 is the earliest of the near-identical 'huggingface ' issues, so it is the best placeholder canonical issue among the duplicates.", + "summary": "The cluster contains one unrelated-looking issue ('Fett') and three near-identical placeholder issues titled 'huggingface '. 
With no bodies, targets, or file references, the safest read is that the huggingface items are the only plausible duplicates, but there is not enough concrete evidence to merge any pair with confidence.", + "confidence": 0.78, + "canonical_issue_reason": "Issue 43845 is the earliest of the three identical 'huggingface ' reports and is the most reasonable representative for that likely duplicate cluster; 43835 has a different title and looks unrelated.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43845 is the best overall issue candidate because it is the earliest and has the same sparse 'huggingface ' title as the other likely-duplicate issues.", + "best_issue_reason": "43845 is the best issue to keep as the cluster representative because it is the earliest of the matching-title issues and has no evidence of being distinct from 43846/43859.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43835", "right": "issue:43845", "accept": false, - "reason": "Different titles ('Fett' vs 'huggingface '); no body or metadata shows the same underlying bug." + "reason": "Different titles and no body/detail evidence; this does not look like the same underlying issue." }, { "left": "issue:43835", "right": "issue:43846", "accept": false, - "reason": "Different titles and no substantive details; cannot infer the same issue." + "reason": "Different titles and no substantive evidence of a shared bug or report." }, { "left": "issue:43835", "right": "issue:43859", "accept": false, - "reason": "Different titles and no concrete description to connect them." + "reason": "Different titles and no substantive evidence of a shared bug or report." }, { "left": "issue:43845", "right": "issue:43846", "accept": false, - "reason": "Titles match, but both issues are too empty to confirm the same underlying bug." + "reason": "The titles match, but there is no body or target evidence to confirm they are the same underlying issue; too little information to merge." 
}, { "left": "issue:43845", "right": "issue:43859", "accept": false, - "reason": "Same broad title, but no concrete problem statement or evidence to safely merge." + "reason": "The titles match, but the available metadata is too thin to safely conclude these are the same issue." }, { "left": "issue:43846", "right": "issue:43859", "accept": false, - "reason": "Same broad title, but the issues lack enough detail to establish they are duplicates." + "reason": "The titles match, but there is not enough concrete evidence to confirm a true duplicate." } ] }, @@ -15307,95 +15273,98 @@ "issue:44572|issue:44937", "issue:44573|issue:44937" ], - "prepared_review_unit_hash": "0fc50ec049d7ab61d4bc110913ef2421c36d9ecd7c4aac97953090990e870718", + "prepared_review_unit_hash": "d0f082238dff6040eb2e79771ae6ddb9525cb49e2ac8c779a90db1f9be16c8c7", "component_budget": { "node_count": 5, "item_count": 5, "soft_pair_count": 9, - "serialized_chars": 5794, - "estimated_input_tokens": 1449, - "estimated_eval_tokens": 3154 + "serialized_chars": 5791, + "estimated_input_tokens": 1448, + "estimated_eval_tokens": 3152 }, "budget": { "node_count": 5, "item_count": 5, "soft_pair_count": 9, - "serialized_chars": 5794, - "estimated_input_tokens": 1449, - "estimated_eval_tokens": 3154 + "serialized_chars": 5791, + "estimated_input_tokens": 1448, + "estimated_eval_tokens": 3152 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are low-content, mostly spam-like closed issues with generic or unrelated titles. There is no evidence that any pair describes the same underlying bug or change, so none of the soft edges should be merged.", - "confidence": 0.97, - "canonical_issue_reason": null, + "summary": "The three \"\" issues form a likely duplicate-spam subgroup, but the packet only supports high similarity rather than exact duplication. 
The other two issues have different titles and do not look like the same underlying report.", + "confidence": 0.8, + "canonical_issue_reason": "issue:44371 is the earliest of the highly similar \"\" issues and is a reasonable representative of that likely duplicate subgroup.", "canonical_pr_reason": null, - "best_issue_reason": "No issue has enough substantive detail to serve as a reliable canonical duplicate target; the cluster appears unrelated/spammy.", + "best_issue_reason": "issue:44371 is the best representative for the likely duplicate spam subgroup because it is earliest and matches the other two \"\" issues in the limited metadata shown.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44371", "right": "issue:44572", - "accept": false, - "reason": "Both are just '' with no body or target; identical spam text is not enough to establish the same underlying bug/change." + "accept": true, + "reason": "Same \"\" title and identical minimal metadata; likely duplicate spam reports, though the body text is not visible here." }, { "left": "issue:44371", "right": "issue:44573", - "accept": false, - "reason": "Same as above: identical spam titles, but no substantive evidence of a shared issue." + "accept": true, + "reason": "Same \"\" title and identical minimal metadata; likely duplicate spam reports, though the body text is not visible here." }, { "left": "issue:44371", "right": "issue:44418", "accept": false, - "reason": "One is '' and the other is a generic documentation suggestion; titles/body do not indicate the same problem." + "reason": "Different title and no evidence that the documentation suggestion is the same report as the spam issue." }, { "left": "issue:44371", "right": "issue:44937", "accept": false, - "reason": "'' and 'Check out \"Google LLC\"' are unrelated and lack any shared bug/change context." + "reason": "Different title and no indication it is the same underlying issue as the \"\" report." 
}, { "left": "issue:44418", "right": "issue:44572", "accept": false, - "reason": "A documentation enhancement suggestion is not the same underlying issue as a spam placeholder." + "reason": "Documentation enhancement and \"\" are unrelated on the face of the packet." }, { "left": "issue:44418", "right": "issue:44573", "accept": false, - "reason": "No shared concrete problem: generic documentation suggestion versus spam placeholder." + "reason": "Documentation enhancement and \"\" are unrelated on the face of the packet." }, { "left": "issue:44572", "right": "issue:44573", - "accept": false, - "reason": "Both are titled '', but there is no substantive description to confirm they are the same underlying issue." + "accept": true, + "reason": "Same \"\" title and identical minimal metadata; likely duplicate spam reports, though the body text is not visible here." }, { "left": "issue:44572", "right": "issue:44937", "accept": false, - "reason": "Spam placeholder and unrelated named title; no evidence of a shared bug or change." + "reason": "Different title and no evidence that it is the same report as the spam issue." }, { "left": "issue:44573", "right": "issue:44937", "accept": false, - "reason": "Spam placeholder and unrelated named title; not the same underlying issue." + "reason": "Different title and no evidence that it is the same report as the spam issue." } ] }, - "evaluator_result": null, - "evaluator_used": false, - "retried": false, - "accepted_nontrivial_soft_edge": false, + "evaluator_result": { + "accept": false, + "feedback": "The grouping is directionally plausible, but the writeup overstates the evidence. The packet only shows identical titles and equal body lengths for the three \"\" items; it does not expose the actual body text or any explicit linkage. Calling them \"exact duplicates\" and \"the same spam report\" is stronger than what the packet supports. 
The safer wording would be that they appear highly similar or likely duplicate spam reports, while the other two have different titles and are not clearly the same issue." + }, + "evaluator_used": true, + "retried": true, + "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, @@ -15418,7 +15387,7 @@ "soft_pairs": [ "issue:45145|issue:45151" ], - "prepared_review_unit_hash": "9d6af88b5d36fb18d0e0fa36a489105390d020310d5255cae224e6c6c451bfe6", + "prepared_review_unit_hash": "c28c5ff211d3f984c8e638f1585ac868965da7104519ccaf3857237af2c87eff", "component_budget": { "node_count": 2, "item_count": 2, @@ -15440,28 +15409,28 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These two closed issues are likely near-duplicates based on their identical titles and similar metadata, but the packet does not include body text or any stronger linkage, so exact duplication cannot be confirmed from the available evidence alone.", - "confidence": 0.85, - "canonical_issue_reason": "Issue #45145 is the better canonical issue because it was opened first and has an inbound reference, so it is the more established thread.", + "summary": "These two closed issues look like duplicates of the same request: they have identical titles and very similar body content, with no visible evidence of distinct underlying work.", + "confidence": 0.84, + "canonical_issue_reason": "#45145 is the better canonical issue because it shares the same title as #45151 and has slightly more external attention (1 inbound reference vs. 
0).", "canonical_pr_reason": null, - "best_issue_reason": "Issue #45145 is also the best overall issue candidate for the same reasons: earliest timestamp and slightly stronger reference history.", + "best_issue_reason": "#45145 is the best issue representative for this cluster for the same reason: same request, plus marginally more linkage.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:45145", "right": "issue:45151", - "accept": false, - "reason": "The titles are identical and the text similarity is strong, but the packet does not show the issue bodies or any explicit linkage, so this is better treated as a likely near-duplicate rather than a confirmed duplicate." + "accept": true, + "reason": "Same title and strongly similar content suggest the same underlying issue/request; this is a reasonable duplicate match, though the evidence is only soft." } ] }, "evaluator_result": { "accept": false, - "feedback": "The pair looks plausibly related, but the writeup overstates the evidence. The packet only shows identical titles and soft similarity; it does not establish that the bodies have the same wording or that the issues are exact duplicates. The soft-edge accept is therefore too strong, and the summary should be toned down to a cautious near-duplicate/likely duplicate judgment grounded only in the available metadata." + "feedback": "The packet supports strong similarity, but the analyst is a bit too confident for a soft match: only soft_similarity is present, and there\u2019s no deterministic linkage or other distinguishing evidence. Phrasing like \u201ceffectively identical\u201d and the 0.98 confidence overstate what the packet proves. 
" }, "evaluator_used": true, "retried": true, - "accepted_nontrivial_soft_edge": false, + "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, @@ -15538,7 +15507,7 @@ "soft_pairs": [ "issue:45672|issue:45674" ], - "prepared_review_unit_hash": "ff702842e9a59eb4704f5816e91300fe7aeae97ef96beb6f0d42857353d0252c", + "prepared_review_unit_hash": "d935e3c283a5e01ec84b71f69e1cb43ee80da1f65e7475f7356fe9c0e58a2005", "component_budget": { "node_count": 2, "item_count": 2, @@ -15560,18 +15529,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These two issues are both in the 4-bit/quantization area, but they describe different failures and likely different code paths: one is a Gemma4 audio-module uint8/torch.finfo TypeError, the other is a BitsAndBytesConfig regression where llm_int8_skip_modules drops the default lm_head exclusion and triggers an AssertionError.", - "confidence": 0.95, - "canonical_issue_reason": "issue #45674 is the better representative because it describes a broader, core BitsAndBytesConfig regression with a clearer configuration-triggered repro path.", + "summary": "These two issues both concern 4-bit inference, but they describe different failures and code paths: one is a Gemma4/audio-module `torch.finfo()` TypeError on `uint8` weights, the other is a BitsAndBytesConfig regression where `llm_int8_skip_modules` drops the default `lm_head` exclusion and triggers an AssertionError. 
They should not be clustered as duplicates.", + "confidence": 0.94, + "canonical_issue_reason": "Issue #45674 is the better canonical issue because it describes a concrete, likely upstream configuration bug with a clear mechanism (`llm_int8_skip_modules` clearing the default exclusion) and broader applicability than the Gemma4-specific symptom.", "canonical_pr_reason": null, - "best_issue_reason": "issue #45674 is the most generally useful issue to keep as the cluster representative; it is more central to the shared 4-bit inference stack than the Gemma4-specific audio-module crash.", + "best_issue_reason": "Issue #45674 is the strongest representative for the cluster: it has the clearer root-cause description and is not tied to a single model family or module layout.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:45672", "right": "issue:45674", "accept": false, - "reason": "Do not merge: the bugs are different. #45672 is a Gemma4/audio-module uint8 dtype error during NF4 inference, while #45674 is a BitsAndBytesConfig lm_head exclusion regression causing an AssertionError. Same broad area, but not the same underlying defect." + "reason": "Different underlying bugs: Gemma4 audio-module `torch.finfo()` on `uint8` weights vs. BitsAndBytesConfig `lm_head` exclusion being cleared and causing an AssertionError. Same broad area, but not the same fix." 
} ] }, @@ -15585,7 +15554,7 @@ { "label": "pull_request", "component_index": 1, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -15628,148 +15597,148 @@ "pull_request:43823|pull_request:45213", "pull_request:43251|pull_request:43254" ], - "prepared_review_unit_hash": "732503934ff0f9b9287be40d7a05351e3ecf6d58c228794510837d307c9d4fbb", + "prepared_review_unit_hash": "89399897cfe3c9dc2ab8eac4f5cb32919fa3032c844f5935fe36fe6b7f032a01", "component_budget": { "node_count": 12, "item_count": 12, "soft_pair_count": 18, - "serialized_chars": 32872, - "estimated_input_tokens": 8218, - "estimated_eval_tokens": 16692 + "serialized_chars": 32873, + "estimated_input_tokens": 8219, + "estimated_eval_tokens": 16694 }, "budget": { "node_count": 12, "item_count": 12, "soft_pair_count": 18, - "serialized_chars": 32872, - "estimated_input_tokens": 8218, - "estimated_eval_tokens": 16692 + "serialized_chars": 32873, + "estimated_input_tokens": 8219, + "estimated_eval_tokens": 16694 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "The cluster is mostly a loose similarity bucket of unrelated PRs; the only true duplicate pair is the two `fixed_cross_entropy` changes (43251 and 43254).", - "confidence": 0.79, + "summary": "This cluster is mostly a loose collection of unrelated PRs (model additions, refactors, docs, and tests). 
The only true duplicate-style pair is the `fixed_cross_entropy` kwargs fix: PRs 43251 and 43254 address the same code path and are plausibly mergeable into one change.", + "confidence": 0.96, "canonical_issue_reason": null, - "canonical_pr_reason": "If a PR must represent the cluster, 43251 is the best canonical representative of the only real duplicate pair because it is the cleaner, more complete fix for issue 43240; 43254 is an alternate minimal patch for the same code path.", + "canonical_pr_reason": "PR 43251 is the best canonical representative of the only real duplicate pair: it has the explicit issue link, a clear `fixed_cross_entropy` kwargs pass-through change, and a slightly fuller implementation context.", "best_issue_reason": null, - "best_pr_reason": "43251 is the strongest representative because it directly addresses the tracked bug, adds the supported kwargs to `cross_entropy`, and most clearly subsumes 43254.", + "best_pr_reason": "PR 43251 is the strongest representative of the duplicate pair because it directly fixes the same `loss_utils.fixed_cross_entropy` path as 43254 and is the more explicit, issue-linked version of the change.", "soft_edge_verdicts": [ { "left": "pull_request:39895", "right": "pull_request:43424", "accept": false, - "reason": "Different features in different areas: adding VideoPrism model support vs adding an ExecuTorch dynamic-shape test." + "reason": "Videoprism model/docs addition and an executorch dynamic-shapes test are unrelated changes." }, { "left": "pull_request:41855", "right": "pull_request:43823", "accept": false, - "reason": "Unrelated model areas: Mistral tokenizer methods vs adding MobileLLM model code." + "reason": "A tokenizer method addition for Mistral common is unrelated to adding the MobileLLM model." }, { "left": "pull_request:41855", "right": "pull_request:43424", "accept": false, - "reason": "Tokenizer utility fix vs export-test addition; not the same underlying bug or change." 
+ "reason": "Tokenizer API work and executorch export tests touch different code paths." }, { "left": "pull_request:42668", "right": "pull_request:43823", "accept": false, - "reason": "Both are processor/model plumbing, but they target different models and different behaviors, so not duplicates." + "reason": "Processor auto-loading robustness changes are unrelated to the MobileLLM model addition." }, { "left": "pull_request:43251", "right": "pull_request:43823", "accept": false, - "reason": "Cross-entropy loss kwargs fix is unrelated to adding a new MobileLLM model." + "reason": "Loss-function kwargs plumbing is unrelated to adding a new model implementation." }, { "left": "pull_request:43251", "right": "pull_request:44827", "accept": false, - "reason": "Loss helper change vs Mistral4 test cleanup/fixes; different code paths and purposes." + "reason": "The cross-entropy kwargs fix and Mistral4 test/model changes are separate bugs/changes." }, { "left": "pull_request:43424", "right": "pull_request:44827", "accept": false, - "reason": "ExecuTorch dynamic-shape test and Mistral4 test fixes are not the same change." + "reason": "Executorch dynamic-shape export tests are unrelated to Mistral4 test fixes." }, { "left": "pull_request:43424", "right": "pull_request:43823", "accept": false, - "reason": "Export testing for decoder-only LM vs new model implementation; unrelated." + "reason": "Dynamic-shape export testing and MobileLLM model addition are unrelated." }, { "left": "pull_request:43823", "right": "pull_request:45655", "accept": false, - "reason": "New model addition vs core class-attribute resolution fix; completely different underlying changes." + "reason": "A new model addition and a config-resolution order fix are unrelated." }, { "left": "pull_request:43823", "right": "pull_request:43995", "accept": false, - "reason": "MobileLLM model addition is unrelated to Falcon output-interface refactoring." 
+ "reason": "MobileLLM model addition and Falcon output-interface refactoring are unrelated." + }, + { + "left": "pull_request:43823", + "right": "pull_request:44827", + "accept": false, + "reason": "Adding MobileLLM and fixing Mistral4 tests are separate model efforts." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45213", + "accept": false, + "reason": "Model addition and the model-creation skill docs are not the same change." }, { "left": "pull_request:43995", "right": "pull_request:45213", "accept": false, - "reason": "Falcon refactor and model-creation skill are not the same bug or mergeable code change." + "reason": "Falcon refactoring and a new-model skill document are unrelated." }, { "left": "pull_request:43995", "right": "pull_request:44827", "accept": false, - "reason": "Falcon refactoring and Mistral4 test fixes affect different models and code paths." + "reason": "Falcon interface refactoring and Mistral4 test fixes do not address the same bug." }, { "left": "pull_request:44827", "right": "pull_request:45170", "accept": false, - "reason": "Mistral4 test fixes vs a CLIP-like `layrnorm` renaming change; unrelated." + "reason": "Mistral4 test/model updates and CLIP-like `layrnorm` renaming are unrelated." }, { "left": "pull_request:44827", "right": "pull_request:45655", "accept": false, - "reason": "Model-specific test fixes do not match the global config-resolution fix." + "reason": "Mistral4 test/model changes and config-resolution ordering are unrelated." }, { "left": "pull_request:43823", "right": "pull_request:44827", "accept": false, - "reason": "New MobileLLM model code and Mistral4 test fixes are different artifacts with no shared bug." - }, - { - "left": "pull_request:44827", - "right": "pull_request:45213", - "accept": false, - "reason": "Mistral4 test fixes and the model-creation skill are unrelated; not a duplicate pair." 
- }, - { - "left": "pull_request:43823", - "right": "pull_request:45213", - "accept": false, - "reason": "Adding MobileLLM and adding an AI skill for model creation are separate changes." + "reason": "Adding MobileLLM and fixing Mistral4 tests are different model-specific changes." }, { "left": "pull_request:43251", "right": "pull_request:43254", "accept": true, - "reason": "Same underlying bug in `fixed_cross_entropy`, same file, same tracked issue 43240, and both patches implement the same kwargs pass-through to `nn.functional.cross_entropy`." + "reason": "Both PRs patch `loss_utils.fixed_cross_entropy` to forward supported `cross_entropy` kwargs for the same issue; they target the same code path and are mergeable into one fix." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is well grounded in the packet, and the soft-edge verdicts are conservative. The only clearly supported duplicate pair is 43251/43254: same file, same issue target (43240), and both patch `fixed_cross_entropy` with the same kwargs-forwarding intent. The rest are reasonably described as unrelated or only loosely similar." + "feedback": "The summary is grounded in the packet and stays conservative overall. The lone accepted pair, 43251/43254, is supported by the shared `loss_utils.fixed_cross_entropy` edit and common issue target 43240, and the other pairs are reasonably described as unrelated based on their filenames/diffs. No obvious overstatement beyond the evidence in the packet." 
}, "evaluator_used": true, "retried": false, @@ -15780,7 +15749,7 @@ { "label": "pull_request", "component_index": 2, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -15796,7 +15765,7 @@ "soft_pairs": [ "pull_request:40546|pull_request:42781" ], - "prepared_review_unit_hash": "14e1014f41ab9eb6f72c646db91ed65b03c2760e8171872932721daadd258e7d", + "prepared_review_unit_hash": "0e326ed65acd29634b8f23ef19080e471471d8101d5e8a767ca63fa5cce0320e", "component_budget": { "node_count": 2, "item_count": 2, @@ -15818,18 +15787,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Both pull requests are about VibeVoice, but they are not the same change: PR 40546 is the initial VibeVoice implementation, while PR 42781 adds a separate VibeVoice Realtime/acoustic-tokenizer stack and touches some shared integration files as part of that extension.", - "confidence": 0.86, + "summary": "These PRs overlap on the VibeVoice family and shared plumbing, but they target different changes: one adds the base VibeVoice model, the other adds VibeVoice Realtime plus an acoustic tokenizer and generation changes. They are related, not duplicates.", + "confidence": 0.92, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 42781 is the broader, newer VibeVoice-related change and appears to subsume the base model docs/integration while adding realtime-specific components, so it is the better representative PR for the cluster.", + "canonical_pr_reason": "PR 40546 is the cleaner canonical representative for the core VibeVoice implementation: it introduces the base model, docs, and auto-mapping integration. 
PR 42781 is a distinct follow-up feature focused on realtime support and the acoustic tokenizer.", "best_issue_reason": null, - "best_pr_reason": "PR 42781 is the most complete artifact in this cluster and reflects the later, more expansive VibeVoice work; PR 40546 is an earlier foundational implementation rather than the same concrete change.", + "best_pr_reason": "PR 40546 is the best single representative of the underlying core feature because it covers the foundational VibeVoice integration. PR 42781 is a separate extension rather than the same change set.", "soft_edge_verdicts": [ { "left": "pull_request:40546", "right": "pull_request:42781", "accept": false, - "reason": "Reject: these look like related but distinct PRs. 40546 implements VibeVoice itself, whereas 42781 adds VibeVoice Realtime plus an acoustic tokenizer and related generation/processing updates. Shared filenames alone are not enough to treat them as duplicates." + "reason": "Do not merge: 40546 implements the base VibeVoice model, while 42781 adds VibeVoice Realtime and an acoustic tokenizer. Shared files reflect common infrastructure, but the concrete feature scope differs and they would not plausibly be one PR." 
} ] }, @@ -15843,7 +15812,7 @@ { "label": "pull_request", "component_index": 3, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -15859,12 +15828,12 @@ "soft_pairs": [ "pull_request:40755|pull_request:41224" ], - "prepared_review_unit_hash": "460a1864c2ffa2a4b74279a87fe8442e98559a726e9a6343be11827c5a47e4c1", + "prepared_review_unit_hash": "90b3197db5b5e65697c0274d8104792e8877b24c2867c21ce3f0c565e7501dc1", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 4676, + "serialized_chars": 4675, "estimated_input_tokens": 1169, "estimated_eval_tokens": 2594 }, @@ -15872,7 +15841,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 4676, + "serialized_chars": 4675, "estimated_input_tokens": 1169, "estimated_eval_tokens": 2594 }, @@ -15881,18 +15850,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These two PRs are unrelated feature additions in different model families: TimesFM covariate forecasting vs. DINOv3 image-classification support.", + "summary": "The two pull requests are unrelated: one adds TimesFM covariate forecasting, the other adds DINOv3 image classification support. 
They touch different models, different code paths, and different documentation/tests.", "confidence": 0.99, "canonical_issue_reason": null, - "canonical_pr_reason": "No canonical PR: they target different codepaths and different models, so they are not duplicates.", + "canonical_pr_reason": "PR 41224 is the clearer standalone feature PR, with a focused DINOv3 image-classification scope and substantial review activity.", "best_issue_reason": null, - "best_pr_reason": "If a single representative PR is needed, 41224 is the more self-contained feature addition with an explicit model-support path and tests; 40755 is a separate, unrelated feature.", + "best_pr_reason": "PR 41224 is the best representative of the cluster only in the sense that it is more self-contained and better-scoped; it is not a duplicate of PR 40755.", "soft_edge_verdicts": [ { "left": "pull_request:40755", "right": "pull_request:41224", "accept": false, - "reason": "Different model families and different changesets; they do not fix the same bug or implement the same feature, so they should not be merged as duplicates." + "reason": "Different features for different model families (TimesFM covariates vs DINOv3 image classification); no shared bug or mergeable code-path change." 
} ] }, @@ -15906,7 +15875,7 @@ { "label": "pull_request", "component_index": 4, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -15969,7 +15938,7 @@ { "label": "pull_request", "component_index": 5, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -15985,7 +15954,7 @@ "soft_pairs": [ "pull_request:41797|pull_request:45186" ], - "prepared_review_unit_hash": "7c5868338bc92203958bdf57e7276f96fc5d51ef6aaaebd352052b120c6c9661", + "prepared_review_unit_hash": "bd95b45c458a92b821abe01f81e0db7a79f0363b9f20801d8fec555ef94e95dc", "component_budget": { "node_count": 2, "item_count": 2, @@ -16007,18 +15976,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Both PRs are new-model onboarding changes, but they add different models and different code paths: DeepSeekOCR vs Isaac. The overlap is mostly repository scaffolding and shared auto-registration files, not the same underlying feature or bug fix.", + "summary": "These are two separate model-addition pull requests for different code paths (DeepSeekOCR vs Isaac). 
The overlap in boilerplate auto-registration/docs files is incidental, not evidence of a duplicate change.", "confidence": 0.98, "canonical_issue_reason": null, - "canonical_pr_reason": null, + "canonical_pr_reason": "PR 41797 is the stronger representative of the cluster by review activity and overall maturity, but it is a DeepSeekOCR integration and not the same change as Isaac.", "best_issue_reason": null, - "best_pr_reason": null, + "best_pr_reason": "PR 41797 is the best standalone PR in the cluster on completeness/review signal, but it should not be conflated with PR 45186 because they implement different models.", "soft_edge_verdicts": [ { "left": "pull_request:41797", "right": "pull_request:45186", "accept": false, - "reason": "Different model additions with distinct configs, processors, and model code. Shared framework files are expected for any new model; this is not the same concrete change and would not be merged as one PR." + "reason": "Reject: these are different model integrations with different filenames, titles, and implementation targets. Shared auto/registry/doc scaffolding is expected and does not indicate the same underlying change." } ] }, @@ -16032,7 +16001,7 @@ { "label": "pull_request", "component_index": 6, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -16048,7 +16017,7 @@ "soft_pairs": [ "pull_request:42975|pull_request:42976" ], - "prepared_review_unit_hash": "98239013c9cb2bed2a1fa08096b03e7d40dc8ede89b7ef83d0e0da3b9b0a67f1", + "prepared_review_unit_hash": "35e5b2fdc40d7c9f05e53aaa501aaf19073439ab9907d89d10fa14d979dff577", "component_budget": { "node_count": 2, "item_count": 2, @@ -16070,18 +16039,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Two open PRs both update GitHub Actions workflow dependencies, but they target different action families and different upgrade goals. 
They overlap in workflow files, yet they are not the same concrete change and should not be merged as duplicates.", - "confidence": 0.94, + "summary": "Two open PRs both update GitHub Actions, but they target different action families and different maintenance goals: one is a Node 24 compatibility sweep for core workflow actions, the other bumps docker/build-push-action and a few related workflow dependencies. They are related in theme but not the same underlying change, so the soft duplicate edge should be rejected.", + "confidence": 0.89, "canonical_issue_reason": null, - "canonical_pr_reason": "PR #42975 is the broader, more specific workflow-maintenance change: it upgrades core GitHub Actions used across many workflows for Node 24 compatibility, which makes it the better representative of this cluster.", + "canonical_pr_reason": "pull_request:42975 is the better canonical representative because it is the more central, broader workflow-actions compatibility upgrade and affects many common CI workflows.", "best_issue_reason": null, - "best_pr_reason": "PR #42975 is the best representative because its scope is clearer and broader, while #42976 is a narrower docker/build-push-action version bump with a different purpose.", + "best_pr_reason": "pull_request:42975 is the best overall representative of the cluster: it has the clearest single migration goal (Node 24-compatible GitHub Actions) and the wider cross-workflow impact.", "soft_edge_verdicts": [ { "left": "pull_request:42975", "right": "pull_request:42976", "accept": false, - "reason": "Both are workflow dependency updates, but #42975 upgrades actions/* for Node 24 compatibility while #42976 upgrades docker/build-push-action and related release workflows. They touch overlapping files without fixing the same underlying problem." 
+ "reason": "They both update workflow actions, but 42975 upgrades core actions/checkout, cache, and upload-artifact for Node 24 compatibility, while 42976 bumps docker/build-push-action versions in docker/image workflows. Different action ecosystems and different concrete changes, so not the same PR." } ] }, @@ -16095,7 +16064,7 @@ { "label": "pull_request", "component_index": 7, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -16103,14 +16072,12 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-42978-7", + "cluster_id": "cluster-42978-5", "nodes": [ "pull_request:42978", "pull_request:43448", "pull_request:43451", - "pull_request:44601", "pull_request:45490", - "pull_request:45626", "pull_request:45630" ], "soft_pairs": [ @@ -16120,99 +16087,78 @@ "pull_request:43448|pull_request:43451", "pull_request:43448|pull_request:45490", "pull_request:43451|pull_request:45490", - "pull_request:44601|pull_request:45626", - "pull_request:45490|pull_request:45626", - "pull_request:45490|pull_request:45630", - "pull_request:45626|pull_request:45630" + "pull_request:45490|pull_request:45630" ], - "prepared_review_unit_hash": "35bff1064e07742cdc192f3ac01a0c9f4c14506ced14c8b1c9d8b8a6a968dd7c", + "prepared_review_unit_hash": "bc78d513262d493a9a34744ebf0ff31c03f8bd10ecd28f2436312014ed33b058", "component_budget": { - "node_count": 7, - "item_count": 7, - "soft_pair_count": 10, - "serialized_chars": 23982, - "estimated_input_tokens": 5996, - "estimated_eval_tokens": 12248 + "node_count": 5, + "item_count": 5, + "soft_pair_count": 7, + "serialized_chars": 17625, + "estimated_input_tokens": 4407, + "estimated_eval_tokens": 9070 }, "budget": { - "node_count": 7, - "item_count": 7, - "soft_pair_count": 10, - "serialized_chars": 23982, - "estimated_input_tokens": 5996, - "estimated_eval_tokens": 12248 + "node_count": 5, + "item_count": 5, + 
"soft_pair_count": 7, + "serialized_chars": 17625, + "estimated_input_tokens": 4407, + "estimated_eval_tokens": 9070 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are not duplicates: most are separate model-addition PRs for different models, and one PR is a distributed pipeline-parallel feature. They share some auto/doc filenames, but the underlying changes are distinct.", - "confidence": 0.97, + "summary": "All five PRs are separate model-addition changes, not duplicates. The only overlap is the usual Transformers boilerplate (docs, auto mappings, tests), but each PR targets a different model family and codepath.", + "confidence": 0.98, "canonical_issue_reason": null, - "canonical_pr_reason": "No canonical PR: the cluster mixes unrelated model integrations and an infrastructure feature, so no single PR subsumes the rest.", + "canonical_pr_reason": "No canonical PR: ViT NEPA, Molmo, Molmo2, CTSM, and Kimi2-6 are distinct model additions with different configs/modeling files and separate targets.", "best_issue_reason": null, - "best_pr_reason": "No strong global best PR exists; if forced, 45626 is the broadest model-support PR, but it still does not represent the other model families or the PP infrastructure change.", + "best_pr_reason": "No single best PR for deduplication; each PR is a standalone model integration rather than a duplicate of another.", "soft_edge_verdicts": [ { "left": "pull_request:42978", "right": "pull_request:43448", "accept": false, - "reason": "Different model additions: ViT NEPA vs Molmo. Shared docs/auto wiring is generic, not the same change." + "reason": "ViT NEPA vs Molmo are unrelated model additions; shared auto/docs boilerplate is not the same change." }, { "left": "pull_request:42978", "right": "pull_request:43451", "accept": false, - "reason": "Different model additions: ViT NEPA vs Molmo2. No shared code-path or bugfix." 
+ "reason": "ViT NEPA and Molmo2 add different models with different implementations and artifacts." }, { "left": "pull_request:42978", "right": "pull_request:45490", "accept": false, - "reason": "Different model families: ViT NEPA vs CTSM. Similar repo plumbing only." + "reason": "ViT NEPA and CTSM are separate model integrations, not one concrete bugfix/change." }, { "left": "pull_request:43448", "right": "pull_request:43451", "accept": false, - "reason": "Molmo and Molmo2 are distinct model releases; not the same concrete implementation change." + "reason": "Molmo and Molmo2 are different model families/releases with different codepaths; similarity is only boilerplate." }, { "left": "pull_request:43448", "right": "pull_request:45490", "accept": false, - "reason": "Molmo vs CTSM are unrelated model additions." + "reason": "Molmo and CTSM are distinct additions; overlapping docs/auto files do not indicate the same underlying change." }, { "left": "pull_request:43451", "right": "pull_request:45490", "accept": false, - "reason": "Molmo2 vs CTSM are unrelated model additions." - }, - { - "left": "pull_request:44601", - "right": "pull_request:45626", - "accept": false, - "reason": "PP support infrastructure vs PP-FormulaNet model support are different code paths and changes." - }, - { - "left": "pull_request:45490", - "right": "pull_request:45626", - "accept": false, - "reason": "CTSM and PP-FormulaNet are different model integrations; shared auto/docs edits are not enough." + "reason": "Molmo2 and CTSM add different models and cannot plausibly be merged as one change." }, { "left": "pull_request:45490", "right": "pull_request:45630", "accept": false, - "reason": "CTSM vs Kimi2-6 are separate model additions." - }, - { - "left": "pull_request:45626", - "right": "pull_request:45630", - "accept": false, - "reason": "PP-FormulaNet and Kimi2-6 are different model integrations, not one mergeable PR." 
+ "reason": "CTSM and Kimi2-6 are unrelated model additions; shared transformer scaffolding is expected boilerplate." } ] }, @@ -16226,7 +16172,7 @@ { "label": "pull_request", "component_index": 8, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -16245,7 +16191,7 @@ "pull_request:43291|pull_request:44815", "pull_request:43665|pull_request:44815" ], - "prepared_review_unit_hash": "41e5395d41b3a49f378b19fe4921ac86983a40eedca17aec64c9d01154d205b9", + "prepared_review_unit_hash": "569d063804485ce8219b8fb25bf5d9fdb14c3efa5548a4467dfbc41349c125fc", "component_budget": { "node_count": 3, "item_count": 3, @@ -16267,30 +16213,30 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are three unrelated pull requests: one Whisper test fix, one apparent test-only/prank edit in CLIP/ViT, and one substantive FP8 dequantization fix. The only plausible canonical PR is the FP8 dequant fix because it contains the real code-path change; the others are not duplicates of it or each other.", - "confidence": 0.93, + "summary": "These three PRs are not duplicates of one another: two are test-only edits with placeholder assertions, while the third is a substantive FP8/dequantization fix in a different code path. The cluster looks like low-quality soft similarity rather than one underlying change.", + "confidence": 0.97, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 44815 is the only substantive implementation fix, centered on FP8 dequant/padding and model loading behavior across core code paths. 
The other PRs are test-only or test-focused and do not appear to address the same underlying change.", + "canonical_pr_reason": "PR 44815 is the best representative because it contains the only clearly substantive implementation fix (model loading / FP8 dequant padding) and touches real runtime code, unlike the other two PRs which are mostly test placeholders.", "best_issue_reason": null, - "best_pr_reason": "PR 44815 is the strongest standalone candidate because it fixes a concrete runtime/quantization path and has the broadest real impact. PR 43291 is narrow to Whisper tests, and PR 43665 is not a credible code fix.", + "best_pr_reason": "PR 44815 is the strongest single PR in the set: it addresses a concrete technical problem with code changes in core loading and finegrained FP8 integration, with supporting tests. The other PRs do not appear to fix the same bug/change.", "soft_edge_verdicts": [ { "left": "pull_request:43291", "right": "pull_request:43665", "accept": false, - "reason": "Both touch tests, but they target different models and different problems. Whisper tokenizer/model test fixes are not the same underlying bug as CLIP/ViT test edits." + "reason": "Different models and different edits: Whisper tokenization/runtime behavior versus CLIP/ViT test stubs. They do not describe the same bug or change." }, { "left": "pull_request:43291", "right": "pull_request:44815", "accept": false, - "reason": "Whisper test changes are unrelated to FP8 dequantization/core model loading behavior. No shared code path or fix target." + "reason": "Whisper decode/timestamp handling is unrelated to FP8 dequantization and model-loading padding fixes; no shared concrete code path." }, { "left": "pull_request:43665", "right": "pull_request:44815", "accept": false, - "reason": "Despite superficial similarity, these are not the same change: one is test-only edits in CLIP/ViT, the other fixes FP8 dequant/padding and loading logic in production code." 
+ "reason": "One is test placeholder churn for CLIP/ViT, the other is a real FP8 dequant/loading fix. They are not mergeable as one fix and do not target the same issue." } ] }, @@ -16304,7 +16250,7 @@ { "label": "pull_request", "component_index": 9, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -16323,12 +16269,12 @@ "pull_request:43488|pull_request:45476", "pull_request:43532|pull_request:45476" ], - "prepared_review_unit_hash": "3b902df82d97b280d7a5fe6b88ef2c6484cb25e3d812a03a8aaaba1fc1332302", + "prepared_review_unit_hash": "a5f36c85c1d79de8048a3aad1a6bc6df55d756bfcb36a676874dd9f316ae7458", "component_budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 3, - "serialized_chars": 7467, + "serialized_chars": 7468, "estimated_input_tokens": 1867, "estimated_eval_tokens": 3990 }, @@ -16336,7 +16282,7 @@ "node_count": 3, "item_count": 3, "soft_pair_count": 3, - "serialized_chars": 7467, + "serialized_chars": 7468, "estimated_input_tokens": 1867, "estimated_eval_tokens": 3990 }, @@ -16345,30 +16291,30 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "All three pull requests are unrelated test/bot-infra changes and should not be clustered as duplicates. Shared filenames (notably a vit test) appear incidental and do not indicate the same underlying fix.", - "confidence": 0.98, + "summary": "These three PRs are unrelated synthetic/don't-merge changes that only overlap on a shared test filename. 
They do not fix the same bug or implement the same change, so none should be clustered as duplicates.", + "confidence": 0.94, "canonical_issue_reason": null, - "canonical_pr_reason": "No single canonical PR: the three PRs target different areas and purposes (setup/dependency check, workflow/comment CI changes, and a CI-caller/tokenizer edit), so there is no common underlying change to represent.", + "canonical_pr_reason": "No canonical PR: the three PRs target different areas (packaging/versioning, comment-driven CI workflow, and a PR CI caller plus unrelated edits).", "best_issue_reason": null, - "best_pr_reason": "None is a strong representative duplicate candidate. If forced, the most substantive is 43532 because it changes CI/workflow logic, but it is still not a duplicate of the others.", + "best_pr_reason": "PR 43532 is the most coherent standalone change and the closest to a real repository fix, but it is still not a duplicate of the others.", "soft_edge_verdicts": [ { "left": "pull_request:43488", "right": "pull_request:43532", "accept": false, - "reason": "Different purposes and code paths: one is a setup/dependency/repo-bot check, the other rewires self-comment CI/workflows. Shared vit test filename looks incidental." + "reason": "Different changes: 43488 is setup/dependency/version-bump oriented, while 43532 rewrites CI comment handling and related workflow logic. Shared vit test filename is incidental." }, { "left": "pull_request:43488", "right": "pull_request:45476", "accept": false, - "reason": "Unrelated changes: setup/dependency versioning versus adding a PR CI caller plus CLIP/tokenizer edits. No same underlying bug or mergeable fix." + "reason": "No same underlying bug/change: 43488 updates packaging metadata and deps, whereas 45476 adds a new PR CI workflow and unrelated code/test edits." 
}, { "left": "pull_request:43532", "right": "pull_request:45476", "accept": false, - "reason": "Both touch CI-related files, but they address different workflows and unrelated dummy edits; not the same concrete problem and not something to merge into one PR." + "reason": "Both touch CI/testing files, but they address different code paths: comment-triggered self-CI vs a new PR CI caller workflow. Not mergeable as one PR." } ] }, @@ -16382,7 +16328,7 @@ { "label": "pull_request", "component_index": 10, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -16398,12 +16344,12 @@ "soft_pairs": [ "pull_request:43612|pull_request:43613" ], - "prepared_review_unit_hash": "82421673d006c06fd573c9be9173cb64a83211e85e987f97614b050bd94abaae", + "prepared_review_unit_hash": "83ab1c3708109244e9f2657acfdfe05127621e27e7a2b4f88ad2b555f9d07743", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 6082, + "serialized_chars": 6081, "estimated_input_tokens": 1521, "estimated_eval_tokens": 3298 }, @@ -16411,7 +16357,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 6082, + "serialized_chars": 6081, "estimated_input_tokens": 1521, "estimated_eval_tokens": 3298 }, @@ -16420,18 +16366,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are related new-feature PRs for pipeline additions, but they target different tasks and different model stacks: promptable concept segmentation vs promptable visual segmentation. The overlap is mostly shared docs/registry plumbing, not the same underlying change.", - "confidence": 0.95, + "summary": "These are two separate feature PRs for different pipelines: promptable concept segmentation vs promptable visual segmentation. 
They share scaffolding changes in docs/registry utilities, but they target different APIs, filenames, and model/test sets, so they are not duplicates.", + "confidence": 0.97, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 43613 is the broader cluster representative: it adds the promptable visual segmentation pipeline with wider model/test integration, while 43612 is a separate concept-segmentation feature.", + "canonical_pr_reason": "PR 43613 is the broader, more integrated feature addition among the two separate pipeline PRs, but it is not a duplicate of 43612.", "best_issue_reason": null, - "best_pr_reason": "PR 43613 is the strongest representative of the cluster because it has the larger surface area and more complete integration, but it should not be treated as a duplicate of 43612.", + "best_pr_reason": "If a single representative is needed, 43613 is the better one to surface because it covers a fuller pipeline integration and more model/test support; however, it remains a distinct change from 43612.", "soft_edge_verdicts": [ { "left": "pull_request:43612", "right": "pull_request:43613", "accept": false, - "reason": "Different feature requests and code paths: concept segmentation vs visual segmentation. Shared docs, __init__ exports, and auto-mapping changes are expected for parallel pipeline additions, but they do not indicate the same underlying bug/change or a single mergeable PR." + "reason": "Different concrete features and code paths: 43612 adds Promptable Concept Segmentation, while 43613 adds Promptable Visual Segmentation with different pipeline modules, docs, model files, and tests. Shared registry/docs edits are insufficient to treat them as the same change." 
} ] }, @@ -16445,7 +16391,7 @@ { "label": "pull_request", "component_index": 11, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -16461,7 +16407,7 @@ "soft_pairs": [ "pull_request:43651|pull_request:43663" ], - "prepared_review_unit_hash": "dcac9d568842f999e84830b91153bcdade4a2fe1f5545263f1ce765d56cd3ba9", + "prepared_review_unit_hash": "360e4f4563064f8ad354ecb55675fc7b3756e32b9a9dbff51c607fd893ea267b", "component_budget": { "node_count": 2, "item_count": 2, @@ -16483,18 +16429,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are two separate Trainer refactors in the same file, but they address different code paths: gradient-accumulation loss scaling vs. signature-column filtering for model inputs. They should not be clustered as duplicates.", - "confidence": 0.98, + "summary": "Two separate Trainer extensibility PRs in the same file: one changes gradient-accumulation loss scaling behavior, the other factors out signature-column inspection for column filtering. They are related to `trainer.py` but address different code paths and bugs.", + "confidence": 0.95, "canonical_issue_reason": null, - "canonical_pr_reason": null, + "canonical_pr_reason": "Neither PR is a duplicate of the other. PR #43651 is about loss scaling during training steps for custom trainers; PR #43663 is about signature-column extraction for filtering inputs, especially with PEFT models.", "best_issue_reason": null, - "best_pr_reason": null, + "best_pr_reason": "PR #43651 is the best representative for the loss-scaling change because it introduces the new `_loss_is_scaled_for_ga` hook in the training step path. PR #43663 is the best representative for the column-filtering refactor because it adds `_get_signature_columns` as a separate override point. 
Each is the clearest canonical item for its own change, but they are not the same underlying PR.", "soft_edge_verdicts": [ { "left": "pull_request:43651", "right": "pull_request:43663", "accept": false, - "reason": "Different functionality and different failure modes: 43651 changes loss scaling during training_step, while 43663 adds an overridable helper for input signature column filtering. They share a file but not the same underlying bug or mergeable change." + "reason": "Both touch `trainer.py`, but one changes gradient-accumulation loss normalization and the other changes input signature column filtering. Different behaviors, different call sites, and not the same concrete bug or mergeable change." } ] }, @@ -16508,7 +16454,7 @@ { "label": "pull_request", "component_index": 12, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -16526,7 +16472,7 @@ "pull_request:43656|pull_request:43842", "pull_request:43836|pull_request:43842" ], - "prepared_review_unit_hash": "a146a4b714e9e3aa09c479d4fdc23daab7efd3a04617b45738b4b57e2e465f9e", + "prepared_review_unit_hash": "bb74750cd955e06f1edf487578f6147a88c95ac2d6d7f100ae43de664b0c3b5c", "component_budget": { "node_count": 3, "item_count": 3, @@ -16548,30 +16494,30 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Two PRs are near-duplicates around the same `TypeAdapter` annotation/runtime-import problem in `src/transformers/cli/serve.py`; the third is a broader TYPE_CHECKING/future-annotations fix that looks related but not the same concrete bug. The cleanest representative is the smallest, most targeted PR.", - "confidence": 0.69, + "summary": "All three PRs are variations of the same CLI type-annotation NameError fix in `src/transformers/cli/serve.py`, with 43842 being the narrowest and clearest patch. 
43836 is broader but still targets the same runtime annotation-evaluation problem.", + "confidence": 0.82, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 43842 is the most focused fix for the concrete runtime `TypeAdapter` NameError: it imports `TypeAdapter` only under `TYPE_CHECKING` and string-literalizes the annotation, directly matching the stated bug and avoiding broader unrelated annotation churn.", + "canonical_pr_reason": "`pull_request:43842` is the most focused, clearly scoped fix for the `TypeAdapter` NameError: it imports `TypeAdapter` only under `TYPE_CHECKING` and quotes the annotation, directly matching the reported runtime-import failure.", "best_issue_reason": null, - "best_pr_reason": "PR 43842 is the best representative because it is the most surgical change for the reported failure mode, has an explicit matching issue target, and directly addresses the `TypeAdapter` NameError path without extra refactoring.", + "best_pr_reason": "`pull_request:43842` is the best representative PR because it addresses the concrete failure mode with the smallest change set and the clearest title/body alignment to the bug.", "soft_edge_verdicts": [ { "left": "pull_request:43656", "right": "pull_request:43842", - "accept": false, - "reason": "Same file and same general theme, but 43656 is a broader postponed-annotations cleanup tied to a different issue target. It does not look like the same concrete bug fix as 43842's specific `TypeAdapter`/pydantic import workaround." + "accept": true, + "reason": "Both patch the same `serve.py` annotation-evaluation NameError around `TypeAdapter`/CLI import time. They use different implementations (`from __future__ import annotations` vs quoted annotations), but they are solving the same underlying bug and could plausibly be merged into one fix." 
}, { "left": "pull_request:43836", "right": "pull_request:43842", "accept": true, - "reason": "Both target the same underlying `TypeAdapter` NameError in `serve.py` and use the same core remedy of deferring annotation evaluation / string-literalizing the type. They are plausibly mergeable into one PR for the same code-path problem." + "reason": "Both target the same `transformers cli serve` type-annotation NameError in the same file. `43836` is broader and more refactor-like, but its quoted-TypeAdapter workaround is the same underlying fix class as `43842`." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is mostly grounded in the packet. The distinction between 43842 as the smallest, most targeted fix and 43836 as the broader annotations cleanup is conservative enough, and the accept/deny pairings are supported by the shared-file/shared-target evidence. I don\u2019t see an overstatement that would require rejection." + "feedback": "The summary is grounded in the packet: all three PRs touch `src/transformers/cli/serve.py` and are clearly about the same CLI `TypeAdapter`/annotation NameError, with `43842` the narrowest fix. The soft-edge verdicts are also conservative enough; they describe the same underlying bug without claiming identical implementations." 
}, "evaluator_used": true, "retried": false, @@ -16582,7 +16528,7 @@ { "label": "pull_request", "component_index": 13, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -16648,7 +16594,7 @@ { "label": "pull_request", "component_index": 14, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 2, "status": "reviewed", @@ -16706,8 +16652,8 @@ "pull_request:44019|pull_request:44071", "pull_request:44068|pull_request:44116", "pull_request:44068|pull_request:44722", - "pull_request:44018|pull_request:44116", "pull_request:44071|pull_request:44722", + "pull_request:44018|pull_request:44116", "pull_request:44018|pull_request:44086", "pull_request:44018|pull_request:44085", "pull_request:44068|pull_request:44086", @@ -16760,298 +16706,322 @@ "pull_request:44044|pull_request:44129", "pull_request:44044|pull_request:44154" ], - "prepared_review_unit_hash": "5d52e5d46919ac290eb4495faa9e05e58a8050fd052750de61be38eb1ad68b3b", + "prepared_review_unit_hash": "0e5808184fb4085c52130d8e1120ad4fe868c91b59dabfafc01381b907f7ad61", "component_budget": { "node_count": 28, "item_count": 28, "soft_pair_count": 78, - "serialized_chars": 84900, - "estimated_input_tokens": 21225, - "estimated_eval_tokens": 42706 + "serialized_chars": 84884, + "estimated_input_tokens": 21221, + "estimated_eval_tokens": 42698 }, "budget": { "node_count": 27, "item_count": 27, "soft_pair_count": 72, - "serialized_chars": 75096, - "estimated_input_tokens": 18774, - "estimated_eval_tokens": 37804 + "serialized_chars": 75082, + "estimated_input_tokens": 18771, + "estimated_eval_tokens": 37798 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a broad theme of model-by-model output-tracing refactors, not one duplicate set. 
The only clear duplicate-style overlaps are within the GPT-J/CodeGen and GPT-Neo branches; most other pairs are different models and should stay separate.", - "confidence": 0.82, + "summary": "This cluster is mostly a set of model-specific output-tracing refactors under the same umbrella issue, but the concrete code paths are usually different. The only clear duplicates are within GPTJ/CodeGen and GPT-Neo; the rest should stay separate.", + "confidence": 0.94, "canonical_issue_reason": null, - "canonical_pr_reason": "Best representative of the strongest duplicate subcluster is PR 44722: it is a later, more complete GPT-J/CodeGen standardized-output-tracing refactor, shares the same concrete code paths as the earlier GPT-J variants, and has the clearest overlap with the accepted duplicate candidates.", + "canonical_pr_reason": "PR 44066 is the best canonical representative: it has an explicit tracking issue, touches the core GPTJ/CodeGen output-tracing refactor, and has a true duplicate-style overlap with 44722.", "best_issue_reason": null, - "best_pr_reason": "PR 44722 is the best single PR to anchor triage for the true duplicates in this cluster because it covers the shared GPT-J/CodeGen refactor most completely and appears to supersede the earlier GPT-J-only iterations.", + "best_pr_reason": "PR 44066 is the strongest global representative for this cluster because it is the most complete concrete implementation of the shared GPTJ/CodeGen output-tracing change and cleanly anchors the duplicate set.", "soft_edge_verdicts": [ { "left": "pull_request:43996", "right": "pull_request:44085", "accept": false, - "reason": "Different model families and code paths (CVT/FNet vs GPT-J); shared tracking issue alone is not enough." + "reason": "Same umbrella refactor theme, but different models and code paths (CVT/FNet vs GPTJ); not one mergeable change." 
}, { "left": "pull_request:43996", "right": "pull_request:44044", "accept": false, - "reason": "Different models and implementation paths; both are output-tracing refactors but not the same change." + "reason": "Different architectures and files; only the shared issue label matches, not the underlying bug/change." }, { "left": "pull_request:44066", "right": "pull_request:44085", "accept": true, - "reason": "Both are GPT-J output-tracing refactors in the same file/code path; they look like overlapping iterations of the same change." + "reason": "Both are GPTJ output-tracing refactors in the same file; 44085 looks like the GPTJ slice of 44066's broader GPTJ/CodeGen change." }, { "left": "pull_request:44007", "right": "pull_request:44072", "accept": false, - "reason": "RegNet/ResNet-family refactor versus EfficientNet; same initiative, different concrete code changes." + "reason": "Both are standardized output-tracing refactors, but for different vision models and separate code paths." }, { "left": "pull_request:44072", "right": "pull_request:44722", "accept": false, - "reason": "EfficientNet output tracing and GPT-J/CodeGen output tracing are unrelated model implementations." + "reason": "EfficientNet output tracing and GPTJ/CodeGen output tracing are unrelated model-specific changes." }, { "left": "pull_request:44066", "right": "pull_request:44072", "accept": false, - "reason": "GPT-J/CodeGen refactor versus EfficientNet refactor; different code paths." + "reason": "GPTJ/CodeGen vs EfficientNet; no shared concrete code-path problem." }, { "left": "pull_request:44013", "right": "pull_request:44044", "accept": false, - "reason": "MobileNetV2 and DeBERTa are unrelated model changes." + "reason": "MobileNetV2 and DeBERTa are unrelated implementations despite similar refactor wording." }, { "left": "pull_request:44066", "right": "pull_request:44086", "accept": false, - "reason": "GPT-J/CodeGen versus MGP-STR; same theme but different model-specific refactors." 
+ "reason": "GPTJ/CodeGen and MGP-STR are different model families and different forward paths." }, { "left": "pull_request:44066", "right": "pull_request:44071", "accept": false, - "reason": "GPT-J/CodeGen and MPT are different concrete implementations." + "reason": "MPT output tracing is a separate model-specific refactor, not the same change as GPTJ/CodeGen." }, { "left": "pull_request:44018", "right": "pull_request:44068", "accept": true, - "reason": "Both are GPT-Neo output-tracing refactors in the same file/tests and appear to be the same underlying change." + "reason": "Both PRs target GPT-Neo's output-tracing refactor in the same file and appear to overlap on the same concrete implementation." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44068", + "accept": false, + "reason": "GPTJ/CodeGen vs GPT-Neo; same umbrella pattern, different model-specific code paths." }, { "left": "pull_request:44019", "right": "pull_request:44722", "accept": false, - "reason": "ResNet refactor versus GPT-J/CodeGen refactor; no shared code path." + "reason": "ResNet output tracing and GPTJ/CodeGen output tracing are unrelated changes." }, { "left": "pull_request:44086", "right": "pull_request:44722", "accept": false, - "reason": "MGP-STR and GPT-J/CodeGen are unrelated models." + "reason": "MGP-STR and GPTJ/CodeGen touch different models and different forward logic." }, { "left": "pull_request:44019", "right": "pull_request:44085", "accept": false, - "reason": "ResNet refactor versus GPT-J refactor; different subsystems." + "reason": "ResNet and GPTJ are separate model implementations; no shared underlying bug." }, { "left": "pull_request:44018", "right": "pull_request:44019", "accept": false, - "reason": "GPT-Neo and ResNet are different model code paths." + "reason": "GPT-Neo and ResNet are unrelated code paths; only the refactor pattern is similar." 
}, { "left": "pull_request:44019", "right": "pull_request:44071", "accept": false, - "reason": "ResNet and MPT output tracing are unrelated concrete changes." + "reason": "ResNet and MPT are different model families with different output handling." }, { "left": "pull_request:44068", "right": "pull_request:44116", "accept": false, - "reason": "GPT-Neo versus Flaubert/XLM; different model families." + "reason": "GPT-Neo and Flaubert/XLM are unrelated model-specific refactors." }, { - "left": "pull_request:44018", - "right": "pull_request:44116", + "left": "pull_request:44068", + "right": "pull_request:44722", "accept": false, - "reason": "GPT-Neo and Flaubert are unrelated code paths." + "reason": "GPT-Neo output tracing is separate from GPTJ/CodeGen; similar decorator migration only." }, { "left": "pull_request:44071", "right": "pull_request:44722", "accept": false, - "reason": "MPT refactor versus GPT-J/CodeGen refactor; same architectural pattern, different change." + "reason": "MPT and GPTJ/CodeGen are different code paths and should not be merged as duplicates." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44116", + "accept": false, + "reason": "GPT-Neo and Flaubert/XLM are unrelated model implementations." }, { "left": "pull_request:44018", "right": "pull_request:44086", "accept": false, - "reason": "GPT-Neo and MGP-STR are different model implementations." + "reason": "GPT-Neo and MGP-STR are different model families; no same underlying bug." }, { "left": "pull_request:44018", "right": "pull_request:44085", "accept": false, - "reason": "GPT-Neo and GPT-J are different code paths, even if both touch output tracing." + "reason": "GPT-Neo and GPTJ are different models, so this is not the same change." }, { "left": "pull_request:44068", - "right": "pull_request:44086", + "right": "pull_request:44071", "accept": false, - "reason": "GPT-Neo versus MGP-STR; different model-specific refactors." 
+ "reason": "GPT-Neo and MPT are separate model-specific output tracing refactors." }, { "left": "pull_request:44059", "right": "pull_request:44086", "accept": false, - "reason": "GPT-2 and MGP-STR are unrelated model changes." + "reason": "GPT2 and MGP-STR are unrelated code paths despite similar refactor language." }, { "left": "pull_request:44085", "right": "pull_request:44086", "accept": false, - "reason": "GPT-J versus MGP-STR; not the same underlying bug/change." + "reason": "GPTJ and MGP-STR are different model families; not a duplicate change." }, { "left": "pull_request:44098", "right": "pull_request:44101", "accept": false, - "reason": "ViLT and Flaubert/XLM are unrelated implementations." + "reason": "ViLT and Flaubert/XLM are different architectures and do not share the same concrete fix." }, { - "left": "pull_request:44059", - "right": "pull_request:44722", + "left": "pull_request:43997", + "right": "pull_request:44017", "accept": false, - "reason": "GPT-2 refactor and GPT-J/CodeGen refactor are different concrete code paths." + "reason": "RegNet and Segformer are different model-specific refactors." }, { - "left": "pull_request:44068", - "right": "pull_request:44071", + "left": "pull_request:43997", + "right": "pull_request:44056", "accept": false, - "reason": "GPT-Neo and MPT are different model-specific changes." + "reason": "RegNet and MPNet are unrelated implementations; only the umbrella issue is shared." }, { - "left": "pull_request:44017", - "right": "pull_request:44722", + "left": "pull_request:44071", + "right": "pull_request:44161", "accept": false, - "reason": "SegFormer and GPT-J/CodeGen are unrelated models." + "reason": "MPT and LongT5 are different model families with separate output-tracing code." }, { - "left": "pull_request:44017", + "left": "pull_request:43997", + "right": "pull_request:44018", + "accept": false, + "reason": "RegNet and GPT-Neo are unrelated model code paths." 
+ }, + { + "left": "pull_request:44074", + "right": "pull_request:44101", + "accept": false, + "reason": "TextNet and Flaubert/XLM are separate model-specific changes." + }, + { + "left": "pull_request:44056", "right": "pull_request:44161", "accept": false, - "reason": "SegFormer and LongT5 are different concrete refactors." + "reason": "MPNet and LongT5 do not share the same concrete bug or fix." }, { - "left": "pull_request:44017", - "right": "pull_request:44073", + "left": "pull_request:43997", + "right": "pull_request:44071", "accept": false, - "reason": "SegFormer and VisualBert are separate model code paths." + "reason": "RegNet and MPT are different model implementations." }, { - "left": "pull_request:44068", + "left": "pull_request:44010", "right": "pull_request:44071", "accept": false, - "reason": "GPT-Neo and MPT are unrelated changes." + "reason": "SqueezeBert and MPT are unrelated code paths." }, { - "left": "pull_request:44071", - "right": "pull_request:44076", + "left": "pull_request:44010", + "right": "pull_request:44073", "accept": false, - "reason": "MPT and ImageGPT are different model implementations." + "reason": "SqueezeBert and VisualBert are different model-specific output handling changes." }, { - "left": "pull_request:44074", - "right": "44101", + "left": "pull_request:44017", + "right": "pull_request:44722", "accept": false, - "reason": "TextNet and Flaubert/XLM are unrelated code paths." + "reason": "Segformer and GPTJ/CodeGen are unrelated changes." }, { - "left": "pull_request:44056", + "left": "pull_request:44017", "right": "pull_request:44161", "accept": false, - "reason": "MPNet and LongT5 are different model-specific refactors." + "reason": "Segformer and LongT5 do not share the same underlying code-path problem." }, { - "left": "pull_request:43997", - "right": "pull_request:44018", + "left": "pull_request:44017", + "right": "pull_request:44073", "accept": false, - "reason": "RegNet and GPT-Neo are unrelated concrete changes." 
+ "reason": "Segformer and VisualBert are separate model refactors." }, { - "left": "pull_request:44074", - "right": "pull_request:44161", + "left": "pull_request:44073", + "right": "pull_request:44101", "accept": false, - "reason": "TextNet and LongT5 are different models; same issue theme is not enough." + "reason": "VisualBert and Flaubert/XLM are different model families and fixes." }, { - "left": "pull_request:43997", - "right": "pull_request:44161", + "left": "pull_request:44056", + "right": "pull_request:44101", "accept": false, - "reason": "RegNet versus LongT5; different code paths." + "reason": "MPNet and Flaubert/XLM are unrelated code paths." }, { "left": "pull_request:44074", - "right": "pull_request:44071", + "right": "pull_request:44161", "accept": false, - "reason": "TextNet and MPT are unrelated implementations." + "reason": "TextNet and LongT5 are different model-specific output-tracing changes." }, { - "left": "pull_request:44010", - "right": "pull_request:44071", + "left": "pull_request:44018", + "right": "pull_request:44074", "accept": false, - "reason": "SqueezeBert and MPT are different models." + "reason": "GPT-Neo and TextNet are separate model implementations." }, { - "left": "pull_request:44010", - "right": "pull_request:44073", + "left": "pull_request:44071", + "right": "pull_request:44129", "accept": false, - "reason": "SqueezeBert and VisualBert are not the same underlying change." + "reason": "MPT and SpeechT5 are different model families; shared issue target is just umbrella grouping." }, { - "left": "pull_request:44056", - "right": "pull_request:44101", + "left": "pull_request:43996", + "right": "pull_request:44161", "accept": false, - "reason": "MPNet and Flaubert/XLM are different concrete code paths." + "reason": "CVT/FNet and LongT5 are unrelated model-specific refactors." 
}, { - "left": "pull_request:43997", + "left": "pull_request:43996", "right": "pull_request:44017", "accept": false, - "reason": "RegNet and SegFormer are unrelated model refactors." + "reason": "CVT/FNet and Segformer are separate code paths." }, { "left": "pull_request:44010", "right": "pull_request:44044", "accept": false, - "reason": "SqueezeBert and DeBERTa are separate model implementations." + "reason": "SqueezeBert and DeBERTa are unrelated implementations." }, { "left": "pull_request:44044", "right": "pull_request:44074", "accept": false, - "reason": "DeBERTa and TextNet are unrelated concrete changes." + "reason": "DeBERTa and TextNet are different model-specific refactors." } ] }, "evaluator_result": { "accept": true, - "feedback": "Grounded overall. The summary stays conservative about the broad model-family theme and only elevates the GPT-J/CodeGen and GPT-Neo overlaps as duplicate-like subclusters. The rejected pairs are well-justified by clearly different model/code paths, and the chosen representative PR is a reasonable anchor for the GPT-J/CodeGen branch." + "feedback": "Grounded overall and appropriately conservative. The canonical/representative choices are supported by the packet, and the only accepted soft edges are the two same-model overlaps with shared files/issue context (GPTJ/CodeGen and GPT-Neo). The cross-model rejections are cautious and consistent with the evidence." 
}, "evaluator_used": true, "retried": false, @@ -17062,7 +17032,7 @@ { "label": "pull_request", "component_index": 14, - "component_count": 24, + "component_count": 25, "review_unit_index": 2, "review_unit_count": 2, "status": "reviewed", @@ -17089,70 +17059,70 @@ "pull_request:43997|pull_request:44010", "pull_request:44010|pull_request:44129" ], - "prepared_review_unit_hash": "d672bba9d2ca13e147d7d8f3ad87c9b284693e19bad802b74d281616d603c376", + "prepared_review_unit_hash": "1a8ec5328f39833263149c348bfd3c2e8bf9f749560d265e188fd9a0fc235bc6", "component_budget": { "node_count": 28, "item_count": 28, "soft_pair_count": 78, - "serialized_chars": 84900, - "estimated_input_tokens": 21225, - "estimated_eval_tokens": 42706 + "serialized_chars": 84884, + "estimated_input_tokens": 21221, + "estimated_eval_tokens": 42698 }, "budget": { "node_count": 8, "item_count": 8, "soft_pair_count": 6, - "serialized_chars": 17125, - "estimated_input_tokens": 4282, - "estimated_eval_tokens": 8820 + "serialized_chars": 17123, + "estimated_input_tokens": 4281, + "estimated_eval_tokens": 8818 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "These PRs are all model-specific migrations to standardized output tracing/capture, but they target different architectures and code paths. 
They look thematically similar rather than true duplicates, so none of the soft edges should be merged.", - "confidence": 0.9, + "summary": "These PRs are all part of the same broad migration pattern to standardized output tracing, but they touch different model implementations and different code paths, so they should not be merged as duplicates.", + "confidence": 0.96, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 44129 is the clearest representative: it has an explicit issue target, a concrete SpeechT5-specific refactor, and a more complete change set than the smaller model migrations.", + "canonical_pr_reason": "PR #44129 is the strongest representative of the set because it has the clearest scoped refactor, an explicit issue target, and a more concrete implementation change in SpeechT5.", "best_issue_reason": null, - "best_pr_reason": "PR 44129 is the best single representative for the cluster because it is the most explicit and self-contained standardized-output refactor among the set.", + "best_pr_reason": "PR #44129 is the best standalone PR to track this work because it is the most explicitly tied to a specific task and appears more complete than the other similarly named refactors.", "soft_edge_verdicts": [ { "left": "pull_request:44024", "right": "pull_request:44076", "accept": false, - "reason": "Both are output-capture refactors, but they affect different models (FocalNet vs ImageGPT) and different layer classes; they are not the same underlying change." + "reason": "Both are output-tracing refactors, but one is FocalNet and the other is ImageGPT; they modify different model internals and are not the same underlying bug/change." }, { "left": "pull_request:44073", "right": "pull_request:44074", "accept": false, - "reason": "VisualBert and TextNet are unrelated code paths; the similarity is only the shared output-tracing refactor pattern, not a duplicate fix." 
+ "reason": "VisualBert vs TextNet are different model code paths; these are related migration style changes, not duplicate fixes for one concrete issue." }, { "left": "pull_request:44056", "right": "pull_request:44073", "accept": false, - "reason": "MPNet and VisualBert are separate model implementations with distinct tracing hooks, so these are parallel refactors rather than one mergeable PR." + "reason": "MPNet and VisualBert each implement separate output-capture plumbing; same theme, but not the same change or mergeable duplicate." }, { "left": "pull_request:44056", "right": "pull_request:44074", "accept": false, - "reason": "These touch different models and different output-capture wiring; they do not appear to fix the same concrete bug or change." + "reason": "MPNet and TextNet touch unrelated model implementations, so this is not the same underlying bug or code-path fix." }, { "left": "pull_request:43997", "right": "pull_request:44010", "accept": false, - "reason": "RegNet and SqueezeBert are unrelated architectures; both PRs migrate output handling, but they are not the same underlying code-path problem." + "reason": "RegNet and SqueezeBert are distinct architectures with separate forward/output handling; these are not duplicates." }, { "left": "pull_request:44010", "right": "pull_request:44129", "accept": false, - "reason": "SqueezeBert and SpeechT5 have different model-specific output plumbing, and 44129 is tied to an explicit SpeechT5 issue target, so this is not a duplicate merge candidate." + "reason": "SqueezeBert and SpeechT5 differ in model family and behavior; both refactor output tracing, but they do not fix the same concrete problem." 
} ] }, @@ -17166,7 +17136,7 @@ { "label": "pull_request", "component_index": 15, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -17235,286 +17205,292 @@ "pull_request:44028|pull_request:44030", "pull_request:44029|pull_request:44030" ], - "prepared_review_unit_hash": "7a0958c1a74035d0ee051ed4123f34208b5739d93cbd1581ac83c149e2e6b62e", + "prepared_review_unit_hash": "44d2b9eb696568a2592cd3e762d9a47a4aac3e40ab679849cc3d945b8560ca69", "component_budget": { "node_count": 13, "item_count": 13, "soft_pair_count": 43, - "serialized_chars": 41319, - "estimated_input_tokens": 10330, - "estimated_eval_tokens": 20916 + "serialized_chars": 41331, + "estimated_input_tokens": 10333, + "estimated_eval_tokens": 20922 }, "budget": { "node_count": 13, "item_count": 13, "soft_pair_count": 43, - "serialized_chars": 41319, - "estimated_input_tokens": 10330, - "estimated_eval_tokens": 20916 + "serialized_chars": 41331, + "estimated_input_tokens": 10333, + "estimated_eval_tokens": 20922 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These PRs share a broad 'output tracing/refactor' theme, but they are separate model-specific changes in different code paths. They are not true duplicates, so no soft edge should be merged. 44025 is the best representative PR because it is a concrete, multi-file Depth Anything refactor and sits near the center of the similarity cluster.", - "confidence": 0.91, + "summary": "This cluster is a set of similar-but-separate model-specific output-tracing refactors. 
The edits follow the same general pattern, but they target different architectures and code paths, so they should not be merged as duplicates.", + "confidence": 0.92, "canonical_issue_reason": null, - "canonical_pr_reason": "No issue exists in this cluster; as a PR representative, 44025 is the most central and concrete example of the shared refactor pattern, touching two related Depth Anything files.", + "canonical_pr_reason": "PR 44003 is the broadest representative: it spans two related model files and introduces the shared tracing helpers, making it the most central exemplar of the cluster theme.", "best_issue_reason": null, - "best_pr_reason": "44025 is the best single anchor for the cluster because it is substantial, spans two closely related files, and exemplifies the common output-tracing refactor without conflating unrelated model implementations.", + "best_pr_reason": "PR 44003 is the best single representative of this cluster because it is a substantive, cross-file implementation of the common output-tracing refactor pattern.", "soft_edge_verdicts": [ { "left": "pull_request:43998", "right": "pull_request:44002", "accept": false, - "reason": "Same refactor theme, but different model families and code paths (timm_backbone vs upernet)." + "reason": "Same refactor theme, but timm_backbone and upernet are different model code paths." }, { "left": "pull_request:43998", "right": "pull_request:44000", "accept": false, - "reason": "Both are output-tracing refactors, but they target unrelated architectures and implementations." + "reason": "Both are output-tracing refactors, but they affect unrelated model implementations." }, { "left": "pull_request:43998", "right": "pull_request:44001", "accept": false, - "reason": "Different model code paths; shared wording is only a generic refactor pattern." + "reason": "Different architectures and forward paths; not the same underlying bug/change." 
}, { "left": "pull_request:43998", "right": "pull_request:44003", "accept": false, - "reason": "timm_backbone and mamba/falcon_mamba are unrelated fixes; not the same underlying change." + "reason": "Mamba/falcon_mamba tracing changes are unrelated to timm_backbone's output handling." }, { "left": "pull_request:43998", "right": "pull_request:44029", "accept": false, - "reason": "Unrelated architectures; similarity is only at the high-level output-tracing refactor theme." + "reason": "Different model families and separate tracing logic; similarity is only at the refactor pattern level." }, { "left": "pull_request:43999", "right": "pull_request:44001", "accept": false, - "reason": "MobileNetV1 and UnivNet are different model paths; not a duplicate change." + "reason": "MobileNetV1 and UnivNet are distinct models with separate output semantics." }, { "left": "pull_request:43999", "right": "pull_request:44002", "accept": false, - "reason": "Different subsystems and code paths; only the refactor theme matches." + "reason": "Shared refactor style only; no shared concrete code path." }, { "left": "pull_request:43999", "right": "pull_request:44025", "accept": false, - "reason": "MobileNetV1 refactor is unrelated to Depth Anything; not mergeable as one PR." + "reason": "Different architectures and model files; not the same bug or change." }, { "left": "pull_request:43999", "right": "pull_request:44026", "accept": false, - "reason": "Separate model implementations with no shared bug or patch target." + "reason": "MobileNetV1 output tracing is unrelated to vision_encoder_decoder." }, { "left": "pull_request:43999", "right": "pull_request:44027", "accept": false, - "reason": "MobileNetV1 vs speech_encoder_decoder are distinct refactors, not duplicates." + "reason": "Separate seq2seq wrapper and image model refactors; not duplicates." 
}, { "left": "pull_request:44000", "right": "pull_request:44001", "accept": false, - "reason": "Different models (vision-text dual encoder vs UnivNet); same generic pattern only." + "reason": "Vision-text dual encoder and UnivNet are unrelated implementations." }, { "left": "pull_request:44000", "right": "pull_request:44002", "accept": false, - "reason": "Different concrete code paths and model families; not the same fix." + "reason": "Different model code paths; same broad theme only." }, { "left": "pull_request:44000", "right": "pull_request:44025", "accept": false, - "reason": "CLIP-style dual encoder refactor is unrelated to Depth Anything refactor." + "reason": "These are separate model-specific tracing changes, not one fix." }, { "left": "pull_request:44000", "right": "pull_request:44026", "accept": false, - "reason": "Separate model implementations; no shared underlying bug/change." + "reason": "Distinct architectures and file-level changes; not mergeable as one PR." }, { "left": "pull_request:44001", "right": "pull_request:44002", "accept": false, - "reason": "UnivNet and UperNet are different models; the overlap is only topical." + "reason": "UnivNet and UperNet are unrelated models with different forward behavior." }, { "left": "pull_request:44001", "right": "pull_request:44025", "accept": false, - "reason": "Different architectures and code paths; not the same change." + "reason": "No shared concrete code path; only a common refactor motif." }, { "left": "pull_request:44001", "right": "pull_request:44026", "accept": false, - "reason": "UnivNet vs vision_encoder_decoder are unrelated refactors." + "reason": "Seq2seq wrapper vs audio model; separate changes." }, { "left": "pull_request:44002", "right": "pull_request:44029", "accept": false, - "reason": "UperNet and RWKV are distinct code paths; only the refactor framing matches." + "reason": "UperNet and RWKV are unrelated model implementations." 
}, { "left": "pull_request:44002", "right": "pull_request:44025", "accept": false, - "reason": "Different model families; no evidence of one shared bug or patch." + "reason": "Different models and distinct output-tracing code." }, { "left": "pull_request:44001", "right": "pull_request:44003", "accept": false, - "reason": "UnivNet and mamba/falcon_mamba are unrelated model-specific refactors." + "reason": "UnivNet audio tracing is unrelated to Mamba/FalconMamba output tracing." }, { "left": "pull_request:44002", "right": "pull_request:44003", "accept": false, - "reason": "UperNet and mamba/falcon_mamba do not fix the same underlying issue." + "reason": "UperNet and Mamba touch unrelated forward paths." }, { "left": "pull_request:44000", "right": "pull_request:44003", "accept": false, - "reason": "Vision-text dual encoder and mamba/falcon_mamba are different implementations and changes." + "reason": "Vision-text dual encoder and Mamba are separate subsystems." }, { "left": "pull_request:44003", "right": "pull_request:44028", "accept": false, - "reason": "Mamba/FalconMamba vs SuperPoint are unrelated code paths; same theme only." + "reason": "Mamba tracing changes are unrelated to SuperPoint's output handling." }, { "left": "pull_request:44003", "right": "pull_request:44004", "accept": false, - "reason": "Different model families and distinct forward paths; not a single PR-worthy duplicate." + "reason": "CodeGen cache/output refactor is a different concrete code-path change." }, { "left": "pull_request:43998", "right": "pull_request:44004", "accept": false, - "reason": "timm_backbone and codegen are separate model implementations; only loosely similar." + "reason": "Timm backbone and CodeGen are unrelated models." }, { "left": "pull_request:44001", "right": "pull_request:44004", "accept": false, - "reason": "UnivNet and CodeGen are unrelated fixes despite similar refactor wording." + "reason": "UnivNet and CodeGen do not share the same underlying fix." 
}, { "left": "pull_request:44002", "right": "pull_request:44004", "accept": false, - "reason": "UperNet and CodeGen do not share the same concrete bug or code change." + "reason": "UperNet segmentation and CodeGen generation paths are different." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44004", + "accept": false, + "reason": "Different model families and separate output semantics." }, { "left": "pull_request:44025", "right": "pull_request:44026", "accept": false, - "reason": "Depth Anything and VisionEncoderDecoder are different model paths; not one duplicate change." + "reason": "DepthAnything and VisionEncoderDecoder are distinct model implementations." }, { "left": "pull_request:44025", "right": "pull_request:44027", "accept": false, - "reason": "Depth Anything refactor vs speech_encoder_decoder refactor are unrelated." + "reason": "DepthAnything and SpeechEncoderDecoder are not the same code path." }, { "left": "pull_request:44025", "right": "pull_request:44028", "accept": false, - "reason": "Separate model implementations; shared output-tracing theme is too broad." + "reason": "DepthAnything and SuperPoint are unrelated changes." }, { "left": "pull_request:44025", "right": "pull_request:44029", "accept": false, - "reason": "Depth Anything and RWKV are unrelated code paths and cannot be merged as one fix." + "reason": "Different models; shared refactor pattern only." }, { "left": "pull_request:44025", "right": "pull_request:44030", "accept": false, - "reason": "Different models (Depth Anything vs DPR); no shared underlying bug/change." + "reason": "DepthAnything and DPR are separate architectures with separate forward logic." }, { "left": "pull_request:44026", "right": "pull_request:44029", "accept": false, - "reason": "VisionEncoderDecoder and RWKV are distinct implementations; not duplicates." + "reason": "VisionEncoderDecoder and RWKV are unrelated model paths." 
}, { "left": "pull_request:44026", "right": "pull_request:44027", "accept": false, - "reason": "VisionEncoderDecoder vs speech_encoder_decoder are separate refactors." + "reason": "VisionEncoderDecoder and SpeechEncoderDecoder are different wrappers with different internals." }, { "left": "pull_request:44026", "right": "pull_request:44028", "accept": false, - "reason": "Different model families and code paths; same broad pattern only." + "reason": "Different models and distinct output-tracing implementations." }, { "left": "pull_request:44026", "right": "pull_request:44030", "accept": false, - "reason": "VisionEncoderDecoder and DPR do not fix the same concrete issue." + "reason": "No shared concrete bug/change; only similar refactor style." }, { "left": "pull_request:44027", "right": "pull_request:44028", "accept": false, - "reason": "Speech encoder-decoder and SuperPoint are unrelated model-specific changes." + "reason": "SpeechEncoderDecoder and SuperPoint are unrelated." }, { "left": "pull_request:44027", "right": "pull_request:44029", "accept": false, - "reason": "Different architectures and code paths; not mergeable as one PR." + "reason": "Different architectures and output semantics." }, { "left": "pull_request:44027", "right": "pull_request:44030", "accept": false, - "reason": "speech_encoder_decoder and DPR are unrelated refactors." + "reason": "SpeechEncoderDecoder and DPR are separate model-specific changes." }, { "left": "pull_request:44028", "right": "pull_request:44029", "accept": false, - "reason": "SuperPoint and RWKV are different model implementations; no duplicate fix." + "reason": "SuperPoint and RWKV do not share a concrete fix." }, { "left": "pull_request:44028", "right": "pull_request:44030", "accept": false, - "reason": "SuperPoint and DPR are unrelated code paths and changes." + "reason": "Different model families; not duplicates." 
}, { "left": "pull_request:44029", "right": "pull_request:44030", "accept": false, - "reason": "RWKV and DPR are separate model refactors; shared theme is too generic." + "reason": "RWKV and DPR have unrelated forward/output code paths." } ] }, @@ -17528,7 +17504,7 @@ { "label": "pull_request", "component_index": 16, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -17547,52 +17523,52 @@ "pull_request:44662|pull_request:45075", "pull_request:45075|pull_request:45401" ], - "prepared_review_unit_hash": "d48b271e59261ec5d11c571c8e2c15e060eee0476ae14c9f7c507d7573fea544", + "prepared_review_unit_hash": "5d88ba93d54992148acbb17f2093420b331e115b35efa2afb23ff60ba4453316", "component_budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 3, - "serialized_chars": 10912, - "estimated_input_tokens": 2728, - "estimated_eval_tokens": 5712 + "serialized_chars": 10913, + "estimated_input_tokens": 2729, + "estimated_eval_tokens": 5714 }, "budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 3, - "serialized_chars": 10912, - "estimated_input_tokens": 2728, - "estimated_eval_tokens": 5712 + "serialized_chars": 10913, + "estimated_input_tokens": 2729, + "estimated_eval_tokens": 5714 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Three separate model-addition pull requests for unrelated models (PenguinVL, DeepSeek-OCR-2, and Voxtral TTS) with only superficial overlap in framework registration/docs.", + "summary": "These are three unrelated model-addition PRs (PenguinVL, DeepSeek-OCR-2, Voxtral TTS). 
They share only common Transformers boilerplate like auto-registration and docs, not the same underlying bug or change.", "confidence": 0.97, "canonical_issue_reason": null, - "canonical_pr_reason": "PR #45075 is the most mature and self-contained representative in this cluster: non-draft, substantial implementation/test coverage, and the highest review activity among the three.", + "canonical_pr_reason": "No PR is a true canonical duplicate here; the three changes target different models and code paths. If one had to be chosen as the most self-contained representative, PR 45075 is the most complete, but it is not a duplicate of the others.", "best_issue_reason": null, - "best_pr_reason": "PR #45075 is the strongest overall PR candidate in the cluster because it appears complete, non-draft, and actively reviewed; the others are either a different model addition (#44662) or a draft (#45401).", + "best_pr_reason": "PR 45075 is the strongest standalone PR in this set because it has the most complete model integration and an explicit issue target, but it still does not represent the others' changes.", "soft_edge_verdicts": [ { "left": "pull_request:44662", "right": "pull_request:45401", "accept": false, - "reason": "Different model additions for different products; shared auto-registration/docs are expected boilerplate, not the same underlying change." + "reason": "Different model integrations for PenguinVL vs Voxtral TTS; shared auto/docs files are boilerplate, not the same fix." }, { "left": "pull_request:44662", "right": "pull_request:45075", "accept": false, - "reason": "Separate model implementations for different architectures and code paths; similarity is limited to common Transformers scaffolding." + "reason": "Separate model additions (PenguinVL vs DeepSeek-OCR-2) with different architectures and files beyond shared registry/docs plumbing." 
}, { "left": "pull_request:45075", "right": "pull_request:45401", "accept": false, - "reason": "Both add distinct new models, not the same bug or fix; overlapping filenames are generic integration files only." + "reason": "DeepSeek-OCR-2 and Voxtral TTS are unrelated model ports; overlap is limited to standard Transformers registration and docs." } ] }, @@ -17606,7 +17582,7 @@ { "label": "pull_request", "component_index": 17, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -17622,7 +17598,7 @@ "soft_pairs": [ "pull_request:44979|pull_request:45363" ], - "prepared_review_unit_hash": "6b863d41bda90243224521c80f7042969cbb13d6a9756c84fcf51fdd79689ce5", + "prepared_review_unit_hash": "c64539ac5d04838d43edc2791a7547d2585d478efe4be471449ecf0d4ab1195a", "component_budget": { "node_count": 2, "item_count": 2, @@ -17644,18 +17620,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These PRs share a broad \u201cfusion\u201d theme, but they target different layers: one introduces a general Module Fusion API, while the other wires in n-to-1 kernel fusion via KernelConfig and integrations. They are related conceptually, but not the same underlying change.", - "confidence": 0.94, + "summary": "Two PRs in the same broad fusion area, but they target different layers: one adds a generic module-fusion API, the other wires in a specific KernelConfig-based n-to-1 fusion path. They are related, but not duplicates.", + "confidence": 0.84, "canonical_issue_reason": null, - "canonical_pr_reason": "PR #45363 is the more concrete, implementation-driven change: it adds KernelConfig plumbing, integration hooks, and a runnable example for a specific fusion workflow.", + "canonical_pr_reason": "No issue artifacts here. 
If choosing a canonical PR, #45363 is the more concrete feature implementation, while #44979 is the lower-level API scaffolding.", "best_issue_reason": null, - "best_pr_reason": "PR #45363 is the best representative of the cluster because it centers on a specific code-path feature with clear integration and modeling changes, rather than a more generic API scaffold.", + "best_pr_reason": "#45363 is the best representative of the cluster because it implements a concrete kernel-fusion workflow across runtime codepaths, whereas #44979 mainly introduces the reusable module-fusion abstraction.", "soft_edge_verdicts": [ { "left": "pull_request:44979", "right": "pull_request:45363", "accept": false, - "reason": "Both involve fusion, but #44979 is a general module-fusion API and #45363 is a KernelConfig-based n-to-1 kernel fusion feature. Different mechanisms, different affected files, and not plausibly one merged PR without conflating separate designs." + "reason": "Related but not the same change: #44979 adds a generic Module Fusion API, while #45363 adds KernelConfig-driven n-to-1 kernel fusion and integration plumbing. They could coexist in separate PRs." 
} ] }, @@ -17669,7 +17645,7 @@ { "label": "pull_request", "component_index": 18, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -17685,7 +17661,7 @@ "soft_pairs": [ "pull_request:45086|pull_request:45317" ], - "prepared_review_unit_hash": "0e3ba9e2de7708ded75705a0466845fe444217fbdde11f94e6625ebd1800f3be", + "prepared_review_unit_hash": "56bec37046a24cac0f3aef64095c14cf4c286c9776e739bdc106c73a8eab4d9b", "component_budget": { "node_count": 2, "item_count": 2, @@ -17707,18 +17683,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Both PRs fix the same AttributeError in `_patch_mistral_regex` for `fix_mistral_regex=True` by switching from `tokenizer.backend_tokenizer.pre_tokenizer` to `tokenizer.pre_tokenizer` in the same file. PR 45317 is the more complete duplicate because it includes the code fix plus a test update.", + "summary": "Both PRs fix the same AttributeError in `_patch_mistral_regex` by switching from `tokenizer.backend_tokenizer.pre_tokenizer` to `tokenizer.pre_tokenizer`; the newer PR also adds test coverage.", "confidence": 0.96, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 45317 is the better canonical PR because it contains the same core fix as 45086 plus test coverage, making it the more complete representative of this duplicate change.", + "canonical_pr_reason": "PR #45317 is the better canonical PR: it fixes the same bug as #45086 but is more complete, with test coverage and a clearer title/reproduction context.", "best_issue_reason": null, - "best_pr_reason": "PR 45317 is the best overall representative: same underlying bug, same code path, and it adds validation via tests.", + "best_pr_reason": "PR #45317 is the best overall representative because it addresses the concrete code-path bug directly and includes validation, while #45086 is the earlier minimal patch.", "soft_edge_verdicts": [ { "left": "pull_request:45086", 
"right": "pull_request:45317", "accept": true, - "reason": "Same concrete bug fix in the same function and same file; the later PR is the same change with added tests, so they could plausibly be merged into one PR." + "reason": "Same underlying AttributeError fix in the same file and code path; the second PR is essentially the same patch plus tests, so they are mergeable as one change." } ] }, @@ -17732,7 +17708,7 @@ { "label": "pull_request", "component_index": 19, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -17748,12 +17724,12 @@ "soft_pairs": [ "pull_request:45115|pull_request:45569" ], - "prepared_review_unit_hash": "dd7efacf3a056a4c317643391fc3bb52f472d2f12dcc6ff5776e4f779f51cb2f", + "prepared_review_unit_hash": "fc9b1583a87af08f923334243e05b6bce86a9a238eb9eba85a845f313a5efb98", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 6065, + "serialized_chars": 6066, "estimated_input_tokens": 1517, "estimated_eval_tokens": 3290 }, @@ -17761,7 +17737,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 6065, + "serialized_chars": 6066, "estimated_input_tokens": 1517, "estimated_eval_tokens": 3290 }, @@ -17770,18 +17746,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Two open PRs touch NemotronH, but they address different changes: one is a conversion/refactor toward GraniteMoeHybrid-compatible naming, the other is a broader split into NemotronH Dense/Sparse models with docs and auto-mapping updates. They overlap on shared model files, but are not duplicates.", - "confidence": 0.87, + "summary": "Both PRs touch NemotronH, but they are not the same change: #45115 is a narrow refactor to inherit from GraniteMoeHybrid and update conversion mappings, while #45569 is a larger architectural split into NemotronHDense/NemotronHSparse with docs and test updates. 
Related area, but not a duplicate change.", + "confidence": 0.88, "canonical_issue_reason": null, - "canonical_pr_reason": "PR #45569 is the better representative because it is the more complete user-facing model change: it introduces the Dense/Sparse model variants, updates docs and auto-mapping, and keeps NemotronH as a compatibility dispatcher.", + "canonical_pr_reason": "#45569 is the broader end-state refactor and more complete implementation; #45115 is a narrower precursor focused on inheritance/conversion mapping.", "best_issue_reason": null, - "best_pr_reason": "PR #45569 is the best single PR to stand in for this cluster since it covers the larger architectural outcome and most of the visible API/documentation surface.", + "best_pr_reason": "#45569 is the most representative and comprehensive PR in the cluster, covering the model split plus docs/tests, whereas #45115 is a limited structural refactor.", "soft_edge_verdicts": [ { "left": "pull_request:45115", "right": "pull_request:45569", "accept": false, - "reason": "Not the same underlying change. #45115 is a targeted refactor/conversion-mapping update for NemotronH-to-GraniteMoeHybrid naming, while #45569 is a broader model split adding NemotronHDense/NemotronHSparse plus docs and auto/model wiring. Shared files alone are not enough to treat them as duplicates." + "reason": "Same model family, but different concrete change sets and scopes: one refactors NemotronH toward GraniteMoeHybrid, the other splits NemotronH into dense/sparse variants with broader API/docs/test updates. Not mergeable as one duplicate PR." 
} ] }, @@ -17795,7 +17771,7 @@ { "label": "pull_request", "component_index": 20, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -17811,12 +17787,12 @@ "soft_pairs": [ "pull_request:45332|pull_request:45333" ], - "prepared_review_unit_hash": "07c064175a776eb59571abb65ae299dc0b092be98802beb1cb5f10212a912460", + "prepared_review_unit_hash": "d98028daaa0886f8fd66e32aa2fbb8a413e577b7e328cc0f6c96b9734fd00554", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 5398, + "serialized_chars": 5397, "estimated_input_tokens": 1350, "estimated_eval_tokens": 2956 }, @@ -17824,7 +17800,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 5398, + "serialized_chars": 5397, "estimated_input_tokens": 1350, "estimated_eval_tokens": 2956 }, @@ -17833,18 +17809,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Two related but distinct PRs: one adds heterogeneous per-layer config plumbing, the other adds broader heterogeneous model/runtime support on top of it. They share files and theme, but they are not the same concrete change.", - "confidence": 0.78, + "summary": "Two related but distinct PRs: one adds heterogeneous per-layer configuration support, the other adds broader heterogeneous model/runtime support that likely builds on it. They are not the same underlying change, so this is not a duplicate pair.", + "confidence": 0.94, "canonical_issue_reason": null, - "canonical_pr_reason": "No issue in the cluster; among the PRs, #45332 is the better canonical representative because it is the broader end-to-end heterogeneous model support change and subsumes the config groundwork.", + "canonical_pr_reason": "PR 45332 is the broader end-to-end change, touching cache/modeling/masking plus tests for heterogeneous behavior, so it is the better representative PR for the cluster. 
PR 45333 is a narrower foundational config-only piece.", "best_issue_reason": null, - "best_pr_reason": "#45332 is the best cluster representative since it covers the main feature area and includes the downstream modeling/cache changes, whereas #45333 is a narrower config-only prerequisite.", + "best_pr_reason": "PR 45332 is the most complete implementation of the heterogeneous feature set in this cluster and best captures the overall work. PR 45333 is useful groundwork but not the main user-facing change.", "soft_edge_verdicts": [ { "left": "pull_request:45332", "right": "pull_request:45333", "accept": false, - "reason": "Related feature work, but not the same underlying change. #45333 adds per-layer heterogeneous config support; #45332 adds heterogeneous model/runtime support that uses that config. They could be adjacent PRs, not one duplicate PR." + "reason": "Different scope and likely dependency chain: 45333 adds per-layer config plumbing, while 45332 adds broader modeling/cache support. They are related, but not the same concrete fix/change and would not plausibly be merged as one PR." } ] }, @@ -17858,7 +17834,7 @@ { "label": "pull_request", "component_index": 21, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -17874,7 +17850,7 @@ "soft_pairs": [ "pull_request:45415|pull_request:45425" ], - "prepared_review_unit_hash": "fd3295105cfcdcb6d21429a84e8077ed678ff2ce0e5480e71c271b64bc488c44", + "prepared_review_unit_hash": "4367f3fd7cdd6bb8bef75ea4b9aec4d4088df0e06db845e82338f83c18ce3cb1", "component_budget": { "node_count": 2, "item_count": 2, @@ -17896,18 +17872,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Both items are typing-related pull requests, but they are not the same change: one is a broad repo-wide type-checking sweep, the other is a focused typing-compatibility update around modeling_utils/PEFT. 
They overlap in a few files, but not in the same concrete fix.", - "confidence": 0.93, + "summary": "Both PRs are typing-related, but they target different changes: one is a broad repo-wide type-checking sweep, the other adds specific typing protocols/helpers for model utilities and PEFT integration. They are not the same underlying change.", + "confidence": 0.89, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 45425 is the more self-contained change: it introduces typing aliases/protocols and targeted fixes for specific typing gaps. PR 45415 is a much broader type-checking pass across many files and config, so it is less suitable as the canonical representative of this cluster.", + "canonical_pr_reason": "PR #45425 is the more concrete and representative typing change: it adds shared typing helpers/protocols and updates specific call sites, whereas #45415 is a broad type-checking/config sweep across many files.", "best_issue_reason": null, - "best_pr_reason": "PR 45425 is the best single PR for the cluster because it is narrowly scoped and concrete, whereas PR 45415 is a sweeping typing cleanup rather than one specific change.", + "best_pr_reason": "PR #45425 is the better representative for this cluster because it makes a focused code change in the typing layer instead of a wide lint/type-checking pass.", "soft_edge_verdicts": [ { "left": "pull_request:45415", "right": "pull_request:45425", "accept": false, - "reason": "Reject: 45415 is a broad type-checking enablement/sweep (pyproject plus many files), while 45425 adds specific typing aliases/protocols and targeted fixes for modeling_utils/PEFT. Shared files do not mean the same underlying bug or mergeable PR." + "reason": "They share a typing theme and some files, but #45415 is a broad type-checking enablement sweep while #45425 adds specific typing protocols and related code updates. Different concrete changes, so not duplicates." 
} ] }, @@ -17921,7 +17897,7 @@ { "label": "pull_request", "component_index": 22, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -17937,7 +17913,7 @@ "soft_pairs": [ "pull_request:45487|pull_request:45615" ], - "prepared_review_unit_hash": "b6a8f3528cacf24a90d0ef36311cdcb3ada051fec0a8269f1f71e0607061f4ff", + "prepared_review_unit_hash": "ea9c697b5955024aa34eb08b9df3ca5e131cc0a0ee0e29bbb3b44ccab8f74acc", "component_budget": { "node_count": 2, "item_count": 2, @@ -17959,18 +17935,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "The two PRs are unrelated: one fixes a device placement/model-parallel bug in AltCLIP/ChineseCLIP, while the other only updates Qianfan OCR XPU test expectations.", - "confidence": 0.98, + "summary": "These two PRs are unrelated: one fixes a real model-parallel device placement bug in AltCLIP/ChineseCLIP and related models, while the other only adds XPU test expectations for Qianfan OCR.", + "confidence": 0.96, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 45487 is the substantive code fix: it changes model code to move buffered token type IDs onto the correct device, addressing the actual parallelism bug. 
PR 45615 only adjusts tests and has no comparable code-path fix.", + "canonical_pr_reason": "PR #45487 is the substantive code fix: it patches device placement in model-parallel forward paths across several model implementations, addressing a concrete runtime bug.", "best_issue_reason": null, - "best_pr_reason": "PR 45487 is the best representative because it targets the underlying runtime bug in model code, whereas PR 45615 is just platform-specific expected-output maintenance.", + "best_pr_reason": "PR #45487 is the best overall candidate because it changes production code to resolve the underlying bug; PR #45615 is tests-only and does not fix the code path itself.", "soft_edge_verdicts": [ { "left": "pull_request:45487", "right": "pull_request:45615", "accept": false, - "reason": "Different models and different concerns: 45487 fixes AltCLIP/ChineseCLIP device placement in forward passes; 45615 only adds XPU golden values for Qianfan OCR tests. They do not appear to be the same underlying change or bug." + "reason": "Reject: they target different problems. #45487 fixes token type id/device handling in model parallel execution, while #45615 only updates expected outputs for XPU integration tests in Qianfan OCR." 
} ] }, @@ -17984,7 +17960,7 @@ { "label": "pull_request", "component_index": 23, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -18000,7 +17976,7 @@ "soft_pairs": [ "pull_request:45548|pull_request:45662" ], - "prepared_review_unit_hash": "c2a79796764afcca5ea92160eefa6918654edf3b441e2f01c8141708a5df18e4", + "prepared_review_unit_hash": "b54b89bf068b785ef6230723022e4f4a5b2d4a3f9d8bb3c05c68eb3741039059", "component_budget": { "node_count": 2, "item_count": 2, @@ -18022,18 +17998,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are related EP-loading fixes, but they target different failure modes: one fixes DeepSpeed ZeRO-3/accelerate launch loading and config serialization, while the other fixes FSDP2 rank-0 broadcast overwriting experts. I would not treat them as duplicates.", - "confidence": 0.88, + "summary": "These are related only at a broad EP/model-loading level, but they fix different bugs: PR 45548 preserves distributed_config during config serialization and adds a has_ep flag for DeepSpeed ZeRO-3/accelerate launch loading, while PR 45662 prevents expert parameters from being overwritten by rank-0 broadcast in EP + FSDP2 loading.", + "confidence": 0.94, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 45662 is the better representative because it targets a specific, concrete EP corruption bug during distributed loading and the diff is centered on that failure path.", + "canonical_pr_reason": null, "best_issue_reason": null, - "best_pr_reason": "PR 45662 is the strongest standalone PR in this cluster: it describes a precise user-visible bug and has a focused fix in the loading path, whereas 45548 is a different EP-related loading issue.", + "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "pull_request:45548", "right": "pull_request:45662", "accept": false, - "reason": "Both involve expert parallelism and loading, but 
45548 fixes EP + DeepSpeed ZeRO-3/accelerate launch config handling, while 45662 fixes EP + FSDP2 expert tensors being overwritten by rank-0 broadcast. Different root causes and different code paths." + "reason": "Different concrete code-paths and failure modes: 45548 is about config serialization and EP detection during loading, while 45662 is about post-shard wrapping to avoid expert weights being overwritten under FSDP2. They are not mergeable as one PR." } ] }, @@ -18047,7 +18023,7 @@ { "label": "pull_request", "component_index": 24, - "component_count": 24, + "component_count": 25, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -18063,12 +18039,12 @@ "soft_pairs": [ "pull_request:45639|pull_request:45645" ], - "prepared_review_unit_hash": "670d56d4d5b66a04c33c66949919b0a723cff2ea04fb544f0398a5bc26554037", + "prepared_review_unit_hash": "c2e5e57c354c65043464bee80ceb77bf902a280202a053c433a06bc497ca7bda", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 4589, + "serialized_chars": 4590, "estimated_input_tokens": 1148, "estimated_eval_tokens": 2552 }, @@ -18076,7 +18052,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 4589, + "serialized_chars": 4590, "estimated_input_tokens": 1148, "estimated_eval_tokens": 2552 }, @@ -18085,18 +18061,81 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Both PRs target the same underlying xdist collision problem around captured_info output in testing_utils; 45645 is the broader follow-up that also updates CI handling, while 45639 is the narrower core fix.", - "confidence": 0.9, + "summary": "Both PRs address the same xdist/captured_info collision in testing debug logs. 
PR 45639 fixes the file handling in `testing_utils.py`; PR 45645 includes that same fix and also updates CI log collection to read multiple `captured_info*.txt` files, making it the broader and more complete change.", + "confidence": 0.88, "canonical_issue_reason": null, - "canonical_pr_reason": "pull_request:45645 is the better canonical PR because it contains the core xdist-safe logging fix plus the workflow change needed to read multiple captured_info artifacts, making it the more complete representative of the change set.", + "canonical_pr_reason": "PR 45645 is the better canonical PR because it contains the core xdist-safe fix in `testing_utils.py` and also updates the workflow so CI can collect the new per-worker log files.", "best_issue_reason": null, - "best_pr_reason": "pull_request:45645 is the strongest representative for the cluster: it addresses the same captured_info xdist collision and adds the downstream CI log preservation behavior, whereas 45639 only covers the file-writing side.", + "best_pr_reason": "PR 45645 is the best overall match: it addresses the same underlying xdist collision bug as 45639, but also covers the downstream CI artifact handling needed for the fix to work end-to-end.", "soft_edge_verdicts": [ { "left": "pull_request:45639", "right": "pull_request:45645", "accept": true, - "reason": "Same underlying bug: both fix xdist collisions for captured_info generated by patched testing methods. The second PR extends the same fix with CI artifact aggregation, so they could plausibly be merged into one change set." + "reason": "Same underlying bug: xdist collisions when writing `captured_info` debug logs. 45645 builds on the same code-path fix in `testing_utils.py` and adds workflow support for multiple output files, so they could plausibly be merged as one PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 25, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45683-2", + "nodes": [ + "pull_request:45683", + "pull_request:45734" + ], + "soft_pairs": [ + "pull_request:45683|pull_request:45734" + ], + "prepared_review_unit_hash": "b52ca6e04dd943e7cbb6fcbfa4fd68de7103de43e930c9387db6e618a3dfe6df", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 3891, + "estimated_input_tokens": 973, + "estimated_eval_tokens": 2202 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 3891, + "estimated_input_tokens": 973, + "estimated_eval_tokens": 2202 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two open PRs touch the same quantizer helper, but they fix different bugs: one excludes audio tower/embed_audio modules from quantization, while the other makes user-supplied skip_modules additive with the default auto-detected skips and adds tests.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45734 is the better cluster representative because it fixes the core skip_modules regression in the quantizer flow and includes test coverage; PR 45683 is a separate audio-module exclusion change.", + "best_issue_reason": null, + "best_pr_reason": "PR 45734 best captures the main quantizer behavior bug and is more complete due to the added regression test; PR 45683 addresses a different model-specific audio-path issue.", + "soft_edge_verdicts": 
[ + { + "left": "pull_request:45683", + "right": "pull_request:45734", + "accept": false, + "reason": "Different underlying bugs: 45683 skips audio modules for multimodal models, while 45734 changes how skip_modules combines with default protected modules to prevent lm_head quantization. Shared file/issue target is not enough to treat them as duplicates." } ] }, diff --git a/analysis/current/manifest.json b/analysis/current/manifest.json index 8f6561a0f6df26df33393db229257754e6e27b72..c7d8df273f5c469b4ceda385cead5c05bf8c2ac6 100644 --- a/analysis/current/manifest.json +++ b/analysis/current/manifest.json @@ -1,8 +1,8 @@ { - "analysis_id": "hybrid-model-20260430t120024z", + "analysis_id": "hybrid-model-20260501t113108z", "archived_artifacts": { - "hybrid": "snapshots/20260430T120024Z/analysis-runs/hybrid-model-20260430t120024z/analysis-report-hybrid.json", - "hybrid_reviews": "snapshots/20260430T120024Z/analysis-runs/hybrid-model-20260430t120024z/analysis-report-hybrid.llm-reviews.json" + "hybrid": "snapshots/20260501T113108Z/analysis-runs/hybrid-model-20260501t113108z/analysis-report-hybrid.json", + "hybrid_reviews": "snapshots/20260501T113108Z/analysis-runs/hybrid-model-20260501t113108z/analysis-report-hybrid.llm-reviews.json" }, "artifacts": { "hybrid": "analysis/current/analysis-report-hybrid.json", @@ -10,9 +10,9 @@ }, "channel": "canonical", "model": null, - "published_at": "2026-04-30T12:09:48Z", + "published_at": "2026-05-01T11:39:27Z", "repo": "huggingface/transformers", "schema_version": 1, - "snapshot_id": "20260430T120024Z", + "snapshot_id": "20260501T113108Z", "variant": "hybrid" } diff --git a/snapshots/20260501T113108Z/analysis-runs/hybrid-model-20260501t113108z/analysis-report-hybrid.json b/snapshots/20260501T113108Z/analysis-runs/hybrid-model-20260501t113108z/analysis-report-hybrid.json new file mode 100644 index 0000000000000000000000000000000000000000..69e932252beb6c7eb7d0a4c156c222110fceffa4 --- /dev/null +++ 
b/snapshots/20260501T113108Z/analysis-runs/hybrid-model-20260501t113108z/analysis-report-hybrid.json @@ -0,0 +1,2146 @@ +{ + "schema_version": "1.0", + "repo": "huggingface/transformers", + "snapshot_id": "20260501T113108Z", + "generated_at": "2026-05-01T11:39:25Z", + "evidence_quality": "full", + "llm_enrichment": true, + "meta_bugs": [ + { + "cluster_id": "cluster-43979-11", + "summary": "Cluster of 1 issues and 10 PRs centered on issue #43979.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 43979, + "canonical_pr_number": 44007, + "issue_numbers": [ + 43979 + ], + "pr_numbers": [ + 43996, + 44007, + 44013, + 44044, + 44066, + 44072, + 44085, + 44129, + 44154, + 44722 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 43996, + "right_pr_number": 44007, + "code_similarity": 0.179, + "size_similarity": 0.576, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.429, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44013, + "code_similarity": 0.122, + "size_similarity": 0.318, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.392, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44044, + "code_similarity": 0.245, + "size_similarity": 0.864, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.479, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44066, + "code_similarity": 0.225, + "size_similarity": 0.818, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.408, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44072, + "code_similarity": 0.14, + "size_similarity": 0.303, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 
0.528, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44085, + "code_similarity": 0.216, + "size_similarity": 0.783, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.398, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44129, + "code_similarity": 0.163, + "size_similarity": 0.643, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.229, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44154, + "code_similarity": 0.153, + "size_similarity": 0.535, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.31, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44722, + "code_similarity": 0.225, + "size_similarity": 0.848, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.368, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44013, + "code_similarity": 0.19, + "size_similarity": 0.553, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.531, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44044, + "code_similarity": 0.186, + "size_similarity": 0.667, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.354, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44066, + "code_similarity": 0.188, + "size_similarity": 0.704, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.315, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44072, + "code_similarity": 0.212, + "size_similarity": 0.526, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.708, + 
"shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44085, + "code_similarity": 0.195, + "size_similarity": 0.735, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.318, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44129, + "code_similarity": 0.103, + "size_similarity": 0.37, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.191, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44154, + "code_similarity": 0.238, + "size_similarity": 0.93, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.344, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44722, + "code_similarity": 0.178, + "size_similarity": 0.679, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.28, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44044, + "code_similarity": 0.126, + "size_similarity": 0.368, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.351, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44066, + "code_similarity": 0.127, + "size_similarity": 0.389, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.325, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44072, + "code_similarity": 0.29, + "size_similarity": 0.952, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.667, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44085, + "code_similarity": 0.131, + "size_similarity": 0.406, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.329, + "shared_filenames": [], + 
"shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44129, + "code_similarity": 0.07, + "size_similarity": 0.205, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.192, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44154, + "code_similarity": 0.177, + "size_similarity": 0.594, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.389, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44722, + "code_similarity": 0.118, + "size_similarity": 0.375, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.287, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44066, + "code_similarity": 0.25, + "size_similarity": 0.947, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.404, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44072, + "code_similarity": 0.136, + "size_similarity": 0.351, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.442, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44085, + "code_similarity": 0.24, + "size_similarity": 0.906, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.394, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44129, + "code_similarity": 0.147, + "size_similarity": 0.555, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.243, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44154, + "code_similarity": 0.17, + "size_similarity": 0.62, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.306, + "shared_filenames": [], + "shared_file_areas": [] + }, 
+ { + "left_pr_number": 44044, + "right_pr_number": 44722, + "code_similarity": 0.257, + "size_similarity": 0.982, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.402, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44072, + "code_similarity": 0.133, + "size_similarity": 0.37, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.393, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44085, + "code_similarity": 0.763, + "size_similarity": 0.957, + "file_overlap": 0.5, + "area_overlap": 0.825, + "patch_similarity": 0.887, + "shared_filenames": [ + "src/transformers/models/gptj/modeling_gptj.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/gptj/modeling_gptj.py", + "left_ranges": [ + [ + 33, + 41 + ], + [ + 174, + 181 + ], + [ + 250, + 257 + ], + [ + 398, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 466 + ], + [ + 471, + 488 + ], + [ + 518, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 606 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ], + "right_ranges": [ + [ + 33, + 40 + ], + [ + 173, + 180 + ], + [ + 249, + 256 + ], + [ + 397, + 405 + ], + [ + 408, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 465 + ], + [ + 470, + 489 + ], + [ + 517, + 539 + ], + [ + 553, + 559 + ], + [ + 565, + 574 + ], + [ + 579, + 597 + ], + [ + 600, + 611 + ], + [ + 633, + 639 + ], + [ + 645, + 652 + ], + [ + 657, + 673 + ], + [ + 716, + 728 + ], + [ + 737, + 743 + ], + [ + 748, + 771 + ], + [ + 789, + 794 + ] + ] + } + ] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44129, + "code_similarity": 0.145, + "size_similarity": 0.526, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.263, + "shared_filenames": [], + 
"shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44154, + "code_similarity": 0.174, + "size_similarity": 0.654, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.286, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44722, + "code_similarity": 0.907, + "size_similarity": 0.964, + "file_overlap": 1.0, + "area_overlap": 0.808, + "patch_similarity": 0.874, + "shared_filenames": [ + "src/transformers/models/codegen/modeling_codegen.py", + "src/transformers/models/gptj/modeling_gptj.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/codegen/modeling_codegen.py", + "left_ranges": [ + [ + 245, + 266 + ] + ], + "right_ranges": [ + [ + 228, + 234 + ] + ] + }, + { + "filename": "src/transformers/models/gptj/modeling_gptj.py", + "left_ranges": [ + [ + 33, + 41 + ], + [ + 174, + 181 + ], + [ + 250, + 257 + ], + [ + 398, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 466 + ], + [ + 471, + 488 + ], + [ + 518, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 606 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ], + "right_ranges": [ + [ + 33, + 42 + ], + [ + 175, + 182 + ], + [ + 251, + 258 + ], + [ + 399, + 420 + ], + [ + 426, + 435 + ], + [ + 460, + 467 + ], + [ + 472, + 489 + ], + [ + 519, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 612 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 663 + ], + [ + 666, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ] + } + ] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44085, + "code_similarity": 0.137, + "size_similarity": 0.387, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.398, + "shared_filenames": [], + 
"shared_file_areas": [] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44129, + "code_similarity": 0.074, + "size_similarity": 0.195, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.231, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44154, + "code_similarity": 0.175, + "size_similarity": 0.566, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.414, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44722, + "code_similarity": 0.124, + "size_similarity": 0.357, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.347, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44085, + "right_pr_number": 44129, + "code_similarity": 0.141, + "size_similarity": 0.503, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.272, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44085, + "right_pr_number": 44154, + "code_similarity": 0.18, + "size_similarity": 0.684, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.289, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44085, + "right_pr_number": 44722, + "code_similarity": 0.728, + "size_similarity": 0.923, + "file_overlap": 0.5, + "area_overlap": 0.791, + "patch_similarity": 0.78, + "shared_filenames": [ + "src/transformers/models/gptj/modeling_gptj.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/gptj/modeling_gptj.py", + "left_ranges": [ + [ + 33, + 40 + ], + [ + 173, + 180 + ], + [ + 249, + 256 + ], + [ + 397, + 405 + ], + [ + 408, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 465 + ], + [ + 470, + 489 + ], + [ + 517, + 539 + ], + [ + 553, + 559 + ], + [ + 565, + 574 + ], + [ + 579, + 597 + ], + [ + 600, + 611 + ], + [ + 633, + 639 + ], + [ + 645, + 652 + ], + [ + 657, + 673 + ], + [ + 
716, + 728 + ], + [ + 737, + 743 + ], + [ + 748, + 771 + ], + [ + 789, + 794 + ] + ], + "right_ranges": [ + [ + 33, + 42 + ], + [ + 175, + 182 + ], + [ + 251, + 258 + ], + [ + 399, + 420 + ], + [ + 426, + 435 + ], + [ + 460, + 467 + ], + [ + 472, + 489 + ], + [ + 519, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 612 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 663 + ], + [ + 666, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ] + } + ] + }, + { + "left_pr_number": 44129, + "right_pr_number": 44154, + "code_similarity": 0.099, + "size_similarity": 0.344, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.199, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44129, + "right_pr_number": 44722, + "code_similarity": 0.146, + "size_similarity": 0.545, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.247, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44154, + "right_pr_number": 44722, + "code_similarity": 0.164, + "size_similarity": 0.631, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.255, + "shared_filenames": [], + "shared_file_areas": [] + } + ] + }, + { + "cluster_id": "cluster-41211-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #41211.", + "status": "open", + "confidence": 0.55, + "canonical_issue_number": 41211, + "canonical_pr_number": 44339, + "issue_numbers": [ + 41211 + ], + "pr_numbers": [ + 41356, + 44339 + ], + "evidence_types": [ + "closing_reference" + ], + "pr_comparisons": [ + { + "left_pr_number": 41356, + "right_pr_number": 44339, + "code_similarity": 0.155, + "size_similarity": 0.094, + "file_overlap": 0.273, + "area_overlap": 0.078, + "patch_similarity": 0.183, + "shared_filenames": [ + "docs/source/en/model_doc/deimv2.md", + "src/transformers/models/deimv2/__init__.py", + 
"src/transformers/models/deimv2/configuration_deimv2.py", + "src/transformers/models/deimv2/modeling_deimv2.py", + "tests/models/deimv2/__init__.py", + "tests/models/deimv2/test_modeling_deimv2.py" + ], + "shared_file_areas": [ + { + "filename": "docs/source/en/model_doc/deimv2.md", + "left_ranges": [ + [ + 1, + 132 + ] + ], + "right_ranges": [ + [ + 1, + 65 + ] + ] + }, + { + "filename": "src/transformers/models/deimv2/__init__.py", + "left_ranges": [ + [ + 1, + 15 + ] + ], + "right_ranges": [ + [ + 1, + 29 + ] + ] + }, + { + "filename": "src/transformers/models/deimv2/configuration_deimv2.py", + "left_ranges": [ + [ + 1, + 74 + ] + ], + "right_ranges": [ + [ + 1, + 266 + ] + ] + }, + { + "filename": "tests/models/deimv2/test_modeling_deimv2.py", + "left_ranges": [ + [ + 1, + 15 + ] + ], + "right_ranges": [ + [ + 1, + 1734 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-43656-4", + "summary": "Cluster of 1 issues and 3 PRs centered on issue #43824.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 43824, + "canonical_pr_number": 43656, + "issue_numbers": [ + 43824 + ], + "pr_numbers": [ + 43656, + 43836, + 43842 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 43656, + "right_pr_number": 43836, + "code_similarity": 0.36, + "size_similarity": 0.176, + "file_overlap": 1.0, + "area_overlap": 0.051, + "patch_similarity": 0.048, + "shared_filenames": [ + "src/transformers/cli/serve.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/cli/serve.py", + "left_ranges": [ + [ + 11, + 18 + ], + [ + 30, + 36 + ], + [ + 315, + 323 + ], + [ + 665, + 671 + ], + [ + 931, + 937 + ], + [ + 1843, + 1849 + ], + [ + 1868, + 1874 + ] + ], + "right_ranges": [ + [ + 11, + 18 + ], + [ + 359, + 429 + ], + [ + 584, + 590 + ], + [ + 1892, + 1910 + ], + [ + 1917, + 1923 + ] + ] + } + ] + }, + { + "left_pr_number": 43656, + "right_pr_number": 43842, + 
"code_similarity": 0.405, + "size_similarity": 0.5, + "file_overlap": 1.0, + "area_overlap": 0.0, + "patch_similarity": 0.036, + "shared_filenames": [ + "src/transformers/cli/serve.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/cli/serve.py", + "left_ranges": [ + [ + 11, + 18 + ], + [ + 30, + 36 + ], + [ + 315, + 323 + ], + [ + 665, + 671 + ], + [ + 931, + 937 + ], + [ + 1843, + 1849 + ], + [ + 1868, + 1874 + ] + ], + "right_ranges": [ + [ + 54, + 61 + ], + [ + 587, + 593 + ] + ] + } + ] + }, + { + "left_pr_number": 43836, + "right_pr_number": 43842, + "code_similarity": 0.332, + "size_similarity": 0.088, + "file_overlap": 1.0, + "area_overlap": 0.033, + "patch_similarity": 0.017, + "shared_filenames": [ + "src/transformers/cli/serve.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/cli/serve.py", + "left_ranges": [ + [ + 11, + 18 + ], + [ + 359, + 429 + ], + [ + 584, + 590 + ], + [ + 1892, + 1910 + ], + [ + 1917, + 1923 + ] + ], + "right_ranges": [ + [ + 54, + 61 + ], + [ + 587, + 593 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-43240-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #43240.", + "status": "open", + "confidence": 0.75, + "canonical_issue_number": 43240, + "canonical_pr_number": 43251, + "issue_numbers": [ + 43240 + ], + "pr_numbers": [ + 43251, + 43254 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target" + ], + "pr_comparisons": [ + { + "left_pr_number": 43251, + "right_pr_number": 43254, + "code_similarity": 0.794, + "size_similarity": 0.64, + "file_overlap": 1.0, + "area_overlap": 0.667, + "patch_similarity": 0.882, + "shared_filenames": [ + "src/transformers/loss/loss_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/loss/loss_utils.py", + "left_ranges": [ + [ + 30, + 50 + ] + ], + "right_ranges": [ + [ + 30, + 43 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-41115-3", + "summary": "Cluster of 1 issues and 2 PRs 
centered on issue #41115.", + "status": "open", + "confidence": 0.55, + "canonical_issue_number": 41115, + "canonical_pr_number": 45613, + "issue_numbers": [ + 41115 + ], + "pr_numbers": [ + 41116, + 45613 + ], + "evidence_types": [ + "closing_reference" + ], + "pr_comparisons": [ + { + "left_pr_number": 41116, + "right_pr_number": 45613, + "code_similarity": 0.805, + "size_similarity": 0.885, + "file_overlap": 0.75, + "area_overlap": 0.854, + "patch_similarity": 0.693, + "shared_filenames": [ + "docs/source/en/_toctree.yml", + "docs/source/en/model_doc/minicpm3.md", + "src/transformers/models/auto/modeling_auto.py", + "src/transformers/models/minicpm3/__init__.py", + "src/transformers/models/minicpm3/configuration_minicpm3.py", + "src/transformers/models/minicpm3/modeling_minicpm3.py", + "src/transformers/models/minicpm3/modular_minicpm3.py", + "tests/models/minicpm3/__init__.py", + "tests/models/minicpm3/test_modeling_minicpm3.py" + ], + "shared_file_areas": [ + { + "filename": "docs/source/en/_toctree.yml", + "left_ranges": [ + [ + 713, + 720 + ] + ], + "right_ranges": [ + [ + 713, + 720 + ] + ] + }, + { + "filename": "docs/source/en/model_doc/minicpm3.md", + "left_ranges": [ + [ + 1, + 67 + ] + ], + "right_ranges": [ + [ + 1, + 45 + ] + ] + }, + { + "filename": "src/transformers/models/auto/modeling_auto.py", + "left_ranges": [ + [ + 285, + 291 + ], + [ + 700, + 706 + ], + [ + 1298, + 1304 + ] + ], + "right_ranges": [ + [ + 699, + 705 + ], + [ + 1301, + 1307 + ] + ] + }, + { + "filename": "src/transformers/models/minicpm3/__init__.py", + "left_ranges": [ + [ + 1, + 29 + ] + ], + "right_ranges": [ + [ + 1, + 29 + ] + ] + }, + { + "filename": "src/transformers/models/minicpm3/configuration_minicpm3.py", + "left_ranges": [ + [ + 1, + 141 + ] + ], + "right_ranges": [ + [ + 1, + 126 + ] + ] + }, + { + "filename": "src/transformers/models/minicpm3/modeling_minicpm3.py", + "left_ranges": [ + [ + 1, + 544 + ] + ], + "right_ranges": [ + [ + 1, + 522 + ] + ] + }, + { + 
"filename": "src/transformers/models/minicpm3/modular_minicpm3.py", + "left_ranges": [ + [ + 1, + 444 + ] + ], + "right_ranges": [ + [ + 1, + 342 + ] + ] + }, + { + "filename": "tests/models/minicpm3/test_modeling_minicpm3.py", + "left_ranges": [ + [ + 1, + 109 + ] + ], + "right_ranges": [ + [ + 1, + 136 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-45081-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #45081.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 45081, + "canonical_pr_number": 45317, + "issue_numbers": [ + 45081 + ], + "pr_numbers": [ + 45086, + 45317 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 45086, + "right_pr_number": 45317, + "code_similarity": 0.589, + "size_similarity": 0.34, + "file_overlap": 0.5, + "area_overlap": 1.0, + "patch_similarity": 0.136, + "shared_filenames": [ + "src/transformers/tokenization_utils_tokenizers.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/tokenization_utils_tokenizers.py", + "left_ranges": [ + [ + 1360, + 1370 + ], + [ + 1374, + 1380 + ] + ], + "right_ranges": [ + [ + 1360, + 1370 + ], + [ + 1374, + 1380 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-45561-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #45561.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 45561, + "canonical_pr_number": 45645, + "issue_numbers": [ + 45561 + ], + "pr_numbers": [ + 45639, + 45645 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 45639, + "right_pr_number": 45645, + "code_similarity": 0.532, + "size_similarity": 0.619, + "file_overlap": 0.5, + "area_overlap": 0.543, + "patch_similarity": 0.451, + "shared_filenames": [ + "src/transformers/testing_utils.py", + "tests/utils/test_testing_utils.py" + ], + 
"shared_file_areas": [ + { + "filename": "src/transformers/testing_utils.py", + "left_ranges": [ + [ + 3529, + 3536 + ], + [ + 3753, + 3787 + ] + ], + "right_ranges": [ + [ + 3525, + 3558 + ], + [ + 3782, + 3788 + ] + ] + }, + { + "filename": "tests/utils/test_testing_utils.py", + "left_ranges": [ + [ + 1, + 86 + ] + ], + "right_ranges": [ + [ + 1, + 114 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-43698-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #43698.", + "status": "open", + "confidence": 0.75, + "canonical_issue_number": 43698, + "canonical_pr_number": 43779, + "issue_numbers": [ + 43698 + ], + "pr_numbers": [ + 43779, + 43816 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target" + ], + "pr_comparisons": [ + { + "left_pr_number": 43779, + "right_pr_number": 43816, + "code_similarity": 0.418, + "size_similarity": 0.538, + "file_overlap": 1.0, + "area_overlap": 0.02, + "patch_similarity": 0.02, + "shared_filenames": [ + "src/transformers/integrations/integration_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/integrations/integration_utils.py", + "left_ranges": [ + [ + 2227, + 2241 + ], + [ + 2303, + 2309 + ] + ], + "right_ranges": [ + [ + 2278, + 2291 + ], + [ + 2309, + 2322 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-44018-2", + "summary": "Cluster of 2 related pull requests linked by soft_similarity.", + "status": "open", + "confidence": 0.5, + "canonical_issue_number": null, + "canonical_pr_number": 44068, + "issue_numbers": [], + "pr_numbers": [ + 44018, + 44068 + ], + "evidence_types": [ + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 44018, + "right_pr_number": 44068, + "code_similarity": 0.766, + "size_similarity": 0.939, + "file_overlap": 1.0, + "area_overlap": 0.425, + "patch_similarity": 0.866, + "shared_filenames": [ + "src/transformers/models/gpt_neo/modeling_gpt_neo.py", + "tests/models/gpt_neo/test_modeling_gpt_neo.py" + ], + 
"shared_file_areas": [ + { + "filename": "src/transformers/models/gpt_neo/modeling_gpt_neo.py", + "left_ranges": [ + [ + 26, + 46 + ], + [ + 138, + 143 + ], + [ + 182, + 187 + ], + [ + 283, + 295 + ], + [ + 327, + 341 + ], + [ + 348, + 354 + ], + [ + 360, + 369 + ], + [ + 399, + 406 + ], + [ + 411, + 419 + ], + [ + 428, + 433 + ], + [ + 472, + 492 + ], + [ + 507, + 513 + ], + [ + 519, + 528 + ], + [ + 541, + 559 + ], + [ + 562, + 567 + ], + [ + 595, + 601 + ], + [ + 607, + 614 + ], + [ + 627, + 643 + ], + [ + 685, + 690 + ], + [ + 708, + 714 + ], + [ + 720, + 727 + ], + [ + 740, + 757 + ], + [ + 761, + 766 + ], + [ + 780, + 786 + ], + [ + 791, + 798 + ], + [ + 807, + 822 + ], + [ + 840, + 845 + ] + ], + "right_ranges": [ + [ + 26, + 31 + ], + [ + 34, + 44 + ], + [ + 136, + 141 + ], + [ + 180, + 185 + ], + [ + 281, + 293 + ], + [ + 325, + 339 + ], + [ + 346, + 352 + ], + [ + 358, + 367 + ], + [ + 397, + 404 + ], + [ + 409, + 417 + ], + [ + 426, + 434 + ], + [ + 463, + 483 + ], + [ + 498, + 504 + ], + [ + 510, + 519 + ], + [ + 532, + 550 + ], + [ + 553, + 564 + ], + [ + 586, + 592 + ], + [ + 598, + 605 + ], + [ + 618, + 634 + ], + [ + 676, + 688 + ], + [ + 699, + 705 + ], + [ + 711, + 718 + ], + [ + 731, + 748 + ], + [ + 752, + 762 + ], + [ + 771, + 777 + ], + [ + 782, + 789 + ], + [ + 798, + 813 + ], + [ + 831, + 836 + ] + ] + }, + { + "filename": "tests/models/gpt_neo/test_modeling_gpt_neo.py", + "left_ranges": [ + [ + 458, + 464 + ] + ], + "right_ranges": [ + [ + 458, + 464 + ] + ] + } + ] + } + ] + } + ], + "duplicate_issues": [], + "duplicate_prs": [ + { + "cluster_id": "cluster-41115-3", + "canonical_pr_number": 45613, + "duplicate_pr_numbers": [ + 41116 + ], + "target_issue_number": 41115, + "reason": "PRs in cluster-41115-3 are treated as duplicates because they converge on issue #41115 with closing_reference evidence." 
+ }, + { + "cluster_id": "cluster-41211-3", + "canonical_pr_number": 44339, + "duplicate_pr_numbers": [ + 41356 + ], + "target_issue_number": 41211, + "reason": "PRs in cluster-41211-3 are treated as duplicates because they converge on issue #41211 with closing_reference evidence." + }, + { + "cluster_id": "cluster-43240-3", + "canonical_pr_number": 43251, + "duplicate_pr_numbers": [ + 43254 + ], + "target_issue_number": 43240, + "reason": "PRs in cluster-43240-3 are treated as duplicates because they converge on issue #43240 with closing_reference, shared_issue_target evidence." + }, + { + "cluster_id": "cluster-43656-4", + "canonical_pr_number": 43656, + "duplicate_pr_numbers": [ + 43836, + 43842 + ], + "target_issue_number": 43824, + "reason": "PRs in cluster-43656-4 are treated as duplicates because they converge on issue #43824 with closing_reference, shared_issue_target, soft_similarity evidence." + }, + { + "cluster_id": "cluster-43698-3", + "canonical_pr_number": 43779, + "duplicate_pr_numbers": [ + 43816 + ], + "target_issue_number": 43698, + "reason": "PRs in cluster-43698-3 are treated as duplicates because they converge on issue #43698 with closing_reference, shared_issue_target evidence." + }, + { + "cluster_id": "cluster-43979-11", + "canonical_pr_number": 44007, + "duplicate_pr_numbers": [ + 43996, + 44013, + 44044, + 44066, + 44072, + 44085, + 44129, + 44154, + 44722 + ], + "target_issue_number": 43979, + "reason": "PRs in cluster-43979-11 are treated as duplicates because they converge on issue #43979 with closing_reference, shared_issue_target, soft_similarity evidence." + }, + { + "cluster_id": "cluster-44018-2", + "canonical_pr_number": 44068, + "duplicate_pr_numbers": [ + 44018 + ], + "target_issue_number": null, + "reason": "PRs in cluster-44018-2 are treated as duplicates because they share soft_similarity evidence." 
+ }, + { + "cluster_id": "cluster-45081-3", + "canonical_pr_number": 45317, + "duplicate_pr_numbers": [ + 45086 + ], + "target_issue_number": 45081, + "reason": "PRs in cluster-45081-3 are treated as duplicates because they converge on issue #45081 with closing_reference, shared_issue_target, soft_similarity evidence." + }, + { + "cluster_id": "cluster-45561-3", + "canonical_pr_number": 45645, + "duplicate_pr_numbers": [ + 45639 + ], + "target_issue_number": 45561, + "reason": "PRs in cluster-45561-3 are treated as duplicates because they converge on issue #45561 with closing_reference, shared_issue_target, soft_similarity evidence." + } + ], + "best_issue": { + "cluster_id": "cluster-41211-3", + "issue_number": 41211, + "reason": "Issue #41211 is the strongest global issue candidate because it is open, belongs to a cluster with 3 artifacts, and carries 6 discussion comments plus 6 inbound references.", + "score": 148.0 + }, + "best_pr": { + "cluster_id": "cluster-43979-11", + "pr_number": 44007, + "reason": "PR #44007 is the strongest global PR candidate because it is open, belongs to a cluster with 11 artifacts, links to 1 issues, and carries 2 review/discussion events.", + "score": 238.886 + } +} diff --git a/snapshots/20260501T113108Z/analysis-runs/hybrid-model-20260501t113108z/analysis-report-hybrid.llm-reviews.json b/snapshots/20260501T113108Z/analysis-runs/hybrid-model-20260501t113108z/analysis-report-hybrid.llm-reviews.json new file mode 100644 index 0000000000000000000000000000000000000000..0cc1643dbac0f74eb3c76a621b875ca9b1710b0a --- /dev/null +++ b/snapshots/20260501T113108Z/analysis-runs/hybrid-model-20260501t113108z/analysis-report-hybrid.llm-reviews.json @@ -0,0 +1,18150 @@ +{ + "schema_version": "1.0", + "repo": "huggingface/transformers", + "snapshot_id": "20260501T113108Z", + "generated_at": "2026-05-01T11:39:25Z", + "model": "gpt-5.4-mini?service_tier=flex", + "reviews": [ + { + "label": "issue", + "component_index": 1, + "component_count": 14, + 
"review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-13244-3", + "nodes": [ + "issue:13244", + "issue:15980", + "issue:16618" + ], + "soft_pairs": [ + "issue:13244|issue:16618", + "issue:15980|issue:16618" + ], + "prepared_review_unit_hash": "b2b4497bbd41fdc39c6dab4d9500b9944ca443294a6d925e643ce74d380dfdb1", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 2477, + "estimated_input_tokens": 620, + "estimated_eval_tokens": 1496 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 2477, + "estimated_input_tokens": 620, + "estimated_eval_tokens": 1496 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The three issues are not true duplicates: one is about Tapas tokenization parity, one about a private-model auth/download error message, and one about failing to load bert-base-uncased. 
The only plausible commonality is model-loading/download friction, but the concrete failures differ.", + "confidence": 0.83, + "canonical_issue_reason": "Issue #16618 is the broadest and most central model-loading failure in this set, with the most discussion, so it is the best representative if one issue must anchor the cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "#16618 best captures the cluster\u2019s general theme of model loading problems and is the most likely target for cross-references or triage, even though it is not a duplicate of the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:13244", + "right": "issue:16618", + "accept": false, + "reason": "Tapas tokenization parity with TensorFlow is a different code-path problem from failing to load bert-base-uncased; same library area, but not the same bug." + }, + { + "left": "issue:15980", + "right": "issue:16618", + "accept": false, + "reason": "One is a bad error message when downloading a private model without login, the other is a model load failure for a specific public model; related subsystem, but not the same underlying issue." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:24643", + "issue:30277", + "issue:34634", + "issue:34928", + "issue:35545", + "issue:36010", + "issue:36331", + "issue:39290", + "issue:41093", + "issue:41720", + "issue:41762", + "issue:42915", + "issue:43039", + "issue:43404", + "issue:43425", + "issue:43531", + "issue:43541", + "issue:43596", + "issue:43638", + "issue:43716", + "issue:43792", + "issue:43827", + "issue:43828", + "issue:43854", + "issue:43866", + "issue:43901", + "issue:43975", + "issue:44292", + "issue:44322", + "issue:44457", + "issue:44509", + "issue:44512", + "issue:44560", + "issue:44661", + "issue:44805", + "issue:44841", + "issue:44863", + "issue:44918", + "issue:45070", + "issue:45081", + "issue:45084", + "issue:45161", + "issue:45237", + "issue:45362", + "issue:45464", + "issue:45507", + "issue:45685" + ], + "soft_pairs": [ + "issue:44322|issue:45464", + "issue:45237|issue:45685", + "issue:43716|issue:45237", + "issue:43828|issue:45237", + "issue:45237|issue:45507", + "issue:44292|issue:45237", + "issue:30277|issue:35545", + "issue:35545|issue:42915", + "issue:35545|issue:41720", + "issue:44509|issue:44512", + "issue:35545|issue:36010", + "issue:43638|issue:44805", + "issue:43596|issue:44805", + "issue:44805|issue:45161", + "issue:44805|issue:44918", + "issue:44661|issue:44805", + "issue:34634|issue:35545", + "issue:30277|issue:43638", + "issue:43596|issue:43638", + "issue:43975|issue:44457", + "issue:43039|issue:45161", + "issue:44292|issue:44457", + "issue:34928|issue:44805", + 
"issue:24643|issue:30277", + "issue:43638|issue:45161", + "issue:34928|issue:36331", + "issue:43596|issue:45161", + "issue:43716|issue:45507", + "issue:39290|issue:45081", + "issue:43828|issue:45161", + "issue:43425|issue:44292", + "issue:44918|issue:45161", + "issue:34928|issue:41720", + "issue:43716|issue:45685", + "issue:43541|issue:43716", + "issue:43828|issue:44292", + "issue:45507|issue:45685", + "issue:43716|issue:44292", + "issue:44292|issue:44918", + "issue:41762|issue:43638", + "issue:43039|issue:44918", + "issue:43827|issue:44512", + "issue:43716|issue:44560", + "issue:43828|issue:45685", + "issue:44918|issue:45685", + "issue:43039|issue:43828", + "issue:30277|issue:36331", + "issue:43039|issue:44805", + "issue:34928|issue:36010", + "issue:41762|issue:43596", + "issue:43901|issue:44512", + "issue:39290|issue:41720", + "issue:43866|issue:44863", + "issue:43039|issue:44560", + "issue:43854|issue:43866", + "issue:41093|issue:43638", + "issue:39290|issue:43531", + "issue:43425|issue:45070", + "issue:45362|issue:45464", + "issue:43404|issue:43866", + "issue:43792|issue:45070", + "issue:45464|issue:45685", + "issue:43541|issue:45685", + "issue:43541|issue:43828", + "issue:36010|issue:39290", + "issue:44841|issue:45084", + "issue:41093|issue:41762" + ], + "prepared_review_unit_hash": "58b88e8edbc20920314b52a9d588e47a1c6da9ebae85ea7ad4c827ee5186d42f", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 67, + "serialized_chars": 47117, + "estimated_input_tokens": 11780, + "estimated_eval_tokens": 23816 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly unrelated issue set; the only clear duplicate edge is the ZeRO-3/BertModel index-0 
failure pair. Most other high-similarity pairs are superficial symptom matches across different models, backends, or code paths.", + "confidence": 0.82, + "canonical_issue_reason": "issue:43596 is the clearest representative of the shared ZeRO-3/BertModel empty-index failure; issue:43638 appears to be the same bug with a slightly different repro.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43596 is the best canonical issue for this set because it most directly captures the recurring ZeRO-3/BertModel index-0 bug and is the cleanest duplicate anchor.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44322", + "right": "issue:45464", + "accept": false, + "reason": "Different Qwen3.5 failures: missing config attribute vs streaming chat/completions error." + }, + { + "left": "issue:45237", + "right": "issue:45685", + "accept": false, + "reason": "Unrelated backend issues: AMD GPU failure vs MPS Int histogram op missing." + }, + { + "left": "issue:43716", + "right": "issue:45237", + "accept": false, + "reason": "Different models and failure modes; dtype mismatch vs AMD GPU runtime failure." + }, + { + "left": "issue:43828", + "right": "issue:45237", + "accept": false, + "reason": "Autocast dtype mismatch is a different bug than the AMD GPU startup failure." + }, + { + "left": "issue:45237", + "right": "issue:45507", + "accept": false, + "reason": "Different code paths: GPU/backend crash vs invalid method call." + }, + { + "left": "issue:44292", + "right": "issue:45237", + "accept": false, + "reason": "Different model families and distinct runtime errors." + }, + { + "left": "issue:30277", + "right": "issue:35545", + "accept": false, + "reason": "DeepSpeed collective mismatch and ONNX export are unrelated." + }, + { + "left": "issue:35545", + "right": "issue:42915", + "accept": false, + "reason": "ONNX export error vs FP8 config load failure; different bugs." 
+ }, + { + "left": "issue:35545", + "right": "issue:41720", + "accept": false, + "reason": "Different problem areas: export tooling vs auto device-mapping assert." + }, + { + "left": "issue:44509", + "right": "issue:44512", + "accept": false, + "reason": "Both are docs regressions, but they refer to different removed v5 references." + }, + { + "left": "issue:35545", + "right": "issue:36010", + "accept": false, + "reason": "ONNX export issue is unrelated to GenerationMixin import failure." + }, + { + "left": "issue:43638", + "right": "issue:44805", + "accept": false, + "reason": "Same broad index-error theme, but the specific repros and code paths differ too much." + }, + { + "left": "issue:43596", + "right": "issue:44805", + "accept": false, + "reason": "ZeRO-3/Bert empty-index failure vs a generic mask/tensor shape mismatch; not clearly the same bug." + }, + { + "left": "issue:44805", + "right": "issue:45161", + "accept": false, + "reason": "Mask/index shape error is unrelated to GPT-OSS tensor-parallel failure." + }, + { + "left": "issue:44805", + "right": "issue:44918", + "accept": false, + "reason": "Different training/runtime paths and different underlying failures." + }, + { + "left": "issue:44661", + "right": "issue:44805", + "accept": false, + "reason": "Tokenizer-mapping bug is unrelated to the index/mask error." + }, + { + "left": "issue:34634", + "right": "issue:35545", + "accept": false, + "reason": "BarkProcessor voice preset handling is unrelated to ModernBERT ONNX export." + }, + { + "left": "issue:30277", + "right": "issue:43638", + "accept": false, + "reason": "Both involve DeepSpeed, but one is collective mismatch and the other is a Bert empty-index crash." + }, + { + "left": "issue:43596", + "right": "issue:43638", + "accept": true, + "reason": "Same ZeRO-3/BertModel index-0 empty-tensor failure; 43638 is effectively a rephrased report of the same bug." 
+ }, + { + "left": "issue:43975", + "right": "issue:44457", + "accept": false, + "reason": "Detokenization bug and LoRA save/load mismatch are different issues." + }, + { + "left": "issue:43039", + "right": "issue:45161", + "accept": false, + "reason": "Liger kernel cross_entropy path is unrelated to GPT-OSS tensor-parallel failure." + }, + { + "left": "issue:44292", + "right": "issue:44457", + "accept": false, + "reason": "Qwen-3 NVFP4 runtime error and LoRA merge/load inconsistency are unrelated." + }, + { + "left": "issue:34928", + "right": "issue:44805", + "accept": false, + "reason": "Activation checkpointing/FSDP recompute mismatch is a different failure than the generic mask/index error." + }, + { + "left": "issue:24643", + "right": "issue:30277", + "accept": false, + "reason": "Both are DeepSpeed-related, but the underlying runtime errors and repros differ." + }, + { + "left": "issue:43638", + "right": "issue:45161", + "accept": false, + "reason": "ZeRO-3 Bert index error is unrelated to GPT-OSS MoE tensor-parallel failure." + }, + { + "left": "issue:34928", + "right": "issue:36331", + "accept": false, + "reason": "FSDP activation-checkpointing issue is unrelated to the trainer signature change." + }, + { + "left": "issue:43596", + "right": "issue:45161", + "accept": false, + "reason": "Different model families and unrelated failure modes." + }, + { + "left": "issue:43716", + "right": "issue:45507", + "accept": false, + "reason": "Different models and distinct bugs: dtype mismatch vs invalid method call." + }, + { + "left": "issue:39290", + "right": "issue:45081", + "accept": false, + "reason": "Sliding-window config issue in vLLM is unrelated to the tokenizer regex patch crash." + }, + { + "left": "issue:43828", + "right": "issue:45161", + "accept": false, + "reason": "Autocast dtype mismatch is not the same as GPT-OSS TP failure." 
+ }, + { + "left": "issue:44918", + "right": "issue:45161", + "accept": false, + "reason": "TRL SFT embedding unpacking error is unrelated to GPT-OSS tensor-parallel failure." + }, + { + "left": "issue:34928", + "right": "issue:41720", + "accept": false, + "reason": "FSDP recompute mismatch and Qwen3 auto device-map cuda assert are different bugs." + }, + { + "left": "issue:43716", + "right": "issue:45685", + "accept": false, + "reason": "Different backends and different failures: dtype mismatch vs MPS unsupported op." + }, + { + "left": "issue:43541", + "right": "issue:43716", + "accept": false, + "reason": "Different model families and code paths; tracing grouped_mm vs image-preprocessor dtype mismatch." + }, + { + "left": "issue:43828", + "right": "issue:44292", + "accept": false, + "reason": "Both are Qwen/Phi-family runtime errors, but the concrete bugs are unrelated." + }, + { + "left": "issue:43039", + "right": "issue:44918", + "accept": false, + "reason": "Liger kernel cross_entropy call and TRL embedding unpacking are unrelated." + }, + { + "left": "issue:30277", + "right": "issue:36331", + "accept": false, + "reason": "DeepSpeed collective mismatch vs trainer API signature change are unrelated." + }, + { + "left": "issue:43039", + "right": "issue:44805", + "accept": false, + "reason": "Liger kernel behavior has no clear relation to the mask/index mismatch." + }, + { + "left": "issue:34928", + "right": "issue:36010", + "accept": false, + "reason": "FSDP checkpointing issue is unrelated to GenerationMixin import failure." + }, + { + "left": "issue:41762", + "right": "issue:43596", + "accept": false, + "reason": "Both are ZeRO-3/index errors, but they involve different models and are not the same concrete bug." + }, + { + "left": "issue:43901", + "right": "issue:44512", + "accept": false, + "reason": "Different docs regressions: return_all_scores wording vs removed run command." 
+ }, + { + "left": "issue:39290", + "right": "issue:41720", + "accept": false, + "reason": "Gemma3 sliding_window_pattern and Qwen3 auto device-map cuda assert are unrelated." + }, + { + "left": "issue:43866", + "right": "issue:44863", + "accept": false, + "reason": "Checkpoint corruption is not the same as an implementation failing to load checkpoints." + }, + { + "left": "issue:43039", + "right": "issue:44560", + "accept": false, + "reason": "Liger kernel cross_entropy issue and Qwen3-vl video StopIteration are unrelated." + }, + { + "left": "issue:43854", + "right": "issue:43866", + "accept": false, + "reason": "Unit-test loading failure and corrupted checkpoint are different bugs." + }, + { + "left": "issue:41093", + "right": "issue:43638", + "accept": false, + "reason": "Different index/mask shape errors and different repro contexts." + }, + { + "left": "issue:39290", + "right": "issue:43531", + "accept": false, + "reason": "Both mention sliding_window, but the model families and concrete failures differ." + }, + { + "left": "issue:43425", + "right": "issue:45070", + "accept": false, + "reason": "Torch-version incompatibility and pydantic config-field breakage are unrelated." + }, + { + "left": "issue:45362", + "right": "issue:45464", + "accept": false, + "reason": "Both involve Qwen3.5 chat, but one is a crash in transformers chat and the other is a streaming API failure." + }, + { + "left": "issue:43404", + "right": "issue:43866", + "accept": false, + "reason": "Mistral3 weight tying and Ovis2 checkpoint corruption are unrelated." + }, + { + "left": "issue:43792", + "right": "issue:45070", + "accept": false, + "reason": "Whisper loading failure is unrelated to the pydantic PretrainedConfig regression." + }, + { + "left": "issue:45464", + "right": "issue:45685", + "accept": false, + "reason": "Streaming inference on Qwen3.5 and an MPS Int op limitation are unrelated." 
+ }, + { + "left": "issue:43541", + "right": "issue:45685", + "accept": false, + "reason": "Torch dynamo tracing error and MPS histogram support issue are unrelated." + }, + { + "left": "issue:43541", + "right": "issue:43828", + "accept": false, + "reason": "Different runtime failures on different models; grouped_mm tracing vs autocast dtype mismatch." + }, + { + "left": "issue:36010", + "right": "issue:39290", + "accept": false, + "reason": "GenerationMixin import failure is unrelated to the Gemma3 sliding_window_pattern regression." + }, + { + "left": "issue:44841", + "right": "issue:45084", + "accept": false, + "reason": "Processor failure for Voxtral is unrelated to the template compilation error." + }, + { + "left": "issue:41093", + "right": "issue:41762", + "accept": false, + "reason": "Both are index errors, but they arise in different model/loading paths and are not the same bug." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is well grounded in the packet and stays conservative. The canonical choice of issue:43596 over issue:43638 is supported by the near-duplicate DeepSpeed ZeRO-3/Bert index-0 titles, and the rest of the soft-edge rejections are cautious given the clearly different models/backends/code paths in the packet. I did not see any overstated duplicate claims." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 2, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:39186", + "issue:39290", + "issue:39692", + "issue:40990", + "issue:41093", + "issue:41553", + "issue:42915", + "issue:43054", + "issue:43296", + "issue:43366", + "issue:43404", + "issue:43531", + "issue:43541", + "issue:43572", + "issue:43596", + "issue:43644", + "issue:43645", + "issue:43716", + "issue:43742", + "issue:43828", + "issue:43866", + "issue:43873", + "issue:43883", + "issue:43950", + "issue:43975", + "issue:43994", + "issue:44291", + "issue:44292", + "issue:44360", + "issue:44387", + "issue:44403", + "issue:44492", + "issue:44512", + "issue:44530", + "issue:44534", + "issue:44589", + "issue:44863", + "issue:44918", + "issue:44928", + "issue:44960", + "issue:45005", + "issue:45070", + "issue:45161", + "issue:45399", + "issue:45464", + "issue:45507", + "issue:45632", + "issue:45698" + ], + "soft_pairs": [ + "issue:43296|issue:43366", + "issue:43572|issue:45070", + "issue:44387|issue:45005", + "issue:43541|issue:45161", + "issue:43828|issue:45464", + "issue:44291|issue:45070", + "issue:43366|issue:45464", + "issue:43531|issue:45070", + "issue:39290|issue:43742", + "issue:43054|issue:43994", + "issue:41093|issue:43596", + "issue:43541|issue:44589", + "issue:40990|issue:44960", + "issue:45632|issue:45698", + "issue:43716|issue:44918", + "issue:39186|issue:44918", + "issue:43404|issue:45005", + "issue:43645|issue:45399", + "issue:44530|issue:45005", + "issue:44863|issue:45005", + "issue:44360|issue:44512", + "issue:42915|issue:45399", + "issue:43873|issue:45005", + "issue:44492|issue:44512", + 
"issue:44918|issue:45507", + "issue:43975|issue:44292", + "issue:43950|issue:44534", + "issue:43644|issue:43950", + "issue:44292|issue:45507", + "issue:39692|issue:43054", + "issue:43828|issue:44928", + "issue:43866|issue:44403", + "issue:41553|issue:43883" + ], + "prepared_review_unit_hash": "fde77ef4f8b30eb45cf3dcdbe180c100d5b5d6768139f94378d616a0b888d7e4", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 33, + "serialized_chars": 37182, + "estimated_input_tokens": 9296, + "estimated_eval_tokens": 18848 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly a grab-bag of unrelated Transformers bug reports; the only clear duplicate cluster is the 5.x non-persistent buffer corruption regression.", + "confidence": 0.94, + "canonical_issue_reason": "issue:43950 is the clearest root report for the non-persistent-buffer regression, and it matches the duplicate phrasing in issue:43644 and issue:44534.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43950 is the best representative issue because it names the concrete 5.x regression most directly and is the most likely anchor for duplicate consolidation.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43296", + "right": "issue:43366", + "accept": false, + "reason": "Different problems: vLLM/PaddleOCR-VL load failure vs GGUF architecture support request." + }, + { + "left": "issue:43572", + "right": "issue:45070", + "accept": false, + "reason": "Unrelated config regression vs pydantic-model field breakage." 
+ }, + { + "left": "issue:44387", + "right": "issue:45005", + "accept": false, + "reason": "Different failure modes: int4 OOM vs tied-weights handling on translation models." + }, + { + "left": "issue:43541", + "right": "issue:45161", + "accept": false, + "reason": "Torch-dynamo grouped_mm tracing error is not the same as GPT-OSS tensor-parallel support." + }, + { + "left": "issue:43828", + "right": "issue:45464", + "accept": false, + "reason": "Both mention dtype/runtime issues, but the models and code paths differ completely." + }, + { + "left": "issue:44291", + "right": "issue:45070", + "accept": false, + "reason": "init_empty_weights constructor-arg breakage is separate from the PretrainedConfig/pydantic field issue." + }, + { + "left": "issue:43366", + "right": "issue:45464", + "accept": false, + "reason": "Architecture support for GPT-OSS GGUF is unrelated to Qwen3.5 streaming inference failures." + }, + { + "left": "issue:43531", + "right": "issue:45070", + "accept": false, + "reason": "Qwen3-MoE sliding_window bug is not the same as the pydantic PretrainedConfig regression." + }, + { + "left": "issue:39290", + "right": "issue:43742", + "accept": false, + "reason": "Gemma3TextConfig sliding_window_pattern error and MobileLLM key error are distinct model-loading bugs." + }, + { + "left": "issue:43054", + "right": "issue:43994", + "accept": false, + "reason": "Both concern SigLIP2 quality, but one is embedding degradation and the other is bad AutoModel/pipeline outputs." + }, + { + "left": "issue:41093", + "right": "issue:43596", + "accept": false, + "reason": "Mask-shape mismatch and zero3 index error are different tensor/indexing failures." + }, + { + "left": "issue:43541", + "right": "issue:44589", + "accept": false, + "reason": "Different concrete bugs: torch tracing grouped_mm vs missing Float8 storage type." 
+ }, + { + "left": "issue:40990", + "right": "issue:44960", + "accept": false, + "reason": "High perplexity on gpt-oss-20b and GLM5 are unrelated model-quality reports." + }, + { + "left": "issue:45632", + "right": "issue:45698", + "accept": false, + "reason": "Both involve custom-module loading, but the titles point to different root causes; not enough to treat as the same bug." + }, + { + "left": "issue:43716", + "right": "issue:44918", + "accept": false, + "reason": "Image-preprocessor dtype mismatch is separate from Qwen3.5 embedding unpacking with TRL." + }, + { + "left": "issue:39186", + "right": "issue:44918", + "accept": false, + "reason": "FSDP 2-D weight assertion and TRL embedding unpacking are unrelated." + }, + { + "left": "issue:43404", + "right": "issue:45005", + "accept": false, + "reason": "LM head tying bug in Mistral3 is not the same as translation-model tied-weights issues in v5." + }, + { + "left": "issue:43645", + "right": "issue:45399", + "accept": false, + "reason": "Notebook custom-model init regression is unrelated to flash-attn fallback blocking." + }, + { + "left": "issue:44530", + "right": "issue:45005", + "accept": false, + "reason": "PagedAttentionCache/Qwen3.5. models issue is unrelated to tied-weights handling." + }, + { + "left": "issue:44863", + "right": "issue:45005", + "accept": false, + "reason": "NemotronH checkpoint loading failure is a different model-specific bug." + }, + { + "left": "issue:44360", + "right": "issue:44512", + "accept": false, + "reason": "DSA indexer behavior and documentation cleanup are unrelated." + }, + { + "left": "issue:42915", + "right": "issue:45399", + "accept": false, + "reason": "Qwen3Moe FineGrainedFP8 failure is separate from flash-attn fallback selection." + }, + { + "left": "issue:43873", + "right": "issue:45005", + "accept": false, + "reason": "Quantization/offloading behavior is not the same as tied-weights regressions." 
+ }, + { + "left": "issue:44492", + "right": "issue:44512", + "accept": false, + "reason": "A cache-strategy typo and removed CLI docs are unrelated documentation issues." + }, + { + "left": "issue:44918", + "right": "issue:45507", + "accept": false, + "reason": "TRl SFT embedding unpacking and GraniteMoEHybrid invalid method are different model/trainer bugs." + }, + { + "left": "issue:43975", + "right": "issue:44292", + "accept": false, + "reason": "Detokenization regression and NVFP4 runtime error are unrelated." + }, + { + "left": "issue:43950", + "right": "issue:44534", + "accept": true, + "reason": "Same regression: 5.x non-persistent buffers being filled with junk/corrupted." + }, + { + "left": "issue:43644", + "right": "issue:43950", + "accept": true, + "reason": "Same underlying bug: non-persistent buffers are corrupted during from_pretrained in 5.x." + }, + { + "left": "issue:44292", + "right": "issue:45507", + "accept": false, + "reason": "NVFP4 model runtime error and GraniteMoEHybrid invalid method are not the same code path." + }, + { + "left": "issue:39692", + "right": "issue:43054", + "accept": false, + "reason": "SigLIP2 docs/example breakage is unrelated to SigLIP2 embedding quality complaints." + }, + { + "left": "issue:43828", + "right": "issue:44928", + "accept": false, + "reason": "Different symptoms and causes: autocast dtype mismatch vs RLHF NaN explosion from 3D position_ids." + }, + { + "left": "issue:43866", + "right": "issue:44403", + "accept": false, + "reason": "Checkpoint corruption and loading noise are not the same underlying bug." + }, + { + "left": "issue:41553", + "right": "issue:43883", + "accept": false, + "reason": "Bad AutoTokenizer error messaging and missing all_tied_weights_keys are unrelated issues." 
+ } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is well grounded in the packet: the 43950/43644/44534 non-persistent-buffer regression is the only clearly supported duplicate cluster, and the other soft-edge pairs are rejected conservatively based on distinct titles and failure modes. No overstatement stands out." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 3, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:36331", + "issue:38175", + "issue:41553", + "issue:41762", + "issue:42491", + "issue:42915", + "issue:42947", + "issue:43054", + "issue:43257", + "issue:43404", + "issue:43493", + "issue:43643", + "issue:43742", + "issue:43782", + "issue:43792", + "issue:43824", + "issue:43827", + "issue:43854", + "issue:43856", + "issue:43866", + "issue:43872", + "issue:43881", + "issue:43883", + "issue:43940", + "issue:43950", + "issue:43975", + "issue:44220", + "issue:44291", + "issue:44368", + "issue:44387", + "issue:44403", + "issue:44451", + "issue:44488", + "issue:44509", + "issue:44589", + "issue:44661", + "issue:44863", + "issue:44960", + "issue:44991", + "issue:45020", + "issue:45081", + "issue:45237", + "issue:45313", + "issue:45356", + "issue:45399", + "issue:45520", + "issue:45701" + ], + "soft_pairs": [ + "issue:36331|issue:41762", + "issue:43872|issue:44589", + "issue:43054|issue:45701", + "issue:38175|issue:43054", + "issue:41553|issue:43881", + "issue:42947|issue:44387", + "issue:43054|issue:43493", + "issue:44403|issue:44451", + "issue:43881|issue:43883", + "issue:43975|issue:45520", + "issue:43643|issue:45020", + "issue:44403|issue:44488", + "issue:43975|issue:45237", + 
"issue:42947|issue:43856", + "issue:42491|issue:44368", + "issue:41762|issue:42915", + "issue:43782|issue:44960", + "issue:43975|issue:44451", + "issue:43824|issue:43872", + "issue:44661|issue:45020", + "issue:44991|issue:45399", + "issue:45020|issue:45356", + "issue:43827|issue:44509", + "issue:45081|issue:45399", + "issue:43792|issue:44220", + "issue:43404|issue:43950", + "issue:43257|issue:43866", + "issue:43742|issue:45399", + "issue:42915|issue:45020", + "issue:41553|issue:44991", + "issue:43742|issue:45020", + "issue:43940|issue:45313", + "issue:43872|issue:44291", + "issue:43854|issue:44863", + "issue:42915|issue:43872", + "issue:41553|issue:44488" + ], + "prepared_review_unit_hash": "11f28d0b049e4573edceb8668e99113b61d89c2d7312deff7110ef4b58f7d619", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 36, + "serialized_chars": 37864, + "estimated_input_tokens": 9466, + "estimated_eval_tokens": 19188 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly false-positive thematic matches: the cluster spans many unrelated model-loading, quantization, tokenizer, and docs issues. 
Only the v5 docs cleanup pair looks like a true duplicate; the rest are too model- or code-path-specific to merge safely.", + "confidence": 0.79, + "canonical_issue_reason": "issue:45020 is the broadest umbrella report in the set ('recent transformers versions break models using remote_code') and is the best representative of the shared regression theme, though the cluster is still quite heterogeneous.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45020 is the most suitable anchor issue because it is open, broad, and centrally covers the main compatibility-regression pattern seen across several items.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:36331", + "right": "issue:41762", + "accept": false, + "reason": "Different failures: CustomTrainer loss signature mismatch vs Gemma3 ZeRO-3 loading IndexError." + }, + { + "left": "issue:43872", + "right": "issue:44589", + "accept": false, + "reason": "Both are low-level dtype/storage errors, but they point to different incompatibilities (_is_hf_initialized vs missing Float8 storage) and different code paths." + }, + { + "left": "issue:43054", + "right": "issue:45701", + "accept": false, + "reason": "SigLIP2 embedding quality complaint vs tokenizer/version-induced tokenization change; not the same concrete bug." + }, + { + "left": "issue:38175", + "right": "issue:43054", + "accept": false, + "reason": "Same model family, but one is zero probabilities and the other is embedding quality regression; insufficient evidence of one root cause." + }, + { + "left": "issue:41553", + "right": "issue:43881", + "accept": false, + "reason": "Different models and failures: Voxtral tokenizer error vs glm-4v-9b load failure." + }, + { + "left": "issue:42947", + "right": "issue:44387", + "accept": false, + "reason": "Both mention memory/perf, but one is gradient checkpointing with LoRA and the other is int4 quantization CUDA reserved memory; different bugs." 
+ }, + { + "left": "issue:43054", + "right": "issue:43493", + "accept": false, + "reason": "Both involve SigLIP2, but one is general embedding degradation and the other is HF-vs-JAX implementation discrepancy; not clearly the same fix." + }, + { + "left": "issue:44403", + "right": "issue:44451", + "accept": false, + "reason": "Loading noise/logging issue vs a hard model loading failure for ScandiBERT." + }, + { + "left": "issue:43881", + "right": "issue:43883", + "accept": false, + "reason": "Different models and code paths: glm-4v-9b loading failure vs Molmo tied-weights attribute error." + }, + { + "left": "issue:43975", + "right": "issue:45520", + "accept": false, + "reason": "DeepSeek detokenization regression vs flash_attn import key error on Python 3.13; unrelated." + }, + { + "left": "issue:43643", + "right": "issue:45020", + "accept": false, + "reason": "Both involve remote_code/config loading, but one is missing fields in AutoConfig and the other is a broad recent-version breakage report; not the same concrete bug." + }, + { + "left": "issue:44403", + "right": "issue:44488", + "accept": false, + "reason": "Both are loading-related, but one is excessive noise/logging and the other is a specific tokenizer/model load failure." + }, + { + "left": "issue:43975", + "right": "issue:45237", + "accept": false, + "reason": "Tokenizer detokenization bug vs AMD GPU model execution issue; different subsystems." + }, + { + "left": "issue:42947", + "right": "issue:43856", + "accept": false, + "reason": "Gradient checkpointing ineffective with LoRA is unrelated to Qwen3 MoE training memory inefficiency." + }, + { + "left": "issue:42491", + "right": "issue:44368", + "accept": false, + "reason": "Both mention Qwen3/Qwen3.5 LoRA, but one is a cross-version incompatibility and the other is a warning about tie_word_embeddings; different problems." 
+ }, + { + "left": "issue:41762", + "right": "issue:42915", + "accept": false, + "reason": "Gemma3 ZeRO-3 load failure and Qwen3 MoE FP8 load failure are different model/config regressions." + }, + { + "left": "issue:43782", + "right": "issue:44960", + "accept": false, + "reason": "Different models and failures: Qwen3VL weight_only error vs GLM5 issue." + }, + { + "left": "issue:43975", + "right": "issue:44451", + "accept": false, + "reason": "DeepSeek tokenizer regression vs inability to load ScandiBERT; not the same bug." + }, + { + "left": "issue:43824", + "right": "issue:43872", + "accept": false, + "reason": "Import error for Qwen2_5_VL class vs bitsandbytes Int8Params constructor mismatch; unrelated." + }, + { + "left": "issue:44661", + "right": "issue:45020", + "accept": false, + "reason": "Model-template registration bug in add-new-model-like vs remote_code loading regressions; different code paths." + }, + { + "left": "issue:44991", + "right": "issue:45399", + "accept": false, + "reason": "Tokenizer load failure for EMBEDDIA/est-roberta vs flash-attn fallback gating; unrelated." + }, + { + "left": "issue:45020", + "right": "issue:45356", + "accept": false, + "reason": "Broad remote_code regression vs Kimi-K2.5 tokenizer codec regression; different concrete issues." + }, + { + "left": "issue:43827", + "right": "issue:44509", + "accept": true, + "reason": "Same docs cleanup after v5 pipeline removals: both report stale documentation still referencing removed pipeline tasks." + }, + { + "left": "issue:45081", + "right": "issue:45399", + "accept": false, + "reason": "Tokenizer regex patch crash vs flash-attn fallback blocked by checks; different bugs." + }, + { + "left": "issue:43792", + "right": "issue:44220", + "accept": false, + "reason": "Whisper model run failure vs generic fbank feature-extraction issue; possible relation, but not enough evidence of the same root bug." 
+ }, + { + "left": "issue:43404", + "right": "issue:43950", + "accept": false, + "reason": "Tied lm_head weights in Mistral3 vs silent corruption of non-persistent buffers; unrelated loader regressions." + }, + { + "left": "issue:43257", + "right": "issue:43866", + "accept": false, + "reason": "Qwen3 MoE weight conversion issue vs Ovis2 checkpoint corruption; different models and failures." + }, + { + "left": "issue:43742", + "right": "issue:45399", + "accept": false, + "reason": "MobileLLM key error while loading vs flash-attn fallback gating; unrelated." + }, + { + "left": "issue:42915", + "right": "issue:45020", + "accept": false, + "reason": "Qwen3 MoE ZeRO-3/FP8 load failure is a specific model/config issue, not the broader remote_code regression." + }, + { + "left": "issue:41553", + "right": "issue:44991", + "accept": false, + "reason": "Both are tokenizer loading problems, but different models and failure modes." + }, + { + "left": "issue:43742", + "right": "issue:45020", + "accept": false, + "reason": "Model-specific loading key error vs general remote_code regression; different scopes and causes." + }, + { + "left": "issue:43940", + "right": "issue:45313", + "accept": false, + "reason": "Qwen3-Next ZeRO-3 missing weights vs Qwen3.5 ZeRO-3 language_model load failure; similar theme but not enough evidence of one bug." + }, + { + "left": "issue:43872", + "right": "issue:44291", + "accept": false, + "reason": "bitsandbytes Int8Params constructor mismatch vs init_empty_weights/_is_hf_initialized error; both adapter/loading related but different incompatibilities." + }, + { + "left": "issue:43854", + "right": "issue:44863", + "accept": false, + "reason": "GLM-4.7-Flash unit-test load problem vs NemotronH checkpoint loading failure; different models and issues." + }, + { + "left": "issue:42915", + "right": "issue:43872", + "accept": false, + "reason": "Qwen3 MoE loading issue vs bitsandbytes constructor incompatibility; unrelated." 
+ }, + { + "left": "issue:41553", + "right": "issue:44488", + "accept": false, + "reason": "Bad AutoTokenizer error for Voxtral vs failure to load cjvt/sleng-bert; different models and bugs." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "Grounded and conservative overall. The summary matches the packet\u2019s broad mix of unrelated loading/tokenizer/quantization/docs issues, and the only accepted soft-edge pair (issue:43827 with issue:44509) is well-supported by the titles as docs cleanup around removed pipeline tasks. No obvious overstatement." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 4, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:24643", + "issue:30064", + "issue:36010", + "issue:36331", + "issue:38175", + "issue:39692", + "issue:41093", + "issue:42491", + "issue:42617", + "issue:42915", + "issue:43257", + "issue:43317", + "issue:43366", + "issue:43531", + "issue:43550", + "issue:43596", + "issue:43638", + "issue:43644", + "issue:43645", + "issue:43646", + "issue:43720", + "issue:43824", + "issue:43828", + "issue:43872", + "issue:43950", + "issue:43957", + "issue:43994", + "issue:44155", + "issue:44479", + "issue:44530", + "issue:44534", + "issue:44560", + "issue:44589", + "issue:44661", + "issue:44756", + "issue:44805", + "issue:44863", + "issue:44877", + "issue:44910", + "issue:44918", + "issue:44928", + "issue:45030", + "issue:45305", + "issue:45325", + "issue:45538", + "issue:45584", + "issue:45588", + "issue:45685" + ], + "soft_pairs": [ + "issue:44928|issue:45588", + "issue:43257|issue:45305", + "issue:36331|issue:41093", + "issue:41093|issue:44805", + 
"issue:43872|issue:45538", + "issue:44479|issue:44560", + "issue:44530|issue:44863", + "issue:24643|issue:30064", + "issue:43366|issue:43828", + "issue:44155|issue:45584", + "issue:43366|issue:43531", + "issue:43550|issue:44910", + "issue:44560|issue:45685", + "issue:43645|issue:43646", + "issue:43824|issue:43957", + "issue:43950|issue:45325", + "issue:42915|issue:43957", + "issue:43366|issue:44589", + "issue:44560|issue:44918", + "issue:42617|issue:43366", + "issue:38175|issue:43994", + "issue:43646|issue:43950", + "issue:36010|issue:42915", + "issue:44560|issue:44805", + "issue:44877|issue:45030", + "issue:43638|issue:44661", + "issue:43596|issue:44661", + "issue:43317|issue:44756", + "issue:43644|issue:44534", + "issue:38175|issue:39692", + "issue:42491|issue:43720" + ], + "prepared_review_unit_hash": "95afaa823216ad8348eb215623944f26ff0380adc8dbd3d4b5c75d500639e9e5", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 31, + "serialized_chars": 37222, + "estimated_input_tokens": 9306, + "estimated_eval_tokens": 18868 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is mostly a loose similarity set of unrelated transformer bugs; none of the soft pairs look like the same underlying issue strongly enough to merge. 
The clearest duplicate-looking subgroup is the Transformers v5 non-persistent-buffer regression, centered around issue 44534.", + "confidence": 0.81, + "canonical_issue_reason": "Issue 44534 is the most canonical representative of the shared regression theme: it is concise, broadly phrased, and matches the closely related non-persistent-buffer loading reports in the cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44534 is the best issue anchor because it captures the broadest version of the recurring Transformers v5 buffer-loading regression without overfitting to one model or workaround.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44928", + "right": "issue:45588", + "accept": false, + "reason": "Different failures: RLHF gradient explosion from 3D position_ids/SDPA fallback vs a flash-attention AttributeError on s_aux=None." + }, + { + "left": "issue:43257", + "right": "issue:45305", + "accept": false, + "reason": "Both involve DeepSpeed, but one is about MOE weight conversion during loading and the other about gradient averaging with model_accepts_loss_kwargs." + }, + { + "left": "issue:36331", + "right": "issue:41093", + "accept": false, + "reason": "Unrelated bugs: a trainer API signature change versus a mask/tensor shape mismatch." + }, + { + "left": "issue:41093", + "right": "issue:44805", + "accept": false, + "reason": "Same error text pattern, but the reports do not establish the same code path or root cause." + }, + { + "left": "issue:43872", + "right": "issue:45538", + "accept": false, + "reason": "Completely different areas: bitsandbytes init incompatibility vs CLIPTokenizer model_max_length." + }, + { + "left": "issue:44479", + "right": "issue:44560", + "accept": false, + "reason": "Related video/model families, but the concrete failures differ and do not clearly share one fixable code path." 
+ }, + { + "left": "issue:44530", + "right": "issue:44863", + "accept": false, + "reason": "Different model-loading bugs: PagedAttentionCache linear_attention crash vs NemotronH checkpoint loading failure." + }, + { + "left": "issue:24643", + "right": "issue:30064", + "accept": false, + "reason": "No common underlying bug: DeepSpeed training weight shape error vs image processor void-segmentation-map handling." + }, + { + "left": "issue:43366", + "right": "issue:43828", + "accept": false, + "reason": "Unrelated: GGUF architecture support request vs a dtype mismatch in Phi-tiny-MoE under autocast." + }, + { + "left": "issue:44155", + "right": "issue:45584", + "accept": false, + "reason": "Both mention incorrect inference behavior, but one is an AudioFlamingo3 batching leak and the other is Whisper empty-transcription generation." + }, + { + "left": "issue:43366", + "right": "issue:43531", + "accept": false, + "reason": "No match: GGUF gpt-oss support request vs a Qwen3-MoE sliding_window bug." + }, + { + "left": "issue:43550", + "right": "issue:44910", + "accept": false, + "reason": "Different subsystems and symptoms: torch.compile+SDPA crash in Bamba vs Qwen3.5 flash-attention illegal memory access from position_ids handling." + }, + { + "left": "issue:44560", + "right": "issue:45685", + "accept": false, + "reason": "Both are model-specific regressions, but one is a video StopIteration issue and the other is an MPS histogram Int implementation error in MOE." + }, + { + "left": "issue:43645", + "right": "issue:43646", + "accept": false, + "reason": "Closely related 5.x custom-model initialization regressions, but not the same concrete failure mode." + }, + { + "left": "issue:43824", + "right": "issue:43957", + "accept": false, + "reason": "Different load/import failures: missing Qwen2_5_VLForConditionalGeneration export vs meta-device loading breakage." 
+ }, + { + "left": "issue:43950", + "right": "issue:45325", + "accept": false, + "reason": "Shared Transformers v5 context, but one is non-persistent-buffer corruption and the other is a Qwen2.5-VL rope-index scaling bug." + }, + { + "left": "issue:42915", + "right": "issue:43957", + "accept": false, + "reason": "Different issues: Qwen3Moe FP8 failure vs meta-device loading regression." + }, + { + "left": "issue:43366", + "right": "issue:44589", + "accept": false, + "reason": "Unrelated: GGUF support request vs missing Float8 storage type when loading." + }, + { + "left": "issue:44560", + "right": "issue:44918", + "accept": false, + "reason": "Both are video/embedding related, but the failures are different and not clearly the same bug." + }, + { + "left": "issue:42617", + "right": "issue:43366", + "accept": false, + "reason": "No overlap beyond broad model-loading/runtime trouble; not the same bug." + }, + { + "left": "issue:38175", + "right": "issue:43994", + "accept": false, + "reason": "Same model family, but one report is zero probabilities and the other nonsensical AutoModel/pipeline output." + }, + { + "left": "issue:43646", + "right": "issue:43950", + "accept": false, + "reason": "Related regression family, but one is custom model initialization while the other is non-persistent buffer corruption." + }, + { + "left": "issue:36010", + "right": "issue:42915", + "accept": false, + "reason": "Different import/runtime problems: GenerationMixin import breakage vs Qwen3Moe FP8 loading failure." + }, + { + "left": "issue:44560", + "right": "issue:44805", + "accept": false, + "reason": "Different video/embedding issue vs generic mask-shape IndexError; no shared concrete code path." + }, + { + "left": "issue:44877", + "right": "issue:45030", + "accept": false, + "reason": "Different config validation problems for different model families." 
+ }, + { + "left": "issue:43638", + "right": "issue:44661", + "accept": false, + "reason": "Different failures: zero-sized Bert model indexing under ZeRO-3 vs tokenizer mapping name validation." + }, + { + "left": "issue:43596", + "right": "issue:44661", + "accept": false, + "reason": "No shared underlying bug: ZeRO-3 index error vs tokenizer mapping validation." + }, + { + "left": "issue:43317", + "right": "issue:44756", + "accept": false, + "reason": "Both are loading/memory related, but one is dequantized model offload and the other is mmap OOM on Strix Halo." + }, + { + "left": "issue:43644", + "right": "issue:44534", + "accept": false, + "reason": "These look related, but the titles are not identical enough to assert the same bug without the underlying traceback; keep separate." + }, + { + "left": "issue:38175", + "right": "issue:39692", + "accept": false, + "reason": "Same model family, but one is runtime zero-probability behavior and the other is documentation/example errors." + }, + { + "left": "issue:42491", + "right": "issue:43720", + "accept": false, + "reason": "Different compatibility issues: qwen3_moe LoRA loading across versions vs BitNet packed-weight loading with accelerate." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 5, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:36010", + "issue:39404", + "issue:39692", + "issue:41720", + "issue:42491", + "issue:42915", + "issue:42947", + "issue:43257", + "issue:43335", + "issue:43404", + "issue:43425", + "issue:43454", + "issue:43644", + "issue:43645", + "issue:43708", + "issue:43854", + "issue:43856", + "issue:43873", + "issue:43931", + "issue:43957", + "issue:43994", + "issue:44075", + "issue:44155", + "issue:44164", + "issue:44291", + "issue:44387", + "issue:44402", + "issue:44451", + "issue:44488", + "issue:44661", + "issue:44683", + "issue:44756", + "issue:44910", + "issue:44960", + "issue:44977", + "issue:44991", + "issue:45216", + "issue:45276", + "issue:45292", + "issue:45313", + "issue:45335", + "issue:45357", + "issue:45362", + "issue:45406", + "issue:45588", + "issue:45663", + "issue:45704" + ], + "soft_pairs": [ + "issue:45313|issue:45357", + "issue:44451|issue:44488", + "issue:44960|issue:44977", + "issue:43854|issue:43931", + "issue:43708|issue:44075", + "issue:42491|issue:45276", + "issue:41720|issue:42915", + "issue:43856|issue:43873", + "issue:43257|issue:43854", + "issue:42915|issue:44756", + "issue:39692|issue:43994", + "issue:44488|issue:44991", + "issue:42491|issue:43931", + "issue:44661|issue:44683", + "issue:43644|issue:43645", + "issue:43856|issue:44387", + "issue:45362|issue:45406", + "issue:44910|issue:45588", + "issue:42491|issue:45292", + "issue:43404|issue:43454", + "issue:45276|issue:45335", + "issue:36010|issue:39404", + "issue:44164|issue:44291", + 
"issue:44402|issue:44488", + "issue:41720|issue:44155", + "issue:39692|issue:44960", + "issue:43335|issue:45663", + "issue:43425|issue:43957", + "issue:43873|issue:44387", + "issue:42947|issue:45704", + "issue:44387|issue:44756", + "issue:43854|issue:45216", + "issue:44291|issue:44756" + ], + "prepared_review_unit_hash": "748fcd26268cc9a7649eca2f84597e0b5a4c9905ef075ecae50418683ed847bb", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 33, + "serialized_chars": 37009, + "estimated_input_tokens": 9253, + "estimated_eval_tokens": 18762 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Loose mixed cluster of Transformers issues, mostly about recent model-loading / generation regressions and a few version-specific architecture bugs. Only two duplicate-like pairs look strong enough to merge; the rest are similar only at a broad subsystem level.", + "confidence": 0.59, + "canonical_issue_reason": "issue:43404 is the clearest concrete bug report in the set: a missing lm_head weight-tying regression in Mistral3ForConditionalGeneration, with issue:43454 looking like the same underlying failure on AyaVision.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43404 is the best standalone issue here: it has a specific code-path, clear user-visible failure, and a clean fix surface that can plausibly cover the related AyaVision report too.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45313", + "right": "issue:45357", + "accept": false, + "reason": "Same model family, but one is ZeRO-3 load failure and the other is save_pretrained visual-encoder key regression; different code paths." 
+ }, + { + "left": "issue:44451", + "right": "issue:44488", + "accept": false, + "reason": "Both are loading failures on related BERT-like models, but the titles don\u2019t show a shared concrete bug or fix path." + }, + { + "left": "issue:44960", + "right": "issue:44977", + "accept": false, + "reason": "Unrelated models and failure modes: GLM5 vs Qwen3.5 flash-attention generation bug." + }, + { + "left": "issue:43854", + "right": "issue:43931", + "accept": false, + "reason": "Different models and different load failures; no evidence of the same underlying issue." + }, + { + "left": "issue:43708", + "right": "issue:44075", + "accept": false, + "reason": "Trainer checkpoint step accounting and SGD argument handling are separate bugs." + }, + { + "left": "issue:42491", + "right": "issue:45276", + "accept": false, + "reason": "Both are regressions, but one is Qwen3 MoE version compatibility and the other is Gemma4 embedding resize propagation." + }, + { + "left": "issue:41720", + "right": "issue:42915", + "accept": false, + "reason": "Both involve Qwen3, but one is a device-map CUDA assert and the other is a FineGrainedFP8Config failure." + }, + { + "left": "issue:43856", + "right": "issue:43873", + "accept": false, + "reason": "Both are memory-related, but one is Qwen3 MoE training inefficiency and the other is offloading with quantization." + }, + { + "left": "issue:43257", + "right": "issue:43854", + "accept": false, + "reason": "Qwen3 MoE weight conversion under accelerate+deepspeed is unrelated to GLM-4.7-Flash loading." + }, + { + "left": "issue:44661", + "right": "issue:44683", + "accept": false, + "reason": "Tokenizer mapping registration bug vs compiled flex_attention / torch>=2.9 failure; different subsystems." + }, + { + "left": "issue:43644", + "right": "issue:43645", + "accept": false, + "reason": "Both are Transformers 5.0 regressions, but they affect different initialization/save workflows." 
+ }, + { + "left": "issue:43856", + "right": "issue:44387", + "accept": false, + "reason": "Related broadly to memory, but not the same bug: Qwen3 MoE training footprint vs int4 quantization reserved-memory/OOM regression." + }, + { + "left": "issue:45362", + "right": "issue:45406", + "accept": false, + "reason": "Different products and failures: Qwen3.5 chat crash vs Gemma4 processor missing _tokenizer." + }, + { + "left": "issue:44910", + "right": "issue:45588", + "accept": false, + "reason": "Both touch flash attention, but one is packed-sequence misinterpretation on Qwen3.5 and the other is an s_aux=None AttributeError on sink-less models." + }, + { + "left": "issue:42491", + "right": "issue:45292", + "accept": false, + "reason": "Qwen3 MoE hf4.x/hf5.x compatibility is not the same as a generic resize_token_embeddings propagation bug." + }, + { + "left": "issue:43404", + "right": "issue:43454", + "accept": true, + "reason": "Same underlying bug class: lm_head weights not tied, causing bad generation; 43454 looks like the same issue on a different model." + }, + { + "left": "issue:45276", + "right": "issue:45335", + "accept": true, + "reason": "Both describe the same resize_token_embeddings propagation bug, with model-specific symptoms around additional embedding/output heads." + }, + { + "left": "issue:36010", + "right": "issue:39404", + "accept": false, + "reason": "Importing GenerationMixin and Whisper pipeline return_language are unrelated regressions." + }, + { + "left": "issue:44164", + "right": "issue:44291", + "accept": false, + "reason": "save/from_pretrained extra_state handling and init_empty_weights argument mismatch are distinct loader/saver bugs." + }, + { + "left": "issue:44402", + "right": "issue:44488", + "accept": false, + "reason": "Both are load problems, but one is a tokenizer vocab mismatch and the other is an unspecified model-loading failure." 
+ }, + { + "left": "issue:41720", + "right": "issue:44155", + "accept": false, + "reason": "CUDA assert in device mapping is unrelated to AudioFlamingo3 batched inference token/embedding leakage." + }, + { + "left": "issue:39692", + "right": "issue:44960", + "accept": false, + "reason": "SigLIP2 documentation/example issues are unrelated to GLM5." + }, + { + "left": "issue:43335", + "right": "issue:45663", + "accept": false, + "reason": "Sparse-layer config creation and FSDP2 shared_kv_states rebuild are different model bugs." + }, + { + "left": "issue:43425", + "right": "issue:43957", + "accept": false, + "reason": "Torch 2.10 compatibility and meta-device loading regressions are separate incompatibilities." + }, + { + "left": "issue:43873", + "right": "issue:44387", + "accept": false, + "reason": "Offloading/quantization behavior and int4 reserved-memory growth are related only at a high level, not the same concrete bug." + }, + { + "left": "issue:44387", + "right": "issue:44756", + "accept": false, + "reason": "Different OOM causes: quantization memory regression vs mmap on Strix Halo." + }, + { + "left": "issue:43854", + "right": "issue:45216", + "accept": false, + "reason": "Different models and failures: GLM-4.7-Flash test loading vs Qwen3.5 save_pretrained regression." + }, + { + "left": "issue:44291", + "right": "issue:44756", + "accept": false, + "reason": "init_empty_weights argument mismatch and Strix Halo mmap OOM are unrelated." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The cluster summary is grounded in the packet and stays conservative. The two accepted pairs are well-supported by the titles alone (43404/43454 on untied lm_head weights; 45276/45335 on resize_token_embeddings propagation). The remaining rejections are generally cautious and do not overclaim shared root causes beyond broad subsystem similarity." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 6, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:39404", + "issue:42947", + "issue:43257", + "issue:43299", + "issue:43381", + "issue:43425", + "issue:43611", + "issue:43645", + "issue:43646", + "issue:43708", + "issue:43716", + "issue:43828", + "issue:43901", + "issue:43906", + "issue:43931", + "issue:43957", + "issue:43994", + "issue:44079", + "issue:44164", + "issue:44265", + "issue:44292", + "issue:44402", + "issue:44451", + "issue:44479", + "issue:44509", + "issue:44530", + "issue:44589", + "issue:44671", + "issue:44743", + "issue:44756", + "issue:44805", + "issue:44863", + "issue:44877", + "issue:44898", + "issue:44912", + "issue:44918", + "issue:44928", + "issue:44960", + "issue:44991", + "issue:45084", + "issue:45276", + "issue:45292", + "issue:45335", + "issue:45588", + "issue:45663", + "issue:45684", + "issue:45698", + "issue:45701" + ], + "soft_pairs": [ + "issue:44292|issue:44912", + "issue:44743|issue:44960", + "issue:45276|issue:45292", + "issue:45684|issue:45698", + "issue:42947|issue:43381", + "issue:43957|issue:44756", + "issue:43381|issue:44928", + "issue:44164|issue:44756", + "issue:43646|issue:43708", + "issue:45588|issue:45663", + "issue:44479|issue:44877", + "issue:45292|issue:45335", + "issue:44402|issue:44451", + "issue:43645|issue:44164", + "issue:43257|issue:44863", + "issue:43828|issue:43957", + "issue:43425|issue:43828", + "issue:44589|issue:44756", + "issue:39404|issue:45701", + "issue:44479|issue:45663", + "issue:43906|issue:44530", + "issue:43931|issue:44863", + "issue:43611|issue:43645", + "issue:43425|issue:43716", + 
"issue:44530|issue:45084", + "issue:39404|issue:44912", + "issue:43901|issue:44509", + "issue:43299|issue:43931", + "issue:44863|issue:45663", + "issue:44479|issue:44805", + "issue:44265|issue:44898", + "issue:43257|issue:45663", + "issue:43994|issue:44079", + "issue:44671|issue:44991", + "issue:39404|issue:44671", + "issue:43425|issue:44918" + ], + "prepared_review_unit_hash": "260c9ce7432bbc0d3e3806ec8b515f1ad87e22aa28baf7a7bc7783fafe3b2f30", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 36, + "serialized_chars": 38409, + "estimated_input_tokens": 9603, + "estimated_eval_tokens": 19462 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous, with one clear duplicate subcluster around `resize_token_embeddings` not updating output embeddings. Most other soft pairs are related only at the subsystem level and should stay separate.", + "confidence": 0.94, + "canonical_issue_reason": "`issue:45292` is the most general phrasing of the `resize_token_embeddings` bug and cleanly covers the Gemma4- and t5gemma-specific reports.", + "canonical_pr_reason": null, + "best_issue_reason": "`issue:45292` is the best representative of the only strong duplicate set in the cluster because it states the shared failure mode without model-specific narrowing.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44292", + "right": "issue:44912", + "accept": false, + "reason": "Both are quantization/loading failures, but they involve different models and different failure modes." 
+ }, + { + "left": "issue:44743", + "right": "issue:44960", + "accept": false, + "reason": "Same broad MoE/cache area, but no evidence they describe the same recurrent-state bug." + }, + { + "left": "issue:45276", + "right": "issue:45292", + "accept": true, + "reason": "Same underlying bug: `resize_token_embeddings` does not propagate to the model's output embeddings / tied embedding structures." + }, + { + "left": "issue:45684", + "right": "issue:45698", + "accept": false, + "reason": "Both involve custom-model save/load, but one is a permissions problem and the other is wrong module resolution." + }, + { + "left": "issue:42947", + "right": "issue:43381", + "accept": false, + "reason": "Both mention gradient checkpointing, but one is LoRA ineffectiveness and the other is an eval-mode restriction." + }, + { + "left": "issue:43957", + "right": "issue:44756", + "accept": false, + "reason": "Unrelated failures: meta-device loading versus mmap/OOM behavior." + }, + { + "left": "issue:43381", + "right": "issue:44928", + "accept": false, + "reason": "Different problems: eval-mode checkpointing versus RLHF NaN explosion from 3D position ids." + }, + { + "left": "issue:44164", + "right": "issue:44756", + "accept": false, + "reason": "`extra_state` serialization is unrelated to Strix Halo mmap/OOM." + }, + { + "left": "issue:43646", + "right": "issue:43708", + "accept": false, + "reason": "Custom model initialization in notebooks is unrelated to `resume_from_checkpoint` step calculation." + }, + { + "left": "issue:45588", + "right": "issue:45663", + "accept": false, + "reason": "Different code paths: flash-attention `s_aux=None` crash versus Gemma4 FSDP2 shared-state rebuild." + }, + { + "left": "issue:44479", + "right": "issue:44877", + "accept": false, + "reason": "Video-input regression and strict-config loading are separate bugs." 
+ }, + { + "left": "issue:45292", + "right": "issue:45335", + "accept": true, + "reason": "Same duplicate family: `resize_token_embeddings` failing to update the output/decoder embeddings." + }, + { + "left": "issue:44402", + "right": "issue:44451", + "accept": false, + "reason": "Tokenizer vocab mismatch and model-loading failure are not the same concrete bug." + }, + { + "left": "issue:43645", + "right": "issue:44164", + "accept": false, + "reason": "Jupyter custom-model initialization and `extra_state` save/load handling are distinct issues." + }, + { + "left": "issue:43257", + "right": "issue:44863", + "accept": false, + "reason": "Different model-loading bugs affecting different architectures and failure points." + }, + { + "left": "issue:43828", + "right": "issue:43957", + "accept": false, + "reason": "Autocast dtype mismatch is not the same as meta-device loading breakage." + }, + { + "left": "issue:43425", + "right": "issue:43828", + "accept": false, + "reason": "Torch version incompatibility is too broad; the other is a specific dtype mismatch under autocast." + }, + { + "left": "issue:44589", + "right": "issue:44756", + "accept": false, + "reason": "Float8 storage lookup failure is unrelated to mmap/OOM on Strix Halo." + }, + { + "left": "issue:39404", + "right": "issue:45701", + "accept": false, + "reason": "Whisper pipeline language return and tokenizer-version drift are unrelated." + }, + { + "left": "issue:44479", + "right": "issue:45663", + "accept": false, + "reason": "Video-input regression and Gemma4 FSDP2 KeyError are different bugs." + }, + { + "left": "issue:43906", + "right": "issue:44530", + "accept": false, + "reason": "A reproduction of another issue is not enough to treat this as the same bug as the Qwen3.5 cache crash." + }, + { + "left": "issue:43931", + "right": "issue:44863", + "accept": false, + "reason": "Weight-shape mismatch for Qwen3-VL and NemotronH checkpoint loading are different failures." 
+ }, + { + "left": "issue:43611", + "right": "issue:43645", + "accept": false, + "reason": "Both are Transformer 5 regressions around custom models, but the reported breakages are different." + }, + { + "left": "issue:43425", + "right": "issue:43716", + "accept": false, + "reason": "Torch incompatibility and image-preprocessor dtype mismatch are unrelated." + }, + { + "left": "issue:44530", + "right": "issue:45084", + "accept": false, + "reason": "Qwen3.5 linear-attention cache crash is unrelated to template-node compilation failure." + }, + { + "left": "issue:39404", + "right": "issue:44912", + "accept": false, + "reason": "Whisper pipeline behavior and MXFP4 quantization fallback are unrelated." + }, + { + "left": "issue:43901", + "right": "issue:44509", + "accept": false, + "reason": "Both are docs-related, but they refer to different deprecated/changed pipeline APIs." + }, + { + "left": "issue:43299", + "right": "issue:43931", + "accept": false, + "reason": "Different Qwen3-VL loading regressions with different symptoms and likely different causes." + }, + { + "left": "issue:44863", + "right": "issue:45663", + "accept": false, + "reason": "NemotronH checkpoint loading and Gemma4 FSDP2 attention state handling are unrelated." + }, + { + "left": "issue:44479", + "right": "issue:44805", + "accept": false, + "reason": "Video regression and mask/tensor shape mismatch are not the same bug." + }, + { + "left": "issue:44265", + "right": "issue:44898", + "accept": false, + "reason": "`torch.export` failing on `torch_compilable_check` is unrelated to Perceiver interpolation failure." + }, + { + "left": "issue:43257", + "right": "issue:45663", + "accept": false, + "reason": "Accelerate/deepspeed conversion failure and Gemma4 FSDP2 `KeyError` are different problems." + }, + { + "left": "issue:43994", + "right": "issue:44079", + "accept": false, + "reason": "Incorrect predictions in a model/pipeline and `ModelOutput` key assignment are different failure modes." 
+ }, + { + "left": "issue:44671", + "right": "issue:44991", + "accept": false, + "reason": "CamemBERT masked-LM prediction drift and tokenizer loading failure are not the same issue." + }, + { + "left": "issue:39404", + "right": "issue:44671", + "accept": false, + "reason": "Whisper pipeline language return and CamemBERT masked-LM predictions are unrelated." + }, + { + "left": "issue:43425", + "right": "issue:44918", + "accept": false, + "reason": "Torch version compatibility and TRL embedding unpacking are unrelated." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The cluster summary is grounded in the packet: `issue:45292` is a reasonable canonical representative for the resize_token_embeddings duplicate family, and the two accepted soft edges (`45276`/`45292`, `45292`/`45335`) are conservative and well supported by the titles. The remaining soft-edge rejections stay appropriately cautious rather than overstating similarity." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 7, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:32090", + "issue:34567", + "issue:36032", + "issue:39404", + "issue:40444", + "issue:40990", + "issue:42915", + "issue:42947", + "issue:43317", + "issue:43404", + "issue:43421", + "issue:43425", + "issue:43452", + "issue:43482", + "issue:43541", + "issue:43582", + "issue:43596", + "issue:43611", + "issue:43632", + "issue:43643", + "issue:43644", + "issue:43645", + "issue:43646", + "issue:43716", + "issue:43856", + "issue:43883", + "issue:43957", + "issue:44164", + "issue:44206", + "issue:44220", + "issue:44451", + "issue:44466", + "issue:44479", + "issue:44488", + 
"issue:44530", + "issue:44589", + "issue:44683", + "issue:44756", + "issue:44811", + "issue:44849", + "issue:44987", + "issue:44991", + "issue:45081", + "issue:45084", + "issue:45325", + "issue:45584", + "issue:45685" + ], + "soft_pairs": [ + "issue:43856|issue:44756", + "issue:43596|issue:44683", + "issue:44589|issue:45084", + "issue:40990|issue:42947", + "issue:43425|issue:45685", + "issue:36032|issue:43452", + "issue:32090|issue:39404", + "issue:44991|issue:45081", + "issue:43957|issue:44164", + "issue:44164|issue:44479", + "issue:43582|issue:43632", + "issue:44488|issue:45081", + "issue:40990|issue:43421", + "issue:43883|issue:44488", + "issue:43611|issue:43644", + "issue:44488|issue:44987", + "issue:44206|issue:44220", + "issue:44811|issue:45584", + "issue:43716|issue:45084", + "issue:34567|issue:40444", + "issue:43643|issue:44530", + "issue:43883|issue:44991", + "issue:43644|issue:43646", + "issue:44451|issue:44987", + "issue:43452|issue:43482", + "issue:36032|issue:42915", + "issue:43317|issue:43482", + "issue:43541|issue:45084", + "issue:42915|issue:44530", + "issue:44849|issue:45325", + "issue:43404|issue:44466", + "issue:36032|issue:43645", + "issue:44987|issue:44991" + ], + "prepared_review_unit_hash": "3096386bb432f83b1874cdf837b0c91b74b160955fc153c82991144295a23679", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 33, + "serialized_chars": 36982, + "estimated_input_tokens": 9246, + "estimated_eval_tokens": 18748 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a grab bag of unrelated bug reports, with a small number of Transformers v5 loading regressions. 
Only one soft pair looks like the same underlying bug: the gguf_file/from_pretrained loading failure.", + "confidence": 0.81, + "canonical_issue_reason": "Issue 43452 is the broadest and clearest description of the gguf_file loading regression, and it naturally covers the model-specific follow-up in 43482.", + "canonical_pr_reason": null, + "best_issue_reason": "43452 is the best representative issue for this cluster because it states the core loading failure in a generic way and can subsume narrower duplicates.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43856", + "right": "issue:44756", + "accept": false, + "reason": "Different problems: Qwen3 MoE training memory use vs Strix Halo mmap OOM avoidance." + }, + { + "left": "issue:43596", + "right": "issue:44683", + "accept": false, + "reason": "BertModel/zero3 init IndexError is unrelated to torch>=2.9 flex_attention compilation." + }, + { + "left": "issue:44589", + "right": "issue:45084", + "accept": false, + "reason": "Float8 storage lookup error and template-node compile error are different failure modes." + }, + { + "left": "issue:40990", + "right": "issue:42947", + "accept": false, + "reason": "Perplexity regression on gpt-oss is unrelated to gradient checkpointing/LoRA behavior." + }, + { + "left": "issue:43425", + "right": "issue:45685", + "accept": false, + "reason": "Torch version incompatibility and an MPS histogram kernel missing for Int are not the same bug." + }, + { + "left": "issue:36032", + "right": "issue:43452", + "accept": false, + "reason": "Tokenizer special-token method conflict vs gguf_file/from_pretrained loading breakage." + }, + { + "left": "issue:32090", + "right": "issue:39404", + "accept": false, + "reason": "Trainer GPU broadcast NoneType error is unrelated to Whisper return_language pipeline failure." 
+ }, + { + "left": "issue:44991", + "right": "issue:45081", + "accept": false, + "reason": "Generic tokenizer load failure and Mistral regex patch crash are different code paths." + }, + { + "left": "issue:43957", + "right": "issue:44164", + "accept": false, + "reason": "torch.device(meta) loading problems are not the same as extra_state save/from_pretrained handling." + }, + { + "left": "issue:44164", + "right": "issue:44479", + "accept": false, + "reason": "extra_state serialization and Qwen video-input regression are unrelated." + }, + { + "left": "issue:43582", + "right": "issue:43632", + "accept": false, + "reason": "Apple Silicon allocator warmup TypeError is unrelated to the _is_hf_initialized flag regression." + }, + { + "left": "issue:44488", + "right": "issue:45081", + "accept": false, + "reason": "Both mention loading, but one is a model-load failure and the other is a specific Mistral regex patch crash." + }, + { + "left": "issue:40990", + "right": "issue:43421", + "accept": false, + "reason": "Perplexity on gpt-oss is unrelated to runtime special-token post-processor updates." + }, + { + "left": "issue:43883", + "right": "issue:44488", + "accept": false, + "reason": "Missing all_tied_weights_keys on Molmo is unrelated to loading cjvt/sleng-bert." + }, + { + "left": "issue:43611", + "right": "issue:43644", + "accept": false, + "reason": "Both are v5 regressions, but base_model_prefix loading and non-persistent-buffer initialization are different bugs." + }, + { + "left": "issue:44488", + "right": "issue:44987", + "accept": false, + "reason": "Different model-loading failures on different models; no shared concrete bug." + }, + { + "left": "issue:44206", + "right": "issue:44220", + "accept": false, + "reason": "Unsupported center arg in feature extractor and _torch_extract_fbank_features() issue are distinct audio regressions." 
+ }, + { + "left": "issue:44811", + "right": "issue:45584", + "accept": false, + "reason": "Both are Whisper-related, but batch_decode skip_special_tokens and empty-transcription generation are separate issues." + }, + { + "left": "issue:43716", + "right": "issue:45084", + "accept": false, + "reason": "Mistral-3 dtype mismatch is unrelated to the template-node compilation error." + }, + { + "left": "issue:34567", + "right": "issue:40444", + "accept": false, + "reason": "TrainerState token counting and Qwen2.5-VL multi-image IterableDataset failure are unrelated." + }, + { + "left": "issue:43643", + "right": "issue:44530", + "accept": false, + "reason": "trust_remote_code field loss is unrelated to PagedAttentionCache's linear_attention group error." + }, + { + "left": "issue:43883", + "right": "issue:44991", + "accept": false, + "reason": "Molmo tied-weights attribute error and est-roberta tokenizer loading failure do not share a concrete root cause." + }, + { + "left": "issue:43644", + "right": "issue:43646", + "accept": false, + "reason": "Non-persistent buffer junk and custom model initialization are related only at a high level, not the same bug." + }, + { + "left": "issue:44451", + "right": "issue:44987", + "accept": false, + "reason": "Two different model-loading failures with different affected models and symptoms." + }, + { + "left": "issue:43452", + "right": "issue:43482", + "accept": true, + "reason": "Both point to the same gguf_file/from_pretrained loading regression; 43482 is the model-specific manifestation of 43452." + }, + { + "left": "issue:36032", + "right": "issue:42915", + "accept": false, + "reason": "Tokenizer special-token conflict is unrelated to FineGrainedFP8Config failure in Qwen3Moe." + }, + { + "left": "issue:43317", + "right": "issue:43482", + "accept": false, + "reason": "device_map=auto dequantized-model offload failure is unrelated to gguf loading breakage." 
+ }, + { + "left": "issue:43541", + "right": "issue:45084", + "accept": false, + "reason": "Torch Dynamo grouped_mm tracing error is unrelated to the template-node compile issue." + }, + { + "left": "issue:42915", + "right": "issue:44530", + "accept": false, + "reason": "FineGrainedFP8Config training failure and Qwen3.5 PagedAttentionCache crash are different bugs." + }, + { + "left": "issue:44849", + "right": "issue:45325", + "accept": false, + "reason": "Qwen3.5 hidden-state output bug and Qwen2.5-VL rope-index scaling bug are separate model-specific regressions." + }, + { + "left": "issue:43404", + "right": "issue:44466", + "accept": false, + "reason": "Missing tied weights on Mistral3ForConditionalGeneration is not the same as device-dependent serialization inconsistency." + }, + { + "left": "issue:36032", + "right": "issue:43645", + "accept": false, + "reason": "Tokenizer method-name conflict and custom model initialization in notebooks are unrelated." + }, + { + "left": "issue:44987", + "right": "issue:44991", + "accept": false, + "reason": "Both are load failures, but for different models and likely different root causes." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "Grounded overall: the cluster is mostly heterogeneous bug reports, and the only clearly same-bug soft pair is 43452/43482 around the gguf_file/from_pretrained loading regression. The other soft-edge rejections are conservative and do not overclaim shared root cause." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 8, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:36032", + "issue:40444", + "issue:40990", + "issue:41553", + "issue:42175", + "issue:42915", + "issue:43262", + "issue:43296", + "issue:43317", + "issue:43322", + "issue:43377", + "issue:43404", + "issue:43531", + "issue:43540", + "issue:43582", + "issue:43611", + "issue:43632", + "issue:43645", + "issue:43646", + "issue:43742", + "issue:43819", + "issue:43856", + "issue:43994", + "issue:44062", + "issue:44079", + "issue:44220", + "issue:44265", + "issue:44291", + "issue:44403", + "issue:44451", + "issue:44479", + "issue:44534", + "issue:44589", + "issue:44610", + "issue:44683", + "issue:44756", + "issue:44811", + "issue:44877", + "issue:44987", + "issue:45081", + "issue:45084", + "issue:45127", + "issue:45216", + "issue:45325", + "issue:45357", + "issue:45507", + "issue:45542", + "issue:45663" + ], + "soft_pairs": [ + "issue:43262|issue:43377", + "issue:45084|issue:45507", + "issue:43646|issue:44534", + "issue:43994|issue:44610", + "issue:43645|issue:44534", + "issue:43322|issue:43540", + "issue:44403|issue:45081", + "issue:45216|issue:45357", + "issue:43377|issue:43819", + "issue:44451|issue:45081", + "issue:44220|issue:44683", + "issue:42915|issue:45084", + "issue:41553|issue:43582", + "issue:44265|issue:44610", + "issue:43404|issue:45127", + "issue:44987|issue:45081", + "issue:43632|issue:44756", + "issue:44479|issue:45325", + "issue:44062|issue:44987", + "issue:42915|issue:44291", + "issue:43296|issue:43531", + "issue:36032|issue:43742", + "issue:40444|issue:45325", + "issue:43611|issue:44534", + 
"issue:43531|issue:44877", + "issue:42915|issue:44589", + "issue:43317|issue:43856", + "issue:44079|issue:45663", + "issue:40990|issue:44811", + "issue:42175|issue:45542" + ], + "prepared_review_unit_hash": "dc22133b70acc0017f1619be1140e00d479f719889c090ef1fe4264244108e23", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36662, + "estimated_input_tokens": 9166, + "estimated_eval_tokens": 18588 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly heterogeneous issue set. The only duplicate-style pair I\u2019d accept is the Qwen3.5 save_pretrained regression (45216/45357). The Qwen VL/video and Qwen2.5-VL position-id pairs are related by theme but are still distinct bugs, so they should stay separate.", + "confidence": 0.64, + "canonical_issue_reason": "Issue:45216 is the best anchor because it has the only clear duplicate-style companion in the packet (45357) and its wording is the broader checkpoint-regression report; the rest of the set is too mixed to justify a stronger canonical choice.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue:45216 is the most suitable representative issue overall: it matches the single accepted issue-duplicate pair and describes the broadest concrete failure in that mini-cluster. The remaining issues are mostly unrelated regressions across different models and subsystems.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43262", + "right": "issue:43377", + "accept": false, + "reason": "Different bugs: audio chat-template sampling-rate handling vs MIMI batched-output mismatch from missing padding-mask support." 
+ }, + { + "left": "issue:45084", + "right": "issue:45507", + "accept": false, + "reason": "Different code paths and failures; same broad model/workflow theme is not enough to merge." + }, + { + "left": "issue:43646", + "right": "issue:44534", + "accept": false, + "reason": "Custom model initialization regression vs non-persistent buffer corruption are distinct bugs." + }, + { + "left": "issue:43994", + "right": "issue:44610", + "accept": false, + "reason": "Unrelated failures: bad inference results vs processor/model image-size mismatch." + }, + { + "left": "issue:43645", + "right": "issue:44534", + "accept": false, + "reason": "Notebook custom-model init bug and junk-filled non-persistent buffers are not the same defect." + }, + { + "left": "issue:43322", + "right": "issue:43540", + "accept": false, + "reason": "Different multimodal loading/video crashes with different triggers and code paths." + }, + { + "left": "issue:44403", + "right": "issue:45081", + "accept": false, + "reason": "Generic load noise vs tokenizer regex patch crash; no shared concrete bug." + }, + { + "left": "issue:45216", + "right": "issue:45357", + "accept": true, + "reason": "Same Qwen3.5 save_pretrained regression; 45357 looks like the narrower report on incorrect visual encoder keys within the same failure." + }, + { + "left": "issue:43377", + "right": "issue:43819", + "accept": false, + "reason": "Both touch audio models, but padding-mask mismatch and DAC from_latents/forward mismatch are different bugs." + }, + { + "left": "issue:44451", + "right": "issue:45081", + "accept": false, + "reason": "Different models and failures; no evidence of the same underlying issue." + }, + { + "left": "issue:44220", + "right": "issue:44683", + "accept": false, + "reason": "Audio feature extraction issue vs compiled flex_attention failure on newer torch are unrelated." 
+ }, + { + "left": "issue:42915", + "right": "issue:45084", + "accept": false, + "reason": "Both are regressions, but they describe different concrete failures and fixes." + }, + { + "left": "issue:41553", + "right": "issue:43582", + "accept": false, + "reason": "AutoTokenizer error handling and Apple Silicon allocator TypeError are unrelated." + }, + { + "left": "issue:44265", + "right": "issue:44610", + "accept": false, + "reason": "torch.export/torch_compilable_check failure is unrelated to the OmDet processor image-size regression." + }, + { + "left": "issue:43404", + "right": "issue:45127", + "accept": false, + "reason": "Both involve embeddings/weights, but they are different concrete bugs on different models and workflows." + }, + { + "left": "issue:44987", + "right": "issue:45081", + "accept": false, + "reason": "Specific model loading failure vs Mistral tokenizer regex patch crash; not the same bug." + }, + { + "left": "issue:43632", + "right": "issue:44756", + "accept": false, + "reason": "Transformers v5 flag regression vs mmap/OOM workaround request; no shared code-path bug." + }, + { + "left": "issue:44479", + "right": "issue:45325", + "accept": false, + "reason": "Related Qwen-VL/position-id theme, but one is a video-input regression and the other is a still-image temporal position-id scaling bug; too different to accept as a duplicate." + }, + { + "left": "issue:44062", + "right": "issue:44987", + "accept": false, + "reason": "Tokenizer AddedToken keyword conflict is unrelated to loading a specific model." + }, + { + "left": "issue:42915", + "right": "issue:44291", + "accept": false, + "reason": "Both are v5-era loading problems, but the concrete failures differ." + }, + { + "left": "issue:43296", + "right": "issue:43531", + "accept": false, + "reason": "Different models, modalities, and error mechanisms." 
+ }, + { + "left": "issue:36032", + "right": "issue:43742", + "accept": false, + "reason": "Tokenizer special-token conflict vs MobileLLM key error are not the same bug." + }, + { + "left": "issue:40444", + "right": "issue:45325", + "accept": false, + "reason": "Multi-image finetuning failure vs still-image temporal position-id scaling bug; only a broad Qwen-VL overlap." + }, + { + "left": "issue:43611", + "right": "issue:44534", + "accept": false, + "reason": "Different Transformers v5 regressions affecting different internals." + }, + { + "left": "issue:43531", + "right": "issue:44877", + "accept": false, + "reason": "Different model/config failures with no shared concrete defect." + }, + { + "left": "issue:40990", + "right": "issue:44811", + "accept": false, + "reason": "Perplexity anomaly and Whisper batch_decode skip_special_tokens handling are unrelated." + }, + { + "left": "issue:42175", + "right": "issue:45542", + "accept": false, + "reason": "Both concern TensorFlow backend packaging, but the triggers and failure modes differ." + }, + { + "left": "issue:44589", + "right": "issue:44987", + "accept": false, + "reason": "Missing Float8 storage type error vs model-loading failure are unrelated." + }, + { + "left": "issue:43317", + "right": "issue:43856", + "accept": false, + "reason": "GPU/CPU offload loading failure and MoE memory inefficiency are different bugs." + }, + { + "left": "issue:44079", + "right": "issue:45663", + "accept": false, + "reason": "ModelOutput key-assignment bug vs Gemma4 FSDP2 shared_kv_states KeyError are unrelated." + } + ] + }, + "evaluator_result": { + "accept": false, + "feedback": "The write-up is mostly careful, but one soft-edge accept is too strong for the packet: issue:44479 vs issue:45325 only share a broad Qwen-VL / position-id theme from the titles, while one is specifically a video-input regression and the other is a still-image temporal position-id scaling bug. 
That is enough for a tentative similarity, not a confident duplicate-style accept. Because the evaluation must stay conservative, I can\u2019t accept this clustering as written." + }, + "evaluator_used": true, + "retried": true, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 9, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:33666", + "issue:35707", + "issue:36032", + "issue:40444", + "issue:41720", + "issue:42175", + "issue:42915", + "issue:43296", + "issue:43377", + "issue:43421", + "issue:43425", + "issue:43454", + "issue:43482", + "issue:43525", + "issue:43531", + "issue:43611", + "issue:43632", + "issue:43644", + "issue:43646", + "issue:43749", + "issue:43792", + "issue:43818", + "issue:43828", + "issue:43881", + "issue:43994", + "issue:44079", + "issue:44265", + "issue:44291", + "issue:44403", + "issue:44466", + "issue:44488", + "issue:44534", + "issue:44568", + "issue:44589", + "issue:44683", + "issue:44829", + "issue:44898", + "issue:44991", + "issue:45020", + "issue:45072", + "issue:45081", + "issue:45325", + "issue:45357", + "issue:45381", + "issue:45479", + "issue:45491", + "issue:45538" + ], + "soft_pairs": [ + "issue:43818|issue:45357", + "issue:43425|issue:44403", + "issue:43632|issue:44534", + "issue:43421|issue:44568", + "issue:36032|issue:41720", + "issue:44683|issue:45020", + "issue:33666|issue:40444", + "issue:44829|issue:45479", + "issue:43482|issue:45538", + "issue:44403|issue:44991", + "issue:43792|issue:44589", + "issue:44079|issue:44534", + "issue:43482|issue:43525", + "issue:35707|issue:40444", + "issue:43377|issue:45491", + "issue:44291|issue:44589", + "issue:43749|issue:44079", + "issue:43644|issue:44079", + "issue:40444|issue:45381", + 
"issue:45325|issue:45381", + "issue:44265|issue:45072", + "issue:41720|issue:45081", + "issue:43632|issue:45072", + "issue:43531|issue:44291", + "issue:43881|issue:44488", + "issue:42175|issue:43296", + "issue:43611|issue:43646", + "issue:43994|issue:44898", + "issue:43454|issue:44466", + "issue:42915|issue:43482", + "issue:43828|issue:44683", + "issue:43525|issue:44589" + ], + "prepared_review_unit_hash": "d0928b90e396923581288ed0ef5aa1973c06b17d38113f7f9e82ee4b6c111383", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 32, + "serialized_chars": 36857, + "estimated_input_tokens": 9215, + "estimated_eval_tokens": 18686 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This is a heterogeneous cluster of mostly unrelated bug reports, with a loose theme around Transformers v5 regressions and model/tokenizer edge cases. 
I don\u2019t see any soft-edge pair that is safely the same underlying bug or change.", + "confidence": 0.86, + "canonical_issue_reason": "No true duplicate hub stands out; issue 43632 is the closest representative of the broad core-initialization regression theme, but the cluster is too mixed to treat it as a canonical duplicate.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43632 is the most representative single report of the cluster\u2019s main Transformers v5 initialization/loading regression pattern, though it still shouldn\u2019t absorb the rest as duplicates.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43818", + "right": "issue:45357", + "accept": false, + "reason": "Both involve Qwen multimodal models, but one is a Video-LLaVA tower bug and the other is Qwen3.5 save_pretrained key serialization; different failures." + }, + { + "left": "issue:43425", + "right": "issue:44403", + "accept": false, + "reason": "Torch 2.10 compatibility is unrelated to generic loading noise; no shared code-path bug." + }, + { + "left": "issue:43632", + "right": "issue:44534", + "accept": false, + "reason": "Both are Transformers v5 regressions, but one is `_is_hf_initialized` handling and the other is non-persistent buffer junk; distinct bugs." + }, + { + "left": "issue:43421", + "right": "issue:44568", + "accept": false, + "reason": "Both touch special-token behavior, but runtime post-processor updates and tokenizer `add_special_tokens` BOS/EOS insertion are different issues." + }, + { + "left": "issue:36032", + "right": "issue:41720", + "accept": false, + "reason": "Tokenizer method conflict and Qwen3 auto device mapping assert are unrelated." + }, + { + "left": "issue:44683", + "right": "issue:45020", + "accept": false, + "reason": "Compiled flex_attention on torch>=2.9 and remote_code model breakage are separate problems." 
+ }, + { + "left": "issue:33666", + "right": "issue:40444", + "accept": false, + "reason": "Both are multimodal training issues, but multi-GPU training and iterable-dataset multi-image failures are different code paths." + }, + { + "left": "issue:44829", + "right": "issue:45479", + "accept": false, + "reason": "Both cause bad training behavior, but flash_attention_3 degeneration and single-label zero-loss classification are distinct bugs." + }, + { + "left": "issue:43482", + "right": "issue:45538", + "accept": false, + "reason": "GGUF loading in v5 and CLIPTokenizer max length are unrelated." + }, + { + "left": "issue:44403", + "right": "issue:44991", + "accept": false, + "reason": "Generic loading noise and tokenizer loading failure for a specific model are not the same bug." + }, + { + "left": "issue:43792", + "right": "issue:44589", + "accept": false, + "reason": "Whisper loading/runtime failure and Float8 storage lookup are different failures." + }, + { + "left": "issue:44079", + "right": "issue:44534", + "accept": false, + "reason": "ModelOutput key assignment and buffer serialization junk are unrelated." + }, + { + "left": "issue:43482", + "right": "issue:43525", + "accept": false, + "reason": "Qwen2.5-GGUF loading and missing `pad_token_id` on Llama4Config are separate issues." + }, + { + "left": "issue:35707", + "right": "issue:40444", + "accept": false, + "reason": "Progressive generation with `inputs_embeds`/`past_key_values` is unrelated to iterable-dataset multi-image finetuning." + }, + { + "left": "issue:43377", + "right": "issue:45491", + "accept": false, + "reason": "Both are batching/padding-related, but MIMI padding-mask mismatch and Gemma3 sliding-window NaNs are different model/path bugs." + }, + { + "left": "issue:44291", + "right": "issue:44589", + "accept": false, + "reason": "Unexpected `_is_hf_initialized` arg during `init_empty_weights` is unrelated to Float8 storage resolution." 
+ }, + { + "left": "issue:43749", + "right": "issue:44079", + "accept": false, + "reason": "FSDP CPU RAM-efficient loading and ModelOutput key bookkeeping are unrelated." + }, + { + "left": "issue:43644", + "right": "issue:44079", + "accept": false, + "reason": "Non-persistent buffer serialization junk and ModelOutput key assignment are different code paths." + }, + { + "left": "issue:40444", + "right": "issue:45381", + "accept": false, + "reason": "Multi-image iterable finetuning failure and Qwen2.5-VL video position ids are unrelated." + }, + { + "left": "issue:45325", + "right": "issue:45381", + "accept": false, + "reason": "Both are Qwen2.5-VL position-id bugs, but one is still-image temporal scaling and the other is video vision_position_ids; not enough to call the same concrete bug." + }, + { + "left": "issue:44265", + "right": "issue:45072", + "accept": false, + "reason": "torch.export/export-check failures and bf16 dtype mismatches are different issues." + }, + { + "left": "issue:41720", + "right": "issue:45081", + "accept": false, + "reason": "Qwen3 device-mapping assert and Mistral tokenizer regex crash are unrelated." + }, + { + "left": "issue:43632", + "right": "issue:45072", + "accept": false, + "reason": "Core v5 init flag regression and bf16 dtype mismatch are unrelated." + }, + { + "left": "issue:43531", + "right": "issue:44291", + "accept": false, + "reason": "Qwen3-MoE sliding-window behavior and init_empty_weights `_is_hf_initialized` handling are not the same bug." + }, + { + "left": "issue:43881", + "right": "issue:44488", + "accept": false, + "reason": "GLM-4V loading failure and a specific tokenizer load failure are unrelated." + }, + { + "left": "issue:42175", + "right": "issue:43296", + "accept": false, + "reason": "Packaging/backend inclusion and vLLM PaddleOCR-VL loading are different issues." 
+ }, + { + "left": "issue:43611", + "right": "issue:43646", + "accept": false, + "reason": "Both are v5 model-init regressions, but base_model_prefix loading failure and custom initialization breakage are not clearly the same concrete bug." + }, + { + "left": "issue:43994", + "right": "issue:44898", + "accept": false, + "reason": "SigLIP2 nonsense outputs and Perceiver non-default resolution failure are unrelated vision-model bugs." + }, + { + "left": "issue:43454", + "right": "issue:44466", + "accept": false, + "reason": "lm_head tie failure causing bad generation and device-dependent serialization inconsistency are related in theme but not the same bug." + }, + { + "left": "issue:42915", + "right": "issue:43482", + "accept": false, + "reason": "Qwen3Moe FineGrainedFP8 failure and Qwen2.5-GGUF loading failure are unrelated." + }, + { + "left": "issue:43828", + "right": "issue:44683", + "accept": false, + "reason": "Autocast dtype mismatch on Phi-tiny-MoE and compiled flex_attention on torch>=2.9 are different problems." + }, + { + "left": "issue:43525", + "right": "issue:44589", + "accept": false, + "reason": "Missing `pad_token_id` and Float8 storage lookup errors are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 10, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:34634", + "issue:35707", + "issue:39692", + "issue:40444", + "issue:40990", + "issue:41720", + "issue:43262", + "issue:43296", + "issue:43317", + "issue:43322", + "issue:43377", + "issue:43404", + "issue:43454", + "issue:43526", + "issue:43550", + "issue:43606", + "issue:43632", + "issue:43701", + "issue:43720", + "issue:43746", + "issue:43749", + "issue:43792", + "issue:43819", + "issue:43873", + "issue:43881", + "issue:43883", + "issue:43957", + "issue:44186", + "issue:44265", + "issue:44291", + "issue:44387", + "issue:44466", + "issue:44534", + "issue:44610", + "issue:44617", + "issue:44683", + "issue:44877", + "issue:44898", + "issue:44977", + "issue:44991", + "issue:45072", + "issue:45325", + "issue:45357", + "issue:45563", + "issue:45584", + "issue:45632", + "issue:45684", + "issue:45701" + ], + "soft_pairs": [ + "issue:44387|issue:44617", + "issue:43792|issue:43957", + "issue:43701|issue:45325", + "issue:43526|issue:43883", + "issue:43262|issue:43746", + "issue:40990|issue:45584", + "issue:43317|issue:43873", + "issue:44291|issue:44877", + "issue:43881|issue:44991", + "issue:43632|issue:45563", + "issue:43606|issue:44265", + "issue:34634|issue:35707", + "issue:43873|issue:45701", + "issue:44186|issue:44265", + "issue:44610|issue:45325", + "issue:35707|issue:41720", + "issue:43606|issue:43819", + "issue:43454|issue:43550", + "issue:43377|issue:44610", + "issue:43377|issue:44898", + "issue:43454|issue:45072", + "issue:39692|issue:40444", + "issue:43749|issue:44534", + 
"issue:44466|issue:45357", + "issue:43792|issue:44683", + "issue:43296|issue:44991", + "issue:43454|issue:43720", + "issue:43296|issue:44877", + "issue:41720|issue:43322", + "issue:43404|issue:45357", + "issue:45632|issue:45684", + "issue:41720|issue:44977", + "issue:40990|issue:45701" + ], + "prepared_review_unit_hash": "be0f48de431d42728c5c73d5d28b153668c2e0e8d604baac796bb9e876a3c9c1", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 33, + "serialized_chars": 37623, + "estimated_input_tokens": 9406, + "estimated_eval_tokens": 19068 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is mostly a mix of unrelated transformer/model-loading regressions rather than one duplicate cluster. I rejected all soft edges because the paired reports target different models, code paths, or failure modes. If an anchor is needed, issue 43873 is the broadest discussion-heavy offloading/quantization report, but it is not a strong duplicate hub.", + "confidence": 0.74, + "canonical_issue_reason": "Issue 43873 is the most discussion-heavy and broadly framed report in the set, centered on offloading/quantization behavior, so it is the least arbitrary issue to anchor the noisy group.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43873 is the best representative only in a loose sense because it has the broadest scope and strongest surrounding discussion, but the overall set is not a coherent duplicate cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44387", + "right": "issue:44617", + "accept": false, + "reason": "Different models and symptoms: int4 quantization OOM vs generic SAM3Video OOM." 
+ }, + { + "left": "issue:43792", + "right": "issue:43957", + "accept": false, + "reason": "Both are loading failures, but one is Whisper-specific while the other is a broad meta-device loading regression." + }, + { + "left": "issue:43701", + "right": "issue:45325", + "accept": false, + "reason": "Checkpoint key mismatch and Qwen2.5-VL rope/indexing are unrelated bugs." + }, + { + "left": "issue:43526", + "right": "issue:43883", + "accept": false, + "reason": "BeitImageProcessorFast label reduction bug is unrelated to Molmo tied-weights attribute error." + }, + { + "left": "issue:43262", + "right": "issue:43746", + "accept": false, + "reason": "Audio sampling-rate default mismatch is unrelated to GraniteSpeech PEFT checkpoint loading." + }, + { + "left": "issue:40990", + "right": "issue:45584", + "accept": false, + "reason": "Perplexity regression and Whisper empty-transcription generation are different failures." + }, + { + "left": "issue:43317", + "right": "issue:43873", + "accept": false, + "reason": "Both mention offload/quantization, but the concrete failures and affected paths are different." + }, + { + "left": "issue:44291", + "right": "issue:44877", + "accept": false, + "reason": "Unexpected init_empty_weights argument and strict config loading are distinct issues." + }, + { + "left": "issue:43881", + "right": "issue:44991", + "accept": false, + "reason": "GLM-4V loading failure and tokenizer loading failure for est-roberta are unrelated." + }, + { + "left": "issue:43632", + "right": "issue:45563", + "accept": false, + "reason": "_is_hf_initialized regression and a stale generate() warning do not describe the same bug." + }, + { + "left": "issue:43606", + "right": "issue:44265", + "accept": false, + "reason": "CPU-offload device mismatch is unrelated to torch.export failures with torch_compilable_check." 
+ }, + { + "left": "issue:34634", + "right": "issue:35707", + "accept": false, + "reason": "Bark voice_preset bug and progressive generation with inputs_embeds/past_key_values are unrelated." + }, + { + "left": "issue:43873", + "right": "issue:45701", + "accept": false, + "reason": "Offloading/quantization behavior and version-dependent tokenization are different problems." + }, + { + "left": "issue:44186", + "right": "issue:44265", + "accept": false, + "reason": "Tokenizer crash on NER/padding is unrelated to torch.export compatibility." + }, + { + "left": "issue:44610", + "right": "issue:45325", + "accept": false, + "reason": "Processor output size mismatch and Qwen2.5-VL temporal position scaling are unrelated." + }, + { + "left": "issue:35707", + "right": "issue:41720", + "accept": false, + "reason": "Different Qwen issues: generation with inputs_embeds vs CUDA assert under auto device mapping." + }, + { + "left": "issue:43606", + "right": "issue:43819", + "accept": false, + "reason": "Bark CPU-offload mismatch and DAC latent mismatch are different code paths." + }, + { + "left": "issue:43454", + "right": "issue:43550", + "accept": false, + "reason": "lm_head tying bug and Bamba torch.compile SDPA failure are unrelated." + }, + { + "left": "issue:43377", + "right": "issue:44610", + "accept": false, + "reason": "MIMI padding-mask inconsistency and OmDet-Turbo input-size mismatch are different bugs." + }, + { + "left": "issue:43377", + "right": "issue:44898", + "accept": false, + "reason": "MIMI batch-vs-single mismatch and Perceiver resolution handling are unrelated." + }, + { + "left": "issue:43454", + "right": "issue:45072", + "accept": false, + "reason": "Weight-tying serialization bug and bf16 dtype mismatch are different failures." + }, + { + "left": "issue:39692", + "right": "issue:40444", + "accept": false, + "reason": "SigLIP2 doc-example errors and Qwen2.5-VL finetuning with multi-image iterable data are unrelated." 
+ }, + { + "left": "issue:43749", + "right": "issue:44534", + "accept": false, + "reason": "FSDP CPU RAM efficient loading and non-persistent buffer junk are not the same bug." + }, + { + "left": "issue:44466", + "right": "issue:45357", + "accept": false, + "reason": "Both involve save/serialization, but they affect different models and different saved keys." + }, + { + "left": "issue:43792", + "right": "issue:44683", + "accept": false, + "reason": "Whisper loading/runtime failure and compiled flex_attention torch-version regression are unrelated." + }, + { + "left": "issue:43296", + "right": "issue:44991", + "accept": false, + "reason": "PaddleOCR-VL load failure in vLLM and tokenizer loading failure for est-roberta are different problems." + }, + { + "left": "issue:43454", + "right": "issue:43720", + "accept": false, + "reason": "lm_head tying in Mistral3 and BitNet packed-weight unloading during accelerate loading are unrelated." + }, + { + "left": "issue:43296", + "right": "issue:44877", + "accept": false, + "reason": "Model loading failure in vLLM and strict granite_speech config rejection are distinct issues." + }, + { + "left": "issue:41720", + "right": "issue:43322", + "accept": false, + "reason": "Qwen3 device-map CUDA assert and Llava Next segmentation fault are different model-loading crashes." + }, + { + "left": "issue:43404", + "right": "issue:45357", + "accept": false, + "reason": "Mistral3 lm_head tying bug and Qwen3.5 visual-encoder save_pretrained regression are unrelated." + }, + { + "left": "issue:45632", + "right": "issue:45684", + "accept": false, + "reason": "Cache-path collision for trust_remote_code and read-only permission propagation are different save/load issues." + }, + { + "left": "issue:41720", + "right": "issue:44977", + "accept": false, + "reason": "CUDA assert with auto device mapping and flash-attention generation bug are different failures." 
+ }, + { + "left": "issue:40990", + "right": "issue:45701", + "accept": false, + "reason": "High perplexity regression and version-dependent tokenization are unrelated." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 11, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:36010", + "issue:36331", + "issue:41720", + "issue:42617", + "issue:42915", + "issue:43295", + "issue:43316", + "issue:43317", + "issue:43322", + "issue:43388", + "issue:43421", + "issue:43450", + "issue:43526", + "issue:43531", + "issue:43540", + "issue:43550", + "issue:43572", + "issue:43577", + "issue:43606", + "issue:43708", + "issue:43720", + "issue:43782", + "issue:43873", + "issue:43881", + "issue:44186", + "issue:44292", + "issue:44361", + "issue:44387", + "issue:44423", + "issue:44479", + "issue:44492", + "issue:44509", + "issue:44610", + "issue:44671", + "issue:44743", + "issue:44756", + "issue:44877", + "issue:44898", + "issue:44913", + "issue:44918", + "issue:44991", + "issue:45059", + "issue:45072", + "issue:45161", + "issue:45406", + "issue:45464", + "issue:45701", + "issue:45704" + ], + "soft_pairs": [ + "issue:44423|issue:45406", + "issue:44186|issue:45072", + "issue:43873|issue:44756", + "issue:43526|issue:43881", + "issue:43577|issue:43708", + "issue:44387|issue:45704", + "issue:42617|issue:42915", + "issue:44877|issue:44991", + "issue:43450|issue:45701", + "issue:36331|issue:42915", + "issue:43550|issue:43720", + "issue:36331|issue:42617", + "issue:44610|issue:44898", + "issue:44479|issue:44918", + "issue:44186|issue:44610", + "issue:43316|issue:44913", + "issue:44671|issue:44743", + 
"issue:42915|issue:43317", + "issue:43295|issue:45701", + "issue:43550|issue:43606", + "issue:41720|issue:42617", + "issue:43531|issue:43572", + "issue:41720|issue:45161", + "issue:44292|issue:44479", + "issue:43388|issue:45059", + "issue:44186|issue:44361", + "issue:43540|issue:43782", + "issue:42915|issue:43322", + "issue:36010|issue:42617", + "issue:41720|issue:43873", + "issue:45406|issue:45464", + "issue:36331|issue:41720", + "issue:43421|issue:45701", + "issue:44492|issue:44509" + ], + "prepared_review_unit_hash": "197fc57a6c795a7bce86e7b53bcc5966e56d6c6093054fe14fe121b84d9fa2b2", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 34, + "serialized_chars": 37765, + "estimated_input_tokens": 9442, + "estimated_eval_tokens": 19140 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is heterogeneous: most items are unrelated transformer bugs/features across different models and subsystems. 
All soft edges are rejected; none look like the same underlying bug/change.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue fits this cluster; the items span distinct regressions and model-specific failures with only loose thematic overlap.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43873 is the closest broad representative of the offloading/quantization subset, but it is not a true cluster-wide canonical bug.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44423", + "right": "issue:45406", + "accept": false, + "reason": "Both are serve-time multimodal crashes, but they involve different models and different failure modes (Qwen3.5/continuous batching vs Gemma4Processor _tokenizer)." + }, + { + "left": "issue:44186", + "right": "issue:45072", + "accept": false, + "reason": "One is a tokenizer/NER padding crash; the other is a dtype mismatch in inference. Different components and root causes." + }, + { + "left": "issue:43873", + "right": "issue:44756", + "accept": false, + "reason": "Both mention offloading/memory, but one is quantization/offload behavior and the other is mmap OOM on Strix Halo. Not the same bug." + }, + { + "left": "issue:43526", + "right": "issue:43881", + "accept": false, + "reason": "BeitImageProcessorFast label reduction is unrelated to glm-4v-9b model loading failure." + }, + { + "left": "issue:43577", + "right": "issue:43708", + "accept": false, + "reason": "Blip2 dtype persistence and Trainer resume/max_steps are unrelated issues in different code paths." + }, + { + "left": "issue:44387", + "right": "issue:45704", + "accept": false, + "reason": "Both are memory-related, but int4 quantization OOM and apex RMSNorm leak are different causes and fixes." + }, + { + "left": "issue:42617", + "right": "issue:42915", + "accept": false, + "reason": "A 3d_parallel.py launch problem and a Qwen3Moe FP8 failure are different model/runtime bugs." 
+ }, + { + "left": "issue:44877", + "right": "issue:44991", + "accept": false, + "reason": "Strict config loading and tokenizer loading are separate failures affecting different artifacts." + }, + { + "left": "issue:43450", + "right": "issue:45701", + "accept": false, + "reason": "Video processor batch-shape regression and tokenization-version changes are unrelated." + }, + { + "left": "issue:36331", + "right": "issue:42915", + "accept": false, + "reason": "CustomTrainer compute_loss signature mismatch is unrelated to Qwen3Moe FP8 loading." + }, + { + "left": "issue:43550", + "right": "issue:43720", + "accept": false, + "reason": "torch.compile+SDPA failure in Bamba and packed-weights unpacking in BitNet are different code paths." + }, + { + "left": "issue:36331", + "right": "issue:42617", + "accept": false, + "reason": "Trainer compute_loss API break and 3d_parallel.py failure are unrelated." + }, + { + "left": "issue:44610", + "right": "issue:44898", + "accept": false, + "reason": "Both concern image sizing, but OmDet-Turbo and Perceiver have different model/processor expectations and distinct bugs." + }, + { + "left": "issue:44479", + "right": "issue:44918", + "accept": false, + "reason": "Video-input regression and Qwen3.5 embedding unpacking with TRL SFT are unrelated." + }, + { + "left": "issue:44186", + "right": "issue:44610", + "accept": false, + "reason": "Tokenizer NER crash and image processor resolution mismatch are different subsystems and failures." + }, + { + "left": "issue:43316", + "right": "issue:44913", + "accept": false, + "reason": "Gemma3TextConfig API discrepancy and GPTNeoX rotary_pct reload issue are unrelated config bugs." + }, + { + "left": "issue:44671", + "right": "issue:44743", + "accept": false, + "reason": "CamemBERT masked-LM predictions and Qwen3.5 recurrent-state reset are unrelated model behaviors." 
+ }, + { + "left": "issue:42915", + "right": "issue:43317", + "accept": false, + "reason": "Qwen3Moe FP8 failure and dequantized-model device_map offload failure are different loading problems." + }, + { + "left": "issue:43295", + "right": "issue:45701", + "accept": false, + "reason": "Processor.tokenizer regression and tokenization-version changes are not the same underlying issue." + }, + { + "left": "issue:43550", + "right": "issue:43606", + "accept": false, + "reason": "Bamba SDPA/torch.compile bug and Bark CPU-offload device mismatch are unrelated." + }, + { + "left": "issue:41720", + "right": "issue:42617", + "accept": false, + "reason": "Qwen3 auto device mapping cuda assert and 3d_parallel.py launch failure do not share a concrete root cause." + }, + { + "left": "issue:43531", + "right": "issue:43572", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior and StableLmConfig pad_token_idx regression are separate issues." + }, + { + "left": "issue:41720", + "right": "issue:45161", + "accept": false, + "reason": "Both involve MoE/device parallelism, but they target different models and distinct failures." + }, + { + "left": "issue:44292", + "right": "issue:44479", + "accept": false, + "reason": "Qwen-3-8B-NVFP4 runtime error and v5.3.0 video regression are unrelated." + }, + { + "left": "issue:43388", + "right": "issue:45059", + "accept": false, + "reason": "Metric-gathering label truncation and SAM3 text/box odd behavior are different bugs." + }, + { + "left": "issue:44186", + "right": "issue:44361", + "accept": false, + "reason": "LayoutLMv2Tokenizer NER/padding crash and MLukeTokenizer task AttributeError are separate tokenizer issues." + }, + { + "left": "issue:43540", + "right": "issue:43782", + "accept": false, + "reason": "Qwen3OmniMoe video-processing ValueError and Qwen3VL weight_only load error are different code paths." 
+ }, + { + "left": "issue:42915", + "right": "issue:43322", + "accept": false, + "reason": "Qwen3Moe FP8 config failure and Llava Next segfault are unrelated model-loading bugs." + }, + { + "left": "issue:36010", + "right": "issue:42617", + "accept": false, + "reason": "GenerationMixin import error is unrelated to a 3d_parallel.py execution failure." + }, + { + "left": "issue:41720", + "right": "issue:43873", + "accept": false, + "reason": "Auto device-map cuda asserts and quantization/offloading issues are only loosely related, not the same bug." + }, + { + "left": "issue:45406", + "right": "issue:45464", + "accept": false, + "reason": "Gemma4Processor missing _tokenizer in serve and Qwen3.5 streaming chat/completions failure are different regressions." + }, + { + "left": "issue:36331", + "right": "issue:41720", + "accept": false, + "reason": "Trainer API signature mismatch and Qwen3 auto device mapping failure are unrelated." + }, + { + "left": "issue:43421", + "right": "issue:45701", + "accept": false, + "reason": "Runtime special-token post-processor updates and tokenization-version changes are not the same defect." + }, + { + "left": "issue:44492", + "right": "issue:44509", + "accept": false, + "reason": "A cache-strategy typo and outdated pipeline docs are separate documentation/config issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 12, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:30064", + "issue:33357", + "issue:34567", + "issue:34634", + "issue:35707", + "issue:39692", + "issue:41720", + "issue:42886", + "issue:43381", + "issue:43421", + "issue:43525", + "issue:43531", + "issue:43550", + "issue:43606", + "issue:43653", + "issue:43720", + "issue:43746", + "issue:43782", + "issue:43844", + "issue:43873", + "issue:43927", + "issue:43994", + "issue:44062", + "issue:44077", + "issue:44186", + "issue:44368", + "issue:44451", + "issue:44514", + "issue:44545", + "issue:44589", + "issue:44610", + "issue:44625", + "issue:44671", + "issue:44843", + "issue:44898", + "issue:44918", + "issue:44936", + "issue:44987", + "issue:44991", + "issue:45042", + "issue:45072", + "issue:45081", + "issue:45356", + "issue:45357", + "issue:45381", + "issue:45479", + "issue:45704" + ], + "soft_pairs": [ + "issue:33357|issue:35707", + "issue:30064|issue:34634", + "issue:44514|issue:44545", + "issue:41720|issue:43746", + "issue:43606|issue:43720", + "issue:43873|issue:45704", + "issue:43550|issue:43653", + "issue:44625|issue:45479", + "issue:44368|issue:45357", + "issue:43421|issue:43927", + "issue:43606|issue:43653", + "issue:34567|issue:35707", + "issue:34634|issue:41720", + "issue:44451|issue:44991", + "issue:44062|issue:44451", + "issue:43782|issue:45381", + "issue:42886|issue:44843", + "issue:44077|issue:44625", + "issue:44898|issue:45072", + "issue:44987|issue:45042", + "issue:43421|issue:43653", + "issue:43531|issue:44589", + "issue:39692|issue:44610", + 
"issue:44918|issue:44936", + "issue:43381|issue:43844", + "issue:43421|issue:44186", + "issue:43994|issue:44671", + "issue:43525|issue:43531", + "issue:45081|issue:45356" + ], + "prepared_review_unit_hash": "a04e4cd23dbc6a9f969972f173dc21d0c7aeda452d877d823c4450e4e3d8a668", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 29, + "serialized_chars": 36093, + "estimated_input_tokens": 9024, + "estimated_eval_tokens": 18304 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous: many items are unrelated model/tokenizer/training bugs that only share broad subsystem keywords. I only see two clear duplicate pairs: the Qwen2.5-VL batched `padding=False` crash, and the HF_HUB_OFFLINE / `_patch_mistral_regex` tokenizer failure.", + "confidence": 0.84, + "canonical_issue_reason": "issue:44843 is the clearest root-cause report for the HF_HUB_OFFLINE tokenizer regression, explicitly naming the failing `_patch_mistral_regex` path; issue:42886 is the broader symptom report for the same bug.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44843 is the best representative issue in this set because it pinpoints the offending code path and environment trigger, making it the most actionable canonical bug report.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:33357", + "right": "issue:35707", + "accept": false, + "reason": "Different failures: MacOS bus error with a CLIP model vs progressive generation with `inputs_embeds`/`past_key_values`." 
+ }, + { + "left": "issue:30064", + "right": "issue:34634", + "accept": false, + "reason": "Unrelated areas: void segmentation-map processing vs BarkProcessor `voice_preset` behavior." + }, + { + "left": "issue:44514", + "right": "issue:44545", + "accept": true, + "reason": "Same title and same concrete crash: `Qwen2_5_VLProcessor.apply_chat_template` on batched input with `padding=False`." + }, + { + "left": "issue:41720", + "right": "issue:43746", + "accept": false, + "reason": "Different root causes and code paths: Qwen3 auto device mapping CUDA assert vs GraniteSpeech PEFT local checkpoint loading." + }, + { + "left": "issue:43606", + "right": "issue:43720", + "accept": false, + "reason": "Both are CI/offload-related, but one is a device mismatch under CPU offload and the other is packed-weight unpacking during accelerate loading; not the same bug." + }, + { + "left": "issue:43873", + "right": "issue:45704", + "accept": false, + "reason": "Quantization/offloading behavior vs a T5 apex RMSNorm memory leak are separate issues." + }, + { + "left": "issue:43550", + "right": "issue:43653", + "accept": false, + "reason": "Different failures: torch.compile/SDPA on Bamba vs BigBirdTokenizer special-token registration." + }, + { + "left": "issue:44625", + "right": "issue:45479", + "accept": false, + "reason": "Both touch labels, but one is config propagation and the other is a zero-loss classification bug; not the same concrete problem." + }, + { + "left": "issue:44368", + "right": "issue:45357", + "accept": false, + "reason": "Different symptoms and likely fixes: tied-embeddings warning vs incorrect visual encoder keys in save/load." + }, + { + "left": "issue:43421", + "right": "issue:43927", + "accept": false, + "reason": "Tokenizer backend post-processor updates and DiaConfig custom token-ID persistence are different bug classes." 
+ }, + { + "left": "issue:43606", + "right": "issue:43653", + "accept": false, + "reason": "Device mismatch during offload vs tokenizer special-token registration; no shared underlying code path." + }, + { + "left": "issue:34567", + "right": "issue:35707", + "accept": false, + "reason": "Trainer token-count tracking vs generation with embeddings/past KV are unrelated." + }, + { + "left": "issue:34634", + "right": "issue:41720", + "accept": false, + "reason": "BarkProcessor voice preset bug vs Qwen3 auto device mapping CUDA assert." + }, + { + "left": "issue:44451", + "right": "issue:44991", + "accept": false, + "reason": "Both are tokenizer-loading regressions, but for different models and different failure modes; too broad to merge." + }, + { + "left": "issue:44062", + "right": "issue:44451", + "accept": false, + "reason": "AddedToken constructor argument conflict is not the same bug as failing to load ScandiBERT." + }, + { + "left": "issue:43782", + "right": "issue:45381", + "accept": false, + "reason": "Different Qwen multimodal issues: `from_pretrained` weight-only error vs video vision-position-id bug." + }, + { + "left": "issue:42886", + "right": "issue:44843", + "accept": true, + "reason": "Same underlying offline-mode failure in tokenizer loading; 44843 identifies the specific `_patch_mistral_regex`/`model_info()` cause." + }, + { + "left": "issue:44077", + "right": "issue:44625", + "accept": false, + "reason": "PatchTSMixer post-init policy issue vs Qwen3.5 `num_labels` propagation; unrelated." + }, + { + "left": "issue:44898", + "right": "issue:45072", + "accept": false, + "reason": "Different model-specific inference bugs: Perceiver resolution handling vs dtype mismatch in SwitchTransformers/TimmWrapperModel." + }, + { + "left": "issue:44987", + "right": "issue:45042", + "accept": false, + "reason": "Model-loading regression vs PIL image processors incorrectly requiring torchvision; separate problems." 
+ }, + { + "left": "issue:43421", + "right": "issue:43653", + "accept": false, + "reason": "Special-token post-processor refresh and BigBirdTokenizer mask-token registration are not the same bug." + }, + { + "left": "issue:43531", + "right": "issue:44589", + "accept": false, + "reason": "Sliding-window handling in Qwen3-MoE is unrelated to missing Float8 storage support." + }, + { + "left": "issue:39692", + "right": "issue:44610", + "accept": false, + "reason": "SigLIP2 docs/model mismatch and OmDet-Turbo processor size mismatch are different issues." + }, + { + "left": "issue:44918", + "right": "issue:44936", + "accept": false, + "reason": "TRL embedding unpacking during SFT is not the same as `trainer.evaluate()` failing after training." + }, + { + "left": "issue:43381", + "right": "issue:43844", + "accept": false, + "reason": "Gradient checkpointing eval-mode restriction is unrelated to DeepSpeed ZeRO-3 gradient growth." + }, + { + "left": "issue:43421", + "right": "issue:44186", + "accept": false, + "reason": "Runtime post-processor special-token updates vs LayoutLMv2Tokenizer padding/truncation crashes are different bugs." + }, + { + "left": "issue:43994", + "right": "issue:44671", + "accept": false, + "reason": "Different models and outputs: SigLIP2 nonsensical results vs CamemBERT masked-LM prediction regression." + }, + { + "left": "issue:43525", + "right": "issue:43531", + "accept": false, + "reason": "Missing `pad_token_id` on Llama4Config is unrelated to the Qwen3-MoE `sliding_window` bug." + }, + { + "left": "issue:45081", + "right": "issue:45356", + "accept": false, + "reason": "Both mention `fix_mistral_regex`, but the reported failures differ enough that they do not clearly describe the same concrete bug." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays conservative. 
The two accepted duplicates are well supported by near-identical titles and matching failure modes, and the other pairwise rejections do not overclaim shared root causes. The heterogeneity summary is also consistent with the listed issues." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 13, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:30064", + "issue:36010", + "issue:39692", + "issue:40444", + "issue:41720", + "issue:43322", + "issue:43335", + "issue:43381", + "issue:43421", + "issue:43531", + "issue:43540", + "issue:43550", + "issue:43577", + "issue:43596", + "issue:43618", + "issue:43643", + "issue:43644", + "issue:43653", + "issue:43697", + "issue:43716", + "issue:43720", + "issue:43746", + "issue:43749", + "issue:43756", + "issue:43818", + "issue:43819", + "issue:43873", + "issue:43927", + "issue:44008", + "issue:44062", + "issue:44186", + "issue:44220", + "issue:44448", + "issue:44488", + "issue:44661", + "issue:44671", + "issue:44811", + "issue:44843", + "issue:44877", + "issue:44898", + "issue:45072", + "issue:45081", + "issue:45183", + "issue:45381", + "issue:45479", + "issue:45685", + "issue:45704" + ], + "soft_pairs": [ + "issue:43596|issue:44220", + "issue:41720|issue:45704", + "issue:43618|issue:43697", + "issue:43381|issue:45704", + "issue:39692|issue:43873", + "issue:43746|issue:45381", + "issue:40444|issue:43746", + "issue:43746|issue:43873", + "issue:44220|issue:44661", + "issue:45183|issue:45479", + "issue:43540|issue:44008", + "issue:43335|issue:43756", + "issue:43720|issue:43819", + "issue:43716|issue:44008", + "issue:43653|issue:43720", + "issue:30064|issue:36010", + 
"issue:44062|issue:44488", + "issue:44843|issue:45081", + "issue:43322|issue:45685", + "issue:43577|issue:43818", + "issue:44186|issue:44898", + "issue:43550|issue:45072", + "issue:43531|issue:43643", + "issue:43643|issue:44877", + "issue:43421|issue:44811", + "issue:43550|issue:43927", + "issue:43540|issue:45381", + "issue:44448|issue:44671", + "issue:43644|issue:43749" + ], + "prepared_review_unit_hash": "9365df409774735a6d4f6359a5c3ef00189055b778b925ca06e665ff222a4807", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 29, + "serialized_chars": 36148, + "estimated_input_tokens": 9037, + "estimated_eval_tokens": 18330 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a loose mix of unrelated Transformers bug reports; the soft-similarity links are mostly false positives. The most representative issue is the highly discussed FSDP loading regression.", + "confidence": 0.78, + "canonical_issue_reason": "issue:43749 has the strongest triage signal (high discussion and inbound references) and is a broad, actionable infrastructure regression compared with the narrower one-off bugs in the set.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43749 is the best single issue to anchor the cluster because it is open, well-discussed, and broadly impactful.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43596", + "right": "issue:44220", + "accept": false, + "reason": "Different subsystems and failures: DeepSpeed/BertModel init vs audio feature extraction." 
+ }, + { + "left": "issue:41720", + "right": "issue:45704", + "accept": false, + "reason": "Qwen3 device-mapping assert is unrelated to T5/apex RMSNorm memory leak." + }, + { + "left": "issue:43618", + "right": "issue:43697", + "accept": false, + "reason": "CLIP attentions field regression and RTDetr output drift are separate model bugs." + }, + { + "left": "issue:43381", + "right": "issue:45704", + "accept": false, + "reason": "Gradient checkpointing eval-mode limitation is unrelated to the T5 apex leak." + }, + { + "left": "issue:39692", + "right": "issue:43873", + "accept": false, + "reason": "Docs example/model mismatch is not the same bug as quantization offloading behavior." + }, + { + "left": "issue:43746", + "right": "issue:45381", + "accept": false, + "reason": "PEFT checkpoint loading for GraniteSpeech is unrelated to Qwen2.5-VL video position IDs." + }, + { + "left": "issue:40444", + "right": "issue:43746", + "accept": false, + "reason": "Multi-image iterable dataset finetuning failure is a different code path from checkpoint loading." + }, + { + "left": "issue:43746", + "right": "issue:43873", + "accept": false, + "reason": "PEFT local checkpoint loading and quantization offloading are separate loading problems." + }, + { + "left": "issue:44220", + "right": "issue:44661", + "accept": false, + "reason": "Audio fbank extraction and tokenizer-mapping model registration are unrelated." + }, + { + "left": "issue:45183", + "right": "issue:45479", + "accept": false, + "reason": "Generic input validation messaging is not the same as the single-label-classification zero-loss bug." + }, + { + "left": "issue:43540", + "right": "issue:44008", + "accept": false, + "reason": "Qwen3OmniMoe video processing error is unrelated to Gemma3n variable-name collision." + }, + { + "left": "issue:43335", + "right": "issue:43756", + "accept": false, + "reason": "SwitchTransformers sparse-layer creation and Smollm3 RoPE-layer drop are unrelated model-setup bugs." 
+ }, + { + "left": "issue:43720", + "right": "issue:43819", + "accept": false, + "reason": "BitNet packed-weight loading and DAC STE mismatch affect different models and mechanics." + }, + { + "left": "issue:43716", + "right": "issue:44008", + "accept": false, + "reason": "Blip2 dtype mismatch is unrelated to Gemma3n's audio mask attribute error." + }, + { + "left": "issue:43653", + "right": "issue:43720", + "accept": false, + "reason": "Tokenizer special-token registration and BitNet accelerate loading are not the same defect." + }, + { + "left": "issue:30064", + "right": "issue:36010", + "accept": false, + "reason": "Void segmentation-map processing and GenerationMixin import failure are unrelated." + }, + { + "left": "issue:44062", + "right": "issue:44488", + "accept": false, + "reason": "AddedToken keyword collision is not the same as failing to load a specific model repo." + }, + { + "left": "issue:44843", + "right": "issue:45081", + "accept": false, + "reason": "Both involve _patch_mistral_regex, but one is offline model_info access and the other is a backend_tokenizer attribute crash; different failures." + }, + { + "left": "issue:43322", + "right": "issue:45685", + "accept": false, + "reason": "Llava Next segmentation fault and MPS histogram support error are unrelated." + }, + { + "left": "issue:43577", + "right": "issue:43818", + "accept": false, + "reason": "Blip2 dtype propagation and Video-LLaVA temporal-attention/weight-sharing are distinct issues." + }, + { + "left": "issue:44186", + "right": "issue:44898", + "accept": false, + "reason": "LayoutLMv2 tokenizer crashes and Perceiver interpolation failures are different model/tokenizer paths." + }, + { + "left": "issue:43550", + "right": "issue:45072", + "accept": false, + "reason": "torch.compile+SDPA on Bamba and bfloat16 dtype mismatch on other models are unrelated." 
+ }, + { + "left": "issue:43531", + "right": "issue:43643", + "accept": false, + "reason": "Qwen3-MoE sliding-window behavior and AutoConfig strict-field loss are different bugs." + }, + { + "left": "issue:43643", + "right": "issue:44877", + "accept": false, + "reason": "Both are config-loading issues, but one drops fields under trust_remote_code and the other rejects granite_speech config under strict validation." + }, + { + "left": "issue:43421", + "right": "issue:44811", + "accept": false, + "reason": "Runtime special-token/post-processor sync and Whisper batch_decode skip_special_tokens are separate tokenizer/processor behaviors." + }, + { + "left": "issue:43550", + "right": "issue:43927", + "accept": false, + "reason": "Bamba torch.compile failure is unrelated to DiaConfig losing custom token IDs on save/load." + }, + { + "left": "issue:43540", + "right": "issue:45381", + "accept": false, + "reason": "Video input processing failure in Qwen3OmniMoe is unrelated to Qwen2.5-VL vision_position_ids regression." + }, + { + "left": "issue:44448", + "right": "issue:44671", + "accept": false, + "reason": "Both are v5 regressions, but Pegasus output drift and CamemBERT masked-LM errors are different model-specific bugs." + }, + { + "left": "issue:43644", + "right": "issue:43749", + "accept": false, + "reason": "Non-persistent buffer corruption and FSDP CPU RAM-efficient loading are different regressions in different loading paths." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 14, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:30064", + "issue:34634", + "issue:36010", + "issue:36331", + "issue:41720", + "issue:42175", + "issue:43262", + "issue:43526", + "issue:43550", + "issue:43577", + "issue:43582", + "issue:43606", + "issue:43650", + "issue:43653", + "issue:43697", + "issue:43720", + "issue:43749", + "issue:43782", + "issue:43819", + "issue:43825", + "issue:43827", + "issue:43906", + "issue:43994", + "issue:44060", + "issue:44186", + "issue:44291", + "issue:44368", + "issue:44442", + "issue:44485", + "issue:44509", + "issue:44554", + "issue:44561", + "issue:44610", + "issue:44671", + "issue:44805", + "issue:44821", + "issue:44857", + "issue:44936", + "issue:44977", + "issue:44991", + "issue:45020", + "issue:45072", + "issue:45081", + "issue:45084", + "issue:45325", + "issue:45406", + "issue:45701" + ], + "soft_pairs": [ + "issue:43782|issue:44977", + "issue:43653|issue:43819", + "issue:44561|issue:45020", + "issue:43825|issue:43994", + "issue:43577|issue:43582", + "issue:43550|issue:43819", + "issue:43825|issue:43827", + "issue:44821|issue:45084", + "issue:44485|issue:44509", + "issue:41720|issue:44991", + "issue:44186|issue:44442", + "issue:30064|issue:36331", + "issue:44671|issue:45701", + "issue:44805|issue:44936", + "issue:43606|issue:45072", + "issue:42175|issue:45081", + "issue:34634|issue:36010", + "issue:43650|issue:43906", + "issue:43782|issue:45325", + "issue:44060|issue:44368", + "issue:44610|issue:45072", + "issue:45081|issue:45406", + "issue:43749|issue:44291", + 
"issue:43720|issue:45072", + "issue:43526|issue:43697", + "issue:43582|issue:44857", + "issue:43526|issue:44554", + "issue:43262|issue:43819" + ], + "prepared_review_unit_hash": "a16e9142f8b79a6cd75c4c344a5e185149a6168911730f07a7ceca8c8fe3a7fb", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 28, + "serialized_chars": 35714, + "estimated_input_tokens": 8929, + "estimated_eval_tokens": 18114 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is highly heterogeneous: most items are unrelated model/processor/trainer regressions rather than true duplicates. No PRs are present. I selected the broad remote-code regression report as the best issue representative, but it is only an umbrella, not a true duplicate of the others.", + "confidence": 0.95, + "canonical_issue_reason": "issue:45020 is the broadest umbrella-style report in the set, covering recent-version breakages for remote_code models.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45020 is the best top-level representative if a single issue must stand in for the cluster, but it should not be treated as a duplicate of the more specific bugs.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43782", + "right": "issue:44977", + "accept": false, + "reason": "Different models and failure modes: from_pretrained weight_only loading error vs flash-attention generation issue." + }, + { + "left": "issue:43653", + "right": "issue:43819", + "accept": false, + "reason": "Unrelated subsystems: tokenizer special-token registration vs DAC latent decoding/STE mismatch." 
+ }, + { + "left": "issue:44561", + "right": "issue:45020", + "accept": false, + "reason": "One is a specific API removal breakage; the other is a broad umbrella report about remote_code regressions." + }, + { + "left": "issue:43825", + "right": "issue:43994", + "accept": false, + "reason": "Different problems: v5 error-message wording vs a model producing bad outputs with AutoModel/pipeline." + }, + { + "left": "issue:43577", + "right": "issue:43582", + "accept": false, + "reason": "Unrelated: BLIP2 dtype propagation vs Apple Silicon caching allocator TypeError." + }, + { + "left": "issue:43550", + "right": "issue:43819", + "accept": false, + "reason": "Different code paths and models: torch.compile SDPA failure vs DAC forward/from_latents mismatch." + }, + { + "left": "issue:43825", + "right": "issue:43827", + "accept": false, + "reason": "Related theme, but one is runtime messaging and the other is docs cleanup; not the same bug/change." + }, + { + "left": "issue:44821", + "right": "issue:45084", + "accept": false, + "reason": "Image processor URL loading issue vs template compilation error; no shared underlying defect." + }, + { + "left": "issue:44485", + "right": "issue:44509", + "accept": false, + "reason": "GLM-5 RoPE discussion is unrelated to docs still mentioning removed pipeline tasks." + }, + { + "left": "issue:41720", + "right": "issue:44991", + "accept": false, + "reason": "Different models and symptoms: auto device mapping CUDA assert vs tokenizer loading failure." + }, + { + "left": "issue:44186", + "right": "issue:44442", + "accept": false, + "reason": "Tokenizer crash on NER/padding vs AutoTokenizer failing to load a specific tokenizer class." + }, + { + "left": "issue:30064", + "right": "issue:36331", + "accept": false, + "reason": "Different layers of the stack: image segmentation-map processing vs trainer loss signature change." 
+ }, + { + "left": "issue:44671", + "right": "issue:45701", + "accept": false, + "reason": "CamemBERT masked-LM regression is not the same as a broad tokenization-change report." + }, + { + "left": "issue:44805", + "right": "issue:44936", + "accept": false, + "reason": "Mask-shape IndexError and trainer evaluate-after-train failure are different bugs." + }, + { + "left": "issue:43606", + "right": "issue:45072", + "accept": false, + "reason": "CPU offload device mismatch is unrelated to bfloat16 dtype mismatches in other models." + }, + { + "left": "issue:42175", + "right": "issue:45081", + "accept": false, + "reason": "Backend dependency packaging issue vs Mistral tokenizer regex patch crash." + }, + { + "left": "issue:34634", + "right": "issue:36010", + "accept": false, + "reason": "BarkProcessor voice preset bug is unrelated to GenerationMixin import failure." + }, + { + "left": "issue:43650", + "right": "issue:43906", + "accept": false, + "reason": "Placeholder/empty issue title is not the same as an isolated reproduction of another bug." + }, + { + "left": "issue:43782", + "right": "issue:45325", + "accept": false, + "reason": "Both are Qwen-related, but they describe different models and different rope/loading failures." + }, + { + "left": "issue:44060", + "right": "issue:44368", + "accept": false, + "reason": "Different warning causes: tied-weights bug across layers vs a LoRA fine-tuning config warning." + }, + { + "left": "issue:44610", + "right": "issue:45072", + "accept": false, + "reason": "Processor size mismatch and bfloat16 dtype mismatch are unrelated." + }, + { + "left": "issue:45081", + "right": "issue:45406", + "accept": false, + "reason": "Different tokenizer/processor classes and different AttributeError causes." + }, + { + "left": "issue:43749", + "right": "issue:44291", + "accept": false, + "reason": "Both involve loading, but one is FSDP RAM-efficient loading and the other is init_empty_weights argument handling." 
+ }, + { + "left": "issue:43720", + "right": "issue:45072", + "accept": false, + "reason": "Packed-weight accelerate loading bug is unrelated to dtype mismatch issues." + }, + { + "left": "issue:43526", + "right": "issue:43697", + "accept": false, + "reason": "Label reduction bug in an image processor is not the same as RTDetrV2 output changes." + }, + { + "left": "issue:43582", + "right": "issue:44857", + "accept": false, + "reason": "Apple Silicon warmup TypeError vs CUDA AMP crash are different platform-specific failures." + }, + { + "left": "issue:43526", + "right": "issue:44554", + "accept": false, + "reason": "Image label reduction bug is unrelated to an MPS attention correctness issue." + }, + { + "left": "issue:43262", + "right": "issue:43819", + "accept": false, + "reason": "Audio sampling-rate default bug is unrelated to DAC latent reconstruction mismatch." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 15, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:34567", + "issue:34634", + "issue:36010", + "issue:36331", + "issue:38175", + "issue:39692", + "issue:40990", + "issue:43257", + "issue:43262", + "issue:43329", + "issue:43450", + "issue:43452", + "issue:43454", + "issue:43531", + "issue:43550", + "issue:43577", + "issue:43638", + "issue:43645", + "issue:43653", + "issue:43696", + "issue:43697", + "issue:43720", + "issue:43746", + "issue:43749", + "issue:43756", + "issue:43819", + "issue:43854", + "issue:43927", + "issue:43931", + "issue:44479", + "issue:44568", + "issue:44589", + "issue:44610", + "issue:44756", + "issue:44811", + "issue:44849", + "issue:44912", + 
"issue:44936", + "issue:44977", + "issue:44991", + "issue:45042", + "issue:45084", + "issue:45356", + "issue:45357", + "issue:45488", + "issue:45538", + "issue:45600", + "issue:45701" + ], + "soft_pairs": [ + "issue:43329|issue:45600", + "issue:45084|issue:45538", + "issue:34634|issue:36331", + "issue:43653|issue:44912", + "issue:43452|issue:44991", + "issue:43257|issue:43329", + "issue:43638|issue:44936", + "issue:38175|issue:44610", + "issue:43454|issue:43577", + "issue:44568|issue:45488", + "issue:44589|issue:45042", + "issue:43854|issue:44977", + "issue:43257|issue:43696", + "issue:43746|issue:44811", + "issue:39692|issue:44912", + "issue:36010|issue:43645", + "issue:43720|issue:44610", + "issue:43819|issue:43927", + "issue:43550|issue:44912", + "issue:43749|issue:44756", + "issue:44568|issue:45356", + "issue:43329|issue:44479", + "issue:43262|issue:44811", + "issue:43531|issue:45042", + "issue:36010|issue:36331", + "issue:43257|issue:43931", + "issue:44849|issue:45701", + "issue:43329|issue:43450", + "issue:34567|issue:40990", + "issue:43257|issue:43756", + "issue:43697|issue:45357" + ], + "prepared_review_unit_hash": "ded0bbb03bb2c6d86ec77777345856733e03cecbe02a085192073ef31aeb9de9", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 31, + "serialized_chars": 37123, + "estimated_input_tokens": 9281, + "estimated_eval_tokens": 18818 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set; the items span unrelated bugs across trainers, tokenizers, vision/audio processors, quantization, loading, and model-specific regressions. 
All soft edges are false positives and should be rejected.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43329", + "right": "issue:45600", + "accept": false, + "reason": "Different subsystems: multimodal token counting vs stale auto-mapping references breaking CI. No shared bug path." + }, + { + "left": "issue:45084", + "right": "issue:45538", + "accept": false, + "reason": "Unrelated issues: template-node compilation error vs CLIPTokenizer max length constant. Same tokenizer/model area only at a very broad level." + }, + { + "left": "issue:34634", + "right": "issue:36331", + "accept": false, + "reason": "BarkProcessor voice_preset bug and Trainer.compute_loss signature change are entirely different code paths." + }, + { + "left": "issue:43653", + "right": "issue:44912", + "accept": false, + "reason": "BigBird special-token registration vs GPT-OSS MXFP4 quantization fallback are unrelated tokenizer/model loading problems." + }, + { + "left": "issue:43452", + "right": "issue:44991", + "accept": false, + "reason": "Both involve loading, but one is gguf_file handling for AutoTokenizer/AutoModel and the other is a specific tokenizer regression for est-roberta." + }, + { + "left": "issue:43257", + "right": "issue:43329", + "accept": false, + "reason": "Qwen3 MoE weight conversion/loading issue vs multimodal token counting video-branch bug; not the same defect." + }, + { + "left": "issue:43638", + "right": "issue:44936", + "accept": false, + "reason": "DeepSpeed ZeRO3 with non-pretrained BERT index error is distinct from trainer.evaluate() failing after train()." + }, + { + "left": "issue:38175", + "right": "issue:44610", + "accept": false, + "reason": "SigLIP2 zero probabilities and OmDet-Turbo processor/model input-size mismatch are different model-specific bugs." 
+ }, + { + "left": "issue:43454", + "right": "issue:43577", + "accept": false, + "reason": "AyaVision weight-tying bug and BLIP2 dtype propagation bug are separate model initialization issues." + }, + { + "left": "issue:44568", + "right": "issue:45488", + "accept": false, + "reason": "mDeBERTa special-token addition regression is unrelated to LlamaTokenizer pre-tokenizer override affecting DeepSeek." + }, + { + "left": "issue:44589", + "right": "issue:45042", + "accept": false, + "reason": "Float8 storage lookup error and PIL backend requiring torchvision are unrelated runtime/loading problems." + }, + { + "left": "issue:43854", + "right": "issue:44977", + "accept": false, + "reason": "GLM-4.7-Flash test loading failure and Qwen3.5 flash-attention generation regression are different model bugs." + }, + { + "left": "issue:43746", + "right": "issue:44811", + "accept": false, + "reason": "PEFT local checkpoint loading for GraniteSpeech is unrelated to Whisper batch_decode skip_special_tokens handling." + }, + { + "left": "issue:39692", + "right": "issue:44912", + "accept": false, + "reason": "SigLIP2 doc-example/model-processor mismatch and git-oss MXFP4 loading fallback are different problems." + }, + { + "left": "issue:36010", + "right": "issue:43645", + "accept": false, + "reason": "GenerationMixin import path breakage and Jupyter custom-model initialization regression are not the same underlying issue." + }, + { + "left": "issue:43720", + "right": "issue:44610", + "accept": false, + "reason": "BitNet packed-weight unpacking during accelerate loading vs OmDet processor image-size mismatch are unrelated." + }, + { + "left": "issue:43819", + "right": "issue:43927", + "accept": false, + "reason": "DAC from_latents/forward mismatch is unrelated to DiaConfig losing custom token IDs after save/load." 
+ }, + { + "left": "issue:43550", + "right": "issue:44912", + "accept": false, + "reason": "torch.compile/SDPA failure in Bamba-9B-v2 is unrelated to GPT-OSS quantization loading behavior." + }, + { + "left": "issue:43749", + "right": "issue:44756", + "accept": false, + "reason": "FSDP CPU RAM efficient loading breakage and Strix Halo mmap OOM are different memory/loading scenarios." + }, + { + "left": "issue:44568", + "right": "issue:45356", + "accept": false, + "reason": "mDeBERTa BOS/EOS token insertion regression and Kimi-K2.5 codec handling warning are distinct tokenizer bugs." + }, + { + "left": "issue:43329", + "right": "issue:43450", + "accept": false, + "reason": "Both mention video processing, but one is multimodal token counting internals and the other is batched processor output shape." + }, + { + "left": "issue:34567", + "right": "issue:40990", + "accept": false, + "reason": "TrainerState num_input_tokens_seen not updating is unrelated to GPT-OSS high-perplexity evaluation results." + }, + { + "left": "issue:43257", + "right": "issue:43756", + "accept": false, + "reason": "Qwen3 MoE weight conversion/loading bug and Smollm3 RoPE-layer omission are unrelated model implementation issues." + }, + { + "left": "issue:43697", + "right": "issue:45357", + "accept": false, + "reason": "RTDetrV2 output regression and Qwen3.5 visual-encoder key saving regression are different code paths and model families." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 16, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:28282", + "issue:29127", + "issue:33357", + "issue:34567", + "issue:34689", + "issue:36010", + "issue:38175", + "issue:41720", + "issue:42175", + "issue:42915", + "issue:43317", + "issue:43452", + "issue:43493", + "issue:43526", + "issue:43540", + "issue:43577", + "issue:43582", + "issue:43643", + "issue:43742", + "issue:43749", + "issue:43756", + "issue:43824", + "issue:43906", + "issue:43927", + "issue:43994", + "issue:44062", + "issue:44164", + "issue:44291", + "issue:44479", + "issue:44488", + "issue:44560", + "issue:44737", + "issue:44821", + "issue:44898", + "issue:44912", + "issue:44938", + "issue:44987", + "issue:44991", + "issue:45042", + "issue:45072", + "issue:45137", + "issue:45325", + "issue:45375", + "issue:45381", + "issue:45405", + "issue:45479", + "issue:45538", + "issue:45588" + ], + "soft_pairs": [ + "issue:44987|issue:45538", + "issue:44479|issue:45381", + "issue:43577|issue:45072", + "issue:41720|issue:43540", + "issue:43452|issue:43582", + "issue:42915|issue:43742", + "issue:33357|issue:34567", + "issue:44291|issue:45042", + "issue:42175|issue:43824", + "issue:43317|issue:45538", + "issue:42915|issue:44821", + "issue:43927|issue:44912", + "issue:43540|issue:44560", + "issue:42915|issue:45538", + "issue:43540|issue:44479", + "issue:42175|issue:44938", + "issue:44062|issue:44991", + "issue:42175|issue:44488", + "issue:43526|issue:45479", + "issue:45137|issue:45405", + "issue:43906|issue:45381", + "issue:38175|issue:44898", + "issue:43749|issue:44164", + 
"issue:28282|issue:34689", + "issue:44737|issue:45588", + "issue:43756|issue:45325", + "issue:36010|issue:42175", + "issue:28282|issue:29127", + "issue:43742|issue:44291", + "issue:43493|issue:43994", + "issue:43643|issue:45375" + ], + "prepared_review_unit_hash": "a269bb6b1097d8a956bad6c7812385b5772c0440b800840963ad66331bef79d9", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 31, + "serialized_chars": 37116, + "estimated_input_tokens": 9279, + "estimated_eval_tokens": 18814 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is mostly heterogeneous false positives, with only a couple of likely true duplicate pairs around the same concrete bug. No PRs are present.", + "confidence": 0.67, + "canonical_issue_reason": "No single canonical issue for the whole set; 44479 is the best anchor for the strongest duplicate pair because it is the broader Qwen2.5-VL video regression report and matches the same position-id bug seen in 45381.", + "canonical_pr_reason": null, + "best_issue_reason": "44479 is the most representative issue among the true duplicates: it describes the broader regression and has the clearest overlap with the matching Qwen2.5-VL report.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44987", + "right": "issue:45538", + "accept": false, + "reason": "Both are loading-related, but one is a model-loading failure and the other is a CLIPTokenizer max-length issue; not the same bug." + }, + { + "left": "issue:44479", + "right": "issue:45381", + "accept": true, + "reason": "Same Qwen2.5-VL video-input regression: both describe wrong video position IDs / temporal position handling in v5.3.0." 
+ }, + { + "left": "issue:43577", + "right": "issue:45072", + "accept": false, + "reason": "Both mention dtype, but they affect different models and different failure modes; too broad to be the same bug." + }, + { + "left": "issue:41720", + "right": "issue:43540", + "accept": false, + "reason": "Different Qwen-family problems: auto device mapping cuda assert versus video-input ValueError." + }, + { + "left": "issue:43452", + "right": "issue:43582", + "accept": false, + "reason": "Unrelated areas: gguf/AutoTokenizer loading versus Apple Silicon allocator warmup TypeError." + }, + { + "left": "issue:42915", + "right": "issue:43742", + "accept": false, + "reason": "Different concrete bugs: FP8 quantization failure versus MobileLLM key error on load." + }, + { + "left": "issue:33357", + "right": "issue:34567", + "accept": false, + "reason": "CLIP MacOS bus error and TrainerState token-count tracking are unrelated." + }, + { + "left": "issue:44291", + "right": "issue:45042", + "accept": false, + "reason": "Init-empty-weights constructor-arg breakage is unrelated to the PIL/torchvision backend regression." + }, + { + "left": "issue:42175", + "right": "issue:43824", + "accept": false, + "reason": "Packaging/backend dependency issue versus missing Qwen2_5_VL class import; not the same bug." + }, + { + "left": "issue:43317", + "right": "issue:45538", + "accept": false, + "reason": "Device-map/offload loading failure is unrelated to CLIPTokenizer model_max_length." + }, + { + "left": "issue:43927", + "right": "issue:44912", + "accept": false, + "reason": "Config save/load token-ID loss is a different bug from MXFP4 quantization falling back to bf16." + }, + { + "left": "issue:43540", + "right": "issue:44560", + "accept": false, + "reason": "Both involve video, but they are different Qwen3 variants with different error signatures and likely different code paths." 
+ }, + { + "left": "issue:43906", + "right": "issue:45381", + "accept": false, + "reason": "One is just an isolated reproduction of another issue; it is not the same as the Qwen2.5-VL video-position regression." + }, + { + "left": "issue:38175", + "right": "issue:44898", + "accept": false, + "reason": "Different models and different symptoms: SigLIP2 zero probabilities versus Perceiver non-default-resolution failure." + }, + { + "left": "issue:43749", + "right": "issue:44164", + "accept": false, + "reason": "Both touch loading/saving, but FSDP CPU RAM efficient loading and extra_state handling are separate concrete problems." + }, + { + "left": "issue:28282", + "right": "issue:34689", + "accept": false, + "reason": "Missing PyTorch import dependency and Llama 3.2 Vision loading breakage are unrelated." + }, + { + "left": "issue:44737", + "right": "issue:45588", + "accept": false, + "reason": "XLNet CPU tensor placement bug and flash-attention s_aux=None crash are unrelated code paths." + }, + { + "left": "issue:43756", + "right": "issue:45325", + "accept": false, + "reason": "Both mention RoPE, but they affect different models and different position/index logic." + }, + { + "left": "issue:36010", + "right": "issue:42175", + "accept": false, + "reason": "GenerationMixin import failure is unrelated to TensorFlow backend packaging." + }, + { + "left": "issue:28282", + "right": "issue:29127", + "accept": false, + "reason": "Generic import error versus LayoutLMv3 box-content validation messaging; not the same bug." + }, + { + "left": "issue:43742", + "right": "issue:44291", + "accept": false, + "reason": "Different root causes: MobileLLM key error versus init_empty_weights argument mismatch." + }, + { + "left": "issue:43493", + "right": "issue:43994", + "accept": true, + "reason": "Same SigLIP2 model-family regression: both report incorrect/nonsensical outputs from the HF implementation compared with expected behavior." 
+ }, + { + "left": "issue:45137", + "right": "issue:45405", + "accept": false, + "reason": "DeepSpeed ZeRO3 deque error and PEFT version bump are unrelated." + }, + { + "left": "issue:43906", + "right": "issue:45381", + "accept": false, + "reason": "Reproduction issue versus a specific Qwen2.5-VL video-position bug; not the same underlying change." + }, + { + "left": "issue:43756", + "right": "issue:45325", + "accept": false, + "reason": "Repeated pair: different models and distinct RoPE-related bugs." + }, + { + "left": "issue:44062", + "right": "issue:44991", + "accept": false, + "reason": "Tokenizer loading failures, but the titles do not establish the same concrete root cause." + }, + { + "left": "issue:42175", + "right": "issue:44938", + "accept": false, + "reason": "TensorFlow backend packaging versus Python 3.14 load failure are unrelated." + }, + { + "left": "issue:43526", + "right": "issue:45479", + "accept": false, + "reason": "Image-processor reduce_labels behavior is unrelated to degenerate zero-loss sequence classification." + }, + { + "left": "issue:28282", + "right": "issue:34689", + "accept": false, + "reason": "Repeated pair: import dependency issue versus model-loading regression." + }, + { + "left": "issue:43742", + "right": "issue:44291", + "accept": false, + "reason": "Repeated pair: different loading bugs with different failure mechanisms." + }, + { + "left": "issue:43493", + "right": "issue:43994", + "accept": true, + "reason": "Repeated pair: same SigLIP2 implementation/output discrepancy." + }, + { + "left": "issue:43643", + "right": "issue:45375", + "accept": false, + "reason": "Both involve config fields, but one is generic trust_remote_code config loss and the other is a model-specific field dropped by @strict; not enough to unify them." 
+ } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet: all items are issues, no PRs are present, and the accepted duplicate pairs are reasonably supported by the titles (notably the Qwen2.5-VL video regression pair and the SigLIP2 discrepancy pair). The rejected pairs are generally conservative and do not overclaim same-root-cause evidence." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 17, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:24643", + "issue:29127", + "issue:34567", + "issue:36331", + "issue:41762", + "issue:43295", + "issue:43452", + "issue:43525", + "issue:43531", + "issue:43577", + "issue:43582", + "issue:43643", + "issue:43688", + "issue:43697", + "issue:43720", + "issue:43742", + "issue:43749", + "issue:43782", + "issue:43819", + "issue:44062", + "issue:44291", + "issue:44466", + "issue:44484", + "issue:44488", + "issue:44514", + "issue:44561", + "issue:44589", + "issue:44625", + "issue:44704", + "issue:44792", + "issue:44811", + "issue:44821", + "issue:44843", + "issue:44877", + "issue:44898", + "issue:44936", + "issue:44964", + "issue:45005", + "issue:45042", + "issue:45081", + "issue:45216", + "issue:45290", + "issue:45356", + "issue:45381", + "issue:45405", + "issue:45440", + "issue:45600" + ], + "soft_pairs": [ + "issue:43582|issue:43782", + "issue:44062|issue:45081", + "issue:34567|issue:36331", + "issue:44291|issue:44821", + "issue:43452|issue:44488", + "issue:44792|issue:44964", + "issue:44484|issue:44704", + "issue:45405|issue:45600", + "issue:45042|issue:45081", + "issue:43697|issue:45216", + "issue:43643|issue:44561", + 
"issue:43688|issue:43749", + "issue:43742|issue:44589", + "issue:44514|issue:45381", + "issue:43531|issue:44821", + "issue:44514|issue:45290", + "issue:44625|issue:44877", + "issue:43452|issue:43577", + "issue:43720|issue:44898", + "issue:43452|issue:44843", + "issue:43688|issue:45440", + "issue:41762|issue:44936", + "issue:43525|issue:43643", + "issue:24643|issue:29127", + "issue:43295|issue:45356", + "issue:43819|issue:44811", + "issue:44466|issue:45005" + ], + "prepared_review_unit_hash": "ad7ab0e0c1fa3bbd365925d9aee9a605b67bc33b26d2e858996a1c4b121a270b", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 27, + "serialized_chars": 35496, + "estimated_input_tokens": 8874, + "estimated_eval_tokens": 18004 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a grab bag of unrelated Transformers regression/issues, largely around loading, tokenizers, config handling, and training. The only strong duplicate-like pair is the tied-weights serialization reports (44466/45005).", + "confidence": 0.58, + "canonical_issue_reason": "issue:44466 is the clearest concrete bug report in the one credible duplicate pair: tied/lm_head weight serialization in v5.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44466 is the best anchor overall because it is specific, reproducible, and most directly matches the accepted duplicate-style pair.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43582", + "right": "issue:43782", + "accept": false, + "reason": "Both are TypeErrors, but they describe different code paths: Apple Silicon allocator warmup vs Qwen3VL weight_only loading." 
+ }, + { + "left": "issue:44062", + "right": "issue:45081", + "accept": false, + "reason": "Tokenizer-related, but one is AddedToken special-arg duplication and the other is a Mistral regex/backend_tokenizer crash." + }, + { + "left": "issue:34567", + "right": "issue:36331", + "accept": false, + "reason": "Different Trainer regressions: token counting not updating vs custom compute_loss signature breaking on num_items_in_batch." + }, + { + "left": "issue:44291", + "right": "issue:44821", + "accept": false, + "reason": "Unrelated loading failures: init_empty_weights/_is_hf_initialized vs AutoImageProcessor URL loading." + }, + { + "left": "issue:43452", + "right": "issue:44488", + "accept": false, + "reason": "Both are model-loading complaints, but the bugs are different and model-specific; not the same root cause." + }, + { + "left": "issue:44792", + "right": "issue:44964", + "accept": false, + "reason": "Both involve multimodal models, but they target different models and likely different failures." + }, + { + "left": "issue:44484", + "right": "issue:44704", + "accept": false, + "reason": "save_pretrained shard sizing is unrelated to AutoProcessor forwarding kwargs to cached_file." + }, + { + "left": "issue:45405", + "right": "issue:45600", + "accept": false, + "reason": "Completely different issues: dependency version bump vs stale auto_mappings config references." + }, + { + "left": "issue:45042", + "right": "issue:45081", + "accept": false, + "reason": "Different bugs in different subsystems: image processor torchvision dependency vs Mistral tokenizer regex handling." + }, + { + "left": "issue:43697", + "right": "issue:45216", + "accept": false, + "reason": "Different model regressions: RTDetrV2 output differences vs Qwen3.5 checkpoint serialization." 
+ }, + { + "left": "issue:43643", + "right": "issue:44561", + "accept": false, + "reason": "Both mention trust_remote_code, but one is missing config fields and the other is removal of is_torch_fx_available breaking remote code." + }, + { + "left": "issue:43688", + "right": "issue:43749", + "accept": false, + "reason": "Different failures: MoE auxiliary-loss normalization vs FSDP CPU RAM efficient loading." + }, + { + "left": "issue:43742", + "right": "issue:44589", + "accept": false, + "reason": "Different load-time errors with different symptoms and code paths." + }, + { + "left": "issue:44514", + "right": "issue:45381", + "accept": false, + "reason": "Both are Qwen2.5-VL related, but one is chat template batching and the other is video position ids." + }, + { + "left": "issue:43531", + "right": "issue:44821", + "accept": false, + "reason": "Sliding-window model issue and image-processor URL loading are unrelated." + }, + { + "left": "issue:44514", + "right": "issue:45290", + "accept": false, + "reason": "Same general chat-template area, but different crash conditions and no clear same underlying bug." + }, + { + "left": "issue:44625", + "right": "issue:44877", + "accept": false, + "reason": "Both concern config handling, but one is num_labels propagation and the other is strict config loading for granite_speech." + }, + { + "left": "issue:43452", + "right": "issue:43577", + "accept": false, + "reason": "Different bugs: gguf_file/pretrained loading vs dtype not propagating to BLIP2 qformer." + }, + { + "left": "issue:43720", + "right": "issue:44898", + "accept": false, + "reason": "Different model behaviors: packed BitNet weight loading vs Perceiver image classification with interpolation." + }, + { + "left": "issue:43452", + "right": "issue:44843", + "accept": false, + "reason": "Different tokenizer/loading regressions: gguf_file handling vs offline-model_info call in Mistral regex patching." 
+ }, + { + "left": "issue:43688", + "right": "issue:45440", + "accept": false, + "reason": "Both are MoE/model-quality issues, but not the same bug or code path." + }, + { + "left": "issue:41762", + "right": "issue:44936", + "accept": false, + "reason": "Different Trainer/DeepSpeed problems: ZeRO-3 model loading vs evaluate failing after train." + }, + { + "left": "issue:43525", + "right": "issue:43643", + "accept": false, + "reason": "Both are config-field problems, but one is a missing pad_token_id attribute and the other is remote-code config field loss." + }, + { + "left": "issue:24643", + "right": "issue:29127", + "accept": false, + "reason": "Entirely unrelated: DeepSpeed weight-dimension runtime error vs LayoutLMV3 error-message clarity." + }, + { + "left": "issue:43295", + "right": "issue:45356", + "accept": false, + "reason": "Both are tokenizer regressions, but they are different models and different failure modes." + }, + { + "left": "issue:43819", + "right": "issue:44811", + "accept": false, + "reason": "Different functionality: DAC latent reconstruction vs Whisper batch_decode special-token handling." + }, + { + "left": "issue:44466", + "right": "issue:45005", + "accept": true, + "reason": "Both report v5 tied-weight/lm_head serialization problems and plausibly share the same save_pretrained code path." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays conservative on the soft-edge pairs. The only accepted duplicate-style pair, issue:44466 and issue:45005, is plausibly the same tied-weights/save_pretrained bug class, while the other rejected pairs are not overstated and the reasons match the issue titles. Confidence is appropriately moderate." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 18, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:28282", + "issue:30064", + "issue:32090", + "issue:33357", + "issue:33666", + "issue:34567", + "issue:35141", + "issue:38175", + "issue:41720", + "issue:43122", + "issue:43295", + "issue:43329", + "issue:43334", + "issue:43452", + "issue:43504", + "issue:43525", + "issue:43531", + "issue:43550", + "issue:43572", + "issue:43577", + "issue:43646", + "issue:43653", + "issue:43688", + "issue:43697", + "issue:43720", + "issue:43742", + "issue:43749", + "issue:43906", + "issue:43927", + "issue:44361", + "issue:44442", + "issue:44451", + "issue:44464", + "issue:44479", + "issue:44554", + "issue:44568", + "issue:44610", + "issue:44625", + "issue:44743", + "issue:44843", + "issue:44936", + "issue:44977", + "issue:44991", + "issue:45325", + "issue:45356", + "issue:45588", + "issue:45701", + "issue:45704" + ], + "soft_pairs": [ + "issue:43653|issue:44568", + "issue:44464|issue:44743", + "issue:38175|issue:43295", + "issue:43577|issue:43697", + "issue:41720|issue:43906", + "issue:43688|issue:45704", + "issue:44361|issue:44442", + "issue:43646|issue:43749", + "issue:43688|issue:45701", + "issue:43653|issue:44361", + "issue:28282|issue:32090", + "issue:44843|issue:44991", + "issue:44479|issue:44936", + "issue:30064|issue:33666", + "issue:28282|issue:33357", + "issue:28282|issue:35141", + "issue:44977|issue:45356", + "issue:43577|issue:45588", + "issue:43122|issue:43906", + "issue:43577|issue:44554", + "issue:43531|issue:43742", + "issue:43653|issue:43927", + "issue:43329|issue:45325", + "issue:44464|issue:44977", + 
"issue:44479|issue:44625", + "issue:43550|issue:44361", + "issue:32090|issue:34567", + "issue:43504|issue:43720", + "issue:43504|issue:43550", + "issue:43452|issue:44451", + "issue:32090|issue:33666", + "issue:43504|issue:44610", + "issue:41720|issue:44464", + "issue:43334|issue:43572", + "issue:43525|issue:43572" + ], + "prepared_review_unit_hash": "cfd7b06dd0bb98b4fd6ec86a9b826b941a7dec30a3f3dc9da28d0628d8e23a2c", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 35, + "serialized_chars": 38343, + "estimated_input_tokens": 9586, + "estimated_eval_tokens": 19428 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are mostly separate bug reports with only superficial overlap (same library/version or same subsystem). 
None of the soft pairs look like the same underlying defect, so I would not merge any of them as duplicates.", + "confidence": 0.93, + "canonical_issue_reason": "issue:43646 is the broadest representative of the cluster: a generic Transformers 5.0 initialization regression that could umbrella several nearby version-breakage reports, unlike the narrower model- or tokenizer-specific issues.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43646 is the most useful standalone umbrella issue in this set because it describes a widespread initialization regression rather than a single model-specific symptom.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43653", + "right": "issue:44568", + "accept": false, + "reason": "Both are tokenizer bugs, but one is about BigBird mask-token registration/decoding and the other is about BOS/EOS insertion for mdeberta-v3; different failure modes and fixes." + }, + { + "left": "issue:44464", + "right": "issue:44743", + "accept": false, + "reason": "One is about chunked generation with compiled forward, the other about recurrent state reset with cache; different code paths." + }, + { + "left": "issue:38175", + "right": "issue:43295", + "accept": false, + "reason": "Siglip2 zero-probabilities and processor.tokenizer/image-passing regression are unrelated symptoms and components." + }, + { + "left": "issue:43577", + "right": "issue:43697", + "accept": false, + "reason": "One is a dtype propagation issue in Blip2 loading, the other is a model output mismatch in RTDetrV2; not the same bug." + }, + { + "left": "issue:41720", + "right": "issue:43906", + "accept": false, + "reason": "The second is only an isolated reproduction of another issue number; there is no evidence it targets the same Qwen3 auto-device-map cudaErrorAssert." 
+ }, + { + "left": "issue:43688", + "right": "issue:45704", + "accept": false, + "reason": "Aux-loss normalization in MoE models and a T5 RMSNorm memory leak are separate implementation defects." + }, + { + "left": "issue:44361", + "right": "issue:44442", + "accept": false, + "reason": "MLukeTokenizer task AttributeError and FastSpeech2ConformerTokenizer loading failure affect different tokenizers and failure points." + }, + { + "left": "issue:43646", + "right": "issue:43749", + "accept": false, + "reason": "Generic custom-model initialization breakage is not the same as FSDP CPU RAM-efficient loading being broken." + }, + { + "left": "issue:43688", + "right": "issue:45701", + "accept": false, + "reason": "Aux-loss normalization and a generic tokenization-change report do not describe the same underlying defect." + }, + { + "left": "issue:43653", + "right": "issue:44361", + "accept": false, + "reason": "Different tokenizer classes and different symptoms: empty decode output vs task-time AttributeError." + }, + { + "left": "issue:28282", + "right": "issue:32090", + "accept": false, + "reason": "PyTorch import error and Trainer broadcast TypeError are unrelated environment/runtime problems." + }, + { + "left": "issue:44843", + "right": "issue:44991", + "accept": false, + "reason": "Offline hub lookup in mistral regex patch is a different code path from tokenizer loading failure for EMBEDDIA/est-roberta." + }, + { + "left": "issue:44479", + "right": "issue:44936", + "accept": false, + "reason": "Video-input regression for several VLMs and trainer.evaluate() failing after train() are unrelated." + }, + { + "left": "issue:30064", + "right": "issue:33666", + "accept": false, + "reason": "Void segmentation map processing and Qwen2-VL multi-GPU training are different domains and bugs." + }, + { + "left": "issue:28282", + "right": "issue:33357", + "accept": false, + "reason": "Missing PyTorch import and MacOS bus error in CLIP loading are not the same issue." 
+ }, + { + "left": "issue:28282", + "right": "issue:35141", + "accept": false, + "reason": "AutoModel import failure and token embedding resize reinitialization are unrelated." + }, + { + "left": "issue:44977", + "right": "issue:45356", + "accept": false, + "reason": "Flash-attention generation bug in Qwen3.5 and tokenizer codec/regex regression in Kimi-K2.5 are separate." + }, + { + "left": "issue:43577", + "right": "issue:45588", + "accept": false, + "reason": "Blip2 dtype handling and flash_attention.py crashing on s_aux=None affect different subsystems." + }, + { + "left": "issue:43122", + "right": "issue:43906", + "accept": false, + "reason": "Tokenizer output changes across versions and an isolated reproduction of another issue are not enough to conclude the same bug." + }, + { + "left": "issue:43577", + "right": "issue:44554", + "accept": false, + "reason": "Blip2 dtype propagation and an MPS attention correctness issue are distinct." + }, + { + "left": "issue:43531", + "right": "issue:43742", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior and a MobileLLM-125M load key error are unrelated." + }, + { + "left": "issue:43653", + "right": "issue:43927", + "accept": false, + "reason": "Tokenizer special-token registration and DiaConfig custom token IDs lost on save/load are different defects." + }, + { + "left": "issue:43329", + "right": "issue:45325", + "accept": false, + "reason": "Undefined video-path helper variables in multimodal token counting and Qwen2.5-VL rope index scaling are different bugs." + }, + { + "left": "issue:44464", + "right": "issue:44977", + "accept": false, + "reason": "Compiled-forward generation inconsistency and flash-attention generation failure are separate generation-path issues." + }, + { + "left": "issue:44479", + "right": "issue:44625", + "accept": false, + "reason": "Video-input regression and num_labels propagation into text config are unrelated Qwen3.5 problems." 
+ }, + { + "left": "issue:43550", + "right": "issue:44361", + "accept": false, + "reason": "torch.compile/SDPA failure in Bamba-9B-v2 is unrelated to MLukeTokenizer task AttributeError." + }, + { + "left": "issue:32090", + "right": "issue:34567", + "accept": false, + "reason": "NoneType broadcast TypeError in Trainer and num_input_tokens_seen not updating are different Trainer bugs." + }, + { + "left": "issue:43504", + "right": "issue:43720", + "accept": false, + "reason": "Legacy field loading failure in BeitForSemanticSegmentation and packed-weight unpacking in BitNet loading are different model-load paths." + }, + { + "left": "issue:43504", + "right": "issue:43550", + "accept": false, + "reason": "Legacy-field pretrained loading and Bamba torch.compile SDPA failure are unrelated." + }, + { + "left": "issue:43452", + "right": "issue:44451", + "accept": false, + "reason": "gguf_file handling for AutoTokenizer/AutoModelForCausalLM and inability to load a specific tokenizer are different loading issues." + }, + { + "left": "issue:32090", + "right": "issue:33666", + "accept": false, + "reason": "Trainer _gpu_broadcast_one NoneType failure and Qwen2-VL multi-GPU training are not the same problem." + }, + { + "left": "issue:43504", + "right": "issue:44610", + "accept": false, + "reason": "Model preset loading with a legacy field and OmDet-Turbo processor/model input-size mismatch are separate." + }, + { + "left": "issue:41720", + "right": "issue:44464", + "accept": false, + "reason": "CUDA assert on auto device mapping and compiled-forward generation inconsistency are different runtime failures." + }, + { + "left": "issue:43334", + "right": "issue:43572", + "accept": false, + "reason": "Qwen3-VL missing pad_token_id and StableLmConfig missing pad_token_idx are similar naming issues but on different configs and code paths." 
+ }, + { + "left": "issue:43525", + "right": "issue:43572", + "accept": false, + "reason": "Llama4Config missing pad_token_id and StableLmConfig missing pad_token_idx are separate model config regressions, not one duplicate bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 19, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:33357", + "issue:33666", + "issue:36010", + "issue:36331", + "issue:39692", + "issue:43257", + "issue:43262", + "issue:43278", + "issue:43295", + "issue:43299", + "issue:43493", + "issue:43504", + "issue:43540", + "issue:43572", + "issue:43575", + "issue:43606", + "issue:43646", + "issue:43688", + "issue:43716", + "issue:43720", + "issue:43742", + "issue:43756", + "issue:43824", + "issue:43825", + "issue:43844", + "issue:43927", + "issue:44291", + "issue:44361", + "issue:44423", + "issue:44466", + "issue:44488", + "issue:44589", + "issue:44625", + "issue:44734", + "issue:44821", + "issue:44843", + "issue:44849", + "issue:44913", + "issue:44964", + "issue:44977", + "issue:45083", + "issue:45084", + "issue:45200", + "issue:45229", + "issue:45357", + "issue:45405", + "issue:45520", + "issue:45584" + ], + "soft_pairs": [ + "issue:43278|issue:43688", + "issue:44849|issue:45357", + "issue:43756|issue:44913", + "issue:43493|issue:43825", + "issue:43742|issue:44821", + "issue:43262|issue:45084", + "issue:33666|issue:36010", + "issue:43540|issue:43716", + "issue:43720|issue:43927", + "issue:43606|issue:43927", + "issue:43257|issue:43299", + "issue:43278|issue:43844", + "issue:39692|issue:43493", + "issue:43493|issue:43844", + "issue:43720|issue:44361", + 
"issue:44964|issue:45405", + "issue:33666|issue:36331", + "issue:44488|issue:44843", + "issue:43540|issue:44977", + "issue:43756|issue:45083", + "issue:45200|issue:45229", + "issue:44625|issue:45584", + "issue:43824|issue:44589", + "issue:43646|issue:44466", + "issue:44589|issue:45520", + "issue:39692|issue:43295", + "issue:44423|issue:44734", + "issue:33357|issue:33666", + "issue:43572|issue:44291", + "issue:43504|issue:43606", + "issue:43575|issue:45405", + "issue:43927|issue:44361" + ], + "prepared_review_unit_hash": "c53fb539f95a512eb6a0c34f804b1cd37c43bb67b92baaef1b65c2c3348f09ea", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 32, + "serialized_chars": 37401, + "estimated_input_tokens": 9351, + "estimated_eval_tokens": 18958 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the soft pairs share broad themes like loading, multimodal models, or config regressions, but they describe different concrete bugs and code paths. None of the proposed duplicate pairs should be merged. No PRs are present.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43278", + "right": "issue:43688", + "accept": false, + "reason": "Different bugs: BF16/FP32 dtype drift between training and eval vs auxiliary-loss normalization in MoE models." + }, + { + "left": "issue:44849", + "right": "issue:45357", + "accept": false, + "reason": "Both mention Qwen3.5, but one is an output_hidden_states bug and the other is incorrect visual-encoder key serialization." 
+ }, + { + "left": "issue:43756", + "right": "issue:44913", + "accept": false, + "reason": "Different models and failures: Smollm3 RoPE-layer dropping vs GPTNeoX rotary_pct not persisting on reload." + }, + { + "left": "issue:43493", + "right": "issue:43825", + "accept": false, + "reason": "SigLIP2 implementation discrepancy is unrelated to the pipeline() translation-task error message regression." + }, + { + "left": "issue:43742", + "right": "issue:44821", + "accept": false, + "reason": "Different loading problems: MobileLLM key error vs AutoImageProcessor URL loading." + }, + { + "left": "issue:43262", + "right": "issue:45084", + "accept": false, + "reason": "Audio chat-template sampling-rate default bug is unrelated to the non-template-node compile error." + }, + { + "left": "issue:33666", + "right": "issue:36010", + "accept": false, + "reason": "Qwen2-VL multi-GPU training is unrelated to the GenerationMixin import failure." + }, + { + "left": "issue:43540", + "right": "issue:43716", + "accept": false, + "reason": "Different model families and code paths: Qwen3OmniMoe video processing vs Mistral-3 image-preprocessor dtype mismatch." + }, + { + "left": "issue:43720", + "right": "issue:43927", + "accept": false, + "reason": "BitNet packed-weight unpacking during load is unrelated to DiaConfig losing custom token IDs on save/load." + }, + { + "left": "issue:43606", + "right": "issue:43927", + "accept": false, + "reason": "CPU offload device mismatch for bark-small is a different issue than DiaConfig token-ID persistence." + }, + { + "left": "issue:43257", + "right": "issue:43299", + "accept": false, + "reason": "Both involve Qwen MOE models, but one is an accelerate+deepspeed conversion problem and the other is a Transformers 5.0 load regression." + }, + { + "left": "issue:43278", + "right": "issue:43844", + "accept": false, + "reason": "Embedding dtype mismatch is not the same as abnormal gradient growth under HfDeepSpeedConfig/ZeRO-3." 
+ }, + { + "left": "issue:39692", + "right": "issue:43493", + "accept": false, + "reason": "Documentation example mistakes are not the same as the SigLIP2 HF-vs-JAX implementation discrepancy." + }, + { + "left": "issue:43493", + "right": "issue:43844", + "accept": false, + "reason": "SigLIP2 fidelity mismatch is unrelated to the ZeRO-3 gradient issue." + }, + { + "left": "issue:43720", + "right": "issue:44361", + "accept": false, + "reason": "BitNet accelerate-loading bug is unrelated to MLukeTokenizer AttributeError on tasks." + }, + { + "left": "issue:44964", + "right": "issue:45405", + "accept": false, + "reason": "Model-load failure for Phi-4 multimodal is unrelated to the unreleased MIN_PEFT_VERSION bump." + }, + { + "left": "issue:33666", + "right": "issue:36331", + "accept": false, + "reason": "Qwen2-VL multi-GPU training and CustomTrainer.compute_loss signature break are different problems." + }, + { + "left": "issue:44488", + "right": "issue:44843", + "accept": false, + "reason": "Loading cjvt/sleng-bert is unrelated to the offline-mode failure in _patch_mistral_regex." + }, + { + "left": "issue:43540", + "right": "issue:44977", + "accept": false, + "reason": "Different Qwen3 issues: video-input processing error vs flash-attention generation bug." + }, + { + "left": "issue:43756", + "right": "issue:45083", + "accept": false, + "reason": "Smollm3 RoPE-layer count bug does not match the qwen3_omni_moe helper-function behavior issue." + }, + { + "left": "issue:45200", + "right": "issue:45229", + "accept": false, + "reason": "Gemma 4 mm_token_type_ids defaulting bug is unrelated to multi-GPU inference CUDA OOM." + }, + { + "left": "issue:44625", + "right": "issue:45584", + "accept": false, + "reason": "Qwen3.5 num_labels propagation and Whisper empty-transcription handling are distinct bugs." 
+ }, + { + "left": "issue:43824", + "right": "issue:44589", + "accept": false, + "reason": "Qwen2.5-VL import error is unrelated to the Float8 storage-object lookup failure." + }, + { + "left": "issue:43646", + "right": "issue:44466", + "accept": false, + "reason": "Custom model initialization regression is different from inconsistent lm_head.weight serialization." + }, + { + "left": "issue:44589", + "right": "issue:45520", + "accept": false, + "reason": "Float8 storage lookup failure is unrelated to the flash_attn KeyError on Python 3.13." + }, + { + "left": "issue:39692", + "right": "issue:43295", + "accept": false, + "reason": "Docs example errors are not the same as the regression around processor.tokenizer access and image inputs to tokenizer." + }, + { + "left": "issue:44423", + "right": "issue:44734", + "accept": false, + "reason": "Continuous-batching multimodal crash from str.to() is a different failure than KV-cache continuation tensor indexing." + }, + { + "left": "issue:33357", + "right": "issue:33666", + "accept": false, + "reason": "MacOS CLIP bus error and Qwen2-VL multi-GPU training are unrelated." + }, + { + "left": "issue:43572", + "right": "issue:44291", + "accept": false, + "reason": "StableLmConfig pad_token_idx loss is unrelated to init_empty_weights/_is_hf_initialized argument handling." + }, + { + "left": "issue:43504", + "right": "issue:43606", + "accept": false, + "reason": "BEiT legacy-field load failure is a different bug from bark-small CPU offload device mismatch." + }, + { + "left": "issue:43575", + "right": "issue:45405", + "accept": false, + "reason": "Tensor-parallel OOM for Qwen2-57B is unrelated to the unreleased PEFT version bump." + }, + { + "left": "issue:43927", + "right": "issue:44361", + "accept": false, + "reason": "DiaConfig save/load token-ID corruption is not the same as MLukeTokenizer task-time AttributeError." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 20, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:15354", + "issue:29942", + "issue:30333", + "issue:37428", + "issue:38175", + "issue:39401", + "issue:43232", + "issue:43278", + "issue:43295", + "issue:43334", + "issue:43493", + "issue:43504", + "issue:43525", + "issue:43540", + "issue:43582", + "issue:43606", + "issue:43653", + "issue:43701", + "issue:43844", + "issue:43957", + "issue:43994", + "issue:44112", + "issue:44315", + "issue:44464", + "issue:44493", + "issue:44521", + "issue:44554", + "issue:44568", + "issue:44610", + "issue:44792", + "issue:44849", + "issue:44898", + "issue:44964", + "issue:44977", + "issue:45072", + "issue:45084", + "issue:45198", + "issue:45200", + "issue:45290", + "issue:45325", + "issue:45356", + "issue:45405", + "issue:45412", + "issue:45446", + "issue:45520", + "issue:45584", + "issue:45701" + ], + "soft_pairs": [ + "issue:43504|issue:43653", + "issue:44521|issue:45290", + "issue:44464|issue:45584", + "issue:43994|issue:44493", + "issue:44792|issue:45405", + "issue:43278|issue:43701", + "issue:43606|issue:44112", + "issue:29942|issue:30333", + "issue:44554|issue:45446", + "issue:43844|issue:44964", + "issue:44568|issue:45701", + "issue:43540|issue:45200", + "issue:45084|issue:45520", + "issue:43582|issue:44977", + "issue:43232|issue:44849", + "issue:15354|issue:38175", + "issue:37428|issue:43957", + "issue:45072|issue:45198", + "issue:39401|issue:44568", + "issue:44315|issue:44849", + "issue:43334|issue:43525", + "issue:44554|issue:45356", + "issue:43295|issue:44610", + 
"issue:45200|issue:45325", + "issue:43493|issue:43701", + "issue:44898|issue:45412" + ], + "prepared_review_unit_hash": "f6567a3845a819456f775998384c9b61689ac42295ea8e0421a1a8fefdc1ec96", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 26, + "serialized_chars": 35029, + "estimated_input_tokens": 8758, + "estimated_eval_tokens": 17772 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is a collection of mostly unrelated transformer bugs spanning tokenization, multimodal chat templates, generation, dtype/device issues, and CI failures. I don\u2019t see any soft pair that appears to be the same concrete underlying bug/change.", + "confidence": 0.95, + "canonical_issue_reason": "issue:44521 is the most active open issue here and has the strongest cross-reference signal, so it works best as a cluster anchor even though the items are not true duplicates.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44521 is the best representative issue because it has the highest discussion/inbound activity and a concrete reproducible regression report.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43504", + "right": "issue:43653", + "accept": false, + "reason": "Both are loading/token-related, but one is a Beit legacy-field load failure and the other is a BigBirdTokenizer special-token decode bug; different code paths." + }, + { + "left": "issue:44521", + "right": "issue:45290", + "accept": false, + "reason": "Both involve apply_chat_template, but one is multimodal assistant mask generation and the other is a tool-call tokenization crash; not the same bug." 
+ }, + { + "left": "issue:44464", + "right": "issue:45584", + "accept": false, + "reason": "Different generation failures in different models/features: compiled-forward chunked generation vs Whisper empty-transcription alignment handling." + }, + { + "left": "issue:43994", + "right": "issue:44493", + "accept": false, + "reason": "A SigLIP2 output regression is unrelated to the broad 'unexpected key with position id' issue, which points to a different model/state handling problem." + }, + { + "left": "issue:44792", + "right": "issue:45405", + "accept": false, + "reason": "A janus image-generation test failure and a PEFT version-pin issue are unrelated." + }, + { + "left": "issue:43278", + "right": "issue:43701", + "accept": false, + "reason": "Embedding dtype drift in eval and a resume_from_checkpoint key mismatch are separate behaviors with different causes." + }, + { + "left": "issue:43606", + "right": "issue:44112", + "accept": false, + "reason": "Both mention devices, but one is a real CPU-offload mismatch and the other is a stale CI test; not the same underlying defect." + }, + { + "left": "issue:29942", + "right": "issue:30333", + "accept": false, + "reason": "FlashAttention2 test failures and MLflow job-status reporting are unrelated subsystems." + }, + { + "left": "issue:44554", + "right": "issue:45446", + "accept": false, + "reason": "An MPS attention correctness bug and a PyTorch-version import guard in flex_attention are different issues." + }, + { + "left": "issue:43844", + "right": "issue:44964", + "accept": false, + "reason": "ZeRO-3 gradient growth and a latest-transformers model-load failure are unrelated." + }, + { + "left": "issue:44568", + "right": "issue:45701", + "accept": false, + "reason": "A specific mdeberta BOS/EOS regression is not the same as a vague tokenizer-version-change report." 
+ }, + { + "left": "issue:43540", + "right": "issue:45200", + "accept": false, + "reason": "Video-input processing in Qwen3OmniMoe and Gemma 4 mm_token_type_ids defaults are different multimodal bugs." + }, + { + "left": "issue:45084", + "right": "issue:45520", + "accept": false, + "reason": "A compile-time template-node error and a Python 3.13 flash_attn import KeyError are unrelated." + }, + { + "left": "issue:43582", + "right": "issue:44977", + "accept": false, + "reason": "Apple Silicon warmup TypeError and Qwen3.5 flash-attention generation problems do not share the same code path." + }, + { + "left": "issue:43232", + "right": "issue:44849", + "accept": false, + "reason": "These are different generation-related regressions: sync_gpus kwargs handling vs output_hidden_states behavior." + }, + { + "left": "issue:15354", + "right": "issue:38175", + "accept": false, + "reason": "TorchScript GeneratorExp export failure is unrelated to SigLIP2 zero-probability outputs." + }, + { + "left": "issue:37428", + "right": "issue:43957", + "accept": false, + "reason": "A missing flash attention helper import is unrelated to meta-device model loading regressions." + }, + { + "left": "issue:45072", + "right": "issue:45198", + "accept": false, + "reason": "bfloat16 dtype mismatches in inference and Wav2Vec2 save/tokenization failures are different bugs." + }, + { + "left": "issue:39401", + "right": "issue:44568", + "accept": false, + "reason": "Wrong offset_mapping in Qwen3 tokenizer and missing BOS/EOS insertion in mdeberta are separate tokenizer defects." + }, + { + "left": "issue:44315", + "right": "issue:44849", + "accept": false, + "reason": "Liger Kernel not applied during model_init is unrelated to the Qwen3.5 hidden-states regression." + }, + { + "left": "issue:43334", + "right": "issue:43525", + "accept": false, + "reason": "Both are pad_token_id attribute errors, but they affect different model configs and load paths; not enough to treat as the same bug." 
+ }, + { + "left": "issue:44554", + "right": "issue:45356", + "accept": false, + "reason": "An MPS attention correctness issue and a Kimi-K2.5 tokenizer codec regression are unrelated." + }, + { + "left": "issue:43295", + "right": "issue:44610", + "accept": false, + "reason": "Processor.tokenizer access breakage and OmDet-Turbo image-size mismatch are different model/processor integration failures." + }, + { + "left": "issue:45200", + "right": "issue:45325", + "accept": false, + "reason": "Gemma 4 text-only mm_token_type_ids handling and Qwen2.5-VL rope-index scaling are separate multimodal position-id issues." + }, + { + "left": "issue:43493", + "right": "issue:43701", + "accept": false, + "reason": "A SigLIP2 implementation discrepancy is not the same as a checkpoint resume key mismatch." + }, + { + "left": "issue:44898", + "right": "issue:45412", + "accept": false, + "reason": "Perceiver interpolation failure and RT-DETR memory reclamation are unrelated." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 21, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:37428", + "issue:38175", + "issue:42915", + "issue:43278", + "issue:43295", + "issue:43388", + "issue:43425", + "issue:43450", + "issue:43493", + "issue:43526", + "issue:43572", + "issue:43575", + "issue:43653", + "issue:43819", + "issue:43824", + "issue:43827", + "issue:43844", + "issue:43856", + "issue:43873", + "issue:43901", + "issue:43957", + "issue:43975", + "issue:43994", + "issue:44112", + "issue:44206", + "issue:44360", + "issue:44442", + "issue:44462", + "issue:44464", + "issue:44466", + "issue:44514", + 
"issue:44521", + "issue:44561", + "issue:44610", + "issue:44779", + "issue:44792", + "issue:44857", + "issue:44936", + "issue:44987", + "issue:45083", + "issue:45127", + "issue:45198", + "issue:45245", + "issue:45290", + "issue:45381", + "issue:45412", + "issue:45563", + "issue:45584" + ], + "soft_pairs": [ + "issue:44466|issue:45127", + "issue:44792|issue:45563", + "issue:37428|issue:43824", + "issue:43873|issue:44464", + "issue:43572|issue:44987", + "issue:43388|issue:43526", + "issue:43572|issue:43957", + "issue:43844|issue:44936", + "issue:43824|issue:43975", + "issue:43295|issue:43450", + "issue:44514|issue:44521", + "issue:38175|issue:45412", + "issue:43295|issue:43873", + "issue:42915|issue:43824", + "issue:43295|issue:43994", + "issue:44360|issue:44561", + "issue:44442|issue:45198", + "issue:38175|issue:43493", + "issue:43278|issue:43856", + "issue:43827|issue:43901", + "issue:43653|issue:44112", + "issue:44206|issue:45584", + "issue:43450|issue:43493", + "issue:45127|issue:45245", + "issue:43450|issue:43575", + "issue:43819|issue:45083", + "issue:44561|issue:44779", + "issue:45290|issue:45381", + "issue:37428|issue:43425", + "issue:44462|issue:44561", + "issue:44610|issue:44857", + "issue:43819|issue:44112" + ], + "prepared_review_unit_hash": "8900d69b755feb63dabd49649524c143250a9cc91ada5dd59bf094b15aa70ebe", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 32, + "serialized_chars": 37287, + "estimated_input_tokens": 9322, + "estimated_eval_tokens": 18900 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is heterogeneous: the soft pairs share broad topical similarity but not the same concrete bug/change, so none should be 
merged as duplicates.", + "confidence": 0.97, + "canonical_issue_reason": "No canonical issue stands out; the items span unrelated regressions across import errors, tokenizers, chat templates, processors, training, docs, and memory/loading behavior.", + "canonical_pr_reason": null, + "best_issue_reason": "No single issue is a good global representative because the cluster is not a single underlying defect but many distinct problems with only superficial similarity.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44466", + "right": "issue:45127", + "accept": false, + "reason": "Different bugs: one is about inconsistent lm_head.weight serialization, the other about LoRA merge collapse with extended vocabulary and tied embeddings." + }, + { + "left": "issue:44792", + "right": "issue:45563", + "accept": false, + "reason": "Different symptoms and code paths: janus image-generation test failure vs a stale num_return_sequences warning in paged generate()." + }, + { + "left": "issue:37428", + "right": "issue:43824", + "accept": false, + "reason": "Both are import errors, but for different missing symbols and likely different regressions; not the same underlying fix." + }, + { + "left": "issue:43873", + "right": "issue:44464", + "accept": false, + "reason": "Quantization offloading behavior and chunked generation with compiled forward are unrelated concrete problems." + }, + { + "left": "issue:43572", + "right": "issue:44987", + "accept": false, + "reason": "Version-related loading failures, but with different models and failure modes; too broad to treat as one bug." + }, + { + "left": "issue:43388", + "right": "issue:43526", + "accept": false, + "reason": "Both involve labels, but one is a gather_for_metrics truncation issue and the other is a BeitImageProcessorFast reduce_labels bug." 
+ }, + { + "left": "issue:43572", + "right": "issue:43957", + "accept": false, + "reason": "Different regressions: missing StableLmConfig field vs meta-device loading failures in some models." + }, + { + "left": "issue:43844", + "right": "issue:44936", + "accept": false, + "reason": "Training instability under ZeRO-3 is unrelated to trainer.evaluate() failing after train()." + }, + { + "left": "issue:43824", + "right": "issue:43975", + "accept": false, + "reason": "Qwen2.5-VL import failure and DeepSeek detokenization regression are separate issues." + }, + { + "left": "issue:43295", + "right": "issue:43450", + "accept": false, + "reason": "Processor/tokenizer regression and batched video processor shape bug are different code paths." + }, + { + "left": "issue:44514", + "right": "issue:44521", + "accept": false, + "reason": "Both are apply_chat_template-related, but one is a batching crash with padding=False and the other is incorrect assistant masks for multimodal inputs." + }, + { + "left": "issue:38175", + "right": "issue:45412", + "accept": false, + "reason": "SigLIP2 zero probabilities and RT-DETR memory not releasing are unrelated." + }, + { + "left": "issue:43295", + "right": "issue:43873", + "accept": false, + "reason": "Custom processor/tokenizer regression vs quantization offloading issue; no shared concrete bug." + }, + { + "left": "issue:42915", + "right": "issue:43824", + "accept": false, + "reason": "FineGrainedFP8Config failure in Qwen3Moe is unrelated to a missing Qwen2.5-VL export." + }, + { + "left": "issue:43295", + "right": "issue:43994", + "accept": false, + "reason": "Different regressions in different models: processor/tokenizer access vs SigLIP2 inference correctness." + }, + { + "left": "issue:44360", + "right": "issue:44561", + "accept": false, + "reason": "DSA indexer activation issue and removal of is_torch_fx_available breaking remote-code models are unrelated." 
+ }, + { + "left": "issue:44442", + "right": "issue:45198", + "accept": false, + "reason": "AutoTokenizer failing for FastSpeech2ConformerTokenizer and Wav2Vec2 save/tokenization failure are distinct tokenizer problems." + }, + { + "left": "issue:43278", + "right": "issue:43856", + "accept": false, + "reason": "Embedding dtype drift in eval and Qwen3 MoE memory inefficiency are different problems." + }, + { + "left": "issue:43827", + "right": "issue:43901", + "accept": false, + "reason": "Both are docs issues, but they cover different removed/deprecated pipeline APIs and are not the same change." + }, + { + "left": "issue:43653", + "right": "issue:44112", + "accept": false, + "reason": "BigBirdTokenizer special-token registration and a GraniteSpeech CI device-override test are unrelated." + }, + { + "left": "issue:44206", + "right": "issue:45584", + "accept": false, + "reason": "LASR feature extractor center-arg crash and Whisper empty-transcription failure after align_special_tokens are different audio bugs." + }, + { + "left": "issue:43450", + "right": "issue:43493", + "accept": false, + "reason": "Video batch shape bug and SigLIP2 HF-vs-JAX discrepancy are separate issues." + }, + { + "left": "issue:45127", + "right": "issue:45245", + "accept": false, + "reason": "LoRA merge collapse with tied embeddings and a categories-count runtime error are unrelated." + }, + { + "left": "issue:43450", + "right": "issue:43575", + "accept": false, + "reason": "Batched video processor shape error and Qwen2-57B-A14B-Instruct TP OOM are unrelated." + }, + { + "left": "issue:43819", + "right": "issue:45083", + "accept": false, + "reason": "DAC.from_latents mismatch and a Qwen3 Omni MoE feature-length helper issue are different code paths." + }, + { + "left": "issue:44561", + "right": "issue:44779", + "accept": false, + "reason": "Removal of is_torch_fx_available and DeepSeek tokenizer regression are unrelated." 
+ }, + { + "left": "issue:45290", + "right": "issue:45381", + "accept": false, + "reason": "Chat-template tool-call crash and Qwen2.5-VL video vision_position_ids bug are distinct multimodal issues." + }, + { + "left": "issue:37428", + "right": "issue:43425", + "accept": false, + "reason": "Missing flash-attention helper import and Torch 2.10 incompatibility are not the same bug." + }, + { + "left": "issue:44462", + "right": "issue:44561", + "accept": false, + "reason": "Tokenizer.json lookup behavior and trust_remote_code breakage from is_torch_fx_available removal are unrelated." + }, + { + "left": "issue:44610", + "right": "issue:44857", + "accept": false, + "reason": "OmDet-Turbo input-size mismatch and LwDetrImageLoss AMP crash are different failures." + }, + { + "left": "issue:43819", + "right": "issue:44112", + "accept": false, + "reason": "DAC latent conversion mismatch and GraniteSpeech CI test failure do not share a concrete underlying defect." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 22, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:29127", + "issue:29942", + "issue:32090", + "issue:37428", + "issue:38175", + "issue:39692", + "issue:40444", + "issue:43262", + "issue:43278", + "issue:43335", + "issue:43450", + "issue:43504", + "issue:43526", + "issue:43550", + "issue:43606", + "issue:43611", + "issue:43653", + "issue:43720", + "issue:43723", + "issue:43819", + "issue:43825", + "issue:43931", + "issue:44112", + "issue:44292", + "issue:44351", + "issue:44448", + "issue:44493", + "issue:44554", + "issue:44568", + "issue:44743", + "issue:44779", + "issue:44855", 
+ "issue:44857", + "issue:44977", + "issue:45072", + "issue:45083", + "issue:45200", + "issue:45229", + "issue:45310", + "issue:45381", + "issue:45459", + "issue:45478", + "issue:45491", + "issue:45520", + "issue:45588", + "issue:45593", + "issue:45712" + ], + "soft_pairs": [ + "issue:44857|issue:45593", + "issue:43262|issue:43450", + "issue:43278|issue:43825", + "issue:43606|issue:45083", + "issue:45200|issue:45588", + "issue:44448|issue:44743", + "issue:45310|issue:45478", + "issue:37428|issue:44351", + "issue:29942|issue:32090", + "issue:43335|issue:43550", + "issue:43611|issue:43931", + "issue:39692|issue:43526", + "issue:43819|issue:44493", + "issue:45200|issue:45381", + "issue:43335|issue:43653", + "issue:29127|issue:32090", + "issue:44292|issue:45520", + "issue:44855|issue:45083", + "issue:43335|issue:45083", + "issue:44448|issue:44779", + "issue:43335|issue:43504", + "issue:39692|issue:44493", + "issue:43550|issue:44112", + "issue:38175|issue:44493", + "issue:44977|issue:45229", + "issue:43335|issue:45072", + "issue:45459|issue:45712", + "issue:40444|issue:43526", + "issue:43723|issue:44554", + "issue:43335|issue:45593", + "issue:43720|issue:44112", + "issue:44568|issue:45491" + ], + "prepared_review_unit_hash": "4893f886797557ded8acb589c8cb11294498b33c3967d3abc36fb83bb9e3b9d7", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 32, + "serialized_chars": 36962, + "estimated_input_tokens": 9241, + "estimated_eval_tokens": 18738 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a grab-bag of unrelated Transformers issues across many model families and subsystems. 
The only clear duplicate is the repeated Qwen3.5 MoE `from_pretrained` regression; the rest are only loosely similar by broad theme and should stay separate.", + "confidence": 0.95, + "canonical_issue_reason": "issue:45310 is the best canonical issue because it is the cleaner, earlier report of the Qwen3.5 MoE `from_pretrained` regression, and it is an exact duplicate of issue:45478.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45310 is the strongest representative issue in the cluster since it has a clear, concrete failure mode and matches another report verbatim.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44857", + "right": "issue:45593", + "accept": false, + "reason": "Both are vision/detection losses, but they describe different failures: AMP/CUDA crash vs missing auxiliary losses." + }, + { + "left": "issue:43262", + "right": "issue:43450", + "accept": false, + "reason": "Different media pipelines and different bugs: audio chat-template sampling rate vs batched video output shape." + }, + { + "left": "issue:43278", + "right": "issue:43825", + "accept": false, + "reason": "One is a dtype regression in evaluation, the other is a v5 pipeline error-message issue; no shared code-path bug." + }, + { + "left": "issue:43606", + "right": "issue:45083", + "accept": false, + "reason": "Completely different components: CPU offload device mismatch vs a Qwen3 Omni MoE feature-length helper behavior." + }, + { + "left": "issue:45200", + "right": "issue:45588", + "accept": false, + "reason": "Different models and failures: Gemma4 token-type defaults vs flash-attention crash on sink-less models." + }, + { + "left": "issue:44448", + "right": "issue:44743", + "accept": false, + "reason": "Both affect generation behavior, but the concrete bugs differ: Pegasus output drift vs Qwen3.5 recurrent-state reset." 
+ }, + { + "left": "issue:45310", + "right": "issue:45478", + "accept": true, + "reason": "Same exact reported bug: Qwen3.5 MoE `from_pretrained` failure on transformers>=5.4.0." + }, + { + "left": "issue:37428", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but they involve different symbols and different failure causes." + }, + { + "left": "issue:29942", + "right": "issue:32090", + "accept": false, + "reason": "Flash Attention 2 test failures and a Trainer `_gpu_broadcast_one` NoneType error are unrelated." + }, + { + "left": "issue:43335", + "right": "issue:43550", + "accept": false, + "reason": "Different models and paths: SwitchTransformers sparse-layer config bug vs Bamba torch.compile/SDPA failure." + }, + { + "left": "issue:43611", + "right": "issue:43931", + "accept": false, + "reason": "Both are loading problems, but one concerns `base_model_prefix` and the other a specific Qwen3-VL weight-shape mismatch." + }, + { + "left": "issue:39692", + "right": "issue:43526", + "accept": false, + "reason": "SigLIP2 doc-example issues and BeitImageProcessorFast label reduction are unrelated processor/model bugs." + }, + { + "left": "issue:43819", + "right": "issue:44493", + "accept": false, + "reason": "Different code paths: DAC latent decoding mismatch vs widespread unexpected position-id key warnings." + }, + { + "left": "issue:45200", + "right": "issue:45381", + "accept": false, + "reason": "Both involve multimodal inputs, but one is token-type defaults and the other is incorrect vision position IDs." + }, + { + "left": "issue:43335", + "right": "issue:43653", + "accept": false, + "reason": "Different tokenizer/model bugs: sparse-layer creation vs BigBird mask token registration." + }, + { + "left": "issue:29127", + "right": "issue:32090", + "accept": false, + "reason": "A model-specific LayoutLMv3 error message issue is not the same bug as Trainer GPU broadcast failing on NoneType." 
+ }, + { + "left": "issue:44292", + "right": "issue:45520", + "accept": false, + "reason": "Different runtime failures: Qwen-3-8B-NVFP4 execution vs a Python 3.13 `flash_attn` import key error." + }, + { + "left": "issue:44855", + "right": "issue:45083", + "accept": false, + "reason": "DebertaV2 Python 3.13 parsing/import issue is unrelated to the Qwen3 Omni MoE helper function behavior." + }, + { + "left": "issue:43335", + "right": "issue:43504", + "accept": false, + "reason": "Both touch model setup, but one is SwitchTransformers config creation and the other is BEiT pretrained loading legacy-field handling." + }, + { + "left": "issue:39692", + "right": "issue:44493", + "accept": false, + "reason": "A SigLIP2 documentation/example regression is not the same as a broad position-id key issue across many models." + }, + { + "left": "issue:43550", + "right": "issue:44112", + "accept": false, + "reason": "A model compilation/SDPA bug and a stale CI device-override test are different problems." + }, + { + "left": "issue:38175", + "right": "issue:44493", + "accept": false, + "reason": "Zero probabilities in SigLIP2 are unrelated to unexpected position-id keys." + }, + { + "left": "issue:44977", + "right": "issue:45229", + "accept": false, + "reason": "Qwen3.5 flash-attention generation failure and Gemma4 multi-GPU OOM are distinct performance/behavior issues." + }, + { + "left": "issue:43335", + "right": "issue:45072", + "accept": false, + "reason": "Different model families and symptoms: SwitchTransformers sparse-layer config vs bfloat16 inference dtype mismatches." + }, + { + "left": "issue:45459", + "right": "issue:45712", + "accept": false, + "reason": "Tokenizer protobuf error hiding and dummy-class cleanup are unrelated repository issues." + }, + { + "left": "issue:40444", + "right": "issue:43526", + "accept": false, + "reason": "Qwen2.5-VL iterable dataset multi-image failure is unrelated to BeitImageProcessorFast label reduction." 
+ }, + { + "left": "issue:43723", + "right": "issue:44554", + "accept": false, + "reason": "Tokenizer loading in v5 and an MPS attention correctness issue are unrelated." + }, + { + "left": "issue:43335", + "right": "issue:45593", + "accept": false, + "reason": "SwitchTransformers sparse-layer config bug and D-FINE auxiliary-loss behavior do not share the same code-path." + }, + { + "left": "issue:43720", + "right": "issue:44112", + "accept": false, + "reason": "BitNet packed-weight loading and a GraniteSpeech CI device override test are different failures." + }, + { + "left": "issue:44568", + "right": "issue:45491", + "accept": false, + "reason": "Tokenizer special-token handling and Gemma3 NaN embeddings under sliding-window attention are unrelated." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "Grounded in the packet: the cluster is indeed dominated by disparate issue titles across unrelated subsystems, and the Qwen3.5 MoE `from_pretrained` pair is the only clear duplicate based on identical titles and chronology. The soft-edge rejections are conservative overall and do not overstate linkage." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 23, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:25251", + "issue:29942", + "issue:32090", + "issue:33357", + "issue:34689", + "issue:35141", + "issue:37428", + "issue:41720", + "issue:42915", + "issue:42994", + "issue:43065", + "issue:43066", + "issue:43278", + "issue:43329", + "issue:43381", + "issue:43412", + "issue:43550", + "issue:43606", + "issue:43653", + "issue:43701", + "issue:43873", + "issue:43927", + "issue:43976", + "issue:43994", + "issue:44186", + "issue:44448", + "issue:44451", + "issue:44462", + "issue:44466", + "issue:44493", + "issue:44743", + "issue:44857", + "issue:44977", + "issue:44987", + "issue:45072", + "issue:45083", + "issue:45198", + "issue:45229", + "issue:45245", + "issue:45372", + "issue:45406", + "issue:45412", + "issue:45440", + "issue:45459", + "issue:45520", + "issue:45563", + "issue:45588", + "issue:45698" + ], + "soft_pairs": [ + "issue:34689|issue:35141", + "issue:44448|issue:44977", + "issue:43606|issue:44857", + "issue:44857|issue:45072", + "issue:43066|issue:44462", + "issue:37428|issue:44987", + "issue:43927|issue:45698", + "issue:45372|issue:45588", + "issue:44977|issue:45563", + "issue:43701|issue:45083", + "issue:43873|issue:44493", + "issue:43550|issue:45083", + "issue:43976|issue:45440", + "issue:43412|issue:45588", + "issue:29942|issue:35141", + "issue:43873|issue:45229", + "issue:45245|issue:45563", + "issue:43278|issue:43329", + "issue:32090|issue:33357", + "issue:45372|issue:45406", + "issue:41720|issue:45229", + "issue:43065|issue:43994", + "issue:45372|issue:45459", + "issue:43065|issue:45412", + 
"issue:44448|issue:44466", + "issue:44451|issue:45520", + "issue:25251|issue:32090", + "issue:44857|issue:45588", + "issue:43653|issue:44743", + "issue:43278|issue:43381", + "issue:43994|issue:45412", + "issue:42915|issue:42994", + "issue:44186|issue:45198" + ], + "prepared_review_unit_hash": "6205f2b30e277a15cc26d0234eec7189de6b5085e8cb8bf75bb5b50676e7cd96", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 33, + "serialized_chars": 37489, + "estimated_input_tokens": 9373, + "estimated_eval_tokens": 19002 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are superficially similar in wording but describe different models, features, and failure modes; I found no strong duplicate pairs. The most central issue is the open offloading/quantization report.", + "confidence": 0.74, + "canonical_issue_reason": "issue:43873 is the most central/open item in the set, with the most inbound references and discussion; its offloading/quantization scope is broad enough to act as the cluster representative.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43873 is the best representative issue because it is open, broadly scoped, and appears to be the most referenced problem in the group.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:34689", + "right": "issue:35141", + "accept": false, + "reason": "Different bug classes: model loading regression for Llama 3.2 vision vs. output embedding reinitialization after resize/tied weights." 
+ }, + { + "left": "issue:44448", + "right": "issue:44977", + "accept": false, + "reason": "Both mention output differences, but one is Pegasus v4/v5 behavior and the other is Qwen3.5 flash-attention generation; different code paths." + }, + { + "left": "issue:43606", + "right": "issue:44857", + "accept": false, + "reason": "Device-mismatch offload failure for bark-small vs. AMP float16 crash in LwDetrImageLoss; unrelated symptoms and subsystems." + }, + { + "left": "issue:44857", + "right": "issue:45072", + "accept": false, + "reason": "One is a loss/AMP CUDA crash, the other is a dtype mismatch during bfloat16 inference in different models." + }, + { + "left": "issue:43066", + "right": "issue:44462", + "accept": false, + "reason": "Tokenizer decoder type mismatch in v5 vs. AutoTokenizer ignoring tokenizer.json; related area, but not the same underlying bug." + }, + { + "left": "issue:37428", + "right": "issue:44987", + "accept": false, + "reason": "Different import/version issues: missing flash attention symbol vs. failing model load for physical-intelligence/fast." + }, + { + "left": "issue:43927", + "right": "issue:45698", + "accept": false, + "reason": "Both involve save/load and custom module handling, but one is config token IDs causing generation IndexError and the other is loading the wrong custom module after save_pretrained." + }, + { + "left": "issue:45372", + "right": "issue:45588", + "accept": false, + "reason": "Gemma 4 processor import failure from mistral_common vs. flash_attention.py crashing on s_aux=None; different failure points." + }, + { + "left": "issue:44977", + "right": "issue:45563", + "accept": false, + "reason": "Qwen3.5 flash-attention generation bug is unrelated to a stale warning in paged generate()." + }, + { + "left": "issue:43701", + "right": "issue:45083", + "accept": false, + "reason": "Checkpoint resume key mismatch vs. unexpected helper output lengths in qwen3_omni_moe; unrelated bugs." 
+ }, + { + "left": "issue:43873", + "right": "issue:44493", + "accept": false, + "reason": "Offloading/quantization behavior vs. unexpected position-id key warnings; not the same code-path problem." + }, + { + "left": "issue:43550", + "right": "issue:45083", + "accept": false, + "reason": "torch.compile with SDPA in Bamba vs. a feature-extraction length helper bug in qwen3_omni_moe; different models and failures." + }, + { + "left": "issue:43976", + "right": "issue:45440", + "accept": false, + "reason": "Python version compatibility issue vs. DeepseekV3MoE divergence from remote implementation; unrelated." + }, + { + "left": "issue:43412", + "right": "issue:45588", + "accept": false, + "reason": "Executorch export issues and flash-attention sinkless-model crash are distinct export/runtime paths." + }, + { + "left": "issue:29942", + "right": "issue:35141", + "accept": false, + "reason": "Flash Attention 2 test failures are unrelated to embedding reinitialization after resize." + }, + { + "left": "issue:43873", + "right": "issue:45229", + "accept": false, + "reason": "Quantization/offloading bug vs. multi-GPU CUDA OOM for Gemma4; different resource problems." + }, + { + "left": "issue:45245", + "right": "issue:45563", + "accept": false, + "reason": "Category-cardinality runtime error is unrelated to a generate() warning." + }, + { + "left": "issue:43278", + "right": "issue:43329", + "accept": false, + "reason": "Embedding dtype drift between train/eval vs. multimodal token counting bug in video branch; different components." + }, + { + "left": "issue:32090", + "right": "issue:33357", + "accept": false, + "reason": "Trainer broadcast NoneType error vs. MacOS bus error with CLIP; different platforms and failure modes." + }, + { + "left": "issue:45372", + "right": "issue:45406", + "accept": false, + "reason": "Both affect Gemma 4, but one is a processor import dependency issue and the other is a missing _tokenizer attribute in transformers serve." 
+ }, + { + "left": "issue:41720", + "right": "issue:45229", + "accept": false, + "reason": "Auto device mapping CUDA assert for Qwen3 vs. Gemma4 multi-GPU OOM; not the same bug." + }, + { + "left": "issue:43065", + "right": "issue:43994", + "accept": false, + "reason": "Sam3PixelDecoder dummy Conv2d issue vs. SigLIP2 nonsensical AutoModel/pipeline results; different models and causes." + }, + { + "left": "issue:45372", + "right": "issue:45459", + "accept": false, + "reason": "Gemma 4 processor import regression vs. protobuf-related tokenizer error masking; unrelated." + }, + { + "left": "issue:43065", + "right": "issue:45412", + "accept": false, + "reason": "Dummy Conv2d in Sam3PixelDecoder is not the same as RT-DETR memory not being released." + }, + { + "left": "issue:44448", + "right": "issue:44466", + "accept": false, + "reason": "Pegasus output changes across versions vs. lm_head.weight serialization inconsistency; both are version-related but different underlying defects." + }, + { + "left": "issue:44451", + "right": "issue:45520", + "accept": false, + "reason": "ScandiBERT loading problem vs. Python 3.13 flash_attn import KeyError; unrelated loader vs. optional-dependency issue." + }, + { + "left": "issue:25251", + "right": "issue:32090", + "accept": false, + "reason": "Pipeline top_k nesting bug is unrelated to Trainer _gpu_broadcast_one NoneType failures." + }, + { + "left": "issue:44857", + "right": "issue:45588", + "accept": false, + "reason": "AMP loss crash and flash_attention sinkless-model crash are different runtime failures." + }, + { + "left": "issue:43653", + "right": "issue:44743", + "accept": false, + "reason": "Tokenizer special-token registration bug vs. recurrent state reset in modular_qwen3_5; different subsystems." + }, + { + "left": "issue:43278", + "right": "issue:43381", + "accept": false, + "reason": "Embedding dtype change in eval vs. gradient checkpointing disallowed in eval mode; not the same issue." 
+ }, + { + "left": "issue:43994", + "right": "issue:45412", + "accept": false, + "reason": "SigLIP2 wrong outputs vs. RT-DETR memory leak; separate problems." + }, + { + "left": "issue:42915", + "right": "issue:42994", + "accept": false, + "reason": "Qwen3Moe FP8 config failure vs. quantized model saving failure; both quantization-related but not the same concrete bug." + }, + { + "left": "issue:44186", + "right": "issue:45198", + "accept": false, + "reason": "LayoutLMv2Tokenizer NER/padding crash vs. Wav2Vec2 save_pretrained/tokenization failure; unrelated tokenizer behaviors." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 24, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:29127", + "issue:29942", + "issue:30064", + "issue:32090", + "issue:33290", + "issue:33357", + "issue:34567", + "issue:35141", + "issue:36010", + "issue:39692", + "issue:42915", + "issue:43232", + "issue:43257", + "issue:43262", + "issue:43278", + "issue:43334", + "issue:43381", + "issue:43388", + "issue:43421", + "issue:43526", + "issue:43531", + "issue:43653", + "issue:43701", + "issue:43756", + "issue:43824", + "issue:43931", + "issue:44186", + "issue:44206", + "issue:44265", + "issue:44279", + "issue:44361", + "issue:44442", + "issue:44464", + "issue:44479", + "issue:44610", + "issue:44857", + "issue:44871", + "issue:44977", + "issue:45072", + "issue:45103", + "issue:45198", + "issue:45229", + "issue:45245", + "issue:45372", + "issue:45412", + "issue:45440", + "issue:45491", + "issue:45593" + ], + "soft_pairs": [ + "issue:43421|issue:45491", + "issue:44361|issue:45198", + "issue:43756|issue:45440", + 
"issue:44206|issue:44479", + "issue:44265|issue:45198", + "issue:44871|issue:44977", + "issue:43257|issue:43278", + "issue:29942|issue:30064", + "issue:44610|issue:45412", + "issue:45440|issue:45593", + "issue:29127|issue:34567", + "issue:45103|issue:45372", + "issue:32090|issue:33290", + "issue:33357|issue:39692", + "issue:43824|issue:44279", + "issue:43381|issue:43701", + "issue:45072|issue:45229", + "issue:43334|issue:43531", + "issue:43653|issue:44442", + "issue:43526|issue:44186", + "issue:29942|issue:36010", + "issue:44464|issue:45491", + "issue:29127|issue:33357", + "issue:44977|issue:45245", + "issue:42915|issue:43931", + "issue:29127|issue:35141", + "issue:43232|issue:43388", + "issue:43526|issue:44857", + "issue:43262|issue:44206", + "issue:44442|issue:45072" + ], + "prepared_review_unit_hash": "a9a86d85228a5dbe3eeb5a0868ab06d8a24021afbccb6bbf58e9ed6e55838fad", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36814, + "estimated_input_tokens": 9204, + "estimated_eval_tokens": 18664 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is highly heterogeneous: the issues span unrelated tokenizer, generation, vision, audio, training, and dtype/precision bugs. 
I do not see a credible duplicate core, and there are no PRs to consolidate.", + "confidence": 0.94, + "canonical_issue_reason": "No single canonical issue stands out; the items are not the same underlying bug and should not be merged as duplicates.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a strong global representative because the cluster is not a coherent duplicate set; the similarity signal appears to be broad Transformers/topic overlap rather than shared root cause.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43421", + "right": "issue:45491", + "accept": false, + "reason": "Different problems: runtime special-token post-processing update vs Gemma3 NaN embeddings from sliding-window/all-padding behavior." + }, + { + "left": "issue:44361", + "right": "issue:45198", + "accept": false, + "reason": "Different models and failures: MLukeTokenizer task AttributeError vs Wav2Vec2 save_pretrained/tokenization breakage." + }, + { + "left": "issue:43756", + "right": "issue:45440", + "accept": false, + "reason": "Both are model-specific, but they describe different bugs: Smollm3 RoPE layer dropping vs DeepSeekV3 implementation divergence." + }, + { + "left": "issue:44206", + "right": "issue:44479", + "accept": false, + "reason": "Unsupported feature argument in LASR extractor vs Qwen video-input regression; unrelated code paths." + }, + { + "left": "issue:44265", + "right": "issue:45198", + "accept": false, + "reason": "torch.export/torch_compilable_check failure is unrelated to Wav2Vec2 tokenizer/save_pretrained issues." + }, + { + "left": "issue:44871", + "right": "issue:44977", + "accept": false, + "reason": "Gemma eos_token_id config mismatch is a different bug from Qwen3.5 flash-attention generation failures." 
+ }, + { + "left": "issue:43257", + "right": "issue:43278", + "accept": false, + "reason": "Accelerate/deepspeed weight conversion issue vs embedding dtype drift between train and eval; different symptoms and causes." + }, + { + "left": "issue:29942", + "right": "issue:30064", + "accept": false, + "reason": "Flash Attention 2 test failures are unrelated to void segmentation-map processing in an image processor." + }, + { + "left": "issue:44610", + "right": "issue:45412", + "accept": false, + "reason": "Processor size mismatch vs RT-DETR memory not being released; not the same defect." + }, + { + "left": "issue:45440", + "right": "issue:45593", + "accept": false, + "reason": "DeepSeekV3 native-vs-remote divergence and D-FINE auxiliary-loss omission are distinct model bugs." + }, + { + "left": "issue:29127", + "right": "issue:34567", + "accept": false, + "reason": "LayoutLMv3 error-message clarity and TrainerState token-count updating are unrelated issues." + }, + { + "left": "issue:45103", + "right": "issue:45372", + "accept": false, + "reason": "auto_docstring annotation handling vs missing ReasoningEffort import; different subsystems." + }, + { + "left": "issue:32090", + "right": "issue:33290", + "accept": false, + "reason": "NoneType broadcast error in Trainer vs deepspeed/adafactor OOM; no shared root cause." + }, + { + "left": "issue:33357", + "right": "issue:39692", + "accept": false, + "reason": "MacOS bus error with CLIP and SigLIP2 doc/example mismatch are different bugs." + }, + { + "left": "issue:43824", + "right": "issue:44279", + "accept": false, + "reason": "Specific Qwen2_5_VL import failure is not the same as a generic transformers dependency issue." + }, + { + "left": "issue:43381", + "right": "issue:43701", + "accept": false, + "reason": "Gradient checkpointing in eval mode and resume_from_checkpoint key mismatch are unrelated training bugs." 
+ }, + { + "left": "issue:45072", + "right": "issue:45229", + "accept": false, + "reason": "bf16 dtype mismatch in inference is unrelated to Gemma4 multi-GPU OOM." + }, + { + "left": "issue:43334", + "right": "issue:43531", + "accept": false, + "reason": "Missing pad_token_id on Qwen3-VL differs from Qwen3-MoE sliding_window behavior." + }, + { + "left": "issue:43653", + "right": "issue:44442", + "accept": false, + "reason": "BigBirdTokenizer special-token registration bug is unrelated to AutoTokenizer loading FastSpeech2ConformerTokenizer." + }, + { + "left": "issue:43526", + "right": "issue:44186", + "accept": false, + "reason": "BeitImageProcessorFast label reduction and LayoutLMv2Tokenizer NER/padding crashes affect different components." + }, + { + "left": "issue:29942", + "right": "issue:36010", + "accept": false, + "reason": "Flash Attention 2 test regressions are unrelated to the GenerationMixin import error." + }, + { + "left": "issue:44464", + "right": "issue:45491", + "accept": false, + "reason": "Compiled-forward chunked generation inconsistency is a different bug from Gemma3 sliding-window NaNs." + }, + { + "left": "issue:29127", + "right": "issue:33357", + "accept": false, + "reason": "LayoutLMv3 error messaging and CLIP MacOS bus error are unrelated." + }, + { + "left": "issue:44977", + "right": "issue:45245", + "accept": false, + "reason": "Flash-attention generation failure is unrelated to the category-count limit runtime error." + }, + { + "left": "issue:42915", + "right": "issue:43931", + "accept": false, + "reason": "Qwen3Moe FP8 config failure and Qwen3-VL weight-shape mismatch are different model-loading issues." + }, + { + "left": "issue:29127", + "right": "issue:35141", + "accept": false, + "reason": "LayoutLMv3 error clarity and embedding reinitialization on resize are unrelated." 
+ }, + { + "left": "issue:43232", + "right": "issue:43388", + "accept": false, + "reason": "Generation kwarg handling after sync_gpus and gather_for_metrics label truncation are different code paths." + }, + { + "left": "issue:43526", + "right": "issue:44857", + "accept": false, + "reason": "ImageProcessor label reduction bug vs CUDA float16 AMP crash in LwDetrImageLoss; unrelated." + }, + { + "left": "issue:43262", + "right": "issue:44206", + "accept": false, + "reason": "Audio chat-template sampling-rate default mismatch is unrelated to LASR feature-extractor argument handling." + }, + { + "left": "issue:44442", + "right": "issue:45072", + "accept": false, + "reason": "Tokenizer loading failure and bf16 inference dtype mismatch are not the same underlying bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 25, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:33290", + "issue:34567", + "issue:34689", + "issue:35141", + "issue:39692", + "issue:41628", + "issue:42907", + "issue:43316", + "issue:43381", + "issue:43441", + "issue:43452", + "issue:43493", + "issue:43525", + "issue:43526", + "issue:43550", + "issue:43575", + "issue:43611", + "issue:43646", + "issue:43701", + "issue:43723", + "issue:43761", + "issue:43827", + "issue:43873", + "issue:43931", + "issue:43994", + "issue:44077", + "issue:44079", + "issue:44188", + "issue:44206", + "issue:44387", + "issue:44442", + "issue:44479", + "issue:44743", + "issue:44811", + "issue:44898", + "issue:44977", + "issue:45003", + "issue:45216", + "issue:45290", + "issue:45372", + "issue:45440", + "issue:45479", + "issue:45491", + 
"issue:45584", + "issue:45588", + "issue:45600", + "issue:45698" + ], + "soft_pairs": [ + "issue:43441|issue:45588", + "issue:43761|issue:44079", + "issue:43526|issue:43994", + "issue:45003|issue:45440", + "issue:43723|issue:45479", + "issue:44743|issue:44811", + "issue:43452|issue:45588", + "issue:43316|issue:43525", + "issue:43646|issue:44077", + "issue:43575|issue:43701", + "issue:45588|issue:45600", + "issue:44479|issue:45290", + "issue:44977|issue:45584", + "issue:43761|issue:44188", + "issue:43931|issue:45216", + "issue:43575|issue:44387", + "issue:42907|issue:45698", + "issue:43994|issue:44206", + "issue:43873|issue:45588", + "issue:39692|issue:45491", + "issue:34689|issue:43611", + "issue:33290|issue:34567", + "issue:44442|issue:44898", + "issue:43493|issue:43827", + "issue:33290|issue:34689", + "issue:43381|issue:43873", + "issue:33290|issue:35141", + "issue:43550|issue:44743", + "issue:41628|issue:45372", + "issue:44077|issue:44479" + ], + "prepared_review_unit_hash": "e5d3b97c019d126817440bb3cea49e897166e1ddcd31f80083c7279070405d2f", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 30, + "serialized_chars": 36194, + "estimated_input_tokens": 9049, + "estimated_eval_tokens": 18354 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is mostly a set of superficially similar but distinct Transformers bugs; none of the soft pairs look like true duplicates. 
Several involve loading/saving, attention, tokenizer, or model-output issues, but the concrete failure modes differ.", + "confidence": 0.86, + "canonical_issue_reason": "issue:45588 is the most central concrete bug report in this cluster and appears in multiple soft-similarity links, but it should be treated as an anchor rather than a duplicate umbrella.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45588 is the best representative issue because it is specific, actionable, and the most hub-like item among the candidates, even though it does not unify the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43441", + "right": "issue:45588", + "accept": false, + "reason": "Different bugs: model-specific FlashAttention generation failure vs a flash_attention.py AttributeError on s_aux=None for sink-less models." + }, + { + "left": "issue:43761", + "right": "issue:44079", + "accept": false, + "reason": "Related area, but not the same bug: one is CLIPVisionModel.forward dropping hidden_states, the other is ModelOutput key assignment when a value was previously None." + }, + { + "left": "issue:43526", + "right": "issue:43994", + "accept": false, + "reason": "Different model/processor failures: BeitImageProcessorFast reduce_labels behavior vs SigLIP2 nonsensical outputs with AutoModel/pipeline." + }, + { + "left": "issue:45003", + "right": "issue:45440", + "accept": false, + "reason": "Unrelated issues: unsafe sys.modules access in modeling_utils vs DeepseekV3MoE diverging from remote implementation." + }, + { + "left": "issue:43723", + "right": "issue:45479", + "accept": false, + "reason": "Different failure modes: AutoTokenizer loading in v5 vs degenerate zero-loss classification with num_labels=1." + }, + { + "left": "issue:44743", + "right": "issue:44811", + "accept": false, + "reason": "Separate code paths: recurrent state reset with cache/seq_len>1 vs Whisper batch_decode ignoring skip_special_tokens." 
+ }, + { + "left": "issue:43452", + "right": "issue:45588", + "accept": false, + "reason": "Tokenizer/model loading with gguf_file is unrelated to the flash_attention integration crash." + }, + { + "left": "issue:43316", + "right": "issue:43525", + "accept": false, + "reason": "Both are config/API inconsistencies, but they concern different models and different missing attributes; not the same underlying bug." + }, + { + "left": "issue:43646", + "right": "issue:44077", + "accept": false, + "reason": "Broadly about initialization, but not the same concrete defect: generic v5 custom model init breakage vs patchtsmixer-specific optional post_init handling." + }, + { + "left": "issue:43575", + "right": "issue:43701", + "accept": false, + "reason": "Different problems: tensor-parallel OOM while loading a model vs resume_from_checkpoint key mismatch." + }, + { + "left": "issue:45588", + "right": "issue:45600", + "accept": false, + "reason": "Both involve internals around loading/integration, but one is a flash_attention crash and the other is a stale auto_mappings reference causing CI breakage." + }, + { + "left": "issue:44479", + "right": "issue:45290", + "accept": false, + "reason": "Different regressions: video input handling for Qwen variants vs apply_chat_template crashing on tool-call assistant messages with no content." + }, + { + "left": "issue:44977", + "right": "issue:45584", + "accept": false, + "reason": "Both are generation-related, but one is a Qwen3.5 flash-attention issue and the other is Whisper empty-transcription handling after align_special_tokens." + }, + { + "left": "issue:43761", + "right": "issue:44188", + "accept": false, + "reason": "Different bugs: hidden_states not returned vs divergent attention kernels under torch.compile branching." 
+ }, + { + "left": "issue:43931", + "right": "issue:45216", + "accept": false, + "reason": "Both are load/save regressions, but one is weight-shape mismatch for a Qwen3-VL checkpoint and the other is incorrect checkpoint saving for Qwen3.5." + }, + { + "left": "issue:43575", + "right": "issue:44387", + "accept": false, + "reason": "Both mention OOM, but one is tensor-parallel model loading and the other is increased reserved memory under int4 quantization." + }, + { + "left": "issue:42907", + "right": "issue:45698", + "accept": false, + "reason": "Both mention save/load behavior, but one is saving dequantized models and the other is loading the wrong custom module after save_pretrained." + }, + { + "left": "issue:43994", + "right": "issue:44206", + "accept": false, + "reason": "Different concrete regressions: SigLIP2 wrong outputs vs LasrFeatureExtractor crashing on an unsupported center argument." + }, + { + "left": "issue:43873", + "right": "issue:45588", + "accept": false, + "reason": "Different subsystems and failure modes: quantization/offloading behavior vs flash_attention integration AttributeError." + }, + { + "left": "issue:39692", + "right": "issue:45491", + "accept": false, + "reason": "One is a docs/example problem for SigLIP2, the other is a runtime NaN bug in Gemma3 with sliding-window attention." + }, + { + "left": "issue:34689", + "right": "issue:43611", + "accept": false, + "reason": "Both are model-loading regressions, but they involve different root causes and different affected paths/models." + }, + { + "left": "issue:33290", + "right": "issue:34567", + "accept": false, + "reason": "Unrelated trainer/runtime issues: Adafactor+DeepSpeed OOM vs TrainerState.num_input_tokens_seen not updating." + }, + { + "left": "issue:44442", + "right": "issue:44898", + "accept": false, + "reason": "Different tokenizer vs image-classification bugs; no shared concrete code path." 
+ }, + { + "left": "issue:43493", + "right": "issue:43827", + "accept": false, + "reason": "Different concerns: SigLIP2 implementation mismatch vs docs still referencing removed pipeline() APIs." + }, + { + "left": "issue:33290", + "right": "issue:34689", + "accept": false, + "reason": "OOM during training with DeepSpeed is unrelated to Llama 3.2 Vision model loading breakage." + }, + { + "left": "issue:43381", + "right": "issue:43873", + "accept": false, + "reason": "Eval-mode gradient checkpointing and quantization offloading are separate features with different failure modes." + }, + { + "left": "issue:33290", + "right": "issue:35141", + "accept": false, + "reason": "Training OOM is unrelated to embedding reinitialization after resizing token embeddings." + }, + { + "left": "issue:43550", + "right": "issue:44743", + "accept": false, + "reason": "Different code paths: Bamba SDPA torch.compile failure vs Qwen3.5 recurrent state reset with cache." + }, + { + "left": "issue:41628", + "right": "issue:45372", + "accept": false, + "reason": "Both are import-related, but one is a direct AutoImageProcessor import removal issue and the other is a Gemma 4 processor dependency/import failure." + }, + { + "left": "issue:44077", + "right": "issue:44479", + "accept": false, + "reason": "PatchTSMixer post_init handling is unrelated to Qwen video-input regressions." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 26, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:16998", + "issue:22355", + "issue:25251", + "issue:28282", + "issue:33290", + "issue:34689", + "issue:36010", + "issue:41950", + "issue:42175", + "issue:43065", + "issue:43295", + "issue:43316", + "issue:43381", + "issue:43388", + "issue:43404", + "issue:43441", + "issue:43475", + "issue:43540", + "issue:43643", + "issue:43673", + "issue:43688", + "issue:43716", + "issue:43720", + "issue:43742", + "issue:43749", + "issue:43761", + "issue:43825", + "issue:43856", + "issue:43873", + "issue:43931", + "issue:44162", + "issue:44190", + "issue:44442", + "issue:44479", + "issue:44496", + "issue:44514", + "issue:44617", + "issue:44743", + "issue:44792", + "issue:44855", + "issue:44871", + "issue:44877", + "issue:44913", + "issue:45127", + "issue:45137", + "issue:45200", + "issue:45584", + "issue:45656" + ], + "soft_pairs": [ + "issue:43643|issue:44913", + "issue:44190|issue:44496", + "issue:43720|issue:44442", + "issue:43749|issue:43931", + "issue:43873|issue:44871", + "issue:33290|issue:45656", + "issue:44855|issue:45137", + "issue:34689|issue:36010", + "issue:43673|issue:44743", + "issue:43381|issue:43856", + "issue:43316|issue:44877", + "issue:43540|issue:44514", + "issue:44162|issue:44792", + "issue:42175|issue:43316", + "issue:43404|issue:43441", + "issue:43381|issue:43742", + "issue:41950|issue:43475", + "issue:43316|issue:43716", + "issue:25251|issue:28282", + "issue:16998|issue:22355", + "issue:43388|issue:43688", + "issue:43295|issue:43825", + "issue:43065|issue:44617", + 
"issue:44871|issue:45584", + "issue:43761|issue:44479", + "issue:45127|issue:45200" + ], + "prepared_review_unit_hash": "494d070670bd1ce22f40ebf12ef7b4a2150f99566e558c309d20098aaaa03a33", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 26, + "serialized_chars": 35389, + "estimated_input_tokens": 8848, + "estimated_eval_tokens": 17952 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a loose set of Transformers bug reports around model/config loading, processor behavior, and regression issues, but the items are not duplicates of one another. The soft edges are similarity-only and should be rejected.", + "confidence": 0.78, + "canonical_issue_reason": "No single issue fully represents the cluster, but issue 43643 is the best anchor because it is a concrete config-loading regression with a broad API surface (`AutoConfig.from_pretrained` returning incomplete objects).", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43643 is the most suitable single representative: it is specific, user-facing, and sits in the center of the broader config/load regression theme present in several nearby issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43643", + "right": "issue:44913", + "accept": false, + "reason": "Both are config-related, but one is missing fields on load and the other loses `rotary_pct` on reload; different bugs and code paths." + }, + { + "left": "issue:44190", + "right": "issue:44496", + "accept": false, + "reason": "Local dataset loading error vs missing `model_type` in model config; unrelated failure modes." 
+ }, + { + "left": "issue:43720", + "right": "issue:44442", + "accept": false, + "reason": "BitNet packed-weight loading bug and AutoTokenizer tokenizer-class loading failure are distinct issues." + }, + { + "left": "issue:43749", + "right": "issue:43931", + "accept": false, + "reason": "FSDP RAM-efficient loading vs Qwen3-VL shape mismatch are different model-loading problems." + }, + { + "left": "issue:43873", + "right": "issue:44871", + "accept": false, + "reason": "Quantization/offloading behavior and Gemma eos-token config mismatch are unrelated." + }, + { + "left": "issue:33290", + "right": "issue:45656", + "accept": false, + "reason": "Both involve DeepSpeed/optimizer behavior, but one is OOM with Adafactor and the other is duplicate optimizer stepping." + }, + { + "left": "issue:44855", + "right": "issue:45137", + "accept": false, + "reason": "Python 3.13 JIT parsing error in DebertaV2 vs DeepSpeed ZeRO3 deque underflow; unrelated." + }, + { + "left": "issue:34689", + "right": "issue:36010", + "accept": false, + "reason": "A model-loading regression for Llama 3.2 Vision is not the same as a missing `GenerationMixin` import path." + }, + { + "left": "issue:43673", + "right": "issue:44743", + "accept": false, + "reason": "Chunked prefill cache regression and Qwen3_5 recurrent-state reset are different cache/state bugs." + }, + { + "left": "issue:43381", + "right": "issue:43856", + "accept": false, + "reason": "Gradient checkpointing in eval mode and MoE training memory usage are separate concerns." + }, + { + "left": "issue:43316", + "right": "issue:44877", + "accept": false, + "reason": "Gemma3TextConfig API inconsistency and strict config rejection for granite_speech are not the same bug." + }, + { + "left": "issue:43540", + "right": "issue:44514", + "accept": false, + "reason": "Video-input handling in Qwen3OmniMoe and batched chat-template padding crashes in Qwen2_5_VLProcessor are different code paths." 
+ }, + { + "left": "issue:44162", + "right": "issue:44792", + "accept": false, + "reason": "ESM2 breakage and a janus generate-images test failure are unrelated." + }, + { + "left": "issue:42175", + "right": "issue:43316", + "accept": false, + "reason": "Backend-install packaging issue vs config API discrepancy; no shared underlying defect." + }, + { + "left": "issue:43404", + "right": "issue:43441", + "accept": false, + "reason": "Weight tying bug in Mistral3ForConditionalGeneration and FlashAttention failure in Ministral-3 are distinct regressions." + }, + { + "left": "issue:43381", + "right": "issue:43742", + "accept": false, + "reason": "Gradient-checkpointing eval-mode bug is unrelated to key errors while loading MobileLLM-125M." + }, + { + "left": "issue:41950", + "right": "issue:43475", + "accept": false, + "reason": "Video-classification processor selection and missing `fpn_position_embeddings` in Sam3Video are different bugs." + }, + { + "left": "issue:43316", + "right": "issue:43716", + "accept": false, + "reason": "API discrepancy in a config class and image-preprocessor dtype mismatch are not the same change." + }, + { + "left": "issue:25251", + "right": "issue:28282", + "accept": false, + "reason": "Pipeline output nesting change and missing PyTorch import are unrelated." + }, + { + "left": "issue:16998", + "right": "issue:22355", + "accept": false, + "reason": "Model max length question and missing `transformers.onnx` module are unrelated." + }, + { + "left": "issue:43388", + "right": "issue:43688", + "accept": false, + "reason": "Metric gathering truncation and MoE auxiliary-loss normalization are different training/eval bugs." + }, + { + "left": "issue:43295", + "right": "issue:43825", + "accept": false, + "reason": "Processor/tokenizer regression and pipeline translation-error messaging are unrelated." 
+ }, + { + "left": "issue:43065", + "right": "issue:44617", + "accept": false, + "reason": "Sam3PixelDecoder dummy-conv issue and Sam3Video OOM are not the same underlying problem." + }, + { + "left": "issue:44871", + "right": "issue:45584", + "accept": false, + "reason": "Gemma eos-token configuration mismatch and Whisper empty-transcription failure are different generation bugs." + }, + { + "left": "issue:43761", + "right": "issue:44479", + "accept": false, + "reason": "CLIPVisionModel hidden-state regression and Qwen video-input regression are unrelated model families and code paths." + }, + { + "left": "issue:45127", + "right": "issue:45200", + "accept": false, + "reason": "LoRA merge with extended vocabulary collapse and Gemma 4 token-type defaults are different model-specific issues." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 27, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:32090", + "issue:35141", + "issue:41720", + "issue:42617", + "issue:43065", + "issue:43295", + "issue:43452", + "issue:43550", + "issue:43575", + "issue:43606", + "issue:43646", + "issue:43701", + "issue:43742", + "issue:43761", + "issue:43827", + "issue:43906", + "issue:43931", + "issue:43994", + "issue:44060", + "issue:44077", + "issue:44190", + "issue:44242", + "issue:44464", + "issue:44479", + "issue:44496", + "issue:44521", + "issue:44534", + "issue:44610", + "issue:44617", + "issue:44734", + "issue:44792", + "issue:44871", + "issue:44913", + "issue:44933", + "issue:44936", + "issue:44964", + "issue:44977", + "issue:45127", + "issue:45245", + "issue:45290", + "issue:45357", + 
"issue:45362", + "issue:45381", + "issue:45406", + "issue:45563", + "issue:45584", + "issue:45600", + "issue:45698" + ], + "soft_pairs": [ + "issue:44077|issue:44534", + "issue:43550|issue:43701", + "issue:43295|issue:44871", + "issue:43606|issue:43701", + "issue:44734|issue:45406", + "issue:44936|issue:45698", + "issue:43646|issue:44913", + "issue:43452|issue:44496", + "issue:43931|issue:44060", + "issue:43575|issue:43742", + "issue:43761|issue:44242", + "issue:42617|issue:43575", + "issue:44617|issue:45584", + "issue:45563|issue:45584", + "issue:41720|issue:43575", + "issue:32090|issue:35141", + "issue:44521|issue:45381", + "issue:43906|issue:45290", + "issue:44933|issue:45362", + "issue:44479|issue:44734", + "issue:43065|issue:44610", + "issue:44464|issue:44871", + "issue:44792|issue:44977", + "issue:43827|issue:43994", + "issue:44464|issue:45563", + "issue:44534|issue:44913", + "issue:44190|issue:44964", + "issue:41720|issue:45245", + "issue:44792|issue:45600", + "issue:45127|issue:45357" + ], + "prepared_review_unit_hash": "6b6046e19c9d641f2e527e8d9bda6b188dc13c1305a269a483804b5093aaa304", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36573, + "estimated_input_tokens": 9144, + "estimated_eval_tokens": 18544 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is very heterogeneous: most items are superficially similar only because they are Transformers regressions, loading failures, or multimodal/model-specific bugs. 
I do not see any true duplicate pairs among the soft candidates; they should stay separate.", + "confidence": 0.96, + "canonical_issue_reason": "issue:43646 is the broadest and most representative framework-level regression in the set: custom model initialization breaking under Transformers 5.0.0. It best fits the recurring init/load regression theme, even though the cluster as a whole is not a duplicate group.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43646 is the strongest global representative because it describes a core library regression affecting model initialization, which is more central than the many narrow model- or feature-specific reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44077", + "right": "issue:44534", + "accept": false, + "reason": "Both are v5-related, but one is about patchtsmixer/post_init policy and the other about non-persistent buffer serialization; different bugs and fixes." + }, + { + "left": "issue:43550", + "right": "issue:43701", + "accept": false, + "reason": "Torch.compile + SDPA failure for Bamba is unrelated to resume_from_checkpoint key mismatch." + }, + { + "left": "issue:43295", + "right": "issue:44871", + "accept": false, + "reason": "One is a processor/tokenizer regression in multimodal custom code; the other is an eos_token_id config inconsistency for Gemma-3." + }, + { + "left": "issue:43606", + "right": "issue:43701", + "accept": false, + "reason": "CPU offload device mismatch in bark-small is unrelated to checkpoint key mismatch on resume." + }, + { + "left": "issue:44734", + "right": "issue:45406", + "accept": false, + "reason": "Serve KV-cache continuation indexing bug is not the same as Gemma4Processor missing _tokenizer." + }, + { + "left": "issue:44936", + "right": "issue:45698", + "accept": false, + "reason": "trainer.evaluate after train and wrong custom module loading after save_pretrained are different code paths and failures." 
+ }, + { + "left": "issue:43646", + "right": "issue:44913", + "accept": false, + "reason": "Custom model initialization breakage and GPTNeoXConfig rotary_pct reload defaulting are separate config/init issues." + }, + { + "left": "issue:43452", + "right": "issue:44496", + "accept": false, + "reason": "gguf_file path regression and unrecognized model_type/config loading error are both loading-related but not the same defect." + }, + { + "left": "issue:43931", + "right": "issue:44060", + "accept": false, + "reason": "Qwen3-VL weight shape mismatch and Qwen3-Next tied-weights warning are different model architecture problems." + }, + { + "left": "issue:43575", + "right": "issue:43742", + "accept": false, + "reason": "Tensor-parallel OOM and MobileLLM key error on load are unrelated failure modes." + }, + { + "left": "issue:43761", + "right": "issue:44242", + "accept": false, + "reason": "CLIPVision hidden_states regression and MoE load-balancing loss omission are different model behaviors." + }, + { + "left": "issue:42617", + "right": "issue:43575", + "accept": false, + "reason": "3d_parallel.py runtime failure is not the same issue as Qwen2 large-model TP OOM." + }, + { + "left": "issue:44617", + "right": "issue:45584", + "accept": false, + "reason": "Sam3Video CUDA OOM and Whisper empty-transcription failure are unrelated." + }, + { + "left": "issue:45563", + "right": "issue:45584", + "accept": false, + "reason": "A stale warning for num_return_sequences is not the same bug as empty-transcription failure after align_special_tokens." + }, + { + "left": "issue:41720", + "right": "issue:43575", + "accept": false, + "reason": "Qwen3 auto device mapping cuda assert and Qwen2 TP OOM are distinct distributed-loading issues." + }, + { + "left": "issue:32090", + "right": "issue:35141", + "accept": false, + "reason": "Trainer gpu_broadcast_one NoneType error and token-embedding resize reinitialization are different training/model-init bugs." 
+ }, + { + "left": "issue:44521", + "right": "issue:45381", + "accept": false, + "reason": "All-zero assistant masks in apply_chat_template and wrong vision_position_ids are both multimodal, but not the same concrete bug." + }, + { + "left": "issue:43906", + "right": "issue:45290", + "accept": false, + "reason": "An isolated reproduction report is not the same as the tool-call/empty-content crash in apply_chat_template(tokenize=True)." + }, + { + "left": "issue:44933", + "right": "issue:45362", + "accept": false, + "reason": "Missing import from image_utils and Qwen3.5 chat crash are unrelated." + }, + { + "left": "issue:44479", + "right": "issue:44734", + "accept": false, + "reason": "Video-input regression in Qwen2.5-VL/Qwen3-VL models and serve KV-cache tensor indexing are different problems." + }, + { + "left": "issue:43065", + "right": "issue:44610", + "accept": false, + "reason": "Dummy Conv2d in Sam3PixelDecoder and wrong processor image size are both SAM3-related but different components and fixes." + }, + { + "left": "issue:44464", + "right": "issue:44871", + "accept": false, + "reason": "Chunked generation inconsistency under compile and Gemma-3 eos_token_id mismatch are unrelated." + }, + { + "left": "issue:44792", + "right": "issue:44977", + "accept": false, + "reason": "A failed Janus image-generation test and Qwen3.5 flash-attention generation bug are different models and code paths." + }, + { + "left": "issue:43827", + "right": "issue:43994", + "accept": false, + "reason": "Docs still referencing pipeline() and SigLIP2 nonsensical outputs are not the same issue." + }, + { + "left": "issue:44464", + "right": "issue:45563", + "accept": false, + "reason": "Compiled-forward generation inconsistency is unrelated to a stale warning about num_return_sequences." + }, + { + "left": "issue:44534", + "right": "issue:44913", + "accept": false, + "reason": "Non-persistent buffer junk and rotary_pct reverting on reload are separate serialization/config bugs." 
+ }, + { + "left": "issue:44190", + "right": "issue:44964", + "accept": false, + "reason": "Local dataset loading in a no-trainer script and Phi-4 multimodal loading failure are different loading scenarios." + }, + { + "left": "issue:41720", + "right": "issue:45245", + "accept": false, + "reason": "CUDA assert on Qwen3 device mapping and category-count overflow are unrelated runtime errors." + }, + { + "left": "issue:44792", + "right": "issue:45600", + "accept": false, + "reason": "Janus image-generation test failure and broken auto_mappings references for Sam3LiteText are not the same defect." + }, + { + "left": "issue:45127", + "right": "issue:45357", + "accept": false, + "reason": "LoRA merge collapse with extended vocab and incorrect visual encoder keys on save_pretrained are different save/merge regressions." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 28, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:36246", + "issue:38175", + "issue:38617", + "issue:39692", + "issue:41628", + "issue:41950", + "issue:42175", + "issue:43065", + "issue:43116", + "issue:43295", + "issue:43335", + "issue:43388", + "issue:43404", + "issue:43479", + "issue:43540", + "issue:43550", + "issue:43644", + "issue:43650", + "issue:43761", + "issue:43976", + "issue:44016", + "issue:44077", + "issue:44079", + "issue:44112", + "issue:44186", + "issue:44190", + "issue:44220", + "issue:44246", + "issue:44279", + "issue:44466", + "issue:44561", + "issue:44683", + "issue:44734", + "issue:44743", + "issue:44855", + "issue:44913", + "issue:44938", + "issue:45003", + "issue:45216", + "issue:45245", + 
"issue:45276", + "issue:45375", + "issue:45446", + "issue:45542", + "issue:45584", + "issue:45588", + "issue:45600", + "issue:45632" + ], + "soft_pairs": [ + "issue:43388|issue:44079", + "issue:44855|issue:45588", + "issue:44938|issue:45542", + "issue:43761|issue:44743", + "issue:43976|issue:44855", + "issue:43335|issue:44855", + "issue:44077|issue:44079", + "issue:43550|issue:44190", + "issue:43644|issue:44913", + "issue:43650|issue:44220", + "issue:43540|issue:44734", + "issue:45276|issue:45584", + "issue:43116|issue:44190", + "issue:44112|issue:45600", + "issue:41950|issue:44016", + "issue:39692|issue:44190", + "issue:43295|issue:45276", + "issue:43479|issue:45375", + "issue:36246|issue:41628", + "issue:43404|issue:45216", + "issue:45245|issue:45584", + "issue:44734|issue:45632", + "issue:36246|issue:38617", + "issue:38175|issue:43065", + "issue:44016|issue:44220", + "issue:44683|issue:45446", + "issue:44561|issue:45632", + "issue:44186|issue:44734", + "issue:42175|issue:44279", + "issue:44466|issue:44913", + "issue:44246|issue:45003" + ], + "prepared_review_unit_hash": "07748904f4c63450aca9175962fa6875e6ed158b409d9e004da1f36ef108fc1b", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 31, + "serialized_chars": 37122, + "estimated_input_tokens": 9281, + "estimated_eval_tokens": 18818 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is mostly a loose similarity cluster rather than true duplicates: it mixes unrelated import/export regressions, model-specific bugs, pipeline issues, and docs/notebook problems. 
I rejected all soft edges because none look like the same concrete underlying bug or change.", + "confidence": 0.93, + "canonical_issue_reason": "issue:41628 is the broadest top-level API regression in the set and the closest thing to a hub for the import/export failures, though it is not a true duplicate center for the rest.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:41628 is the most representative issue for the cluster because it describes a generic import failure at the package boundary, making it the closest match to the few other import-related items.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43388", + "right": "issue:44079", + "accept": false, + "reason": "Different failures: batch label truncation in metrics vs ModelOutput key assignment logic." + }, + { + "left": "issue:44855", + "right": "issue:45588", + "accept": false, + "reason": "Unrelated code paths: Python 3.13 import/parsing issue vs flash_attention crash on sink-less models." + }, + { + "left": "issue:44938", + "right": "issue:45542", + "accept": false, + "reason": "Different runtime problems: Python 3.14 import/load failure vs missing TensorFlow backend wiring." + }, + { + "left": "issue:43761", + "right": "issue:44743", + "accept": false, + "reason": "Different model bugs: CLIP hidden_states regression vs Qwen recurrent state reset with cache." + }, + { + "left": "issue:43976", + "right": "issue:44855", + "accept": false, + "reason": "Both are Python-version related, but one is a broad 5.1.0 compatibility issue and the other is a specific DebertaV2 parsing failure." + }, + { + "left": "issue:43335", + "right": "issue:44855", + "accept": false, + "reason": "SwitchTransformers config construction bug is unrelated to the DebertaV2 import/parse error." + }, + { + "left": "issue:44077", + "right": "issue:44079", + "accept": false, + "reason": "Different semantics: optional post_init handling vs ModelOutput key assignment for None values." 
+ }, + { + "left": "issue:43550", + "right": "issue:44190", + "accept": false, + "reason": "Different subsystems: SDPA/torch.compile failure in Bamba vs dataset loading in an image classification script." + }, + { + "left": "issue:43644", + "right": "issue:44913", + "accept": false, + "reason": "Non-persistent buffer initialization issue is unrelated to GPTNeoX rotary_pct reload behavior." + }, + { + "left": "issue:43650", + "right": "issue:44220", + "accept": false, + "reason": "One is a vague data issue, the other is a specific feature-extraction function bug." + }, + { + "left": "issue:43540", + "right": "issue:44734", + "accept": false, + "reason": "Different execution paths: Qwen3OmniMoe video input validation vs serve KV-cache continuation indexing." + }, + { + "left": "issue:45276", + "right": "issue:45584", + "accept": false, + "reason": "Embedding resize/tied-weight behavior is unrelated to Whisper empty-transcription generation failure." + }, + { + "left": "issue:43116", + "right": "issue:44190", + "accept": false, + "reason": "Different examples and symptoms: multi-label empty outputs vs local dataset loading failure." + }, + { + "left": "issue:44112", + "right": "issue:45600", + "accept": false, + "reason": "CI test flakiness in GraniteSpeech is unrelated to stale auto_mappings references for Sam3LiteText." + }, + { + "left": "issue:41950", + "right": "issue:44016", + "accept": false, + "reason": "Pipeline processor selection bug vs notebook syntax error; not the same bug class." + }, + { + "left": "issue:39692", + "right": "issue:44190", + "accept": false, + "reason": "SigLIP2 doc example/model mismatch and quantization errors are unrelated to local dataset loading." + }, + { + "left": "issue:43295", + "right": "issue:45276", + "accept": false, + "reason": "Processor/tokenizer regression and resize_token_embeddings weight propagation are different code paths." 
+ }, + { + "left": "issue:43479", + "right": "issue:45375", + "accept": false, + "reason": "Both are config-related, but one is default multimodal config initialization and the other is a missing field dropped by strict serialization." + }, + { + "left": "issue:36246", + "right": "issue:41628", + "accept": false, + "reason": "Both are import errors, but they involve different missing symbols and modules, not the same export bug." + }, + { + "left": "issue:43404", + "right": "issue:45216", + "accept": false, + "reason": "Tied lm_head weight bug in Mistral3 is unrelated to Qwen3.5 save_pretrained checkpoint regression." + }, + { + "left": "issue:45245", + "right": "issue:45584", + "accept": false, + "reason": "Category-count runtime limit has no relation to Whisper generation on empty transcription." + }, + { + "left": "issue:44734", + "right": "issue:45632", + "accept": false, + "reason": "Serve KV-cache continuation bug and trust_remote_code cache path collision are unrelated." + }, + { + "left": "issue:36246", + "right": "issue:38617", + "accept": false, + "reason": "Different import failures: missing Qwen2_5_VLImageProcessor export vs missing layer_type_validation symbol." + }, + { + "left": "issue:38175", + "right": "issue:43065", + "accept": false, + "reason": "siglip2 zero probabilities and Sam3PixelDecoder dummy Conv2d are unrelated model-specific issues." + }, + { + "left": "issue:44016", + "right": "issue:44220", + "accept": false, + "reason": "Notebook syntax error is not the same as the _torch_extract_fbank_features() bug." + }, + { + "left": "issue:44683", + "right": "issue:45446", + "accept": false, + "reason": "Compiled flex_attention failure on torch>=2.9 is not the same as a bad PyTorch version gate for AuxRequest import." + }, + { + "left": "issue:44561", + "right": "issue:45632", + "accept": false, + "reason": "Removal of is_torch_fx_available breaking trust_remote_code is unrelated to cache-path collisions." 
+ }, + { + "left": "issue:44186", + "right": "issue:44734", + "accept": false, + "reason": "Tokenizer padding/truncation crash in LayoutLMv2 is unrelated to serve KV-cache indexing." + }, + { + "left": "issue:42175", + "right": "issue:44279", + "accept": false, + "reason": "Package dependency/install issue vs a generic dependency issue report; not the same concrete bug." + }, + { + "left": "issue:44466", + "right": "issue:44913", + "accept": false, + "reason": "Serialization of tied lm_head weights is unrelated to GPTNeoX rotary_pct resetting on reload." + }, + { + "left": "issue:44246", + "right": "issue:45003", + "accept": false, + "reason": "Import-time slowness and unsafe sys.modules access are related thematically, but not the same bug or fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 29, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:41950", + "issue:42907", + "issue:43295", + "issue:43299", + "issue:43381", + "issue:43441", + "issue:43479", + "issue:43525", + "issue:43526", + "issue:43531", + "issue:43577", + "issue:43643", + "issue:43673", + "issue:43824", + "issue:43854", + "issue:43901", + "issue:43976", + "issue:44016", + "issue:44062", + "issue:44075", + "issue:44112", + "issue:44242", + "issue:44279", + "issue:44336", + "issue:44351", + "issue:44368", + "issue:44496", + "issue:44521", + "issue:44683", + "issue:44877", + "issue:44933", + "issue:44964", + "issue:44987", + "issue:45020", + "issue:45042", + "issue:45200", + "issue:45216", + "issue:45278", + "issue:45325", + "issue:45335", + "issue:45357", + "issue:45405", + "issue:45406", + "issue:45542", + 
"issue:45584", + "issue:45593", + "issue:45600", + "issue:45632" + ], + "soft_pairs": [ + "issue:45406|issue:45632", + "issue:43643|issue:44496", + "issue:42907|issue:43441", + "issue:44016|issue:44062", + "issue:44964|issue:45600", + "issue:43295|issue:45335", + "issue:43299|issue:45216", + "issue:43976|issue:44987", + "issue:44112|issue:44964", + "issue:43673|issue:44336", + "issue:44496|issue:45042", + "issue:43824|issue:44933", + "issue:43854|issue:44964", + "issue:43531|issue:45600", + "issue:43976|issue:44279", + "issue:43526|issue:43901", + "issue:45335|issue:45584", + "issue:44075|issue:44368", + "issue:43299|issue:45357", + "issue:43381|issue:45593", + "issue:44521|issue:45325", + "issue:43577|issue:45216", + "issue:44351|issue:45278", + "issue:43901|issue:44496", + "issue:43976|issue:45405", + "issue:41950|issue:45020", + "issue:44242|issue:45200", + "issue:43479|issue:43525", + "issue:44496|issue:44877", + "issue:44683|issue:45542" + ], + "prepared_review_unit_hash": "85aafd3376af131895d929d0d2ff1b6ad5582026588cdfba8e31a0b1d77a3e0b", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36644, + "estimated_input_tokens": 9161, + "estimated_eval_tokens": 18578 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is mostly a loose collection of unrelated Transformers regression and model-loading issues, not a tight duplicate cluster. 
If a single umbrella issue must be chosen, the broad open remote-code regression report is the closest anchor; the soft pairs are all too different to merge.", + "confidence": 0.79, + "canonical_issue_reason": "issue:45020 is the broadest open umbrella here, covering recent-version regressions in remote-code model loading, but it is still only a thematic anchor rather than a true duplicate of the others.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45020 is the best global issue anchor because it is open, broad in scope, and plausibly encompasses multiple version-regression reports better than any model-specific bug report.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45406", + "right": "issue:45632", + "accept": false, + "reason": "Different failures: Gemma4Processor missing _tokenizer vs trust_remote_code cache-path collision." + }, + { + "left": "issue:43643", + "right": "issue:44496", + "accept": false, + "reason": "Both involve config/remote-code loading, but one is missing fields in AutoConfig output and the other is invalid model config recognition; not the same bug." + }, + { + "left": "issue:42907", + "right": "issue:43441", + "accept": false, + "reason": "Different Ministral issues: save_pretrained/dequantized save failure vs FlashAttention load failure." + }, + { + "left": "issue:44016", + "right": "issue:44062", + "accept": false, + "reason": "Notebook syntax error and AddedToken keyword conflict are unrelated." + }, + { + "left": "issue:44964", + "right": "issue:45600", + "accept": false, + "reason": "Phi-4 multimodal loading failure and stale auto_mappings CI references are different code paths." + }, + { + "left": "issue:43295", + "right": "issue:45335", + "accept": false, + "reason": "Separate regressions: processor.tokenizer/images handling vs t5gemma embedding resize not propagating." 
+ }, + { + "left": "issue:43299", + "right": "issue:45216", + "accept": false, + "reason": "Both are Qwen regressions, but one is model loading and the other is save_pretrained output corruption; not one underlying bug." + }, + { + "left": "issue:43976", + "right": "issue:44987", + "accept": false, + "reason": "A Python-version compatibility complaint and a model-specific load failure are not the same issue." + }, + { + "left": "issue:44112", + "right": "issue:44964", + "accept": false, + "reason": "CI stale-device override test failure is unrelated to Phi-4 multimodal loading." + }, + { + "left": "issue:43673", + "right": "issue:44336", + "accept": false, + "reason": "Generation cache/chunked prefill bug and ANSI-code logging bug are unrelated." + }, + { + "left": "issue:44496", + "right": "issue:45042", + "accept": false, + "reason": "Unrecognized model/config.json problem is unrelated to PIL backend torchvision dependency handling." + }, + { + "left": "issue:43824", + "right": "issue:44933", + "accept": false, + "reason": "Both are import errors, but for different symbols and subsystems; not the same underlying breakage." + }, + { + "left": "issue:43854", + "right": "issue:44964", + "accept": false, + "reason": "Different models and failure modes: GLM-4.7-Flash unit-test loading vs Phi-4 multimodal loading." + }, + { + "left": "issue:43531", + "right": "issue:45600", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior and removed auto_mappings references are unrelated." + }, + { + "left": "issue:43976", + "right": "issue:44279", + "accept": false, + "reason": "Generic dependency/version issue vs a vague transformers dependency problem; no shared concrete bug." + }, + { + "left": "issue:43526", + "right": "issue:43901", + "accept": false, + "reason": "BeitImageProcessorFast label reduction bug and TextClassificationPipeline docs mismatch are not the same change." 
+ }, + { + "left": "issue:45335", + "right": "issue:45584", + "accept": false, + "reason": "t5gemma embedding resize bug and Whisper empty-transcription generation bug are unrelated." + }, + { + "left": "issue:44075", + "right": "issue:44368", + "accept": false, + "reason": "SGD optimizer args not used and tie_word_embeddings warning are different behavior issues." + }, + { + "left": "issue:43299", + "right": "issue:45357", + "accept": false, + "reason": "Both affect Qwen save/load behavior, but one is a loading regression and the other is incorrect visual-encoder keys on save; not mergeable as one fix." + }, + { + "left": "issue:43381", + "right": "issue:45593", + "accept": false, + "reason": "Gradient checkpointing eval-mode failure and D-FINE auxiliary-loss omission are unrelated training bugs." + }, + { + "left": "issue:44521", + "right": "issue:45325", + "accept": false, + "reason": "Multimodal assistant mask generation and still-image temporal position-id scaling are separate multimodal issues." + }, + { + "left": "issue:43577", + "right": "issue:45216", + "accept": false, + "reason": "Blip2 dtype propagation bug and Qwen3.5 save_pretrained regression are unrelated." + }, + { + "left": "issue:44351", + "right": "issue:45278", + "accept": false, + "reason": "HybridCache import error may be one symptom of import breakage, but 45278 is a broad version-upgrade report and not the same concrete bug." + }, + { + "left": "issue:43901", + "right": "issue:44496", + "accept": false, + "reason": "Documentation text mismatch is unrelated to model config recognition failure." + }, + { + "left": "issue:43976", + "right": "issue:45405", + "accept": false, + "reason": "Python compatibility and MIN_PEFT_VERSION release timing are unrelated." + }, + { + "left": "issue:41950", + "right": "issue:45020", + "accept": false, + "reason": "Video-classification processor lookup bug is unrelated to remote_code model-loading regressions." 
+ }, + { + "left": "issue:44242", + "right": "issue:45200", + "accept": false, + "reason": "Router-logit load-balancing loss and Gemma 4 mm_token_type_ids defaulting are different model-train path bugs." + }, + { + "left": "issue:43479", + "right": "issue:43525", + "accept": false, + "reason": "Phi4Multimodal default-config initialization and Llama4Config missing pad_token_id are separate config issues." + }, + { + "left": "issue:44496", + "right": "issue:44877", + "accept": false, + "reason": "Unrecognized model due to missing model_type and strict granite_speech config loading are not the same bug." + }, + { + "left": "issue:44683", + "right": "issue:45542", + "accept": false, + "reason": "Compiled flex_attention on torch>=2.9 and tensorboard-without-TensorFlow backend error are unrelated." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 30, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:38175", + "issue:39692", + "issue:41950", + "issue:42994", + "issue:43065", + "issue:43122", + "issue:43262", + "issue:43295", + "issue:43299", + "issue:43317", + "issue:43441", + "issue:43450", + "issue:43650", + "issue:43653", + "issue:43673", + "issue:43723", + "issue:43761", + "issue:43784", + "issue:43854", + "issue:43901", + "issue:44016", + "issue:44162", + "issue:44261", + "issue:44263", + "issue:44279", + "issue:44291", + "issue:44297", + "issue:44462", + "issue:44466", + "issue:44521", + "issue:44617", + "issue:44623", + "issue:44671", + "issue:44683", + "issue:44749", + "issue:44779", + "issue:44898", + "issue:44987", + "issue:45081", + "issue:45276", + "issue:45290", + 
"issue:45397", + "issue:45405", + "issue:45542", + "issue:45593", + "issue:45600", + "issue:45712" + ], + "soft_pairs": [ + "issue:44462|issue:45081", + "issue:44279|issue:45405", + "issue:43299|issue:43854", + "issue:41950|issue:43650", + "issue:44297|issue:44462", + "issue:39692|issue:43901", + "issue:43295|issue:44779", + "issue:38175|issue:44617", + "issue:44462|issue:44779", + "issue:45600|issue:45712", + "issue:44016|issue:44291", + "issue:44261|issue:44263", + "issue:43450|issue:44279", + "issue:42994|issue:43317", + "issue:43065|issue:44898", + "issue:43723|issue:44162", + "issue:43441|issue:44683", + "issue:44466|issue:45276", + "issue:43650|issue:45290", + "issue:44623|issue:45397", + "issue:44623|issue:44749", + "issue:43262|issue:44521", + "issue:43784|issue:45542", + "issue:43673|issue:43761", + "issue:44671|issue:44779", + "issue:43653|issue:45593", + "issue:43122|issue:43650", + "issue:44987|issue:45542" + ], + "prepared_review_unit_hash": "7a12f72b7bb534663446e9d7591dd8f6d1b3b3fff035daffcb2fb4be28cada09", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 28, + "serialized_chars": 35529, + "estimated_input_tokens": 8883, + "estimated_eval_tokens": 18022 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is heterogeneous: most items are separate Transformers regressions, docs issues, or model-specific bugs. The soft pairs share broad subsystems at best, but not the same concrete underlying issue or mergeable fix.", + "confidence": 0.94, + "canonical_issue_reason": "No single canonical issue stands out because the items span unrelated bugs. 
If a loose anchor is needed, issue 44462 is the broadest tokenizer-loading report, but it does not actually subsume the rest of the cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44462 is the closest thing to a representative issue because it is a generic tokenizer-loading regression, but it is still too narrow to serve as a true duplicate hub.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44462", + "right": "issue:45081", + "accept": false, + "reason": "Both are tokenizer-related, but one is AutoTokenizer ignoring repository tokenizer.json and the other is a Mistral regex patch crash; different code paths and fixes." + }, + { + "left": "issue:44279", + "right": "issue:45405", + "accept": false, + "reason": "These are unrelated version/dependency problems: a generic transformers dependency issue versus an unreleased PEFT version bump." + }, + { + "left": "issue:43299", + "right": "issue:43854", + "accept": false, + "reason": "Both concern model loading failures, but for different models and different failure modes; not the same underlying bug." + }, + { + "left": "issue:41950", + "right": "issue:43650", + "accept": false, + "reason": "Completely unrelated: a video-classification processor lookup bug versus a trivial placeholder issue." + }, + { + "left": "issue:44297", + "right": "issue:44462", + "accept": false, + "reason": "Both involve tokenizer serialization/loading, but one is wrong tokenizer_class saved on disk and the other is AutoTokenizer ignoring tokenizer.json; distinct fixes." + }, + { + "left": "issue:39692", + "right": "issue:43901", + "accept": false, + "reason": "Both are docs-related, but one is a SigLIP2 example with model/processor mismatch and quantization errors, while the other is a pipeline docs mismatch." 
+ }, + { + "left": "issue:43295", + "right": "issue:44779", + "accept": false, + "reason": "Both are v5 tokenizer/processor regressions, but one is about processor.tokenizer access and multimodal tokenization, while the other is Deepseek tokenization output changes." + }, + { + "left": "issue:38175", + "right": "issue:44617", + "accept": false, + "reason": "Unrelated model-specific issues: unexpected zero probabilities in SigLIP2 versus Sam3Video CUDA OOM." + }, + { + "left": "issue:44462", + "right": "issue:44779", + "accept": false, + "reason": "Both mention tokenizers, but the reported failures are different models and different behaviors; not the same bug." + }, + { + "left": "issue:45600", + "right": "issue:45712", + "accept": false, + "reason": "Both are CI/repo hygiene problems, but they affect different maintenance checks and files." + }, + { + "left": "issue:44016", + "right": "issue:44291", + "accept": false, + "reason": "A notebook syntax error and an init_empty_weights TypeError are unrelated issues." + }, + { + "left": "issue:44261", + "right": "issue:44263", + "accept": false, + "reason": "Different model internals: MLA q_a_layernorm precision/config handling versus torch.split return handling in GlmMoeDsaIndexer." + }, + { + "left": "issue:43450", + "right": "issue:44279", + "accept": false, + "reason": "Video processor shape handling and a dependency issue are unrelated." + }, + { + "left": "issue:42994", + "right": "issue:43317", + "accept": false, + "reason": "Both touch quantization, but one is about saving a quantized model and the other is about loading a dequantized model with device_map offload." + }, + { + "left": "issue:43065", + "right": "issue:44898", + "accept": false, + "reason": "Different vision-model bugs with different symptoms and code paths; not mergeable as one fix." 
+ }, + { + "left": "issue:43723", + "right": "issue:44162", + "accept": false, + "reason": "A generic AutoTokenizer loading regression and an ESM2 breakage are not the same underlying problem." + }, + { + "left": "issue:43441", + "right": "issue:44683", + "accept": false, + "reason": "Both mention attention backends, but one is model-specific FlashAttention breakage and the other is compiled flex_attention failing on newer torch." + }, + { + "left": "issue:44466", + "right": "issue:45276", + "accept": false, + "reason": "Related to embeddings/weight tying in Gemma4, but one is device-dependent serialization and the other is resize_token_embeddings not updating per-layer/output embeddings." + }, + { + "left": "issue:43650", + "right": "issue:45290", + "accept": false, + "reason": "Completely different: a placeholder issue versus a crash on assistant messages with tool calls and empty content." + }, + { + "left": "issue:44623", + "right": "issue:45397", + "accept": false, + "reason": "Processor save_pretrained file omission and gemma-4 zero3 loading failure are different save/load problems." + }, + { + "left": "issue:44623", + "right": "issue:44749", + "accept": false, + "reason": "Missing processor files and a performance regression in filtering are unrelated." + }, + { + "left": "issue:43262", + "right": "issue:44521", + "accept": false, + "reason": "Both involve apply_chat_template, but one is an audio sampling-rate default bug and the other is multimodal assistant mask generation." + }, + { + "left": "issue:43784", + "right": "issue:45542", + "accept": false, + "reason": "Both are import/backend errors, but they come from different optional dependencies and different missing symbols." + }, + { + "left": "issue:43673", + "right": "issue:43761", + "accept": false, + "reason": "A generation cache regression and a CLIPVision hidden_states regression are unrelated." 
+ }, + { + "left": "issue:44671", + "right": "issue:44779", + "accept": false, + "reason": "Different v5 regressions affecting different models and outputs; not the same code-path problem." + }, + { + "left": "issue:43653", + "right": "issue:45593", + "accept": false, + "reason": "BigBirdTokenizer special-token registration and D-FINE auxiliary-loss behavior are unrelated." + }, + { + "left": "issue:43122", + "right": "issue:43650", + "accept": false, + "reason": "Tokenizer behavior change and a placeholder issue have no overlap." + }, + { + "left": "issue:44987", + "right": "issue:45542", + "accept": false, + "reason": "A model-loading failure for physical-intelligence/fast and a TensorFlow backend import issue are unrelated." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 31, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:36246", + "issue:41628", + "issue:41950", + "issue:42175", + "issue:42491", + "issue:42757", + "issue:43295", + "issue:43299", + "issue:43352", + "issue:43475", + "issue:43525", + "issue:43526", + "issue:43531", + "issue:43673", + "issue:43824", + "issue:43881", + "issue:43901", + "issue:43976", + "issue:43994", + "issue:44016", + "issue:44162", + "issue:44188", + "issue:44220", + "issue:44242", + "issue:44246", + "issue:44295", + "issue:44297", + "issue:44336", + "issue:44351", + "issue:44361", + "issue:44373", + "issue:44462", + "issue:44485", + "issue:44496", + "issue:44561", + "issue:44589", + "issue:44617", + "issue:44661", + "issue:45003", + "issue:45200", + "issue:45250", + "issue:45356", + "issue:45362", + "issue:45397", + "issue:45542", + 
"issue:45561", + "issue:45563", + "issue:45593" + ], + "soft_pairs": [ + "issue:42175|issue:43976", + "issue:43901|issue:43994", + "issue:44361|issue:45593", + "issue:44297|issue:45397", + "issue:43824|issue:43976", + "issue:43475|issue:44016", + "issue:43525|issue:43881", + "issue:44589|issue:45362", + "issue:45561|issue:45563", + "issue:44188|issue:44336", + "issue:43352|issue:45250", + "issue:41628|issue:43976", + "issue:43295|issue:44617", + "issue:42491|issue:43299", + "issue:43901|issue:44246", + "issue:44336|issue:44373", + "issue:44242|issue:45593", + "issue:41950|issue:44220", + "issue:44295|issue:45397", + "issue:44661|issue:45542", + "issue:44462|issue:45356", + "issue:44246|issue:44496", + "issue:43531|issue:45362", + "issue:36246|issue:42757", + "issue:45003|issue:45561", + "issue:36246|issue:44351", + "issue:44336|issue:44485", + "issue:43526|issue:45003", + "issue:43673|issue:44561", + "issue:44162|issue:45200" + ], + "prepared_review_unit_hash": "2bc9024962422aa0f36b3cdcab6dd5a6ad0fb1426f298ebdfcdcd7a7f6491452", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36338, + "estimated_input_tokens": 9085, + "estimated_eval_tokens": 18426 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is mostly heterogeneous. 
Several items share broad themes like import errors, model-loading regressions, or tokenizer behavior, but the soft pairs do not describe the same concrete bug/change closely enough to merge under strict duplicate triage.", + "confidence": 0.82, + "canonical_issue_reason": "issue:43824 is the clearest, most referenced, user-facing concrete regression here: a specific Qwen2.5-VL import failure with strong duplicate potential in the same family of export/import problems.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43824 is the best representative issue because it is specific, actionable, and tied to a concrete import/export failure rather than a broad symptom or unrelated regression.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42175", + "right": "issue:43976", + "accept": false, + "reason": "TensorFlow-backend packaging bug vs Python-version compatibility regression; unrelated failures." + }, + { + "left": "issue:43901", + "right": "issue:43994", + "accept": false, + "reason": "Docs/behavior mismatch for a pipeline flag vs incorrect SigLIP2 outputs; different problem classes." + }, + { + "left": "issue:44361", + "right": "issue:45593", + "accept": false, + "reason": "Tokenizer AttributeError in tasks vs missing auxiliary losses in D-FINE; different models and code paths." + }, + { + "left": "issue:44297", + "right": "issue:45397", + "accept": false, + "reason": "Tokenizer save metadata bug vs Gemma-4 from_pretrained/ZeRO loading issue; not the same bug." + }, + { + "left": "issue:43824", + "right": "issue:43976", + "accept": false, + "reason": "Qwen2.5-VL import/export failure vs Python version support regression; no shared code-path." + }, + { + "left": "issue:43475", + "right": "issue:44016", + "accept": false, + "reason": "SAM3 video model attribute error vs notebook syntax error; completely unrelated." 
+ }, + { + "left": "issue:43525", + "right": "issue:43881", + "accept": false, + "reason": "Missing pad_token_id on Llama4Config vs GLM-4V loading failure; different model families and failure modes." + }, + { + "left": "issue:44589", + "right": "issue:45362", + "accept": false, + "reason": "Float8 storage type error vs Qwen3.5 chat crash; unrelated runtime issues." + }, + { + "left": "issue:45561", + "right": "issue:45563", + "accept": false, + "reason": "pytest-xdist file race vs stale generate() warning; unrelated." + }, + { + "left": "issue:44188", + "right": "issue:44336", + "accept": false, + "reason": "torch.compile attention-kernel divergence vs terminal ANSI output leakage; unrelated subsystems." + }, + { + "left": "issue:43352", + "right": "issue:45250", + "accept": false, + "reason": "Specific unsupported Flash Attention 2.0 error for Nemotron vs a generic FA2 topic; not the same concrete bug." + }, + { + "left": "issue:41628", + "right": "issue:43976", + "accept": false, + "reason": "AutoImageProcessor import failure vs Python3.9/3.10 compatibility problem; different causes." + }, + { + "left": "issue:43295", + "right": "issue:44617", + "accept": false, + "reason": "Processor/tokenizer regression in v4.57.5 vs SAM3 video CUDA OOM; unrelated." + }, + { + "left": "issue:42491", + "right": "issue:43299", + "accept": false, + "reason": "Qwen3 MoE LoRA compatibility issue vs Qwen3VL MoE loading breakage; same broad family but not the same concrete bug." + }, + { + "left": "issue:43901", + "right": "issue:44246", + "accept": false, + "reason": "Pipeline docs mismatch vs import performance regression; no common underlying defect." + }, + { + "left": "issue:44336", + "right": "issue:44373", + "accept": false, + "reason": "ANSI code emission in loading_report vs wrong position_ids docstring; unrelated." 
+ }, + { + "left": "issue:44242", + "right": "issue:45593", + "accept": false, + "reason": "MoE load-balancing loss bookkeeping vs D-FINE denoising auxiliary-loss behavior; different models and paths." + }, + { + "left": "issue:41950", + "right": "issue:44220", + "accept": false, + "reason": "Video-classification pipeline selecting image processors vs fbank feature extraction issue; unrelated." + }, + { + "left": "issue:44295", + "right": "issue:45397", + "accept": false, + "reason": "position_ids buffer access bug vs Gemma-4 ZeRO loading bug; not the same issue." + }, + { + "left": "issue:44661", + "right": "issue:45542", + "accept": false, + "reason": "add-new-model-like tokenizer mapping bug vs tf backend installation/availability issue; unrelated." + }, + { + "left": "issue:44462", + "right": "issue:45356", + "accept": false, + "reason": "AutoTokenizer ignoring tokenizer.json vs Kimi-K2.5 codec/warning regression; both tokenizer-related but different concrete bugs." + }, + { + "left": "issue:44246", + "right": "issue:44496", + "accept": false, + "reason": "Import latency sometimes vs unrecognized model/config loading error; unrelated." + }, + { + "left": "issue:43531", + "right": "issue:45362", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior vs Qwen3.5 chat crash; different failure modes." + }, + { + "left": "issue:36246", + "right": "issue:42757", + "accept": false, + "reason": "Missing Qwen2_5_VLImageProcessor export vs missing is_offline_mode from huggingface_hub; both import errors but unrelated symbols." + }, + { + "left": "issue:45003", + "right": "issue:45561", + "accept": false, + "reason": "Unsafe sys.modules access in modeling_utils vs xdist test-file race; unrelated." + }, + { + "left": "issue:36246", + "right": "issue:44351", + "accept": false, + "reason": "Different missing exports/symbols in transformers; same pattern of import error but not the same bug." 
+ }, + { + "left": "issue:44336", + "right": "issue:44485", + "accept": false, + "reason": "Terminal ANSI formatting bug vs GLM-5 RoPE implementation discussion; unrelated." + }, + { + "left": "issue:43526", + "right": "issue:45003", + "accept": false, + "reason": "BeitImageProcessorFast label reduction bug vs sys.modules access bug; different code paths." + }, + { + "left": "issue:43673", + "right": "issue:44561", + "accept": false, + "reason": "Generation cache issue in chunked_prefill vs removal of is_torch_fx_available breaking trust_remote_code; unrelated regressions." + }, + { + "left": "issue:44162", + "right": "issue:45200", + "accept": false, + "reason": "ESM2 breakage vs Gemma 4 mm_token_type_ids defaulting; different model-specific defects." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 32, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:36246", + "issue:41628", + "issue:41950", + "issue:42886", + "issue:43065", + "issue:43066", + "issue:43232", + "issue:43257", + "issue:43262", + "issue:43408", + "issue:43441", + "issue:43475", + "issue:43540", + "issue:43596", + "issue:43653", + "issue:43901", + "issue:44060", + "issue:44077", + "issue:44162", + "issue:44242", + "issue:44246", + "issue:44295", + "issue:44336", + "issue:44368", + "issue:44466", + "issue:44496", + "issue:44749", + "issue:44779", + "issue:44857", + "issue:44871", + "issue:44877", + "issue:44977", + "issue:44987", + "issue:44995", + "issue:45003", + "issue:45137", + "issue:45200", + "issue:45229", + "issue:45292", + "issue:45335", + "issue:45375", + "issue:45397", + "issue:45459", + "issue:45563", + 
"issue:45584", + "issue:45588", + "issue:45656", + "issue:45701" + ], + "soft_pairs": [ + "issue:44242|issue:44857", + "issue:44246|issue:44871", + "issue:36246|issue:43596", + "issue:44295|issue:44749", + "issue:43441|issue:43540", + "issue:44779|issue:45292", + "issue:44877|issue:45375", + "issue:43066|issue:44779", + "issue:42886|issue:45229", + "issue:41628|issue:41950", + "issue:45292|issue:45584", + "issue:44987|issue:45459", + "issue:43065|issue:43408", + "issue:43901|issue:45003", + "issue:45292|issue:45701", + "issue:43232|issue:44336", + "issue:44995|issue:45200", + "issue:43257|issue:45137", + "issue:44496|issue:45003", + "issue:44060|issue:44077", + "issue:43475|issue:43540", + "issue:43262|issue:44246", + "issue:43653|issue:44162", + "issue:44368|issue:45563", + "issue:44995|issue:45588", + "issue:44466|issue:45335", + "issue:45397|issue:45656", + "issue:44368|issue:44977" + ], + "prepared_review_unit_hash": "f6f6404144b654fe1a3725191ca5147bb3eaecb8f82b9f901c19baee694cf0ab", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 28, + "serialized_chars": 36152, + "estimated_input_tokens": 9038, + "estimated_eval_tokens": 18332 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is thematically noisy: many items mention Transformers loading/tokenizer/modeling regressions, but the paired candidates are not the same concrete bugs. 
I rejected all soft edges because they differ in model, failure mode, or code path.", + "confidence": 0.33, + "canonical_issue_reason": "issue:45701 is the broadest and most representative open report here: a version-dependent tokenization regression with clear impact and room for follow-up.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45701 is the best single anchor for this cluster because it is general, open, and closest to the recurring tokenizer-regression theme among the listed issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44242", + "right": "issue:44857", + "accept": false, + "reason": "Different bugs: MoE router loss handling vs an AMP/CUDA crash in image loss." + }, + { + "left": "issue:44246", + "right": "issue:44871", + "accept": false, + "reason": "Different issues: import slowness vs eos_token_id config mismatch." + }, + { + "left": "issue:36246", + "right": "issue:43596", + "accept": false, + "reason": "Different code paths: missing Qwen2.5-VL image processor export vs DeepSpeed/BertModel zero3 indexing." + }, + { + "left": "issue:44295", + "right": "issue:44749", + "accept": false, + "reason": "Different symptoms: position_ids buffer access error vs a general performance regression." + }, + { + "left": "issue:43441", + "right": "issue:43540", + "accept": false, + "reason": "Different models and failures: FlashAttention on Ministral-3 vs Qwen3OmniMoe video-input validation." + }, + { + "left": "issue:44779", + "right": "issue:45292", + "accept": false, + "reason": "Tokenizer output regression vs resize_token_embeddings not updating output embeddings." + }, + { + "left": "issue:44877", + "right": "issue:45375", + "accept": false, + "reason": "Both involve config strictness, but they are different model-specific config problems and not the same bug." 
+ }, + { + "left": "issue:43066", + "right": "issue:44779", + "accept": false, + "reason": "Different concrete failures: wrong tokenizer decoder-type warning vs incorrect DeepSeek tokenization." + }, + { + "left": "issue:42886", + "right": "issue:45229", + "accept": false, + "reason": "Unrelated: offline tokenizer cache loading vs Gemma4 multi-GPU OOM." + }, + { + "left": "issue:41628", + "right": "issue:41950", + "accept": false, + "reason": "Different import/pipeline problems: missing AutoImageProcessor export vs video pipeline image-processor lookup." + }, + { + "left": "issue:45292", + "right": "issue:45584", + "accept": false, + "reason": "Different areas: embedding resize propagation vs Whisper empty-transcription generation failure." + }, + { + "left": "issue:44987", + "right": "issue:45459", + "accept": false, + "reason": "Different bugs: model loading failure for a repo vs tokenizer error masking when protobuf is absent." + }, + { + "left": "issue:43065", + "right": "issue:43408", + "accept": false, + "reason": "Different SAM3 issues: dummy Conv2d in pixel decoder vs model-type warning for sam3_tracker." + }, + { + "left": "issue:43901", + "right": "issue:45003", + "accept": false, + "reason": "Docs mismatch vs a modeling_utils sys.modules access bug; not the same underlying issue." + }, + { + "left": "issue:45292", + "right": "issue:45701", + "accept": false, + "reason": "Related to tokenization/embeddings in a broad sense, but one is resizing embeddings and the other is a version-dependent tokenizer regression." + }, + { + "left": "issue:43232", + "right": "issue:44336", + "accept": false, + "reason": "Generation kwargs handling vs ANSI codes in loading_report are unrelated." + }, + { + "left": "issue:44995", + "right": "issue:45200", + "accept": false, + "reason": "Different model failures: stale MoE cache on second forward vs missing mm_token_type_ids default." 
+ }, + { + "left": "issue:43257", + "right": "issue:45137", + "accept": false, + "reason": "Both mention DeepSpeed, but one is Qwen3 MOE weight conversion and the other is a generic deque IndexError." + }, + { + "left": "issue:44496", + "right": "issue:45003", + "accept": false, + "reason": "Unrecognized model/config issue vs sys.modules access bug; unrelated code paths." + }, + { + "left": "issue:44060", + "right": "issue:44077", + "accept": false, + "reason": "Different model/config issues: tied-weights warning in Qwen3-Next vs patchtsmixer post_init policy." + }, + { + "left": "issue:43475", + "right": "issue:43540", + "accept": false, + "reason": "Different SAM3 vs Qwen3OmniMoe video-processing failures." + }, + { + "left": "issue:43262", + "right": "issue:44246", + "accept": false, + "reason": "Audio chat-template sampling-rate bug vs import latency; unrelated." + }, + { + "left": "issue:43653", + "right": "issue:44162", + "accept": false, + "reason": "Different tokenizer/model problems: BigBirdTokenizer special-token registration vs ESM2 breakage." + }, + { + "left": "issue:44368", + "right": "issue:45563", + "accept": false, + "reason": "Different warnings in unrelated areas: tied-weights config warning vs stale paged-generate warning." + }, + { + "left": "issue:44995", + "right": "issue:45588", + "accept": false, + "reason": "Different failures: stale indexer cache vs flash-attention s_aux=None crash." + }, + { + "left": "issue:44466", + "right": "issue:45335", + "accept": false, + "reason": "Both touch embedding serialization/resizing, but they are not the same concrete bug." + }, + { + "left": "issue:45397", + "right": "issue:45656", + "accept": false, + "reason": "Different DeepSpeed issues: gemma-4 zero3 loading vs duplicate optimizer stepping." + }, + { + "left": "issue:44368", + "right": "issue:44977", + "accept": false, + "reason": "Different bugs: config warning text vs Qwen3.5 flash-attention generation failure." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 33, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:28282", + "issue:30990", + "issue:41628", + "issue:42371", + "issue:42673", + "issue:42831", + "issue:42994", + "issue:43232", + "issue:43475", + "issue:43482", + "issue:43525", + "issue:43643", + "issue:43646", + "issue:43673", + "issue:43704", + "issue:43873", + "issue:43931", + "issue:44038", + "issue:44162", + "issue:44188", + "issue:44222", + "issue:44297", + "issue:44351", + "issue:44361", + "issue:44368", + "issue:44623", + "issue:44661", + "issue:44734", + "issue:44749", + "issue:44869", + "issue:44933", + "issue:44945", + "issue:45081", + "issue:45092", + "issue:45103", + "issue:45137", + "issue:45230", + "issue:45278", + "issue:45310", + "issue:45356", + "issue:45362", + "issue:45375", + "issue:45381", + "issue:45397", + "issue:45459", + "issue:45478", + "issue:45600", + "issue:45656" + ], + "soft_pairs": [ + "issue:43673|issue:44734", + "issue:45137|issue:45600", + "issue:44933|issue:45459", + "issue:43931|issue:44368", + "issue:44869|issue:45356", + "issue:43232|issue:43673", + "issue:43673|issue:45278", + "issue:44297|issue:44623", + "issue:43646|issue:44162", + "issue:44623|issue:45656", + "issue:44361|issue:44749", + "issue:44749|issue:45310", + "issue:28282|issue:30990", + "issue:42994|issue:43482", + "issue:42371|issue:45092", + "issue:44038|issue:44623", + "issue:44749|issue:45656", + "issue:45230|issue:45397", + "issue:43475|issue:43643", + "issue:45375|issue:45478", + "issue:42673|issue:43704", + "issue:43475|issue:45381", + "issue:41628|issue:44933", + 
"issue:45310|issue:45375", + "issue:45081|issue:45103", + "issue:42994|issue:43525", + "issue:43475|issue:44661", + "issue:45362|issue:45375", + "issue:42831|issue:44222", + "issue:43873|issue:44945", + "issue:43232|issue:44188", + "issue:41628|issue:44351" + ], + "prepared_review_unit_hash": "49c5a7dd951e57194ef922f186e03ffb13169eececfdbd001239f370bf465cde", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 32, + "serialized_chars": 37022, + "estimated_input_tokens": 9256, + "estimated_eval_tokens": 18768 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous; there are only a couple of clear duplicate pairs (the two identical Qwen3ForCausalLM VRAM-leak reports, and the two identical Qwen3.5 Moe from_pretrained reports). Most other soft edges are related only at a broad subsystem level and should stay separate.", + "confidence": 0.93, + "canonical_issue_reason": "issue:45278 is the broadest umbrella-style report in the set (\u201cmany import errors after update from 4.57.0 to 5.5.0\u201d), so it is the closest thing to a cluster anchor even though the cluster is not a single duplicate topic.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45278 is the most representative standalone issue for the overall cluster because it captures a wide v5 compatibility regression pattern; the other items are mostly narrower, model-specific, or unrelated failures.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43673", + "right": "issue:44734", + "accept": false, + "reason": "Both are generation/cache-related, but they describe different failing code paths and symptoms." 
+ }, + { + "left": "issue:45137", + "right": "issue:45600", + "accept": false, + "reason": "DeepSpeed ZeRO3 deque underflow is unrelated to the Sam3LiteText auto-mapping CI break." + }, + { + "left": "issue:44933", + "right": "issue:45459", + "accept": false, + "reason": "Missing import from image_utils and tokenizer protobuf error are different bugs." + }, + { + "left": "issue:43931", + "right": "issue:44368", + "accept": false, + "reason": "Weight-shape mismatch for Qwen3-VL is not the same as a tie_word_embeddings warning." + }, + { + "left": "issue:44869", + "right": "issue:45356", + "accept": false, + "reason": "Whisper timestamp decode crash and Kimi tokenizer codec regression are distinct issues." + }, + { + "left": "issue:43232", + "right": "issue:43673", + "accept": false, + "reason": "Both touch generation internals, but one is sync_gpus kwarg handling and the other is chunked_prefill cache state." + }, + { + "left": "issue:43673", + "right": "issue:45278", + "accept": false, + "reason": "Cache/generation bug is not the same underlying problem as broad import failures after upgrade." + }, + { + "left": "issue:44297", + "right": "issue:44623", + "accept": false, + "reason": "Tokenizer.save_pretrained metadata mismatch and processor.save_pretrained missing files are different save paths." + }, + { + "left": "issue:43646", + "right": "issue:44162", + "accept": false, + "reason": "Missing fields from trust_remote_code AutoConfig is unrelated to the ESM2 breakage." + }, + { + "left": "issue:44623", + "right": "issue:45656", + "accept": false, + "reason": "Save-pretrained file emission and double optimizer step under deepspeed are unrelated." + }, + { + "left": "issue:44361", + "right": "issue:44749", + "accept": false, + "reason": "MLukeTokenizer task AttributeError and upgrade-related slowdown are different failures." 
+ }, + { + "left": "issue:44749", + "right": "issue:45310", + "accept": false, + "reason": "Performance regression is not the same code-path problem as Qwen3.5 Moe from_pretrained failure." + }, + { + "left": "issue:28282", + "right": "issue:30990", + "accept": false, + "reason": "Missing PyTorch import error and Sentence Transformers hanging on load are different problems." + }, + { + "left": "issue:42994", + "right": "issue:43482", + "accept": false, + "reason": "Quantized save failure and GGUF loading failure are separate bugs." + }, + { + "left": "issue:42371", + "right": "issue:45092", + "accept": false, + "reason": "TF32 API settings guidance is unrelated to InternVL2 meta-initialization incompatibility." + }, + { + "left": "issue:44038", + "right": "issue:44623", + "accept": false, + "reason": "Qwen3-VL-Moe/Transformers v5 breakage is not the same as processor.save_pretrained missing files." + }, + { + "left": "issue:44749", + "right": "issue:45656", + "accept": false, + "reason": "Data-path slowdown and deepspeed optimizer double-step are unrelated." + }, + { + "left": "issue:45230", + "right": "issue:45397", + "accept": false, + "reason": "A generic bug report is not the same issue as gemma-4 zero3 from_pretrained failure." + }, + { + "left": "issue:43475", + "right": "issue:43643", + "accept": false, + "reason": "SAM 3 video attribute error and trust_remote_code missing fields are different model/config bugs." + }, + { + "left": "issue:45375", + "right": "issue:45478", + "accept": false, + "reason": "Missing deepstack_visual_indexes in a vision config is not the same as the Qwen3.5 Moe from_pretrained error." + }, + { + "left": "issue:42673", + "right": "issue:43704", + "accept": true, + "reason": "Exact same title and same Qwen3ForCausalLM VRAM leak in multiple dataloader threads." 
+ }, + { + "left": "issue:43475", + "right": "issue:45381", + "accept": false, + "reason": "SAM 3 vision encoder attribute error and qwen2.5-vl video position-id bug are different vision-model failures." + }, + { + "left": "issue:41628", + "right": "issue:44933", + "accept": false, + "reason": "Both are import errors, but they involve different missing symbols and likely different fixes." + }, + { + "left": "issue:45310", + "right": "issue:45375", + "accept": false, + "reason": "One is a Qwen3.5 Moe from_pretrained failure; the other is a missing vision-config field being dropped." + }, + { + "left": "issue:45081", + "right": "issue:45103", + "accept": false, + "reason": "Mistral regex patch crash and docstring annotation crash are unrelated." + }, + { + "left": "issue:42994", + "right": "issue:43525", + "accept": false, + "reason": "Quantized save failure is unrelated to Llama4Config missing pad_token_id." + }, + { + "left": "issue:43475", + "right": "issue:44661", + "accept": false, + "reason": "SAM 3 encoder output attribute error is unrelated to add-new-model-like failing in TOKENIZER_MAPPING_NAMES." + }, + { + "left": "issue:45362", + "right": "issue:45375", + "accept": false, + "reason": "Qwen3.5 chat crash and missing vision-config field are different problems." + }, + { + "left": "issue:42831", + "right": "issue:44222", + "accept": false, + "reason": "FP8 accuracy regression and FP8 save_pretrained bug are not the same code-path issue." + }, + { + "left": "issue:43873", + "right": "issue:44945", + "accept": false, + "reason": "Quantization offloading behavior and incorrect pipeline-parallel output are different failures." + }, + { + "left": "issue:43232", + "right": "issue:44188", + "accept": false, + "reason": "Generation kwargs sync_gpus handling is unrelated to torch.compile attention-kernel divergence." 
+ }, + { + "left": "issue:41628", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but they are separate missing exports rather than one underlying bug." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet: it correctly treats the cluster as heterogeneous, picks issue:45278 as a reasonable umbrella anchor, and the only accepted soft edge is an exact duplicate pair. The rejected soft edges are conservative and the reasons generally match the issue titles and descriptions. The only minor caution is that the confidence feels a bit high for a heterogeneous cluster, but not enough to reject." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 34, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:30333", + "issue:30990", + "issue:33290", + "issue:35141", + "issue:36010", + "issue:41628", + "issue:42175", + "issue:42222", + "issue:42898", + "issue:42994", + "issue:43232", + "issue:43278", + "issue:43377", + "issue:43479", + "issue:43504", + "issue:43526", + "issue:43673", + "issue:43723", + "issue:43784", + "issue:43824", + "issue:43874", + "issue:43976", + "issue:44038", + "issue:44060", + "issue:44079", + "issue:44164", + "issue:44188", + "issue:44222", + "issue:44242", + "issue:44373", + "issue:44496", + "issue:44568", + "issue:44623", + "issue:44734", + "issue:44779", + "issue:44792", + "issue:44869", + "issue:44933", + "issue:44964", + "issue:45092", + "issue:45103", + "issue:45125", + "issue:45137", + "issue:45200", + "issue:45310", + "issue:45356", + "issue:45561", + "issue:45712" + ], + "soft_pairs": [ + "issue:42222|issue:45200", 
+ "issue:44222|issue:45310", + "issue:44038|issue:45310", + "issue:42994|issue:44164", + "issue:42175|issue:45712", + "issue:43526|issue:45561", + "issue:43377|issue:44242", + "issue:43673|issue:44188", + "issue:36010|issue:41628", + "issue:44496|issue:45561", + "issue:43232|issue:44792", + "issue:43504|issue:45561", + "issue:30990|issue:35141", + "issue:42898|issue:44568", + "issue:44964|issue:45137", + "issue:43976|issue:44933", + "issue:43479|issue:44079", + "issue:43479|issue:44869", + "issue:42175|issue:44933", + "issue:44623|issue:45310", + "issue:44188|issue:44373", + "issue:43784|issue:43824", + "issue:43278|issue:45137", + "issue:43723|issue:44568", + "issue:44060|issue:45125", + "issue:43874|issue:45092", + "issue:44734|issue:45103", + "issue:42898|issue:45356", + "issue:42898|issue:44779", + "issue:30333|issue:33290" + ], + "prepared_review_unit_hash": "8c62155ed164d9133c330c2b4b7266b303c45239d7f5709fb81e717c2cbc0771", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36710, + "estimated_input_tokens": 9178, + "estimated_eval_tokens": 18612 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is highly heterogeneous: most items are distinct Transformers bug reports with only superficial overlap around imports, tokenizers, or save/load flows. I do not see any true duplicate pair among the soft candidates.", + "confidence": 0.93, + "canonical_issue_reason": "issue:45310 is the best anchor if one must pick a representative issue: it is a concrete, version-specific regression report about Qwen3.5-MoE from_pretrained behavior, and it is the closest match to the strongest soft similarities in the set. 
That said, the cluster is not a real duplicate group.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43824 is the strongest standalone issue in this set: it has high discussion/inbound reference activity and a clear, actionable import regression symptom, making it a good representative issue even though it is not a duplicate of the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42222", + "right": "issue:45200", + "accept": false, + "reason": "Different models and failures: VitPose breakage vs Gemma 4 multimodal token defaults." + }, + { + "left": "issue:44222", + "right": "issue:45310", + "accept": false, + "reason": "Same broad MoE area, but different code paths and symptoms: FP8 save_pretrained vs from_pretrained load error." + }, + { + "left": "issue:44038", + "right": "issue:45310", + "accept": false, + "reason": "Both mention Qwen MoE models, but they target different model generations and different loading/runtime issues." + }, + { + "left": "issue:42994", + "right": "issue:44164", + "accept": false, + "reason": "Both involve save/load, but one is quantized model saving and the other is extra_state handling; not the same bug." + }, + { + "left": "issue:42175", + "right": "issue:45712", + "accept": false, + "reason": "Unrelated packaging/testing issues: missing TensorFlow backend vs dummy PT objects leaking into repo checks." + }, + { + "left": "issue:43526", + "right": "issue:45561", + "accept": false, + "reason": "Completely different bugs: BeitImageProcessorFast label reduction vs xdist file race in testing utils." + }, + { + "left": "issue:43377", + "right": "issue:44242", + "accept": false, + "reason": "Different model behaviors: missing padding-mask support in MIMI encoder vs missing load balancing loss when router logits are off." 
+ }, + { + "left": "issue:43673", + "right": "issue:44188", + "accept": false, + "reason": "Both are generation-related, but one is cache availability in chunked prefill and the other is attention-kernel divergence under torch.compile." + }, + { + "left": "issue:36010", + "right": "issue:41628", + "accept": false, + "reason": "Both are import errors, but for different symbols and unrelated modules." + }, + { + "left": "issue:44496", + "right": "issue:45561", + "accept": false, + "reason": "Unrelated: model config recognition failure vs pytest-xdist captured-info race." + }, + { + "left": "issue:43232", + "right": "issue:44792", + "accept": false, + "reason": "Different issues: generation kwargs after sync_gpus vs a janus image-generation test failure." + }, + { + "left": "issue:43504", + "right": "issue:45561", + "accept": false, + "reason": "Different subsystems: BEiT preset loading legacy field vs a parallel test-file race." + }, + { + "left": "issue:30990", + "right": "issue:35141", + "accept": false, + "reason": "Sentence-Transformers loading hang and embedding reinitialization are unrelated." + }, + { + "left": "issue:42898", + "right": "issue:44568", + "accept": false, + "reason": "Both are tokenizer regressions, but one is cleanup-space behavior and the other is special-token insertion; not the same bug." + }, + { + "left": "issue:44964", + "right": "issue:45137", + "accept": false, + "reason": "Different failures: Phi-4 multimodal load error vs DeepSpeed ZeRO3 deque underflow." + }, + { + "left": "issue:43976", + "right": "issue:44933", + "accept": false, + "reason": "Python version compatibility issue vs missing import from image_utils; unrelated." + }, + { + "left": "issue:43479", + "right": "issue:44079", + "accept": false, + "reason": "Different None-handling bugs in different layers: multimodal config defaults vs ModelOutput key assignment." 
+ }, + { + "left": "issue:43479", + "right": "issue:44869", + "accept": false, + "reason": "Different subsystems and symptoms: config initialization vs Whisper timestamp decode crash." + }, + { + "left": "issue:42175", + "right": "issue:44933", + "accept": false, + "reason": "Packaging/backend dependency issue vs nonexistent image_utils import; no shared underlying bug." + }, + { + "left": "issue:44623", + "right": "issue:45310", + "accept": false, + "reason": "Processor save_pretrained missing files is a different path from Qwen3.5-MoE model loading." + }, + { + "left": "issue:44188", + "right": "issue:44373", + "accept": false, + "reason": "Attention-kernel runtime divergence and a wrong docstring are not the same issue." + }, + { + "left": "issue:43784", + "right": "issue:43824", + "accept": false, + "reason": "Both are import failures, but they involve different missing symbols and different packages/code paths." + }, + { + "left": "issue:43278", + "right": "issue:45137", + "accept": false, + "reason": "Training/eval dtype drift is unrelated to the ZeRO3 deque error." + }, + { + "left": "issue:43723", + "right": "issue:44568", + "accept": false, + "reason": "Tokenizer loading in v5 and missing BOS/EOS addition are separate tokenizer regressions." + }, + { + "left": "issue:44060", + "right": "issue:45125", + "accept": false, + "reason": "Different Qwen model issues: tied-weights warning logic vs missing tensor-parallel plan." + }, + { + "left": "issue:43874", + "right": "issue:45092", + "accept": false, + "reason": "Both are multimodal/model-init issues, but they affect different models and different failure mechanisms." + }, + { + "left": "issue:44734", + "right": "issue:45103", + "accept": false, + "reason": "Serving KV-cache indexing and auto-docstring annotation handling are unrelated." 
+ }, + { + "left": "issue:42898", + "right": "issue:45356", + "accept": false, + "reason": "Both are tokenizer-related, but one concerns cleanup spaces and the other codec handling in Kimi-K2.5." + }, + { + "left": "issue:42898", + "right": "issue:44779", + "accept": false, + "reason": "Tokenizer regressions, but different models and different underlying tokenization failures." + }, + { + "left": "issue:30333", + "right": "issue:33290", + "accept": false, + "reason": "MLflow reporting on failed jobs is unrelated to DeepSpeed Adafactor OOM." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 35, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:22355", + "issue:28282", + "issue:30333", + "issue:30990", + "issue:34689", + "issue:35141", + "issue:36331", + "issue:38175", + "issue:41628", + "issue:42757", + "issue:42831", + "issue:42886", + "issue:42898", + "issue:42994", + "issue:43295", + "issue:43479", + "issue:43644", + "issue:43723", + "issue:43824", + "issue:44206", + "issue:44222", + "issue:44261", + "issue:44263", + "issue:44295", + "issue:44297", + "issue:44360", + "issue:44373", + "issue:44442", + "issue:44448", + "issue:44483", + "issue:44484", + "issue:44485", + "issue:44514", + "issue:44623", + "issue:44871", + "issue:44913", + "issue:44933", + "issue:44977", + "issue:44991", + "issue:45092", + "issue:45103", + "issue:45310", + "issue:45357", + "issue:45405", + "issue:45406", + "issue:45459", + "issue:45656", + "issue:45712" + ], + "soft_pairs": [ + "issue:45405|issue:45712", + "issue:42898|issue:44448", + "issue:44295|issue:45656", + "issue:44933|issue:45712", + 
"issue:41628|issue:45459", + "issue:42898|issue:43295", + "issue:44222|issue:44297", + "issue:44297|issue:45656", + "issue:43479|issue:43644", + "issue:42831|issue:44623", + "issue:44222|issue:44623", + "issue:44206|issue:45103", + "issue:22355|issue:28282", + "issue:41628|issue:42757", + "issue:44297|issue:45310", + "issue:30333|issue:36331", + "issue:44263|issue:45092", + "issue:44263|issue:44360", + "issue:44222|issue:44484", + "issue:43723|issue:44977", + "issue:30333|issue:34689", + "issue:44483|issue:45406", + "issue:28282|issue:30333", + "issue:30333|issue:35141", + "issue:44295|issue:44623", + "issue:44373|issue:44485", + "issue:44261|issue:45092", + "issue:44514|issue:45103", + "issue:43723|issue:44871", + "issue:45092|issue:45357", + "issue:43723|issue:44991", + "issue:43723|issue:44442", + "issue:43479|issue:44913", + "issue:30990|issue:38175", + "issue:42886|issue:42994", + "issue:41628|issue:43824", + "issue:42831|issue:45310" + ], + "prepared_review_unit_hash": "29c6c181e4bdaf9f6883a672401afc914351a3ae4fe0f7a4fd0ac74408ec8035", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 37, + "serialized_chars": 38598, + "estimated_input_tokens": 9650, + "estimated_eval_tokens": 19556 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a grab-bag of unrelated Transformers issues: tokenizer/import regressions, model config bugs, save/load failures, and a few training/runtime problems. 
Only one soft edge looks like a likely duplicate pair within the tokenizer-loading regression family; the rest are too broad or hit different code paths.", + "confidence": 0.44, + "canonical_issue_reason": "Issue 43723 is the best representative of the only clearly related subset here: v5 tokenizer-loading regressions. It is broader than the model-specific reports, so it makes the most plausible anchor among the issues, though the overall cluster is not truly homogeneous.", + "canonical_pr_reason": null, + "best_issue_reason": "43723 is the most central issue for the tokenizer-loading problem space and can absorb a specific example like 44991 if a single duplicate anchor is needed.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45405", + "right": "issue:45712", + "accept": false, + "reason": "Unrelated topics: PEFT version pinning vs leftover dummy classes and repo checks." + }, + { + "left": "issue:42898", + "right": "issue:44448", + "accept": false, + "reason": "Both are version-related, but one is tokenization-space behavior and the other is a model output divergence; not the same concrete bug." + }, + { + "left": "issue:44295", + "right": "issue:45656", + "accept": false, + "reason": "Different subsystems: position_ids buffer reading vs optimizer stepping twice under deepspeed." + }, + { + "left": "issue:44933", + "right": "issue:45712", + "accept": false, + "reason": "Both involve exposed names, but one is an image_utils import issue and the other is dummy class leakage; different fixes." + }, + { + "left": "issue:41628", + "right": "issue:45459", + "accept": false, + "reason": "Different import failures in different areas: AutoImageProcessor export vs tokenizer error handling with protobuf." + }, + { + "left": "issue:42898", + "right": "issue:43295", + "accept": false, + "reason": "Related to v5 behavior changes, but the concrete failures differ: clean_up_tokenization_spaces vs processor.tokenizer/image passing regression." 
+ }, + { + "left": "issue:44222", + "right": "issue:44297", + "accept": false, + "reason": "Both mention save_pretrained, but one is FP8 MoE model saving and the other is tokenizer metadata mismatch." + }, + { + "left": "issue:44297", + "right": "issue:45656", + "accept": false, + "reason": "Tokenizer save metadata vs optimizer stepping; no shared code-path." + }, + { + "left": "issue:43479", + "right": "issue:43644", + "accept": false, + "reason": "Config initialization bug vs non-persistent buffer initialization bug; different objects and failure modes." + }, + { + "left": "issue:42831", + "right": "issue:44623", + "accept": false, + "reason": "Different saving problems: FineGrainedFP8 accuracy/saving vs processor.save_pretrained missing files." + }, + { + "left": "issue:44222", + "right": "issue:44623", + "accept": false, + "reason": "Both are save-related, but one is quantized model saving and the other is processor artifact completeness." + }, + { + "left": "issue:44206", + "right": "issue:45103", + "accept": false, + "reason": "Unrelated regressions: feature extractor argument handling vs auto-docstring AttributeError under future annotations." + }, + { + "left": "issue:22355", + "right": "issue:28282", + "accept": false, + "reason": "Both are import errors, but one is transformers.onnx and the other is missing PyTorch for AutoModel; different dependency/path issues." + }, + { + "left": "issue:41628", + "right": "issue:42757", + "accept": false, + "reason": "Both are top-level import failures, but for different missing symbols from different packages." + }, + { + "left": "issue:44297", + "right": "issue:45310", + "accept": false, + "reason": "Tokenizer save metadata mismatch vs a specific Qwen3.5 MoE from_pretrained regression; not the same bug." + }, + { + "left": "issue:30333", + "right": "issue:36331", + "accept": false, + "reason": "MLflow reporting on failed jobs vs Trainer compute_loss signature mismatch; unrelated training issues." 
+ }, + { + "left": "issue:44263", + "right": "issue:45092", + "accept": false, + "reason": "Different GLM/InternVL problems: torch.split return handling vs remote-code checkpoint/meta-init incompatibility." + }, + { + "left": "issue:44263", + "right": "issue:44360", + "accept": false, + "reason": "Both are DSA-indexer related, but one is split return handling and the other is missing ReLU; not mergeable as one fix." + }, + { + "left": "issue:44222", + "right": "issue:44484", + "accept": false, + "reason": "Quantized model save failure vs a question about default shard size; not the same bug." + }, + { + "left": "issue:43723", + "right": "issue:44977", + "accept": false, + "reason": "Both are generation/loading regressions, but one is tokenizer loading and the other is flash-attention generation behavior for Qwen3.5." + }, + { + "left": "issue:30333", + "right": "issue:34689", + "accept": false, + "reason": "Both are regressions, but MLflow reporting and Llama model loading are unrelated code paths." + }, + { + "left": "issue:44483", + "right": "issue:45406", + "accept": false, + "reason": "Chat-completions request handling vs Gemma4Processor missing _tokenizer; different API layers." + }, + { + "left": "issue:28282", + "right": "issue:30333", + "accept": false, + "reason": "Missing PyTorch import dependency vs MLflow reporting after failed training; unrelated." + }, + { + "left": "issue:30333", + "right": "issue:35141", + "accept": false, + "reason": "Training-job reporting vs embedding resize/post_init reinitialization; no common bug." + }, + { + "left": "issue:44295", + "right": "issue:44623", + "accept": false, + "reason": "Position_ids buffer access vs processor save_pretrained missing files; unrelated." + }, + { + "left": "issue:44373", + "right": "issue:44485", + "accept": false, + "reason": "Docstring wording vs RoPE implementation; no shared underlying code failure." 
+ }, + { + "left": "issue:44261", + "right": "issue:45092", + "accept": false, + "reason": "Layernorm epsilon/config precision issue vs remote-code meta-init incompatibility; different problems." + }, + { + "left": "issue:44514", + "right": "issue:45103", + "accept": false, + "reason": "Processor chat-template batching crash vs auto-docstring AttributeError; unrelated." + }, + { + "left": "issue:43723", + "right": "issue:44871", + "accept": false, + "reason": "Tokenizer loading regression vs eos_token_id configuration inconsistency; separate model/tokenizer concerns." + }, + { + "left": "issue:45092", + "right": "issue:45357", + "accept": false, + "reason": "Both concern Qwen/InternVL-era regressions, but one is meta initialization compatibility and the other is save_pretrained visual-key serialization." + }, + { + "left": "issue:43723", + "right": "issue:44991", + "accept": true, + "reason": "Both describe the same tokenizer-loading regression in v5; 44991 is a concrete model-specific instance of the broader AutoTokenizer.from_pretrained failure in 43723, and one fix could plausibly address both." + }, + { + "left": "issue:43723", + "right": "issue:44442", + "accept": false, + "reason": "Both mention tokenizers, but one is AutoTokenizer loading and the other is a specific tokenizer class load failure for FastSpeech2ConformerTokenizer." + }, + { + "left": "issue:43479", + "right": "issue:44913", + "accept": false, + "reason": "Config default initialization vs GPTNeoX rotary_pct persistence; distinct config bugs." + }, + { + "left": "issue:30990", + "right": "issue:38175", + "accept": false, + "reason": "Stuck loading Sentence Transformers vs unexpected zero probabilities in SigLIP2; different model/runtime failures." + }, + { + "left": "issue:42886", + "right": "issue:42994", + "accept": false, + "reason": "Offline cache tokenizer loading vs quantized model saving; unrelated." 
+ }, + { + "left": "issue:41628", + "right": "issue:43824", + "accept": false, + "reason": "Different missing top-level exports: AutoImageProcessor vs Qwen2_5_VLForConditionalGeneration." + }, + { + "left": "issue:42831", + "right": "issue:45310", + "accept": false, + "reason": "FP8 accuracy/saving issue vs Qwen3.5 MoE from_pretrained regression; different failures." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The analysis is grounded in the packet and stays conservative overall. The one accepted soft edge (43723/44991) is reasonably supported by the titles as the same tokenizer-loading regression family, and the remaining pairwise rejections are cautious and code-path specific. The summary also appropriately notes that the cluster is mostly heterogeneous rather than a single bug." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 36, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:29127", + "issue:30990", + "issue:33357", + "issue:36296", + "issue:39401", + "issue:41628", + "issue:42371", + "issue:42757", + "issue:42831", + "issue:42886", + "issue:42907", + "issue:43066", + "issue:43116", + "issue:43122", + "issue:43232", + "issue:43408", + "issue:43475", + "issue:43576", + "issue:43756", + "issue:43784", + "issue:43937", + "issue:43976", + "issue:44038", + "issue:44060", + "issue:44117", + "issue:44261", + "issue:44263", + "issue:44295", + "issue:44297", + "issue:44360", + "issue:44373", + "issue:44485", + "issue:44488", + "issue:44514", + "issue:44521", + "issue:44625", + "issue:44704", + "issue:44743", + "issue:44779", + "issue:44829", + "issue:44945", + "issue:44964", + "issue:45200", + 
"issue:45245", + "issue:45278", + "issue:45290", + "issue:45593", + "issue:45600" + ], + "soft_pairs": [ + "issue:42831|issue:44038", + "issue:44263|issue:44373", + "issue:42757|issue:45278", + "issue:43232|issue:45593", + "issue:43756|issue:44060", + "issue:43576|issue:43784", + "issue:42886|issue:43066", + "issue:43122|issue:44521", + "issue:42371|issue:44263", + "issue:44117|issue:44704", + "issue:30990|issue:33357", + "issue:42907|issue:44945", + "issue:43976|issue:44945", + "issue:44295|issue:44297", + "issue:44704|issue:45290", + "issue:43122|issue:44514", + "issue:43232|issue:44743", + "issue:39401|issue:44488", + "issue:43408|issue:43475", + "issue:42907|issue:43937", + "issue:43232|issue:45245", + "issue:43784|issue:45600", + "issue:41628|issue:45278", + "issue:44360|issue:44485", + "issue:44261|issue:44373", + "issue:39401|issue:44521", + "issue:44625|issue:44829", + "issue:43116|issue:45200", + "issue:29127|issue:30990", + "issue:44038|issue:44297", + "issue:43122|issue:44779", + "issue:42371|issue:42831", + "issue:42907|issue:44964", + "issue:44038|issue:44295", + "issue:36296|issue:43116" + ], + "prepared_review_unit_hash": "38b3928678c7f5f23a17307ff9f99ee7a160dd634c48dc13df80fdf9b2f30132", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 35, + "serialized_chars": 37778, + "estimated_input_tokens": 9445, + "estimated_eval_tokens": 19146 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are mostly unrelated standalone bugs/feature issues. A few are thematically close around imports, tokenization, or generation, but none look like the same concrete underlying bug. 
No PRs are present.", + "confidence": 0.72, + "canonical_issue_reason": "issue:45278 is the broadest umbrella regression in the set: multiple import failures after upgrading from 4.57.0 to 5.5.0. It is the best representative of the import-regression subset, even though the overall cluster is not a single duplicate group.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45278 is the best global issue candidate because it covers the widest user-facing failure mode among the issues shown and can reasonably subsume several import-related reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42831", + "right": "issue:44038", + "accept": false, + "reason": "Both are model-specific bugs, but one is an FP8 accuracy issue and the other is a Qwen3-VL-Moe loading/runtime problem. Different root causes." + }, + { + "left": "issue:44263", + "right": "issue:44373", + "accept": false, + "reason": "One is about a torch.split return-value bug in a DSA indexer; the other is a docstring problem for position_ids. Not the same issue." + }, + { + "left": "issue:42757", + "right": "issue:45278", + "accept": false, + "reason": "Both mention imports, but 42757 is a specific huggingface_hub symbol import failure while 45278 is a broader Transformers upgrade regression. Different bugs." + }, + { + "left": "issue:43232", + "right": "issue:45593", + "accept": false, + "reason": "Generation state handling after sync_gpus is unrelated to D-FINE auxiliary-loss behavior when denoising is disabled." + }, + { + "left": "issue:43756", + "right": "issue:44060", + "accept": false, + "reason": "Smollm3 RoPE-layer mismatch and Qwen3-Next tied-weights warning affect different models and code paths." + }, + { + "left": "issue:43576", + "right": "issue:43784", + "accept": false, + "reason": "A broken env CLI command is not the same as an import-time NameError in sentence-transformers." 
+ }, + { + "left": "issue:42886", + "right": "issue:43066", + "accept": false, + "reason": "Both are tokenizer-related, but one is offline cache loading and the other is decoder-type metadata in v5. Different failure modes." + }, + { + "left": "issue:43122", + "right": "issue:44521", + "accept": false, + "reason": "Both involve tokenization/chat templates, but one is regression in tokenization output and the other is all-zero assistant masks for multimodal inputs." + }, + { + "left": "issue:42371", + "right": "issue:44263", + "accept": false, + "reason": "TF32 configuration guidance is unrelated to a torch.split indexing bug." + }, + { + "left": "issue:44117", + "right": "issue:44704", + "accept": false, + "reason": "One is a TOKENIZER_MAPPING_NAMES null-handling bug; the other is AutoProcessor not forwarding kwargs to cached_file. Related loading area, but different code-paths." + }, + { + "left": "issue:30990", + "right": "issue:33357", + "accept": false, + "reason": "Both are loading-related complaints, but stuck loading Sentence Transformers and a MacOS bus error on a CLIP model are different issues." + }, + { + "left": "issue:42907", + "right": "issue:44945", + "accept": false, + "reason": "Saving dequantized models and pipeline-parallel incorrect outputs are unrelated bugs." + }, + { + "left": "issue:43976", + "right": "issue:44945", + "accept": false, + "reason": "Python-version support breakage is unrelated to pipeline parallelism output corruption." + }, + { + "left": "issue:44295", + "right": "issue:44297", + "accept": false, + "reason": "A position_ids buffer read error and a tokenizer_class mismatch on save_pretrained are distinct serialization issues." + }, + { + "left": "issue:44704", + "right": "issue:45290", + "accept": false, + "reason": "Passing kwargs to cached_file is unrelated to apply_chat_template crashing on tool-call assistant messages." 
+ }, + { + "left": "issue:43122", + "right": "issue:44514", + "accept": false, + "reason": "Both involve tokenization/chat-template behavior, but one is output regression and the other is a batched-input padding crash. Not the same bug." + }, + { + "left": "issue:43232", + "right": "issue:44743", + "accept": false, + "reason": "Both touch generation/cache logic, but they concern different mechanisms: sync_gpus kwargs vs Qwen3.5 recurrent-state reset." + }, + { + "left": "issue:39401", + "right": "issue:44488", + "accept": false, + "reason": "Tokenizer offset_mapping mismatch and failure to load a specific model are unrelated." + }, + { + "left": "issue:43408", + "right": "issue:43475", + "accept": false, + "reason": "A model-type warning and a missing attribute on Sam3VisionEncoderOutput are separate SAM3 issues." + }, + { + "left": "issue:42907", + "right": "issue:43937", + "accept": false, + "reason": "Dequantized-save failures and invalid GenerationConfig errors are unrelated." + }, + { + "left": "issue:43232", + "right": "issue:45245", + "accept": false, + "reason": "Generation kwargs handling has nothing to do with the category-count runtime limit." + }, + { + "left": "issue:43784", + "right": "issue:45600", + "accept": false, + "reason": "An import NameError in sentence-transformers is unrelated to stale auto_mappings references breaking CI." + }, + { + "left": "issue:41628", + "right": "issue:45278", + "accept": false, + "reason": "Both are import regressions, but 41628 is a specific AutoImageProcessor export failure while 45278 is a broader post-upgrade import breakage. Not the same concrete bug." + }, + { + "left": "issue:44360", + "right": "issue:44485", + "accept": false, + "reason": "The DSA ReLU discussion and GLM-5 RoPE implementation are different model/backend topics." 
+ }, + { + "left": "issue:44261", + "right": "issue:44373", + "accept": false, + "reason": "Missing rms_norm_eps in MLA q_a_layernorm is unrelated to a wrong docstring for position_ids." + }, + { + "left": "issue:39401", + "right": "issue:44521", + "accept": false, + "reason": "Qwen3 offset_mapping and multimodal assistant_masks are different tokenizer/template issues." + }, + { + "left": "issue:44625", + "right": "issue:44829", + "accept": false, + "reason": "num_labels propagation in Qwen3.5 is unrelated to degenerate training with flash_attention_3." + }, + { + "left": "issue:43116", + "right": "issue:45200", + "accept": false, + "reason": "Multi-label classification returning empty results in an example script is unrelated to Gemma 4 mm_token_type_ids defaults." + }, + { + "left": "issue:29127", + "right": "issue:30990", + "accept": false, + "reason": "LayoutLMv3 error messaging and Sentence Transformers loading hang are not the same bug." + }, + { + "left": "issue:44038", + "right": "issue:44297", + "accept": false, + "reason": "Qwen3-VL-Moe behavior and tokenizer_class mismatch on save_pretrained are unrelated." + }, + { + "left": "issue:43122", + "right": "issue:44779", + "accept": false, + "reason": "Both are tokenizer regressions across versions, but they affect different models and likely different code paths. Too broad to merge." + }, + { + "left": "issue:42371", + "right": "issue:42831", + "accept": false, + "reason": "TF32 settings guidance and FineGrainedFP8 accuracy loss are different precision-related problems." + }, + { + "left": "issue:36296", + "right": "issue:43116", + "accept": false, + "reason": "Tensor parallel training bugs and a multi-label example script issue are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 37, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:22355", + "issue:29127", + "issue:33453", + "issue:38617", + "issue:39401", + "issue:42222", + "issue:42371", + "issue:42673", + "issue:42757", + "issue:42907", + "issue:43064", + "issue:43116", + "issue:43122", + "issue:43278", + "issue:43441", + "issue:43452", + "issue:43493", + "issue:43673", + "issue:43784", + "issue:43824", + "issue:43825", + "issue:43827", + "issue:43874", + "issue:43931", + "issue:43937", + "issue:43976", + "issue:43992", + "issue:44038", + "issue:44075", + "issue:44117", + "issue:44222", + "issue:44291", + "issue:44351", + "issue:44485", + "issue:44704", + "issue:44779", + "issue:44936", + "issue:44945", + "issue:44977", + "issue:45071", + "issue:45072", + "issue:45083", + "issue:45216", + "issue:45278", + "issue:45446", + "issue:45478", + "issue:45563", + "issue:45588" + ], + "soft_pairs": [ + "issue:44038|issue:44222", + "issue:42907|issue:43452", + "issue:43824|issue:44351", + "issue:45216|issue:45478", + "issue:42757|issue:44351", + "issue:43992|issue:44704", + "issue:42907|issue:44222", + "issue:43278|issue:44945", + "issue:43122|issue:44977", + "issue:43784|issue:45446", + "issue:38617|issue:45278", + "issue:43937|issue:44485", + "issue:42673|issue:45072", + "issue:42222|issue:43441", + "issue:22355|issue:29127", + "issue:42371|issue:43874", + "issue:43064|issue:43673", + "issue:43116|issue:43493", + "issue:43976|issue:45446", + "issue:44038|issue:45478", + "issue:44222|issue:45478", + "issue:44075|issue:44936", + "issue:43116|issue:43825", + 
"issue:43937|issue:45563", + "issue:45083|issue:45446", + "issue:33453|issue:39401", + "issue:39401|issue:44779", + "issue:43931|issue:44945", + "issue:44117|issue:45588", + "issue:43827|issue:45446", + "issue:43937|issue:44977", + "issue:44291|issue:45071" + ], + "prepared_review_unit_hash": "06c8d924556da6a0a0e16ff60dc0ce9f8d224af52d23ab6c04f9a5d23478dc4b", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 32, + "serialized_chars": 36939, + "estimated_input_tokens": 9235, + "estimated_eval_tokens": 18726 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No soft edge looks like the same underlying bug/change. The set is a heterogeneous mix of import failures, tokenizer regressions, model-specific loading/saving issues, training problems, and docs/error-message fixes.", + "confidence": 0.97, + "canonical_issue_reason": "No canonical issue: the cluster does not converge on one bug theme closely enough to represent as a duplicate set.", + "canonical_pr_reason": null, + "best_issue_reason": "No single issue is a good global representative; the broadest candidate is still just an umbrella import-error report, not the same problem as the rest.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44038", + "right": "issue:44222", + "accept": false, + "reason": "Both mention MoE/Qwen, but one is a model loading bug and the other is FP8 save_pretrained behavior; different code paths." + }, + { + "left": "issue:42907", + "right": "issue:43452", + "accept": false, + "reason": "Different failures: saving dequantized Ministral/Devstral models vs gguf_file breaking AutoTokenizer/AutoModel loading." 
+ }, + { + "left": "issue:43824", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but for different missing symbols and different causes." + }, + { + "left": "issue:45216", + "right": "issue:45478", + "accept": false, + "reason": "Same model family/version area, but one is save_pretrained checkpoint regression and the other is from_pretrained load failure." + }, + { + "left": "issue:42757", + "right": "issue:44351", + "accept": false, + "reason": "Missing is_offline_mode from huggingface_hub is unrelated to missing HybridCache from transformers." + }, + { + "left": "issue:43992", + "right": "issue:44704", + "accept": false, + "reason": "UMT5Encoder weight-loading issue and AutoProcessor cached_file kwargs forwarding are separate loading bugs." + }, + { + "left": "issue:42907", + "right": "issue:44222", + "accept": false, + "reason": "Both involve saving, but one is dequantized Ministral/Devstral output and the other is FP8 MoE save_pretrained." + }, + { + "left": "issue:43278", + "right": "issue:44945", + "accept": false, + "reason": "Embedding dtype drift during evaluation is not the same as incorrect output from pipeline parallelism." + }, + { + "left": "issue:43122", + "right": "issue:44977", + "accept": false, + "reason": "Tokenizer-version behavior change is unrelated to a flash-attention generation failure." + }, + { + "left": "issue:43784", + "right": "issue:45446", + "accept": false, + "reason": "Import NameError in sentence-transformers setup is unrelated to a PyTorch version check bug in flex_attention." + }, + { + "left": "issue:38617", + "right": "issue:45278", + "accept": false, + "reason": "Generic 'many import errors' is too broad to be the same concrete bug as one specific missing import." + }, + { + "left": "issue:43937", + "right": "issue:44485", + "accept": false, + "reason": "GLM-5 generation config validation and GLM-5 RoPE implementation are different problems." 
+ }, + { + "left": "issue:42673", + "right": "issue:45072", + "accept": false, + "reason": "VRAM leak in multi-threaded loading and bfloat16 dtype mismatch in inference are unrelated." + }, + { + "left": "issue:42222", + "right": "issue:43441", + "accept": false, + "reason": "Vitpose breakage and Ministral-3 FlashAttention failure affect different models and different failure points." + }, + { + "left": "issue:22355", + "right": "issue:29127", + "accept": false, + "reason": "ONNX import failure and LayoutLMv3 error-message clarity are unrelated issues." + }, + { + "left": "issue:42371", + "right": "issue:43874", + "accept": false, + "reason": "TF32 API settings guidance is not the same bug as a missing image-processor method." + }, + { + "left": "issue:43064", + "right": "issue:43673", + "accept": false, + "reason": "Wrong optimizer states under FSDP2/PEFT training and missing cache in chunked_prefill are distinct bugs." + }, + { + "left": "issue:43116", + "right": "issue:43493", + "accept": false, + "reason": "Example-script multi-label output bug and SigLIP2 HF-vs-JAX discrepancy are different code paths." + }, + { + "left": "issue:43976", + "right": "issue:45446", + "accept": false, + "reason": "Python version compatibility breakage is unrelated to an AuxRequest import version-check bug." + }, + { + "left": "issue:44038", + "right": "issue:45478", + "accept": false, + "reason": "Both are Qwen MoE-related, but one is a VL-Moe loading issue and the other is a Qwen3.5 from_pretrained error." + }, + { + "left": "issue:44222", + "right": "issue:45478", + "accept": false, + "reason": "FP8 save_pretrained for MoE and Qwen3.5 from_pretrained are different operations and likely different defects." + }, + { + "left": "issue:44075", + "right": "issue:44936", + "accept": false, + "reason": "SGD args not used and trainer.evaluate failing after train are not the same underlying trainer bug." 
+ }, + { + "left": "issue:43116", + "right": "issue:43825", + "accept": false, + "reason": "A classification example returning empty results is unrelated to a pipeline() deprecation/error-message issue." + }, + { + "left": "issue:43937", + "right": "issue:45563", + "accept": false, + "reason": "Invalid GenerationConfig and a stale warning for num_return_sequences are different warning/validation paths." + }, + { + "left": "issue:45083", + "right": "issue:45446", + "accept": false, + "reason": "Qwen3_omni_moe helper behavior and flex_attention import version checking are unrelated." + }, + { + "left": "issue:33453", + "right": "issue:39401", + "accept": false, + "reason": "Tokenizer loading regression is too broad to be the same as a specific Qwen3 offset_mapping bug." + }, + { + "left": "issue:39401", + "right": "issue:44779", + "accept": false, + "reason": "Different tokenizer models and symptoms: Qwen3 offset mapping versus Deepseek tokenization drift." + }, + { + "left": "issue:43931", + "right": "issue:44945", + "accept": false, + "reason": "Model weight shape mismatch on load is not the same as incorrect outputs under pipeline parallelism." + }, + { + "left": "issue:44117", + "right": "issue:45588", + "accept": false, + "reason": "Tokenizer mapping fallback bug and flash_attention crash on s_aux=None are unrelated." + }, + { + "left": "issue:43827", + "right": "issue:45446", + "accept": false, + "reason": "Docs still referencing pipeline() and a PyTorch version-check bug do not share the same underlying code path." + }, + { + "left": "issue:43937", + "right": "issue:44977", + "accept": false, + "reason": "GLM-5 GenerationConfig invalidity and Qwen3.5 flash-attention generation failure are different model-specific bugs." + }, + { + "left": "issue:44291", + "right": "issue:45071", + "accept": false, + "reason": "init_empty_weights argument handling and PretrainedConfig type-checking are separate compatibility issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 38, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:38617", + "issue:39401", + "issue:41628", + "issue:42175", + "issue:42222", + "issue:42371", + "issue:42617", + "issue:42673", + "issue:43012", + "issue:43066", + "issue:43097", + "issue:43122", + "issue:43408", + "issue:43421", + "issue:43450", + "issue:43577", + "issue:43582", + "issue:43673", + "issue:43742", + "issue:43874", + "issue:43906", + "issue:43937", + "issue:43986", + "issue:44075", + "issue:44117", + "issue:44220", + "issue:44261", + "issue:44410", + "issue:44483", + "issue:44484", + "issue:44493", + "issue:44496", + "issue:44829", + "issue:44933", + "issue:45081", + "issue:45216", + "issue:45278", + "issue:45310", + "issue:45341", + "issue:45405", + "issue:45446", + "issue:45459", + "issue:45464", + "issue:45478", + "issue:45542", + "issue:45561", + "issue:45656" + ], + "soft_pairs": [ + "issue:45405|issue:45446", + "issue:45341|issue:45561", + "issue:43450|issue:43986", + "issue:42673|issue:43906", + "issue:44829|issue:45216", + "issue:44410|issue:45310", + "issue:38617|issue:41628", + "issue:39401|issue:43421", + "issue:44117|issue:45081", + "issue:44484|issue:45310", + "issue:42371|issue:44261", + "issue:42222|issue:45542", + "issue:43408|issue:43742", + "issue:42673|issue:43582", + "issue:43937|issue:44496", + "issue:42175|issue:42222", + "issue:42673|issue:43577", + "issue:38617|issue:44220", + "issue:44933|issue:45278", + "issue:42617|issue:45341", + "issue:43012|issue:43577", + "issue:44075|issue:45656", + "issue:44483|issue:45464", + 
"issue:43066|issue:43122", + "issue:43673|issue:45341", + "issue:44410|issue:45478", + "issue:43874|issue:44261", + "issue:44484|issue:45478", + "issue:43097|issue:44493", + "issue:42222|issue:45459" + ], + "prepared_review_unit_hash": "74f5b387c4bfc2e2c47879f5eb8659ef2b2c83beb01616c6aaeeeaf2d6c87b33", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 30, + "serialized_chars": 35922, + "estimated_input_tokens": 8981, + "estimated_eval_tokens": 18218 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is mostly a loose collection of unrelated Transformers issues. The soft pairs are thematically similar in a few cases, but none are strong enough to be treated as the same underlying bug/change.", + "confidence": 0.79, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:45278 is the broadest representative symptom in this set: a generic post-upgrade import regression affecting many symbols.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45405", + "right": "issue:45446", + "accept": false, + "reason": "Different problems: a PEFT version pin issue vs a PyTorch version check bug in flex_attention." + }, + { + "left": "issue:45341", + "right": "issue:45561", + "accept": false, + "reason": "Same file area, but different bugs: a testing_utils bug vs an xdist race on captured_info.txt." + }, + { + "left": "issue:43450", + "right": "issue:43986", + "accept": false, + "reason": "One is batched video processor shape handling; the other is an AutoProcessor crash without torchvision." 
+ }, + { + "left": "issue:42673", + "right": "issue:43906", + "accept": false, + "reason": "No clear same bug: a Qwen3 VRAM leak vs an isolated reproduction of another issue." + }, + { + "left": "issue:44829", + "right": "issue:45216", + "accept": false, + "reason": "Different code paths: flash_attention_3 training degeneration vs a Qwen3.5 save_pretrained checkpoint regression." + }, + { + "left": "issue:44410", + "right": "issue:45310", + "accept": false, + "reason": "Different Qwen variants and failures: missing projections in qwen3next vs from_pretrained error in Qwen3.5 MoE." + }, + { + "left": "issue:38617", + "right": "issue:41628", + "accept": false, + "reason": "Both are import errors, but for different missing symbols and different code paths." + }, + { + "left": "issue:39401", + "right": "issue:43421", + "accept": false, + "reason": "Tokenizer offset mapping bug vs runtime special-token/post-processor sync bug; not the same defect." + }, + { + "left": "issue:44117", + "right": "issue:45081", + "accept": false, + "reason": "Different tokenizer failures: a None mapping assumption vs a Mistral regex patch crash." + }, + { + "left": "issue:44484", + "right": "issue:45310", + "accept": false, + "reason": "Unrelated: save_pretrained shard-size question vs a Qwen3.5 MoE from_pretrained error." + }, + { + "left": "issue:42371", + "right": "issue:44261", + "accept": false, + "reason": "TF32 API guidance is unrelated to the MLA q_a_layernorm precision/config issue." + }, + { + "left": "issue:42222", + "right": "issue:45542", + "accept": false, + "reason": "Different backend problems: vitpose model breakage vs TensorFlow missing from a torch-only install." + }, + { + "left": "issue:43408", + "right": "issue:43742", + "accept": false, + "reason": "Different model-loading issues: sam3_video/sam3_tracker mismatch vs MobileLLM key error." 
+ }, + { + "left": "issue:42175", + "right": "issue:42222", + "accept": false, + "reason": "Backend packaging issue vs a broken vitpose model family; not the same bug." + }, + { + "left": "issue:42673", + "right": "issue:43582", + "accept": false, + "reason": "Qwen3 VRAM leak and Apple Silicon TypeError in caching_allocator_warmup are unrelated." + }, + { + "left": "issue:43937", + "right": "issue:44496", + "accept": false, + "reason": "GenerationConfig validation failure vs unrecognized model/config.json error." + }, + { + "left": "issue:42673", + "right": "issue:43577", + "accept": false, + "reason": "VRAM leak in dataloader threads is not the same as dtype remaining float32 on BLIP2 load." + }, + { + "left": "issue:38617", + "right": "issue:44220", + "accept": false, + "reason": "An import-name regression is unrelated to _torch_extract_fbank_features() behavior." + }, + { + "left": "issue:44933", + "right": "issue:45278", + "accept": false, + "reason": "A specific missing import is not enough to merge with a broader many-import-errors regression." + }, + { + "left": "issue:42617", + "right": "issue:45341", + "accept": false, + "reason": "Not the same area or bug: 3d_parallel.py failure vs a small testing_utils issue." + }, + { + "left": "issue:43012", + "right": "issue:43577", + "accept": false, + "reason": "Both touch dtype/precision, but one is a warning during compile and the other is incorrect model dtypes on load." + }, + { + "left": "issue:44075", + "right": "issue:45656", + "accept": false, + "reason": "Different optimizer defects: SGD args ignored vs optimizer.step being called twice with deepspeed." + }, + { + "left": "issue:44483", + "right": "issue:45464", + "accept": false, + "reason": "A chat/completions request rejection is not the same as a streaming inference failure on Qwen3.5-0.8B." 
+ }, + { + "left": "issue:43066", + "right": "issue:43122", + "accept": false, + "reason": "Both tokenizer regressions, but one is a decoder-type mismatch and the other is changed tokenization output." + }, + { + "left": "issue:43673", + "right": "issue:45341", + "accept": false, + "reason": "GenerationMixin cache regression is unrelated to a testing_utils bug." + }, + { + "left": "issue:44410", + "right": "issue:45478", + "accept": false, + "reason": "Different Qwen3.5/Qwen3-next failures; no evidence they share one concrete code path." + }, + { + "left": "issue:43874", + "right": "issue:44261", + "accept": false, + "reason": "Image-patch counting bug vs MLA rms_norm_eps precision issue; unrelated." + }, + { + "left": "issue:44484", + "right": "issue:45478", + "accept": false, + "reason": "save_pretrained shard-size discussion is unrelated to a Qwen3.5 MoE loading error." + }, + { + "left": "issue:43097", + "right": "issue:44493", + "accept": false, + "reason": "Removed tie_embeddings API vs unexpected position-id key warnings are different regressions." + }, + { + "left": "issue:42222", + "right": "issue:45459", + "accept": false, + "reason": "Vitpose model breakage and protobuf-related tokenizer error hiding are unrelated defects." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 39, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:38617", + "issue:41628", + "issue:42757", + "issue:43012", + "issue:43064", + "issue:43066", + "issue:43097", + "issue:43335", + "issue:43352", + "issue:43408", + "issue:43441", + "issue:43452", + "issue:43502", + "issue:43531", + "issue:43824", + "issue:43825", + "issue:43873", + "issue:43874", + "issue:43906", + "issue:43931", + "issue:43976", + "issue:43986", + "issue:43992", + "issue:44038", + "issue:44062", + "issue:44117", + "issue:44261", + "issue:44263", + "issue:44315", + "issue:44351", + "issue:44368", + "issue:44380", + "issue:44410", + "issue:44484", + "issue:44485", + "issue:44655", + "issue:44704", + "issue:44829", + "issue:44987", + "issue:45003", + "issue:45071", + "issue:45103", + "issue:45245", + "issue:45341", + "issue:45372", + "issue:45375", + "issue:45542", + "issue:45588" + ], + "soft_pairs": [ + "issue:44263|issue:44485", + "issue:43441|issue:44829", + "issue:43012|issue:43408", + "issue:41628|issue:45071", + "issue:45071|issue:45542", + "issue:43931|issue:44410", + "issue:43986|issue:44484", + "issue:45003|issue:45341", + "issue:43931|issue:44038", + "issue:43986|issue:45341", + "issue:43335|issue:45071", + "issue:44117|issue:44987", + "issue:43097|issue:43452", + "issue:43874|issue:44263", + "issue:43408|issue:44368", + "issue:43976|issue:45071", + "issue:43531|issue:45341", + "issue:44829|issue:45245", + "issue:43986|issue:45372", + "issue:38617|issue:42757", + "issue:44261|issue:44485", + "issue:43064|issue:44315", + "issue:44410|issue:45375", + 
"issue:44038|issue:44410", + "issue:38617|issue:44351", + "issue:44062|issue:44117", + "issue:43097|issue:45588", + "issue:44704|issue:45103", + "issue:43502|issue:43992", + "issue:43986|issue:44117", + "issue:43824|issue:43986", + "issue:43097|issue:43873", + "issue:43066|issue:43906", + "issue:43352|issue:44380", + "issue:43825|issue:44655", + "issue:43992|issue:44484", + "issue:38617|issue:43097", + "issue:43012|issue:44368" + ], + "prepared_review_unit_hash": "c5b8db053f72d16ccd60ce79e1893474d2a5304d9ebdca737ac0f5777d35d1fe", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 38, + "serialized_chars": 39092, + "estimated_input_tokens": 9773, + "estimated_eval_tokens": 19802 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are mostly distinct regressions and import/runtime issues across different models, backends, and utilities; I did not find any soft-edge pair that looks like the same underlying bug or change.", + "confidence": 0.93, + "canonical_issue_reason": "issue:43502 is the most broadly relevant and well-documented regression in the set, with substantial discussion and references around a clear loading behavior problem.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43502 has the clearest reproduction and strongest user impact, making it the best representative issue despite the cluster being mostly unrelated.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44263", + "right": "issue:44485", + "accept": false, + "reason": "Different GLM-related problems: one is a torch.split return-value issue, the other is a RoPE implementation question." 
+ }, + { + "left": "issue:43441", + "right": "issue:44829", + "accept": false, + "reason": "Both involve attention backends, but they affect different models and failure modes; not the same code-path bug." + }, + { + "left": "issue:43012", + "right": "issue:43408", + "accept": false, + "reason": "Unrelated symptoms: a bfloat16 precision warning versus a wrong model-type warning during SAM loading." + }, + { + "left": "issue:41628", + "right": "issue:45071", + "accept": false, + "reason": "An import failure for AutoImageProcessor is unrelated to PretrainedConfig type-checking behavior." + }, + { + "left": "issue:45071", + "right": "issue:45542", + "accept": false, + "reason": "One is a type-checking regression; the other is a TensorFlow/backend detection error." + }, + { + "left": "issue:43931", + "right": "issue:44410", + "accept": false, + "reason": "Different Qwen model families and different loading failures: shape mismatch versus missing projections." + }, + { + "left": "issue:43986", + "right": "issue:44484", + "accept": false, + "reason": "AutoProcessor/torchvision crash is unrelated to save_pretrained shard-size behavior." + }, + { + "left": "issue:45003", + "right": "issue:45341", + "accept": false, + "reason": "No evidence these are the same bug; one concerns sys.modules access, the other a testing utility issue." + }, + { + "left": "issue:43931", + "right": "issue:44038", + "accept": false, + "reason": "Both are Qwen-related, but the concrete failures and affected model variants differ." + }, + { + "left": "issue:43986", + "right": "issue:45341", + "accept": false, + "reason": "Different subsystems and different failure classes; not a duplicate." + }, + { + "left": "issue:43335", + "right": "issue:45071", + "accept": false, + "reason": "A SwitchTransformers config bug is unrelated to PretrainedConfig typing changes." 
+ }, + { + "left": "issue:44117", + "right": "issue:44987", + "accept": false, + "reason": "Tokenizer mapping None-handling and a physical-intelligence model load failure are not the same underlying issue." + }, + { + "left": "issue:43097", + "right": "issue:43452", + "accept": false, + "reason": "One is a removed config option warning, the other is a gguf tokenizer/model loading breakage." + }, + { + "left": "issue:43874", + "right": "issue:44263", + "accept": false, + "reason": "Different code paths: multimodal token counting versus GlmMoeDsaIndexer splitting behavior." + }, + { + "left": "issue:43408", + "right": "issue:44368", + "accept": false, + "reason": "Both are warning-related, but they concern different model families and different configuration checks." + }, + { + "left": "issue:43976", + "right": "issue:45071", + "accept": false, + "reason": "Python-version compatibility and config type-checking are unrelated regressions." + }, + { + "left": "issue:43531", + "right": "issue:45341", + "accept": false, + "reason": "A sliding_window model bug is unrelated to a testing utility issue." + }, + { + "left": "issue:44829", + "right": "issue:45245", + "accept": false, + "reason": "FlashAttention training degeneration and a category-count runtime limit are unrelated." + }, + { + "left": "issue:43986", + "right": "issue:45372", + "accept": false, + "reason": "Missing torchvision for video models is unrelated to a mistral_common import breakage." + }, + { + "left": "issue:38617", + "right": "issue:42757", + "accept": false, + "reason": "Both are import errors, but they come from different packages and different missing symbols." + }, + { + "left": "issue:44261", + "right": "issue:44485", + "accept": false, + "reason": "A missing rms_norm_eps config field is not the same as a RoPE implementation discussion." 
+ }, + { + "left": "issue:43064", + "right": "issue:44315", + "accept": false, + "reason": "These are different training-stack issues: FSDP2/optimizer-state corruption versus Liger Kernel application." + }, + { + "left": "issue:44410", + "right": "issue:45375", + "accept": false, + "reason": "Different Qwen vision config/model issues with different missing fields and failure modes." + }, + { + "left": "issue:44038", + "right": "issue:44410", + "accept": false, + "reason": "Both mention Qwen3 variants, but one is a general v5/moe bug and the other is missing layer projections." + }, + { + "left": "issue:38617", + "right": "issue:44351", + "accept": false, + "reason": "Different import errors involving different missing names and modules." + }, + { + "left": "issue:44062", + "right": "issue:44117", + "accept": false, + "reason": "A token construction TypeError is unrelated to tokenizer mapping assumptions." + }, + { + "left": "issue:43097", + "right": "issue:45588", + "accept": false, + "reason": "A removed embedding-tie option warning is unrelated to flash_attention sink handling." + }, + { + "left": "issue:44704", + "right": "issue:45103", + "accept": false, + "reason": "Both touch argument handling, but one is AutoProcessor kwargs propagation and the other is auto_docstring annotation parsing." + }, + { + "left": "issue:43502", + "right": "issue:43992", + "accept": false, + "reason": "Local-files-only network requests and a missing embed_tokens weight are different loading bugs." + }, + { + "left": "issue:43986", + "right": "issue:44117", + "accept": false, + "reason": "Different loader paths and error causes; not the same bug." + }, + { + "left": "issue:43824", + "right": "issue:43986", + "accept": false, + "reason": "An import error for a model class is not the same as a torchvision-dependent AutoProcessor crash." 
+ }, + { + "left": "issue:43097", + "right": "issue:43873", + "accept": false, + "reason": "A removed config option warning is unrelated to quantization/offloading behavior." + }, + { + "left": "issue:43066", + "right": "issue:43906", + "accept": false, + "reason": "Wrong tokenizer decoder type and an isolated reproduction of another issue are not the same concrete problem." + }, + { + "left": "issue:43352", + "right": "issue:44380", + "accept": false, + "reason": "FlashAttention support gating for Nemotron and GPT2 attention scaling under SDPA/FlashAttention are different backend bugs." + }, + { + "left": "issue:43825", + "right": "issue:44655", + "accept": false, + "reason": "Pipeline translation-task messaging and Pipeline save_pretrained support are unrelated." + }, + { + "left": "issue:43992", + "right": "issue:44484", + "accept": false, + "reason": "A missing encoder weight during load is unrelated to the default shard-size choice in save_pretrained." + }, + { + "left": "issue:38617", + "right": "issue:43097", + "accept": false, + "reason": "A missing configuration import and a removed tie-embeddings option are different issues." + }, + { + "left": "issue:43012", + "right": "issue:44368", + "accept": false, + "reason": "A precision warning during compilation is unrelated to a tie_word_embeddings warning during fine-tuning." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 40, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:36010", + "issue:36683", + "issue:38617", + "issue:42757", + "issue:42886", + "issue:42907", + "issue:42994", + "issue:43012", + "issue:43064", + "issue:43066", + "issue:43208", + "issue:43299", + "issue:43352", + "issue:43381", + "issue:43475", + "issue:43502", + "issue:43525", + "issue:43526", + "issue:43550", + "issue:43582", + "issue:43618", + "issue:43756", + "issue:43761", + "issue:43824", + "issue:43867", + "issue:43901", + "issue:43937", + "issue:43976", + "issue:44038", + "issue:44077", + "issue:44188", + "issue:44220", + "issue:44230", + "issue:44263", + "issue:44380", + "issue:44483", + "issue:44485", + "issue:44492", + "issue:44514", + "issue:44655", + "issue:44683", + "issue:44938", + "issue:45216", + "issue:45290", + "issue:45381", + "issue:45440", + "issue:45479", + "issue:45593" + ], + "soft_pairs": [ + "issue:43012|issue:43550", + "issue:42886|issue:43381", + "issue:43208|issue:43550", + "issue:42907|issue:44655", + "issue:44380|issue:45479", + "issue:43352|issue:43976", + "issue:43064|issue:44380", + "issue:44483|issue:44514", + "issue:43618|issue:44380", + "issue:43208|issue:43526", + "issue:36683|issue:43582", + "issue:43618|issue:43761", + "issue:42757|issue:43824", + "issue:43012|issue:44683", + "issue:44038|issue:44230", + "issue:44483|issue:45290", + "issue:43299|issue:43867", + "issue:44483|issue:45381", + "issue:38617|issue:43502", + "issue:43756|issue:44485", + "issue:43901|issue:44655", + "issue:36683|issue:43475", + "issue:43867|issue:45216", + 
"issue:43352|issue:43937", + "issue:42994|issue:44655", + "issue:44220|issue:44655", + "issue:44077|issue:44380", + "issue:36683|issue:44077", + "issue:43066|issue:45381", + "issue:44263|issue:45440", + "issue:43976|issue:44938", + "issue:36010|issue:42757", + "issue:36683|issue:43525", + "issue:43064|issue:44492", + "issue:43352|issue:45593", + "issue:43618|issue:45216", + "issue:44188|issue:44380" + ], + "prepared_review_unit_hash": "4a9b8094710edd79e17020882d0edee5a409009903cd0ceeaecebecf7f9d0f66", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 37, + "serialized_chars": 38401, + "estimated_input_tokens": 9601, + "estimated_eval_tokens": 19458 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is highly heterogeneous: most items are unrelated regressions, import errors, model-specific bugs, and API/documentation issues. The soft-similarity links mostly reflect shared vocabulary rather than the same underlying defect.", + "confidence": 0.93, + "canonical_issue_reason": "No single canonical issue fits this cluster; the items are too diverse to represent one duplicate family.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a strong global representative because there is no single underlying bug or change spanning the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43012", + "right": "issue:43550", + "accept": false, + "reason": "Different problems: a PyTorch warning during compile vs a Bamba torch.compile+SDPA failure." 
+ }, + { + "left": "issue:42886", + "right": "issue:43381", + "accept": false, + "reason": "Both mention runtime behavior, but one is offline cache loading and the other is gradient checkpointing in eval mode." + }, + { + "left": "issue:43208", + "right": "issue:43550", + "accept": false, + "reason": "Unrelated model/training bugs; only the broad topic of torch.compile overlaps." + }, + { + "left": "issue:42907", + "right": "issue:44655", + "accept": false, + "reason": "Both involve saving, but one is dequantized model saving and the other is saving Pipeline objects." + }, + { + "left": "issue:44380", + "right": "issue:45479", + "accept": false, + "reason": "Different code paths: attention scaling backend behavior vs sequence-classification zero-loss bug." + }, + { + "left": "issue:43352", + "right": "issue:43976", + "accept": false, + "reason": "A model backend support issue is not the same as a Python-version compatibility failure." + }, + { + "left": "issue:43064", + "right": "issue:44380", + "accept": false, + "reason": "FSDP2 optimizer state corruption is unrelated to GPT2 attention scaling under SDPA/FlashAttention." + }, + { + "left": "issue:44483", + "right": "issue:44514", + "accept": false, + "reason": "Both are chat-related, but one is `/v1/chat/completions` request acceptance and the other is apply_chat_template batching/padding crash." + }, + { + "left": "issue:43618", + "right": "issue:44380", + "accept": false, + "reason": "CLIP attentions export is unrelated to GPT2 attention scaling behavior." + }, + { + "left": "issue:43208", + "right": "issue:43526", + "accept": false, + "reason": "Different xLSTM training bugs vs a BEiT image processor label-reduction bug." + }, + { + "left": "issue:36683", + "right": "issue:43582", + "accept": false, + "reason": "Gemma3Config missing vocab_size is unrelated to an Apple Silicon TypeError in caching_allocator_warmup." 
+ }, + { + "left": "issue:43618", + "right": "issue:43761", + "accept": false, + "reason": "Both touch CLIP, but one is missing attentions assignment and the other is a CLIPVisionModel hidden_states regression." + }, + { + "left": "issue:42757", + "right": "issue:43824", + "accept": false, + "reason": "Both are import errors, but for different missing symbols in different modules." + }, + { + "left": "issue:43012", + "right": "issue:44683", + "accept": false, + "reason": "A warning about bfloat16 precision is not the same as flex_attention compilation failing on torch>=2.9." + }, + { + "left": "issue:44038", + "right": "issue:44230", + "accept": false, + "reason": "Same Qwen3-VL family, but one is model loading breakage and the other is fp8 inference support." + }, + { + "left": "issue:44483", + "right": "issue:45290", + "accept": false, + "reason": "Both involve chat templates/APIs, but they are different failure modes with different inputs." + }, + { + "left": "issue:43299", + "right": "issue:43867", + "accept": false, + "reason": "Qwen3VL loading failure is not the same as a state_dict sorting load error." + }, + { + "left": "issue:44483", + "right": "issue:45381", + "accept": false, + "reason": "Chat/completions request handling is unrelated to Qwen2.5-VL video vision_position_ids placement." + }, + { + "left": "issue:38617", + "right": "issue:43502", + "accept": false, + "reason": "Importing layer_type_validation and making API requests despite local_files_only are distinct issues." + }, + { + "left": "issue:43756", + "right": "issue:44485", + "accept": false, + "reason": "Different model families and different RoPE concerns; not the same bug." + }, + { + "left": "issue:43901", + "right": "issue:44655", + "accept": false, + "reason": "Documentation mismatch for return_all_scores is unrelated to pipeline save_pretrained failure." 
+ }, + { + "left": "issue:36683", + "right": "issue:43475", + "accept": false, + "reason": "Different missing attributes in different model/config paths." + }, + { + "left": "issue:43867", + "right": "issue:45216", + "accept": false, + "reason": "General load-model error from state_dict ordering is not the same as a Qwen3.5 save_pretrained regression." + }, + { + "left": "issue:43352", + "right": "issue:43937", + "accept": false, + "reason": "Model support for Flash Attention 2 and invalid GenerationConfig are unrelated." + }, + { + "left": "issue:42994", + "right": "issue:44655", + "accept": false, + "reason": "Both are save-related, but one is quantized model saving and the other is pipeline serialization." + }, + { + "left": "issue:44220", + "right": "issue:44655", + "accept": false, + "reason": "Audio feature extraction bug is unrelated to pipeline save_pretrained." + }, + { + "left": "issue:44077", + "right": "issue:44380", + "accept": false, + "reason": "patchtsmixer post_init validation and GPT2 attention scaling are different code paths." + }, + { + "left": "issue:36683", + "right": "issue:44077", + "accept": false, + "reason": "Gemma3Config vocab_size and patchtsmixer post_init are unrelated model-specific issues." + }, + { + "left": "issue:43066", + "right": "issue:45381", + "accept": false, + "reason": "Tokenizer decoder type regression is not the same as Qwen2.5-VL video position ids." + }, + { + "left": "issue:44263", + "right": "issue:45440", + "accept": false, + "reason": "Different MoE/architecture bugs; no shared concrete failure." + }, + { + "left": "issue:43976", + "right": "issue:44938", + "accept": false, + "reason": "Python-version compatibility for Transformers 5.1.0 is unrelated to a Python 3.14 load failure." + }, + { + "left": "issue:36010", + "right": "issue:42757", + "accept": false, + "reason": "Both are import errors, but they concern different missing names from different packages." 
+ }, + { + "left": "issue:36683", + "right": "issue:43525", + "accept": false, + "reason": "Different missing-config-attribute bugs in different model families." + }, + { + "left": "issue:43064", + "right": "issue:44492", + "accept": false, + "reason": "FSDP optimizer state corruption has nothing to do with a cache-strategy typo." + }, + { + "left": "issue:43352", + "right": "issue:45593", + "accept": false, + "reason": "Flash Attention support for Nemotron is unrelated to D-FINE auxiliary-loss handling." + }, + { + "left": "issue:43618", + "right": "issue:45216", + "accept": false, + "reason": "CLIP attentions regression is unrelated to Qwen3.5 save_pretrained checkpoint correctness." + }, + { + "left": "issue:44188", + "right": "issue:44380", + "accept": false, + "reason": "Both involve attention/backends, but one is kernel divergence under torch.compile and the other is GPT2 scaling being ignored." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 41, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:28282", + "issue:33453", + "issue:36296", + "issue:36683", + "issue:41669", + "issue:42548", + "issue:42890", + "issue:43010", + "issue:43064", + "issue:43352", + "issue:43502", + "issue:43575", + "issue:43618", + "issue:43723", + "issue:43867", + "issue:43931", + "issue:43976", + "issue:43992", + "issue:44062", + "issue:44075", + "issue:44077", + "issue:44112", + "issue:44222", + "issue:44242", + "issue:44246", + "issue:44351", + "issue:44355", + "issue:44360", + "issue:44387", + "issue:44484", + "issue:44556", + "issue:44855", + "issue:44857", + "issue:44938", + "issue:44964", + 
"issue:45071", + "issue:45125", + "issue:45278", + "issue:45310", + "issue:45341", + "issue:45357", + "issue:45375", + "issue:45381", + "issue:45440", + "issue:45478", + "issue:45561", + "issue:45588", + "issue:45600" + ], + "soft_pairs": [ + "issue:36683|issue:44242", + "issue:43352|issue:43931", + "issue:43010|issue:45278", + "issue:43992|issue:45310", + "issue:41669|issue:44246", + "issue:43867|issue:45375", + "issue:36683|issue:43064", + "issue:44355|issue:44855", + "issue:42890|issue:45561", + "issue:44938|issue:45071", + "issue:43867|issue:45357", + "issue:43992|issue:45478", + "issue:43010|issue:43976", + "issue:44938|issue:45381", + "issue:44360|issue:45440", + "issue:43618|issue:44077", + "issue:36296|issue:43575", + "issue:43867|issue:44484", + "issue:43618|issue:44857", + "issue:43618|issue:45588", + "issue:28282|issue:33453", + "issue:42890|issue:44964", + "issue:42890|issue:44112", + "issue:42890|issue:45600", + "issue:43502|issue:44062", + "issue:43723|issue:44556", + "issue:43010|issue:44075", + "issue:44222|issue:45125", + "issue:42890|issue:45341", + "issue:44387|issue:44556", + "issue:42548|issue:44351" + ], + "prepared_review_unit_hash": "ab5815d125c51baa584e114c3312573102be1dd2343d004b954970af066b0244", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 31, + "serialized_chars": 36522, + "estimated_input_tokens": 9131, + "estimated_eval_tokens": 18518 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This looks like a loose cluster of Transformers v5 regression reports, mostly import/loading/config/model-specific failures. 
The soft pairs share broad themes, but they do not appear to be the same underlying bug, so I would reject them all as duplicates.", + "confidence": 0.9, + "canonical_issue_reason": "issue:45278 is the best umbrella issue: it explicitly frames the problem as many import errors after upgrading from 4.57.0 to 5.5.0, which matches the broad cluster theme better than the narrower, model-specific reports.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45278 is the most representative and general of the set; it captures the version-upgrade/import-regression theme without overfitting to one model or subsystem.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:36683", + "right": "issue:44242", + "accept": false, + "reason": "Gemma3Config vocab_size missing and MoE load-balancing loss omission are unrelated bugs in different code paths." + }, + { + "left": "issue:43352", + "right": "issue:43931", + "accept": false, + "reason": "Flash Attention 2 support error for NemotronH and Qwen3-VL weight-shape mismatch are distinct model-specific load failures." + }, + { + "left": "issue:43010", + "right": "issue:45278", + "accept": false, + "reason": "Missing no_grad on cache/layer update is a functional implementation bug, not the same as broad import errors after upgrade." + }, + { + "left": "issue:43992", + "right": "issue:45310", + "accept": false, + "reason": "UMT5Encoder missing embed_tokens.weight and Qwen3.5 MoE from_pretrained failure are different checkpoint-loading issues." + }, + { + "left": "issue:41669", + "right": "issue:44246", + "accept": false, + "reason": "Import * slowdown and occasional slow import are both import-performance topics, but the underlying causes are not shown to be the same." + }, + { + "left": "issue:43867", + "right": "issue:45375", + "accept": false, + "reason": "State_dict sorting load error and missing deepstack_visual_indexes are separate config/state serialization problems." 
+ }, + { + "left": "issue:36683", + "right": "issue:43064", + "accept": false, + "reason": "Gemma3Config attribute missing and wrong optimizer states under FSDP2/PEFT are unrelated training/config bugs." + }, + { + "left": "issue:44355", + "right": "issue:44855", + "accept": false, + "reason": "Compiled Python file errors and PyTorch script parsing on Python 3.13 are different import/runtime failures." + }, + { + "left": "issue:42890", + "right": "issue:45561", + "accept": false, + "reason": "Missing set_seed in a flaky model test is unrelated to xdist races on captured_info.txt." + }, + { + "left": "issue:44938", + "right": "issue:45071", + "accept": false, + "reason": "Python 3.14 load failure and PretrainedConfig type-checking regression are both version-related, but not the same defect." + }, + { + "left": "issue:43867", + "right": "issue:45357", + "accept": false, + "reason": "State_dict sorting load error and incorrect Qwen3.5 save_pretrained visual keys are separate serialization regressions." + }, + { + "left": "issue:43992", + "right": "issue:45478", + "accept": false, + "reason": "Missing embed_tokens.weight in UMT5Encoder and Qwen3.5 MoE from_pretrained errors involve different models and failure modes." + }, + { + "left": "issue:43010", + "right": "issue:43976", + "accept": false, + "reason": "no_grad decoration on cache/layer update and Python 3.9/3.10 support issue are unrelated." + }, + { + "left": "issue:44938", + "right": "issue:45381", + "accept": false, + "reason": "Python 3.14 import failure and Qwen2.5-VL video vision_position_ids bug are different problems." + }, + { + "left": "issue:44360", + "right": "issue:45440", + "accept": false, + "reason": "DSA indexer ReLU complaint and DeepSeekV3 implementation divergence are not the same code-path defect." 
+ }, + { + "left": "issue:43618", + "right": "issue:44077", + "accept": false, + "reason": "CLIPOutput attentions field regression and patchtsmixer post_init policy change are unrelated model API issues." + }, + { + "left": "issue:36296", + "right": "issue:43575", + "accept": false, + "reason": "Tensor parallel training bug and Qwen2-57B-A14B-Instruct TP OOM are both TP-related, but not the same concrete failure." + }, + { + "left": "issue:43867", + "right": "issue:44484", + "accept": false, + "reason": "State_dict sorting load error and max_shard_size default question are different save/load behaviors." + }, + { + "left": "issue:43618", + "right": "issue:44857", + "accept": false, + "reason": "Missing CLIPOutput attentions assignment and LwDetrImageLoss AMP crash are separate bugs." + }, + { + "left": "issue:43618", + "right": "issue:45588", + "accept": false, + "reason": "CLIPOutput attentions regression and flash_attention.py crash on s_aux=None are unrelated." + }, + { + "left": "issue:28282", + "right": "issue:33453", + "accept": false, + "reason": "PyTorch missing ImportError and tokenizer loading regression are both import/load related only at a high level; the underlying causes differ." + }, + { + "left": "issue:42890", + "right": "issue:44964", + "accept": false, + "reason": "Flaky SamHQ test and Phi-4-multimodal-instruct loading error are unrelated." + }, + { + "left": "issue:42890", + "right": "issue:44112", + "accept": false, + "reason": "Seed-related test flakiness and GraniteSpeech stale device override failure are distinct CI/test bugs." + }, + { + "left": "issue:42890", + "right": "issue:45600", + "accept": false, + "reason": "Missing set_seed in tests and removed Sam3LiteText configs breaking auto_mappings are not the same defect." + }, + { + "left": "issue:43502", + "right": "issue:44062", + "accept": false, + "reason": "local_files_only still making API requests and AddedToken duplicate special kwarg are different networking/tokenizer bugs." 
+ }, + { + "left": "issue:43723", + "right": "issue:44556", + "accept": false, + "reason": "Tokenizer loading regression in v5 and checkpoint reload failure after upgrading are broader version regressions, but not the same issue." + }, + { + "left": "issue:43010", + "right": "issue:44075", + "accept": false, + "reason": "Cache/layer update no_grad bug and unused SGD args are separate optimizer/training issues." + }, + { + "left": "issue:44222", + "right": "issue:45125", + "accept": false, + "reason": "FP8 save_pretrained MoE issue and missing _tp_plan for Qwen3_5Moe are different model infrastructure problems." + }, + { + "left": "issue:42890", + "right": "issue:45341", + "accept": false, + "reason": "Flaky missing set_seed tests and a bug in testing_utils.py are related to test infrastructure, but not the same concrete bug." + }, + { + "left": "issue:44387", + "right": "issue:44556", + "accept": false, + "reason": "Int4 quantization memory regression and reload failure after upgrading are different runtime regressions." + }, + { + "left": "issue:42548", + "right": "issue:44351", + "accept": false, + "reason": "cannot import PreTrainedModel and cannot import HybridCache are separate symbol/export regressions." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 42, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:30333", + "issue:30990", + "issue:31795", + "issue:33453", + "issue:34689", + "issue:36296", + "issue:38617", + "issue:41628", + "issue:42548", + "issue:42757", + "issue:42831", + "issue:43010", + "issue:43065", + "issue:43232", + "issue:43299", + "issue:43502", + "issue:43526", + "issue:43576", + "issue:43673", + "issue:43824", + "issue:43827", + "issue:43874", + "issue:43901", + "issue:44222", + "issue:44230", + "issue:44261", + "issue:44273", + "issue:44315", + "issue:44351", + "issue:44355", + "issue:44360", + "issue:44368", + "issue:44485", + "issue:44492", + "issue:44556", + "issue:44623", + "issue:44655", + "issue:44749", + "issue:44908", + "issue:44961", + "issue:45003", + "issue:45071", + "issue:45125", + "issue:45230", + "issue:45375", + "issue:45468", + "issue:45478", + "issue:45542" + ], + "soft_pairs": [ + "issue:43010|issue:43065", + "issue:45125|issue:45478", + "issue:43010|issue:43232", + "issue:43502|issue:44485", + "issue:45125|issue:45375", + "issue:30990|issue:31795", + "issue:44355|issue:44749", + "issue:30333|issue:33453", + "issue:44315|issue:44360", + "issue:43010|issue:44368", + "issue:43502|issue:44222", + "issue:43502|issue:44961", + "issue:43299|issue:44556", + "issue:44355|issue:44655", + "issue:44222|issue:44230", + "issue:42831|issue:43874", + "issue:30333|issue:36296", + "issue:44355|issue:45071", + "issue:33453|issue:34689", + "issue:42757|issue:43576", + "issue:43827|issue:44961", + "issue:33453|issue:36296", + "issue:43576|issue:45071", + 
"issue:43576|issue:43824", + "issue:43673|issue:44492", + "issue:44961|issue:45468", + "issue:38617|issue:43576", + "issue:31795|issue:38617", + "issue:43576|issue:44351", + "issue:41628|issue:42548", + "issue:44908|issue:44961", + "issue:44485|issue:44961", + "issue:36296|issue:43901", + "issue:41628|issue:44355", + "issue:36296|issue:43526", + "issue:44261|issue:44360", + "issue:44273|issue:45003", + "issue:44623|issue:45230", + "issue:44273|issue:45542", + "issue:43576|issue:44556" + ], + "prepared_review_unit_hash": "bb92b5a8b9dde4bf604b9d99a07d9b185c896de88e8536caa830cbeeb24409d4", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 40, + "serialized_chars": 39027, + "estimated_input_tokens": 9757, + "estimated_eval_tokens": 19770 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is broad and mostly consists of unrelated Transformers v5 regressions, import errors, and model-specific bugs. 
I did not find any soft pair that clearly describes the same underlying bug/change closely enough to merge.", + "confidence": 0.92, + "canonical_issue_reason": "issue:42548 is the broadest, most central user-facing regression here: a core `PreTrainedModel` import failure with active discussion and strong overlap with the cluster\u2019s repeated import/load breakage theme.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:42548 best represents the cluster because it is a high-impact, general import regression affecting core APIs rather than a narrow model-specific edge case.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43010", + "right": "issue:43065", + "accept": false, + "reason": "Different bugs: cache/layer `update()` no_grad decoration vs a dummy `nn.Conv2d` in Sam3PixelDecoder." + }, + { + "left": "issue:45125", + "right": "issue:45478", + "accept": false, + "reason": "Both involve Qwen3.5 MoE, but one is missing `_tp_plan` and the other is a broader `from_pretrained` failure; not clearly the same fix." + }, + { + "left": "issue:43010", + "right": "issue:43232", + "accept": false, + "reason": "Generation/cache-related, but the concrete failure modes and code paths differ." + }, + { + "left": "issue:43502", + "right": "issue:44485", + "accept": false, + "reason": "Unrelated: local file access/network requests vs GLM-5 RoPE implementation." + }, + { + "left": "issue:45125", + "right": "issue:45375", + "accept": false, + "reason": "Both are Qwen3.5 MoE-related, but one is tensor-parallel plan support and the other is a config field silently dropped; different underlying issues." + }, + { + "left": "issue:30990", + "right": "issue:31795", + "accept": false, + "reason": "Sentence Transformers loading hang is unrelated to a documentation issue about forward arguments." 
+ }, + { + "left": "issue:44355", + "right": "issue:44749", + "accept": false, + "reason": "Compiled-file errors are unrelated to a performance regression after upgrade." + }, + { + "left": "issue:30333", + "right": "issue:33453", + "accept": false, + "reason": "MLFlow job-status reporting bug is unrelated to tokenizer loading regression." + }, + { + "left": "issue:44315", + "right": "issue:44360", + "accept": false, + "reason": "Different subsystems and bugs: Liger kernel application vs DSA indexer/ReLU." + }, + { + "left": "issue:43010", + "right": "issue:44368", + "accept": false, + "reason": "Cache/no_grad decoration bug is unrelated to a tied-embeddings warning in Qwen3.5 LoRA." + }, + { + "left": "issue:43502", + "right": "issue:44222", + "accept": false, + "reason": "Offline-mode API calls vs FP8 `save_pretrained` on MoE are different code paths." + }, + { + "left": "issue:43502", + "right": "issue:44961", + "accept": false, + "reason": "No relation between network requests despite `local_files_only=True` and the generic 'racoon' issue." + }, + { + "left": "issue:43299", + "right": "issue:44556", + "accept": false, + "reason": "Different version windows and different symptoms for model loading; not the same regression." + }, + { + "left": "issue:44355", + "right": "issue:44655", + "accept": false, + "reason": "Compiled Python file errors are unrelated to pipeline `save_pretrained` support." + }, + { + "left": "issue:44222", + "right": "issue:44230", + "accept": false, + "reason": "One is saving FP8 MoE checkpoints; the other is inference support for Qwen3-VL/Qwen3.5 FP8." + }, + { + "left": "issue:42831", + "right": "issue:43874", + "accept": false, + "reason": "FP8 accuracy issue is unrelated to a missing image-patch helper in GLM46V image processing." + }, + { + "left": "issue:30333", + "right": "issue:36296", + "accept": false, + "reason": "MLFlow reporting failure is unrelated to tensor-parallel training." 
+ }, + { + "left": "issue:44355", + "right": "issue:45071", + "accept": false, + "reason": "Compiled-file runtime errors are unrelated to `PretrainedConfig` type-checking regression." + }, + { + "left": "issue:33453", + "right": "issue:34689", + "accept": false, + "reason": "Tokenizer loading regression and Llama 3.2 Vision model loading failure are different concrete problems." + }, + { + "left": "issue:42757", + "right": "issue:43576", + "accept": false, + "reason": "Both are v5 regressions, but one is a missing `huggingface_hub` import and the other is a broken `transformers env` command." + }, + { + "left": "issue:43827", + "right": "issue:44961", + "accept": false, + "reason": "Documentation references to `pipeline()` are unrelated to the 'racoon' issue." + }, + { + "left": "issue:33453", + "right": "issue:36296", + "accept": false, + "reason": "Tokenizer loading vs tensor-parallel training are separate bugs." + }, + { + "left": "issue:43576", + "right": "issue:45071", + "accept": false, + "reason": "CLI environment command breakage is unrelated to `PretrainedConfig` typing regressions." + }, + { + "left": "issue:43576", + "right": "issue:43824", + "accept": false, + "reason": "Different failures: broken env command vs missing Qwen2.5-VL export." + }, + { + "left": "issue:43673", + "right": "issue:44492", + "accept": false, + "reason": "Generation cache missing in chunked prefill is unrelated to a cache-strategy typo." + }, + { + "left": "issue:44961", + "right": "issue:45468", + "accept": false, + "reason": "No clear relation between the generic 'racoon' issue and Gemma-4 audio positional encoding." + }, + { + "left": "issue:38617", + "right": "issue:43576", + "accept": false, + "reason": "Importing `layer_type_validation` and a broken env CLI are different issues." + }, + { + "left": "issue:31795", + "right": "issue:38617", + "accept": false, + "reason": "Documentation confusion about forward args is unrelated to an import error." 
+ }, + { + "left": "issue:43576", + "right": "issue:44351", + "accept": false, + "reason": "Broken env command and missing `HybridCache` import are separate regressions." + }, + { + "left": "issue:41628", + "right": "issue:42548", + "accept": false, + "reason": "Both are import errors, but for different symbols and likely different fixes." + }, + { + "left": "issue:44908", + "right": "issue:44961", + "accept": false, + "reason": "Scheduler kwargs bug is unrelated to the generic 'racoon' issue." + }, + { + "left": "issue:44485", + "right": "issue:44961", + "accept": false, + "reason": "GLM-5 RoPE implementation and 'racoon' are unrelated." + }, + { + "left": "issue:36296", + "right": "issue:43901", + "accept": false, + "reason": "Tensor-parallel training bug is unrelated to docs mentioning `return_all_scores`." + }, + { + "left": "issue:41628", + "right": "issue:44355", + "accept": false, + "reason": "Different failures: missing `AutoImageProcessor` import vs compiled-file runtime errors." + }, + { + "left": "issue:36296", + "right": "issue:43526", + "accept": false, + "reason": "Tensor-parallel training bug is unrelated to BeitImageProcessorFast label reduction." + }, + { + "left": "issue:44261", + "right": "issue:44360", + "accept": false, + "reason": "MLA RMS norm precision issue is unrelated to a DSA indexer ReLU concern." + }, + { + "left": "issue:44273", + "right": "issue:45003", + "accept": false, + "reason": "Lazy loading failure and unsafe `sys.modules` access are not the same bug." + }, + { + "left": "issue:44623", + "right": "issue:45230", + "accept": false, + "reason": "Processor `save_pretrained` missing files is unrelated to a generic bug report." + }, + { + "left": "issue:44273", + "right": "issue:45542", + "accept": false, + "reason": "Lazy loading bug is unrelated to a TensorFlow backend/package-install issue." 
+ }, + { + "left": "issue:43576", + "right": "issue:44556", + "accept": false, + "reason": "Broken env CLI and checkpoint reload failures after v5.2/v5.3 are different problems." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 43, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:31515", + "issue:31795", + "issue:33453", + "issue:34689", + "issue:41669", + "issue:42371", + "issue:42890", + "issue:43352", + "issue:43408", + "issue:43502", + "issue:43519", + "issue:43576", + "issue:43606", + "issue:43673", + "issue:43704", + "issue:43723", + "issue:43827", + "issue:43976", + "issue:44038", + "issue:44077", + "issue:44246", + "issue:44261", + "issue:44263", + "issue:44273", + "issue:44295", + "issue:44297", + "issue:44315", + "issue:44351", + "issue:44360", + "issue:44393", + "issue:44485", + "issue:44492", + "issue:44556", + "issue:44623", + "issue:44655", + "issue:44704", + "issue:44829", + "issue:44861", + "issue:44908", + "issue:44945", + "issue:45071", + "issue:45092", + "issue:45125", + "issue:45230", + "issue:45310", + "issue:45341", + "issue:45468", + "issue:45478" + ], + "soft_pairs": [ + "issue:44038|issue:45125", + "issue:41669|issue:43673", + "issue:44393|issue:45071", + "issue:44556|issue:45092", + "issue:42371|issue:44492", + "issue:42890|issue:44908", + "issue:44704|issue:44908", + "issue:44246|issue:44273", + "issue:43408|issue:44315", + "issue:43976|issue:44393", + "issue:43352|issue:44393", + "issue:43576|issue:44908", + "issue:44297|issue:45230", + "issue:44908|issue:45071", + "issue:44295|issue:45230", + "issue:41669|issue:44351", + "issue:41669|issue:45341", + 
"issue:44492|issue:44945", + "issue:41669|issue:44492", + "issue:44393|issue:45478", + "issue:44315|issue:44829", + "issue:31795|issue:33453", + "issue:43827|issue:44908", + "issue:44077|issue:44861", + "issue:41669|issue:44908", + "issue:43723|issue:44393", + "issue:43576|issue:44273", + "issue:44655|issue:44861", + "issue:44360|issue:45468", + "issue:45230|issue:45310", + "issue:44273|issue:44908", + "issue:43519|issue:44485", + "issue:41669|issue:43704", + "issue:43519|issue:44908", + "issue:31795|issue:34689", + "issue:31515|issue:31795", + "issue:44315|issue:45092", + "issue:43704|issue:44485", + "issue:44485|issue:45468", + "issue:42371|issue:43704", + "issue:43502|issue:43519", + "issue:43519|issue:44623", + "issue:43519|issue:43704", + "issue:44263|issue:45468", + "issue:44261|issue:45468", + "issue:43606|issue:45468", + "issue:43704|issue:45341" + ], + "prepared_review_unit_hash": "e33aa2ca2e738778716846bef91e91b05c5c3fc4524790d1a6b27ef07efecce2", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 47, + "serialized_chars": 41139, + "estimated_input_tokens": 10285, + "estimated_eval_tokens": 20826 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is not a single duplicate cluster; it mixes unrelated loading, tokenizer, config, performance, and model-specific bugs. 
All soft-edge matches are superficial keyword overlaps, so they should be rejected.", + "confidence": 0.96, + "canonical_issue_reason": "No clear canonical issue: the items are too heterogeneous and do not share one underlying bug or change.", + "canonical_pr_reason": null, + "best_issue_reason": "No global best issue: none of these issues is a representative duplicate anchor for the whole set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44038", + "right": "issue:45125", + "accept": false, + "reason": "Same model family, but different failures: Qwen3-VL-Moe vs Qwen3.5 MoE tensor-parallel setup." + }, + { + "left": "issue:41669", + "right": "issue:43673", + "accept": false, + "reason": "One is import-time slowdown from broad imports; the other is a GenerationMixin cache/chunked prefill bug." + }, + { + "left": "issue:44393", + "right": "issue:45071", + "accept": false, + "reason": "Both are about bad model behavior, but one is Qwen3-VL bbox output and the other is PretrainedConfig type checking." + }, + { + "left": "issue:44556", + "right": "issue:45092", + "accept": false, + "reason": "Both concern reloading old checkpoints after v5, but one is general v4.57 incompatibility and the other is InternVL2 remote-code/meta-init specific." + }, + { + "left": "issue:42371", + "right": "issue:44492", + "accept": false, + "reason": "TF32 behavior settings and cache-strategy typo are unrelated." + }, + { + "left": "issue:42890", + "right": "issue:44908", + "accept": false, + "reason": "Test flakiness in SamHQ is unrelated to inverse_sqrt scheduler kwargs handling." + }, + { + "left": "issue:44704", + "right": "issue:44908", + "accept": false, + "reason": "AutoProcessor kwargs propagation and scheduler kwargs are different code paths." + }, + { + "left": "issue:44246", + "right": "issue:44273", + "accept": false, + "reason": "Import slowdown and lazy loading failure overlap only loosely; not the same concrete bug." 
+ }, + { + "left": "issue:43408", + "right": "issue:44315", + "accept": false, + "reason": "Model class mismatch warning is unrelated to Liger Kernel not being applied with model_init." + }, + { + "left": "issue:43976", + "right": "issue:44393", + "accept": false, + "reason": "Python version compatibility and Qwen3-VL output errors are unrelated." + }, + { + "left": "issue:43352", + "right": "issue:44393", + "accept": false, + "reason": "Flash-Attention support error for Nemotron is unrelated to Qwen3-VL bounding-box output issues." + }, + { + "left": "issue:43576", + "right": "issue:44908", + "accept": false, + "reason": "Broken env CLI and scheduler kwargs ignore bug are unrelated." + }, + { + "left": "issue:44297", + "right": "issue:45230", + "accept": false, + "reason": "Tokenizer save metadata mismatch is not the same as the generic bug report." + }, + { + "left": "issue:44908", + "right": "issue:45071", + "accept": false, + "reason": "Scheduler kwargs handling is unrelated to PretrainedConfig type checking." + }, + { + "left": "issue:44295", + "right": "issue:45230", + "accept": false, + "reason": "position_ids buffer access error is unrelated to the vague bug report." + }, + { + "left": "issue:41669", + "right": "issue:44351", + "accept": false, + "reason": "Import-time performance regression is unrelated to missing HybridCache export." + }, + { + "left": "issue:41669", + "right": "issue:45341", + "accept": false, + "reason": "Import slowdown and a testing_utils bug are unrelated." + }, + { + "left": "issue:44492", + "right": "issue:44945", + "accept": false, + "reason": "Cache-strategy typo and pipeline-parallelism output corruption are unrelated." + }, + { + "left": "issue:41669", + "right": "issue:44492", + "accept": false, + "reason": "Import performance regression is unrelated to cache-strategy typo." 
+ }, + { + "left": "issue:44393", + "right": "issue:45478", + "accept": false, + "reason": "Different Qwen bugs: 2D bbox output error vs Qwen3.5 MoE from_pretrained failure." + }, + { + "left": "issue:44315", + "right": "issue:44829", + "accept": false, + "reason": "Liger Kernel application and flash_attention_3 training degeneration are different problems." + }, + { + "left": "issue:31795", + "right": "issue:33453", + "accept": false, + "reason": "Documentation confusion in model.forward is unrelated to tokenizer loading regression." + }, + { + "left": "issue:43827", + "right": "issue:44908", + "accept": false, + "reason": "Docs still referencing pipeline() is unrelated to inverse_sqrt scheduler kwargs." + }, + { + "left": "issue:44077", + "right": "issue:44861", + "accept": false, + "reason": "patchtsmixer post_init policy and tied-weight-key attribute crash are unrelated." + }, + { + "left": "issue:41669", + "right": "issue:44908", + "accept": false, + "reason": "Import performance and scheduler kwargs are different issues." + }, + { + "left": "issue:43723", + "right": "issue:44393", + "accept": false, + "reason": "Tokenizer loading in v5 is unrelated to Qwen3-VL bbox output failures." + }, + { + "left": "issue:43576", + "right": "issue:44273", + "accept": false, + "reason": "Broken env CLI and lazy loading are not the same bug." + }, + { + "left": "issue:44655", + "right": "issue:44861", + "accept": false, + "reason": "Pipeline save_pretrained support and tied-weight-key crash are unrelated." + }, + { + "left": "issue:44360", + "right": "issue:45468", + "accept": false, + "reason": "DSA indexer ReLU discussion is unrelated to Gemma-4 audio positional encoding." + }, + { + "left": "issue:45230", + "right": "issue:45310", + "accept": false, + "reason": "Generic bug report is not the same as the specific Qwen3.5 MoE from_pretrained regression." 
+ }, + { + "left": "issue:44273", + "right": "issue:44908", + "accept": false, + "reason": "Lazy loading failure and scheduler kwargs bug are unrelated." + }, + { + "left": "issue:43519", + "right": "issue:44485", + "accept": false, + "reason": "Qwen3VL timestamp calculation and GLM-5 RoPE implementation are different code paths." + }, + { + "left": "issue:43502", + "right": "issue:43519", + "accept": false, + "reason": "local_files_only network leak and timestamp calculation are unrelated." + }, + { + "left": "issue:31795", + "right": "issue:34689", + "accept": false, + "reason": "Forward-doc confusion and Llama 3.2 loading regression are different bugs." + }, + { + "left": "issue:31515", + "right": "issue:31795", + "accept": false, + "reason": "Slow from_pretrained checkpoint loading is unrelated to forward-API documentation confusion." + }, + { + "left": "issue:44315", + "right": "issue:45092", + "accept": false, + "reason": "Both touch model creation/loading, but one is Liger Kernel application and the other is checkpoint compatibility with meta init." + }, + { + "left": "issue:43704", + "right": "issue:44485", + "accept": false, + "reason": "VRAM leak across dataloader threads is unrelated to RoPE implementation." + }, + { + "left": "issue:44485", + "right": "issue:45468", + "accept": false, + "reason": "GLM-5 RoPE and Gemma-4 audio positional encoding are different model-specific implementations." + }, + { + "left": "issue:42371", + "right": "issue:43704", + "accept": false, + "reason": "TF32 settings and VRAM leak are unrelated." + }, + { + "left": "issue:43502", + "right": "issue:43519", + "accept": false, + "reason": "local_files_only network requests and timestamp calculation are unrelated." + }, + { + "left": "issue:43519", + "right": "issue:44623", + "accept": false, + "reason": "Timestamp calculation bug and processor.save_pretrained missing files are unrelated." 
+ }, + { + "left": "issue:43519", + "right": "issue:43704", + "accept": false, + "reason": "Timestamp calculation bug and VRAM leak are unrelated." + }, + { + "left": "issue:44263", + "right": "issue:45468", + "accept": false, + "reason": "torch.split return values bug and Gemma-4 audio positional encoding are unrelated." + }, + { + "left": "issue:44261", + "right": "issue:45468", + "accept": false, + "reason": "MLA q_a_layernorm precision issue and Gemma-4 audio positional encoding are unrelated." + }, + { + "left": "issue:43606", + "right": "issue:45468", + "accept": false, + "reason": "CPU offload device mismatch and Gemma-4 audio positional encoding are unrelated." + }, + { + "left": "issue:43704", + "right": "issue:45341", + "accept": false, + "reason": "VRAM leak in dataloader threads is unrelated to a testing_utils bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 44, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-382", + "nodes": [ + "issue:30333", + "issue:31515", + "issue:33453", + "issue:36296", + "issue:38617", + "issue:42913", + "issue:43066", + "issue:43298", + "issue:43525", + "issue:43572", + "issue:43618", + "issue:44230", + "issue:44492", + "issue:44556", + "issue:44623", + "issue:44861", + "issue:44908", + "issue:44998", + "issue:45290", + "issue:45310", + "issue:45341" + ], + "soft_pairs": [ + "issue:42913|issue:43066", + "issue:44908|issue:44998", + "issue:44998|issue:45341", + "issue:44492|issue:44998", + "issue:44861|issue:45290", + "issue:43618|issue:44998", + "issue:44556|issue:44998", + "issue:43525|issue:44861", + "issue:43572|issue:44861", + "issue:31515|issue:38617", + 
"issue:44230|issue:44623", + "issue:31515|issue:36296", + "issue:30333|issue:31515", + "issue:44230|issue:45310", + "issue:31515|issue:33453", + "issue:43298|issue:44230" + ], + "prepared_review_unit_hash": "3263df6e3d0cd2c544a1d4e3b1281a090277b200d366c9374a61048b5e041367", + "component_budget": { + "node_count": 382, + "item_count": 382, + "soft_pair_count": 1431, + "serialized_chars": 737928, + "estimated_input_tokens": 184482, + "estimated_eval_tokens": 369220 + }, + "budget": { + "node_count": 21, + "item_count": 21, + "soft_pair_count": 16, + "serialized_chars": 16524, + "estimated_input_tokens": 4131, + "estimated_eval_tokens": 8518 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is mostly mixed issue noise. The tokenizer/v5 items are the only somewhat related theme, but the evidence is too thin to merge 42913 and 43066 as duplicates from titles/metadata alone, so that soft edge should be rejected conservatively.", + "confidence": 0.64, + "canonical_issue_reason": "issue:43066 is the most concrete tokenizer/v5 report, but the packet does not support treating it as a confirmed duplicate of 42913.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43066 is the best single representative for the limited tokenizer-regression theme because its title is specific and actionable.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42913", + "right": "issue:43066", + "accept": false, + "reason": "Both mention tokenizer/v5 behavior, but the packet only gives titles/metadata and does not establish the same underlying bug or the same decoder regression." + }, + { + "left": "issue:44908", + "right": "issue:44998", + "accept": false, + "reason": "Inverse_sqrt scheduler kwargs handling and a vague unrelated title are not the same bug." 
+ }, + { + "left": "issue:44998", + "right": "issue:45341", + "accept": false, + "reason": "No shared code path or concrete bug is visible from the titles." + }, + { + "left": "issue:44492", + "right": "issue:44998", + "accept": false, + "reason": "A cache-strategy typo is unrelated to the unrelated title." + }, + { + "left": "issue:44861", + "right": "issue:45290", + "accept": false, + "reason": "Tied-weight key crashes and chat-template tool-call crashes are different failure modes in different paths." + }, + { + "left": "issue:43618", + "right": "issue:44998", + "accept": false, + "reason": "CLIPOutput attentions missing assignment is unrelated to the unrelated title." + }, + { + "left": "issue:44556", + "right": "issue:44998", + "accept": false, + "reason": "Checkpoint reload compatibility after v5 upgrade is not the same as the unrelated title." + }, + { + "left": "issue:43525", + "right": "issue:44861", + "accept": false, + "reason": "Missing pad_token_id in Llama4Config and tied-weight-key list handling are distinct bugs." + }, + { + "left": "issue:43572", + "right": "issue:44861", + "accept": false, + "reason": "StableLmConfig pad_token_idx regression is unrelated to tied-weight key attribute handling." + }, + { + "left": "issue:31515", + "right": "issue:38617", + "accept": false, + "reason": "Slow checkpoint loading and an ImportError for layer_type_validation are different problems." + }, + { + "left": "issue:44230", + "right": "issue:44623", + "accept": false, + "reason": "FP8 inference support and missing processor.save_pretrained files are not the same issue." + }, + { + "left": "issue:31515", + "right": "issue:36296", + "accept": false, + "reason": "Checkpoint load slowness and tensor-parallel training bugs are unrelated." + }, + { + "left": "issue:30333", + "right": "issue:31515", + "accept": false, + "reason": "MLflow reporting after failure is unrelated to checkpoint loading performance." 
+ }, + { + "left": "issue:44230", + "right": "issue:45310", + "accept": false, + "reason": "Both mention Qwen3.5, but one is fp8 support and the other is a from_pretrained error; too different to merge." + }, + { + "left": "issue:31515", + "right": "issue:33453", + "accept": false, + "reason": "Slow checkpoint loading and tokenizer loading regression are adjacent but not clearly the same bug." + }, + { + "left": "issue:43298", + "right": "issue:44230", + "accept": false, + "reason": "Continuous batching for audio models and Qwen fp8 inference support are different feature gaps." + } + ] + }, + "evaluator_result": { + "accept": false, + "feedback": "The cluster summary is a bit too strong for the packet. In particular, accepting issue:42913 \u2194 issue:43066 as a duplicate pair overstates the evidence: the packet only gives titles/metadata, and while the titles are similar, it does not establish the same underlying bug or the specific v4\u2192v5 decoder regression described in the reason. The rest of the rejections are mostly conservative, but the one accepted soft-edge verdict is not sufficiently grounded." 
+ }, + "evaluator_used": true, + "retried": true, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 3, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-26350-2", + "nodes": [ + "issue:26350", + "issue:33260" + ], + "soft_pairs": [ + "issue:26350|issue:33260" + ], + "prepared_review_unit_hash": "f376d05e9a02fb0d34006cb1687481b6600033eacf328ec2134ad349f8b5be6c", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1590, + "estimated_input_tokens": 398, + "estimated_eval_tokens": 1052 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1590, + "estimated_input_tokens": 398, + "estimated_eval_tokens": 1052 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are two separate feature-request/community-contribution issues in the same general area but for different capabilities: Flash Attention 2 support vs GGUF support. They are not duplicates.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "Issue #26350 is the better representative of the cluster only because it is older and has substantially more discussion activity, but it is not a duplicate of #33260.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:26350", + "right": "issue:33260", + "accept": false, + "reason": "Different requested features and different target problems: Flash Attention 2 architecture support versus GGUF support for more architectures. Shared 'community contribution' framing is not enough to treat them as the same issue." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 4, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-27088-4", + "nodes": [ + "issue:27088", + "issue:43834", + "issue:44365", + "issue:44370" + ], + "soft_pairs": [ + "issue:27088|issue:44370", + "issue:27088|issue:44365", + "issue:27088|issue:43834", + "issue:43834|issue:44370", + "issue:43834|issue:44365", + "issue:44365|issue:44370" + ], + "prepared_review_unit_hash": "5b6a86a9415c6eabd8000c4c95e0358dc6f1e7138960cfc5bb27a425dd5f29b8", + "component_budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4453, + "estimated_input_tokens": 1114, + "estimated_eval_tokens": 2484 + }, + "budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4453, + "estimated_input_tokens": 1114, + "estimated_eval_tokens": 2484 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are all documentation-translation requests, but only one is concrete (Turkish). 
The other three are near-identical placeholder-style issues and look like duplicates of each other, not of the Turkish-specific request.", + "confidence": 0.77, + "canonical_issue_reason": "issue:27088 is the only specific, language-filled request ([i18n-TR] Translating docs to Turkish) and it is the earliest issue in the set, so it is the clearest canonical representative.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:27088 is the most informative and concrete issue in the cluster; the others are generic placeholder variants, so this is the best global issue anchor.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:27088", + "right": "issue:44370", + "accept": false, + "reason": "Turkish-specific request versus a generic placeholder translation issue; same broad topic, but not the same concrete request." + }, + { + "left": "issue:27088", + "right": "issue:44365", + "accept": false, + "reason": "Turkish-specific request versus a generic placeholder translation issue; related, but not a true duplicate." + }, + { + "left": "issue:27088", + "right": "issue:43834", + "accept": false, + "reason": "Turkish-specific request versus a generic placeholder translation issue; same subsystem, different underlying request." + }, + { + "left": "issue:43834", + "right": "issue:44370", + "accept": true, + "reason": "Both are generic placeholder translation issues with near-identical wording and no visible distinguishing target, so they look like the same underlying issue." + }, + { + "left": "issue:43834", + "right": "issue:44365", + "accept": true, + "reason": "Near-identical placeholder translation request; no concrete difference is visible, so they appear duplicate." + }, + { + "left": "issue:44365", + "right": "issue:44370", + "accept": true, + "reason": "Same placeholder title pattern and very similar body size; they look like duplicate copies of the same request." 
+ } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is well grounded in the packet: issue:27088 is genuinely specific ([i18n-TR] Turkish), while issue:43834, issue:44365, and issue:44370 share the same placeholder title and extremely similar body sizes. The soft-edge verdicts are appropriately conservative: Turkish vs. placeholder issues are rejected, and the placeholder-to-placeholder pairs are accepted based on strong textual similarity. No overstatement is obvious from the available evidence." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 5, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-36817-14", + "nodes": [ + "issue:36817", + "issue:37015", + "issue:41115", + "issue:41211", + "issue:42503", + "issue:42738", + "issue:43671", + "issue:43837", + "issue:43909", + "issue:44121", + "issue:44205", + "issue:45027", + "issue:45177", + "issue:45322" + ], + "soft_pairs": [ + "issue:36817|issue:41211", + "issue:36817|issue:43671", + "issue:37015|issue:43837", + "issue:37015|issue:44121", + "issue:37015|issue:43909", + "issue:37015|issue:42503", + "issue:37015|issue:45027", + "issue:41115|issue:43837", + "issue:41115|issue:44121", + "issue:41115|issue:42503", + "issue:41115|issue:45027", + "issue:41211|issue:43671", + "issue:42503|issue:43837", + "issue:42503|issue:43671", + "issue:42503|issue:42738", + "issue:42738|issue:45027", + "issue:43671|issue:43837", + "issue:43671|issue:45027", + "issue:43671|issue:45177", + "issue:43671|issue:44121", + "issue:43671|issue:43909", + "issue:43837|issue:43909", + "issue:43837|issue:45322", + "issue:43837|issue:44121", + "issue:43909|issue:44205", + "issue:44121|issue:45322", + 
"issue:43837|issue:45027", + "issue:43837|issue:45177" + ], + "prepared_review_unit_hash": "98a69ed920b7bf0130804fc2aefba7bab2bfbc632d256000ca3f046be5e99188", + "component_budget": { + "node_count": 14, + "item_count": 14, + "soft_pair_count": 28, + "serialized_chars": 17649, + "estimated_input_tokens": 4413, + "estimated_eval_tokens": 9082 + }, + "budget": { + "node_count": 14, + "item_count": 14, + "soft_pair_count": 28, + "serialized_chars": 17649, + "estimated_input_tokens": 4413, + "estimated_eval_tokens": 9082 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are not duplicates; they\u2019re separate model-request issues spanning different architectures and modalities (BERT variants, audio/TTS/ASR, OCR, perception, sparse transformers). The shared wording is mostly from a common \u201cadd model\u201d template, not the same underlying request.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:36817", + "right": "issue:41211", + "accept": false, + "reason": "Different model families: EuroBERT config support vs DEIMv2 architecture addition." + }, + { + "left": "issue:36817", + "right": "issue:43671", + "accept": false, + "reason": "EuroBERT config support is unrelated to Qwen3-TTS support." + }, + { + "left": "issue:37015", + "right": "issue:43837", + "accept": false, + "reason": "NeoBERT and Qwen3-ASR are different models and different modalities." + }, + { + "left": "issue:37015", + "right": "issue:44121", + "accept": false, + "reason": "NeoBERT request is unrelated to the OpenAI weight-sparse transformer request." + }, + { + "left": "issue:37015", + "right": "issue:43909", + "accept": false, + "reason": "NeoBERT vs LFM2.5 Audio is a different model family and task." 
+ }, + { + "left": "issue:37015", + "right": "issue:42503", + "accept": false, + "reason": "NeoBERT and ModernVBERT are separate model requests, not the same change." + }, + { + "left": "issue:37015", + "right": "issue:45027", + "accept": false, + "reason": "NeoBERT request is unrelated to Voxtral TTS support." + }, + { + "left": "issue:41115", + "right": "issue:43837", + "accept": false, + "reason": "MiniCPM3 architecture support and Qwen3-ASR support target different models." + }, + { + "left": "issue:41115", + "right": "issue:44121", + "accept": false, + "reason": "MiniCPM3 architecture addition is not the same as adding a sparse transformer model." + }, + { + "left": "issue:41115", + "right": "issue:42503", + "accept": false, + "reason": "MiniCPM3 and ModernVBERT are distinct architectures with different implementation needs." + }, + { + "left": "issue:41115", + "right": "issue:45027", + "accept": false, + "reason": "MiniCPM3 support is unrelated to Voxtral TTS support." + }, + { + "left": "issue:41211", + "right": "issue:43671", + "accept": false, + "reason": "DEIMv2 and Qwen3-TTS are unrelated model requests." + }, + { + "left": "issue:42503", + "right": "issue:43837", + "accept": false, + "reason": "ModernVBERT and Qwen3-ASR differ in both model family and modality." + }, + { + "left": "issue:42503", + "right": "issue:43671", + "accept": false, + "reason": "ModernVBERT is not the same underlying request as Qwen3-TTS support." + }, + { + "left": "issue:42503", + "right": "issue:42738", + "accept": false, + "reason": "ModernVBERT and BERT-like models with RoPE are only loosely related; not the same concrete change." + }, + { + "left": "issue:42738", + "right": "issue:45027", + "accept": false, + "reason": "BERT-like RoPE support and Voxtral TTS support are different changes." + }, + { + "left": "issue:43671", + "right": "issue:43837", + "accept": false, + "reason": "TTS support and ASR support are separate model integrations." 
+ }, + { + "left": "issue:43671", + "right": "issue:45027", + "accept": false, + "reason": "Qwen3-TTS and Voxtral TTS are different models, so not duplicates." + }, + { + "left": "issue:43671", + "right": "issue:45177", + "accept": false, + "reason": "Qwen3-TTS support is unrelated to DeepSeek-OCR2." + }, + { + "left": "issue:43671", + "right": "issue:44121", + "accept": false, + "reason": "TTS support is not the same as the weight-sparse transformer request." + }, + { + "left": "issue:43671", + "right": "issue:43909", + "accept": false, + "reason": "Qwen3-TTS and LFM2.5 Audio are different audio model requests." + }, + { + "left": "issue:43837", + "right": "issue:43909", + "accept": false, + "reason": "Qwen3-ASR and LFM2.5 Audio request different models and likely different code paths." + }, + { + "left": "issue:43837", + "right": "issue:44205", + "accept": false, + "reason": "ASR support is unrelated to SAM3-LiteText." + }, + { + "left": "issue:44121", + "right": "issue:45322", + "accept": false, + "reason": "Sparse transformer request and EUPE perception encoder are different model additions." + }, + { + "left": "issue:43837", + "right": "issue:45027", + "accept": false, + "reason": "Qwen3-ASR and Voxtral TTS are different modalities and models." + }, + { + "left": "issue:43837", + "right": "issue:45177", + "accept": false, + "reason": "ASR support is unrelated to DeepSeek-OCR2." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 6, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-37934-6", + "nodes": [ + "issue:37934", + "issue:42936", + "issue:43240", + "issue:43668", + "issue:43813", + "issue:44031" + ], + "soft_pairs": [ + "issue:37934|issue:43668", + "issue:42936|issue:43240", + "issue:42936|issue:44031", + "issue:42936|issue:43813", + "issue:42936|issue:43668", + "issue:43240|issue:43668", + "issue:43668|issue:44031", + "issue:43668|issue:43813" + ], + "prepared_review_unit_hash": "6658af031150217ced8fe72d37f47c3914a50e95ad48f4f35711c24160143df4", + "component_budget": { + "node_count": 6, + "item_count": 6, + "soft_pair_count": 8, + "serialized_chars": 6292, + "estimated_input_tokens": 1573, + "estimated_eval_tokens": 3402 + }, + "budget": { + "node_count": 6, + "item_count": 6, + "soft_pair_count": 8, + "serialized_chars": 6292, + "estimated_input_tokens": 1573, + "estimated_eval_tokens": 3402 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All items are distinct issue reports touching different components and bug types; none look like duplicates of the same underlying defect.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue stands out: the reports span unrelated areas (Llama4 norm behavior, Mask2Former loss handling, ModernBERT config typing, a quantization typo, and a tokenizer warning regression).", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a safe cluster representative because there is no shared bug or code path across 
these reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:37934", + "right": "issue:43668", + "accept": false, + "reason": "Different components and defects: Llama4 norm semantics vs ModernBERT config type-hint correctness." + }, + { + "left": "issue:42936", + "right": "issue:43240", + "accept": false, + "reason": "Mask2Former ignore_value usage is unrelated to kwargs not being forwarded to a loss function." + }, + { + "left": "issue:42936", + "right": "issue:44031", + "accept": false, + "reason": "A model loss-parameter bug and a tokenizer regex warning regression are separate code paths." + }, + { + "left": "issue:42936", + "right": "issue:43813", + "accept": false, + "reason": "Unrelated issues: unused ignore_value in Mask2Former vs a typo in a quantization attribute name." + }, + { + "left": "issue:42936", + "right": "issue:43668", + "accept": false, + "reason": "Different bug classes and modules: loss handling vs a config annotation issue." + }, + { + "left": "issue:43240", + "right": "issue:43668", + "accept": false, + "reason": "Forwarding kwargs to loss code is unrelated to correcting a type hint on norm_eps." + }, + { + "left": "issue:43668", + "right": "issue:44031", + "accept": false, + "reason": "ModernBERT config typing and tokenizer regex warnings do not share the same underlying bug." + }, + { + "left": "issue:43668", + "right": "issue:43813", + "accept": false, + "reason": "A config type-hint mistake is not the same as a typo in a quantization operation name." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 7, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41084-2", + "nodes": [ + "issue:41084", + "issue:43037" + ], + "soft_pairs": [ + "issue:41084|issue:43037" + ], + "prepared_review_unit_hash": "d059631273127e4ccb067ad6053f73242467806506ceaa74dcfb02c3fea8d0ed", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1480, + "estimated_input_tokens": 370, + "estimated_eval_tokens": 996 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1480, + "estimated_input_tokens": 370, + "estimated_eval_tokens": 996 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two issues do not appear to describe the same underlying problem: one is about block decoding behavior, the other is a feature/support request for DeepSeek v3.2. No duplicate relationship is supported by the titles or available metadata.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41084", + "right": "issue:43037", + "accept": false, + "reason": "Different problem domains: decoding behavior vs model support request. The shared evidence is only weak title similarity, not the same bug or change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 8, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43598-2", + "nodes": [ + "issue:43598", + "issue:43600" + ], + "soft_pairs": [ + "issue:43598|issue:43600" + ], + "prepared_review_unit_hash": "b7a0cd17bdc0eca6d60c1fa68df1b187f0c5ee5e9c6acf07c69ddceaffa75237", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1583, + "estimated_input_tokens": 396, + "estimated_eval_tokens": 1048 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1583, + "estimated_input_tokens": 396, + "estimated_eval_tokens": 1048 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The two issues are related to Trainer customization, but they describe different problems: one is about `remove_unused_column` behavior, the other about FLOPs estimation for custom trainers. They do not look like duplicates.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43598", + "right": "issue:43600", + "accept": false, + "reason": "Different underlying concerns: one targets input-column pruning/customizability in Trainer, the other targets FLOPs counting for custom trainer classes. Shared theme is too broad to treat as the same bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 9, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43684-2", + "nodes": [ + "issue:43684", + "issue:44405" + ], + "soft_pairs": [ + "issue:43684|issue:44405" + ], + "prepared_review_unit_hash": "73af4585b6689c78e3d0fea10f3248f9f604e095ae640e3e273868974fdd2173", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1612, + "estimated_input_tokens": 403, + "estimated_eval_tokens": 1062 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1612, + "estimated_input_tokens": 403, + "estimated_eval_tokens": 1062 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are not duplicates: one requests registration for Qwen3-Omni in AutoModel/AutoModelForConditionalGeneration, while the other requests AutoModelForSequenceClassification support for Qwen3.5 (Qwen3_5Config). 
They concern different model families and different auto-class capabilities.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue: the two tickets ask for different model support in different auto classes, so they should remain separate.", + "canonical_pr_reason": null, + "best_issue_reason": "Neither issue is a duplicate of the other; if a representative is needed, issue 43684 is a clearer standalone registration request, but it should not absorb 44405.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43684", + "right": "issue:44405", + "accept": false, + "reason": "Different model families and different API targets (registration for Qwen3-Omni vs sequence classification support for Qwen3.5); not the same underlying bug or change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 10, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43835-4", + "nodes": [ + "issue:43835", + "issue:43845", + "issue:43846", + "issue:43859" + ], + "soft_pairs": [ + "issue:43835|issue:43845", + "issue:43835|issue:43846", + "issue:43835|issue:43859", + "issue:43845|issue:43846", + "issue:43845|issue:43859", + "issue:43846|issue:43859" + ], + "prepared_review_unit_hash": "c1db08501838b8d1b61ae45ba0fc7cd912b92f4706cb0275ccd9107783903653", + "component_budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4262, + "estimated_input_tokens": 1066, + "estimated_eval_tokens": 2388 + }, + "budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4262, + "estimated_input_tokens": 1066, + "estimated_eval_tokens": 2388 
+ }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The cluster contains one unrelated-looking issue ('Fett') and three near-identical placeholder issues titled 'huggingface '. With no bodies, targets, or file references, the safest read is that the huggingface items are the only plausible duplicates, but there is not enough concrete evidence to merge any pair with confidence.", + "confidence": 0.78, + "canonical_issue_reason": "Issue 43845 is the earliest of the three identical 'huggingface ' reports and is the most reasonable representative for that likely duplicate cluster; 43835 has a different title and looks unrelated.", + "canonical_pr_reason": null, + "best_issue_reason": "43845 is the best issue to keep as the cluster representative because it is the earliest of the matching-title issues and has no evidence of being distinct from 43846/43859.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43835", + "right": "issue:43845", + "accept": false, + "reason": "Different titles and no body/detail evidence; this does not look like the same underlying issue." + }, + { + "left": "issue:43835", + "right": "issue:43846", + "accept": false, + "reason": "Different titles and no substantive evidence of a shared bug or report." + }, + { + "left": "issue:43835", + "right": "issue:43859", + "accept": false, + "reason": "Different titles and no substantive evidence of a shared bug or report." + }, + { + "left": "issue:43845", + "right": "issue:43846", + "accept": false, + "reason": "The titles match, but there is no body or target evidence to confirm they are the same underlying issue; too little information to merge." + }, + { + "left": "issue:43845", + "right": "issue:43859", + "accept": false, + "reason": "The titles match, but the available metadata is too thin to safely conclude these are the same issue." 
+ }, + { + "left": "issue:43846", + "right": "issue:43859", + "accept": false, + "reason": "The titles match, but there is not enough concrete evidence to confirm a true duplicate." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 11, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44371-5", + "nodes": [ + "issue:44371", + "issue:44418", + "issue:44572", + "issue:44573", + "issue:44937" + ], + "soft_pairs": [ + "issue:44371|issue:44572", + "issue:44371|issue:44573", + "issue:44371|issue:44418", + "issue:44371|issue:44937", + "issue:44418|issue:44572", + "issue:44418|issue:44573", + "issue:44572|issue:44573", + "issue:44572|issue:44937", + "issue:44573|issue:44937" + ], + "prepared_review_unit_hash": "d0f082238dff6040eb2e79771ae6ddb9525cb49e2ac8c779a90db1f9be16c8c7", + "component_budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 9, + "serialized_chars": 5791, + "estimated_input_tokens": 1448, + "estimated_eval_tokens": 3152 + }, + "budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 9, + "serialized_chars": 5791, + "estimated_input_tokens": 1448, + "estimated_eval_tokens": 3152 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The three \"\" issues form a likely duplicate-spam subgroup, but the packet only supports high similarity rather than exact duplication. 
The other two issues have different titles and do not look like the same underlying report.", + "confidence": 0.8, + "canonical_issue_reason": "issue:44371 is the earliest of the highly similar \"\" issues and is a reasonable representative of that likely duplicate subgroup.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44371 is the best representative for the likely duplicate spam subgroup because it is earliest and matches the other two \"\" issues in the limited metadata shown.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44371", + "right": "issue:44572", + "accept": true, + "reason": "Same \"\" title and identical minimal metadata; likely duplicate spam reports, though the body text is not visible here." + }, + { + "left": "issue:44371", + "right": "issue:44573", + "accept": true, + "reason": "Same \"\" title and identical minimal metadata; likely duplicate spam reports, though the body text is not visible here." + }, + { + "left": "issue:44371", + "right": "issue:44418", + "accept": false, + "reason": "Different title and no evidence that the documentation suggestion is the same report as the spam issue." + }, + { + "left": "issue:44371", + "right": "issue:44937", + "accept": false, + "reason": "Different title and no indication it is the same underlying issue as the \"\" report." + }, + { + "left": "issue:44418", + "right": "issue:44572", + "accept": false, + "reason": "Documentation enhancement and \"\" are unrelated on the face of the packet." + }, + { + "left": "issue:44418", + "right": "issue:44573", + "accept": false, + "reason": "Documentation enhancement and \"\" are unrelated on the face of the packet." + }, + { + "left": "issue:44572", + "right": "issue:44573", + "accept": true, + "reason": "Same \"\" title and identical minimal metadata; likely duplicate spam reports, though the body text is not visible here." 
+ }, + { + "left": "issue:44572", + "right": "issue:44937", + "accept": false, + "reason": "Different title and no evidence that it is the same report as the spam issue." + }, + { + "left": "issue:44573", + "right": "issue:44937", + "accept": false, + "reason": "Different title and no evidence that it is the same report as the spam issue." + } + ] + }, + "evaluator_result": { + "accept": false, + "feedback": "The grouping is directionally plausible, but the writeup overstates the evidence. The packet only shows identical titles and equal body lengths for the three \"\" items; it does not expose the actual body text or any explicit linkage. Calling them \"exact duplicates\" and \"the same spam report\" is stronger than what the packet supports. The safer wording would be that they appear highly similar or likely duplicate spam reports, while the other two have different titles and are not clearly the same issue." + }, + "evaluator_used": true, + "retried": true, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 12, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45145-2", + "nodes": [ + "issue:45145", + "issue:45151" + ], + "soft_pairs": [ + "issue:45145|issue:45151" + ], + "prepared_review_unit_hash": "c28c5ff211d3f984c8e638f1585ac868965da7104519ccaf3857237af2c87eff", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1646, + "estimated_input_tokens": 412, + "estimated_eval_tokens": 1080 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1646, + "estimated_input_tokens": 412, + "estimated_eval_tokens": 1080 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + 
"aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two closed issues look like duplicates of the same request: they have identical titles and very similar body content, with no visible evidence of distinct underlying work.", + "confidence": 0.84, + "canonical_issue_reason": "#45145 is the better canonical issue because it shares the same title as #45151 and has slightly more external attention (1 inbound reference vs. 0).", + "canonical_pr_reason": null, + "best_issue_reason": "#45145 is the best issue representative for this cluster for the same reason: same request, plus marginally more linkage.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45145", + "right": "issue:45151", + "accept": true, + "reason": "Same title and strongly similar content suggest the same underlying issue/request; this is a reasonable duplicate match, though the evidence is only soft." + } + ] + }, + "evaluator_result": { + "accept": false, + "feedback": "The packet supports strong similarity, but the analyst is a bit too confident for a soft match: only soft_similarity is present, and there\u2019s no deterministic linkage or other distinguishing evidence. Phrasing like \u201ceffectively identical\u201d and the 0.98 confidence overstate what the packet proves. 
" + }, + "evaluator_used": true, + "retried": true, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 13, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "cache", + "cache_hit": true, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45295-2", + "nodes": [ + "issue:45295", + "issue:45373" + ], + "soft_pairs": [], + "prepared_review_unit_hash": "f5fd95fada2f530ab0999e343c17c16266cde0c7182bb22509972ca44827cd49", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 0, + "serialized_chars": 1286, + "estimated_input_tokens": 322, + "estimated_eval_tokens": 900 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 0, + "serialized_chars": 1286, + "estimated_input_tokens": 322, + "estimated_eval_tokens": 900 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two issues describe the same missing Gemma 4 sequence-classification support; the later, more specific open issue is the better cluster representative.", + "confidence": 0.94, + "canonical_issue_reason": "issue #45373 is the clearest canonical tracker: it is open, explicitly names the missing class `Gemma4ForSequenceClassification`, and states the module-level gap relative to Gemma 2/3.", + "canonical_pr_reason": null, + "best_issue_reason": "issue #45373 best captures the concrete request and likely remaining work, while #45295 is a shorter earlier duplicate with less detail and is already closed.", + "best_pr_reason": null, + "soft_edge_verdicts": [] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 14, 
+ "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45672-2", + "nodes": [ + "issue:45672", + "issue:45674" + ], + "soft_pairs": [ + "issue:45672|issue:45674" + ], + "prepared_review_unit_hash": "d935e3c283a5e01ec84b71f69e1cb43ee80da1f65e7475f7356fe9c0e58a2005", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1668, + "estimated_input_tokens": 417, + "estimated_eval_tokens": 1090 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1668, + "estimated_input_tokens": 417, + "estimated_eval_tokens": 1090 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two issues both concern 4-bit inference, but they describe different failures and code paths: one is a Gemma4/audio-module `torch.finfo()` TypeError on `uint8` weights, the other is a BitsAndBytesConfig regression where `llm_int8_skip_modules` drops the default `lm_head` exclusion and triggers an AssertionError. 
They should not be clustered as duplicates.", + "confidence": 0.94, + "canonical_issue_reason": "Issue #45674 is the better canonical issue because it describes a concrete, likely upstream configuration bug with a clear mechanism (`llm_int8_skip_modules` clearing the default exclusion) and broader applicability than the Gemma4-specific symptom.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #45674 is the strongest representative for the cluster: it has the clearer root-cause description and is not tied to a single model family or module layout.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45672", + "right": "issue:45674", + "accept": false, + "reason": "Different underlying bugs: Gemma4 audio-module `torch.finfo()` on `uint8` weights vs. BitsAndBytesConfig `lm_head` exclusion being cleared and causing an AssertionError. Same broad area, but not the same fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 1, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-39895-12", + "nodes": [ + "pull_request:39895", + "pull_request:41855", + "pull_request:42668", + "pull_request:43251", + "pull_request:43254", + "pull_request:43424", + "pull_request:43823", + "pull_request:43995", + "pull_request:44827", + "pull_request:45170", + "pull_request:45213", + "pull_request:45655" + ], + "soft_pairs": [ + "pull_request:39895|pull_request:43424", + "pull_request:41855|pull_request:43823", + "pull_request:41855|pull_request:43424", + "pull_request:42668|pull_request:43823", + "pull_request:43251|pull_request:43823", + "pull_request:43251|pull_request:44827", + 
"pull_request:43424|pull_request:44827", + "pull_request:43424|pull_request:43823", + "pull_request:43823|pull_request:45655", + "pull_request:43823|pull_request:43995", + "pull_request:43995|pull_request:45213", + "pull_request:43995|pull_request:44827", + "pull_request:44827|pull_request:45170", + "pull_request:44827|pull_request:45655", + "pull_request:43823|pull_request:44827", + "pull_request:44827|pull_request:45213", + "pull_request:43823|pull_request:45213", + "pull_request:43251|pull_request:43254" + ], + "prepared_review_unit_hash": "89399897cfe3c9dc2ab8eac4f5cb32919fa3032c844f5935fe36fe6b7f032a01", + "component_budget": { + "node_count": 12, + "item_count": 12, + "soft_pair_count": 18, + "serialized_chars": 32873, + "estimated_input_tokens": 8219, + "estimated_eval_tokens": 16694 + }, + "budget": { + "node_count": 12, + "item_count": 12, + "soft_pair_count": 18, + "serialized_chars": 32873, + "estimated_input_tokens": 8219, + "estimated_eval_tokens": 16694 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "This cluster is mostly a loose collection of unrelated PRs (model additions, refactors, docs, and tests). 
The only true duplicate-style pair is the `fixed_cross_entropy` kwargs fix: PRs 43251 and 43254 address the same code path and are plausibly mergeable into one change.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 43251 is the best canonical representative of the only real duplicate pair: it has the explicit issue link, a clear `fixed_cross_entropy` kwargs pass-through change, and a slightly fuller implementation context.", + "best_issue_reason": null, + "best_pr_reason": "PR 43251 is the strongest representative of the duplicate pair because it directly fixes the same `loss_utils.fixed_cross_entropy` path as 43254 and is the more explicit, issue-linked version of the change.", + "soft_edge_verdicts": [ + { + "left": "pull_request:39895", + "right": "pull_request:43424", + "accept": false, + "reason": "Videoprism model/docs addition and an executorch dynamic-shapes test are unrelated changes." + }, + { + "left": "pull_request:41855", + "right": "pull_request:43823", + "accept": false, + "reason": "A tokenizer method addition for Mistral common is unrelated to adding the MobileLLM model." + }, + { + "left": "pull_request:41855", + "right": "pull_request:43424", + "accept": false, + "reason": "Tokenizer API work and executorch export tests touch different code paths." + }, + { + "left": "pull_request:42668", + "right": "pull_request:43823", + "accept": false, + "reason": "Processor auto-loading robustness changes are unrelated to the MobileLLM model addition." + }, + { + "left": "pull_request:43251", + "right": "pull_request:43823", + "accept": false, + "reason": "Loss-function kwargs plumbing is unrelated to adding a new model implementation." + }, + { + "left": "pull_request:43251", + "right": "pull_request:44827", + "accept": false, + "reason": "The cross-entropy kwargs fix and Mistral4 test/model changes are separate bugs/changes." 
+ }, + { + "left": "pull_request:43424", + "right": "pull_request:44827", + "accept": false, + "reason": "Executorch dynamic-shape export tests are unrelated to Mistral4 test fixes." + }, + { + "left": "pull_request:43424", + "right": "pull_request:43823", + "accept": false, + "reason": "Dynamic-shape export testing and MobileLLM model addition are unrelated." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45655", + "accept": false, + "reason": "A new model addition and a config-resolution order fix are unrelated." + }, + { + "left": "pull_request:43823", + "right": "pull_request:43995", + "accept": false, + "reason": "MobileLLM model addition and Falcon output-interface refactoring are unrelated." + }, + { + "left": "pull_request:43823", + "right": "pull_request:44827", + "accept": false, + "reason": "Adding MobileLLM and fixing Mistral4 tests are separate model efforts." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45213", + "accept": false, + "reason": "Model addition and the model-creation skill docs are not the same change." + }, + { + "left": "pull_request:43995", + "right": "pull_request:45213", + "accept": false, + "reason": "Falcon refactoring and a new-model skill document are unrelated." + }, + { + "left": "pull_request:43995", + "right": "pull_request:44827", + "accept": false, + "reason": "Falcon interface refactoring and Mistral4 test fixes do not address the same bug." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45170", + "accept": false, + "reason": "Mistral4 test/model updates and CLIP-like `layrnorm` renaming are unrelated." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45655", + "accept": false, + "reason": "Mistral4 test/model changes and config-resolution ordering are unrelated." + }, + { + "left": "pull_request:43823", + "right": "pull_request:44827", + "accept": false, + "reason": "Adding MobileLLM and fixing Mistral4 tests are different model-specific changes." 
+ }, + { + "left": "pull_request:43251", + "right": "pull_request:43254", + "accept": true, + "reason": "Both PRs patch `loss_utils.fixed_cross_entropy` to forward supported `cross_entropy` kwargs for the same issue; they target the same code path and are mergeable into one fix." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays conservative overall. The lone accepted pair, 43251/43254, is supported by the shared `loss_utils.fixed_cross_entropy` edit and common issue target 43240, and the other pairs are reasonably described as unrelated based on their filenames/diffs. No obvious overstatement beyond the evidence in the packet." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 2, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-40546-2", + "nodes": [ + "pull_request:40546", + "pull_request:42781" + ], + "soft_pairs": [ + "pull_request:40546|pull_request:42781" + ], + "prepared_review_unit_hash": "0e326ed65acd29634b8f23ef19080e471471d8101d5e8a767ca63fa5cce0320e", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 7225, + "estimated_input_tokens": 1807, + "estimated_eval_tokens": 3870 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 7225, + "estimated_input_tokens": 1807, + "estimated_eval_tokens": 3870 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These PRs overlap on the VibeVoice family and shared plumbing, but they target different changes: one adds the base 
VibeVoice model, the other adds VibeVoice Realtime plus an acoustic tokenizer and generation changes. They are related, not duplicates.", + "confidence": 0.92, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 40546 is the cleaner canonical representative for the core VibeVoice implementation: it introduces the base model, docs, and auto-mapping integration. PR 42781 is a distinct follow-up feature focused on realtime support and the acoustic tokenizer.", + "best_issue_reason": null, + "best_pr_reason": "PR 40546 is the best single representative of the underlying core feature because it covers the foundational VibeVoice integration. PR 42781 is a separate extension rather than the same change set.", + "soft_edge_verdicts": [ + { + "left": "pull_request:40546", + "right": "pull_request:42781", + "accept": false, + "reason": "Do not merge: 40546 implements the base VibeVoice model, while 42781 adds VibeVoice Realtime and an acoustic tokenizer. Shared files reflect common infrastructure, but the concrete feature scope differs and they would not plausibly be one PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 3, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-40755-2", + "nodes": [ + "pull_request:40755", + "pull_request:41224" + ], + "soft_pairs": [ + "pull_request:40755|pull_request:41224" + ], + "prepared_review_unit_hash": "90b3197db5b5e65697c0274d8104792e8877b24c2867c21ce3f0c565e7501dc1", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4675, + "estimated_input_tokens": 1169, + "estimated_eval_tokens": 2594 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4675, + "estimated_input_tokens": 1169, + "estimated_eval_tokens": 2594 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The two pull requests are unrelated: one adds TimesFM covariate forecasting, the other adds DINOv3 image classification support. 
They touch different models, different code paths, and different documentation/tests.", + "confidence": 0.99, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 41224 is the clearer standalone feature PR, with a focused DINOv3 image-classification scope and substantial review activity.", + "best_issue_reason": null, + "best_pr_reason": "PR 41224 is the best representative of the cluster only in the sense that it is more self-contained and better-scoped; it is not a duplicate of PR 40755.", + "soft_edge_verdicts": [ + { + "left": "pull_request:40755", + "right": "pull_request:41224", + "accept": false, + "reason": "Different features for different model families (TimesFM covariates vs DINOv3 image classification); no shared bug or mergeable code-path change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 4, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "cache", + "cache_hit": true, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41356-2", + "nodes": [ + "pull_request:41356", + "pull_request:44339" + ], + "soft_pairs": [ + "pull_request:41356|pull_request:44339" + ], + "prepared_review_unit_hash": "aead1308c167b0bae6ff11bf9358340baa5e76f816618383418b505a10a9cd01", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6050, + "estimated_input_tokens": 1513, + "estimated_eval_tokens": 3282 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6050, + "estimated_input_tokens": 1513, + "estimated_eval_tokens": 3282 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": 
"Two DEIMv2 pull requests target the same model family, but they are not the same change: PR 41356 is an early/basic model + image processor add, while PR 44339 is a much broader follow-up that adds loss code, auto-mappings, conversion tooling, modular files, and more integration work.", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44339 is the more complete and representative DEIMv2 implementation; PR 41356 looks like an earlier partial version of the same model bring-up.", + "best_issue_reason": null, + "best_pr_reason": "PR 44339 is the strongest single artifact for the cluster because it subsumes the basic model work and includes the broader integration needed for a full Transformers addition.", + "soft_edge_verdicts": [ + { + "left": "pull_request:41356", + "right": "pull_request:44339", + "accept": false, + "reason": "Same model area, but not the same concrete change: 44339 expands the implementation substantially and adds new code paths beyond the basic PR 41356. They look like related iterations, not mergeable duplicates." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 5, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41797-2", + "nodes": [ + "pull_request:41797", + "pull_request:45186" + ], + "soft_pairs": [ + "pull_request:41797|pull_request:45186" + ], + "prepared_review_unit_hash": "bd95b45c458a92b821abe01f81e0db7a79f0363b9f20801d8fec555ef94e95dc", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6723, + "estimated_input_tokens": 1681, + "estimated_eval_tokens": 3618 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6723, + "estimated_input_tokens": 1681, + "estimated_eval_tokens": 3618 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are two separate model-addition pull requests for different code paths (DeepSeekOCR vs Isaac). 
The overlap in boilerplate auto-registration/docs files is incidental, not evidence of a duplicate change.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 41797 is the stronger representative of the cluster by review activity and overall maturity, but it is a DeepSeekOCR integration and not the same change as Isaac.", + "best_issue_reason": null, + "best_pr_reason": "PR 41797 is the best standalone PR in the cluster on completeness/review signal, but it should not be conflated with PR 45186 because they implement different models.", + "soft_edge_verdicts": [ + { + "left": "pull_request:41797", + "right": "pull_request:45186", + "accept": false, + "reason": "Reject: these are different model integrations with different filenames, titles, and implementation targets. Shared auto/registry/doc scaffolding is expected and does not indicate the same underlying change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 6, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42975-2", + "nodes": [ + "pull_request:42975", + "pull_request:42976" + ], + "soft_pairs": [ + "pull_request:42975|pull_request:42976" + ], + "prepared_review_unit_hash": "35e5b2fdc40d7c9f05e53aaa501aaf19073439ab9907d89d10fa14d979dff577", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5935, + "estimated_input_tokens": 1484, + "estimated_eval_tokens": 3224 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5935, + "estimated_input_tokens": 1484, + "estimated_eval_tokens": 3224 + }, + "overflow_policy": 
"truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two open PRs both update GitHub Actions, but they target different action families and different maintenance goals: one is a Node 24 compatibility sweep for core workflow actions, the other bumps docker/build-push-action and a few related workflow dependencies. They are related in theme but not the same underlying change, so the soft duplicate edge should be rejected.", + "confidence": 0.89, + "canonical_issue_reason": null, + "canonical_pr_reason": "pull_request:42975 is the better canonical representative because it is the more central, broader workflow-actions compatibility upgrade and affects many common CI workflows.", + "best_issue_reason": null, + "best_pr_reason": "pull_request:42975 is the best overall representative of the cluster: it has the clearest single migration goal (Node 24-compatible GitHub Actions) and the wider cross-workflow impact.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42975", + "right": "pull_request:42976", + "accept": false, + "reason": "They both update workflow actions, but 42975 upgrades core actions/checkout, cache, and upload-artifact for Node 24 compatibility, while 42976 bumps docker/build-push-action versions in docker/image workflows. Different action ecosystems and different concrete changes, so not the same PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 7, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42978-5", + "nodes": [ + "pull_request:42978", + "pull_request:43448", + "pull_request:43451", + "pull_request:45490", + "pull_request:45630" + ], + "soft_pairs": [ + "pull_request:42978|pull_request:43448", + "pull_request:42978|pull_request:43451", + "pull_request:42978|pull_request:45490", + "pull_request:43448|pull_request:43451", + "pull_request:43448|pull_request:45490", + "pull_request:43451|pull_request:45490", + "pull_request:45490|pull_request:45630" + ], + "prepared_review_unit_hash": "bc78d513262d493a9a34744ebf0ff31c03f8bd10ecd28f2436312014ed33b058", + "component_budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 7, + "serialized_chars": 17625, + "estimated_input_tokens": 4407, + "estimated_eval_tokens": 9070 + }, + "budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 7, + "serialized_chars": 17625, + "estimated_input_tokens": 4407, + "estimated_eval_tokens": 9070 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All five PRs are separate model-addition changes, not duplicates. 
The only overlap is the usual Transformers boilerplate (docs, auto mappings, tests), but each PR targets a different model family and codepath.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "No canonical PR: ViT NEPA, Molmo, Molmo2, CTSM, and Kimi2-6 are distinct model additions with different configs/modeling files and separate targets.", + "best_issue_reason": null, + "best_pr_reason": "No single best PR for deduplication; each PR is a standalone model integration rather than a duplicate of another.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42978", + "right": "pull_request:43448", + "accept": false, + "reason": "ViT NEPA vs Molmo are unrelated model additions; shared auto/docs boilerplate is not the same change." + }, + { + "left": "pull_request:42978", + "right": "pull_request:43451", + "accept": false, + "reason": "ViT NEPA and Molmo2 add different models with different implementations and artifacts." + }, + { + "left": "pull_request:42978", + "right": "pull_request:45490", + "accept": false, + "reason": "ViT NEPA and CTSM are separate model integrations, not one concrete bugfix/change." + }, + { + "left": "pull_request:43448", + "right": "pull_request:43451", + "accept": false, + "reason": "Molmo and Molmo2 are different model families/releases with different codepaths; similarity is only boilerplate." + }, + { + "left": "pull_request:43448", + "right": "pull_request:45490", + "accept": false, + "reason": "Molmo and CTSM are distinct additions; overlapping docs/auto files do not indicate the same underlying change." + }, + { + "left": "pull_request:43451", + "right": "pull_request:45490", + "accept": false, + "reason": "Molmo2 and CTSM add different models and cannot plausibly be merged as one change." + }, + { + "left": "pull_request:45490", + "right": "pull_request:45630", + "accept": false, + "reason": "CTSM and Kimi2-6 are unrelated model additions; shared transformer scaffolding is expected boilerplate." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 8, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43291-3", + "nodes": [ + "pull_request:43291", + "pull_request:43665", + "pull_request:44815" + ], + "soft_pairs": [ + "pull_request:43291|pull_request:43665", + "pull_request:43291|pull_request:44815", + "pull_request:43665|pull_request:44815" + ], + "prepared_review_unit_hash": "569d063804485ce8219b8fb25bf5d9fdb14c3efa5548a4467dfbc41349c125fc", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7093, + "estimated_input_tokens": 1774, + "estimated_eval_tokens": 3804 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7093, + "estimated_input_tokens": 1774, + "estimated_eval_tokens": 3804 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These three PRs are not duplicates of one another: two are test-only edits with placeholder assertions, while the third is a substantive FP8/dequantization fix in a different code path. 
The cluster looks like low-quality soft similarity rather than one underlying change.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44815 is the best representative because it contains the only clearly substantive implementation fix (model loading / FP8 dequant padding) and touches real runtime code, unlike the other two PRs which are mostly test placeholders.", + "best_issue_reason": null, + "best_pr_reason": "PR 44815 is the strongest single PR in the set: it addresses a concrete technical problem with code changes in core loading and finegrained FP8 integration, with supporting tests. The other PRs do not appear to fix the same bug/change.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43291", + "right": "pull_request:43665", + "accept": false, + "reason": "Different models and different edits: Whisper tokenization/runtime behavior versus CLIP/ViT test stubs. They do not describe the same bug or change." + }, + { + "left": "pull_request:43291", + "right": "pull_request:44815", + "accept": false, + "reason": "Whisper decode/timestamp handling is unrelated to FP8 dequantization and model-loading padding fixes; no shared concrete code path." + }, + { + "left": "pull_request:43665", + "right": "pull_request:44815", + "accept": false, + "reason": "One is test placeholder churn for CLIP/ViT, the other is a real FP8 dequant/loading fix. They are not mergeable as one fix and do not target the same issue." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 9, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43488-3", + "nodes": [ + "pull_request:43488", + "pull_request:43532", + "pull_request:45476" + ], + "soft_pairs": [ + "pull_request:43488|pull_request:43532", + "pull_request:43488|pull_request:45476", + "pull_request:43532|pull_request:45476" + ], + "prepared_review_unit_hash": "a5f36c85c1d79de8048a3aad1a6bc6df55d756bfcb36a676874dd9f316ae7458", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7468, + "estimated_input_tokens": 1867, + "estimated_eval_tokens": 3990 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7468, + "estimated_input_tokens": 1867, + "estimated_eval_tokens": 3990 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These three PRs are unrelated synthetic/don't-merge changes that only overlap on a shared test filename. 
They do not fix the same bug or implement the same change, so none should be clustered as duplicates.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "No canonical PR: the three PRs target different areas (packaging/versioning, comment-driven CI workflow, and a PR CI caller plus unrelated edits).", + "best_issue_reason": null, + "best_pr_reason": "PR 43532 is the most coherent standalone change and the closest to a real repository fix, but it is still not a duplicate of the others.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43488", + "right": "pull_request:43532", + "accept": false, + "reason": "Different changes: 43488 is setup/dependency/version-bump oriented, while 43532 rewrites CI comment handling and related workflow logic. Shared vit test filename is incidental." + }, + { + "left": "pull_request:43488", + "right": "pull_request:45476", + "accept": false, + "reason": "No same underlying bug/change: 43488 updates packaging metadata and deps, whereas 45476 adds a new PR CI workflow and unrelated code/test edits." + }, + { + "left": "pull_request:43532", + "right": "pull_request:45476", + "accept": false, + "reason": "Both touch CI/testing files, but they address different code paths: comment-triggered self-CI vs a new PR CI caller workflow. Not mergeable as one PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 10, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43612-2", + "nodes": [ + "pull_request:43612", + "pull_request:43613" + ], + "soft_pairs": [ + "pull_request:43612|pull_request:43613" + ], + "prepared_review_unit_hash": "83ab1c3708109244e9f2657acfdfe05127621e27e7a2b4f88ad2b555f9d07743", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6081, + "estimated_input_tokens": 1521, + "estimated_eval_tokens": 3298 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6081, + "estimated_input_tokens": 1521, + "estimated_eval_tokens": 3298 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are two separate feature PRs for different pipelines: promptable concept segmentation vs promptable visual segmentation. 
They share scaffolding changes in docs/registry utilities, but they target different APIs, filenames, and model/test sets, so they are not duplicates.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 43613 is the broader, more integrated feature addition among the two separate pipeline PRs, but it is not a duplicate of 43612.", + "best_issue_reason": null, + "best_pr_reason": "If a single representative is needed, 43613 is the better one to surface because it covers a fuller pipeline integration and more model/test support; however, it remains a distinct change from 43612.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43612", + "right": "pull_request:43613", + "accept": false, + "reason": "Different concrete features and code paths: 43612 adds Promptable Concept Segmentation, while 43613 adds Promptable Visual Segmentation with different pipeline modules, docs, model files, and tests. Shared registry/docs edits are insufficient to treat them as the same change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 11, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43651-2", + "nodes": [ + "pull_request:43651", + "pull_request:43663" + ], + "soft_pairs": [ + "pull_request:43651|pull_request:43663" + ], + "prepared_review_unit_hash": "360e4f4563064f8ad354ecb55675fc7b3756e32b9a9dbff51c607fd893ea267b", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4292, + "estimated_input_tokens": 1073, + "estimated_eval_tokens": 2402 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4292, + "estimated_input_tokens": 1073, + "estimated_eval_tokens": 2402 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two separate Trainer extensibility PRs in the same file: one changes gradient-accumulation loss scaling behavior, the other factors out signature-column inspection for column filtering. They are related to `trainer.py` but address different code paths and bugs.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": "Neither PR is a duplicate of the other. PR #43651 is about loss scaling during training steps for custom trainers; PR #43663 is about signature-column extraction for filtering inputs, especially with PEFT models.", + "best_issue_reason": null, + "best_pr_reason": "PR #43651 is the best representative for the loss-scaling change because it introduces the new `_loss_is_scaled_for_ga` hook in the training step path. 
PR #43663 is the best representative for the column-filtering refactor because it adds `_get_signature_columns` as a separate override point. Each is the clearest canonical item for its own change, but they are not the same underlying PR.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43651", + "right": "pull_request:43663", + "accept": false, + "reason": "Both touch `trainer.py`, but one changes gradient-accumulation loss normalization and the other changes input signature column filtering. Different behaviors, different call sites, and not the same concrete bug or mergeable change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 12, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43656-3", + "nodes": [ + "pull_request:43656", + "pull_request:43836", + "pull_request:43842" + ], + "soft_pairs": [ + "pull_request:43656|pull_request:43842", + "pull_request:43836|pull_request:43842" + ], + "prepared_review_unit_hash": "bb74750cd955e06f1edf487578f6147a88c95ac2d6d7f100ae43de664b0c3b5c", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 5993, + "estimated_input_tokens": 1499, + "estimated_eval_tokens": 3254 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 5993, + "estimated_input_tokens": 1499, + "estimated_eval_tokens": 3254 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All three PRs are variations of the same CLI type-annotation NameError fix in `src/transformers/cli/serve.py`, with 
43842 being the narrowest and clearest patch. 43836 is broader but still targets the same runtime annotation-evaluation problem.", + "confidence": 0.82, + "canonical_issue_reason": null, + "canonical_pr_reason": "`pull_request:43842` is the most focused, clearly scoped fix for the `TypeAdapter` NameError: it imports `TypeAdapter` only under `TYPE_CHECKING` and quotes the annotation, directly matching the reported runtime-import failure.", + "best_issue_reason": null, + "best_pr_reason": "`pull_request:43842` is the best representative PR because it addresses the concrete failure mode with the smallest change set and the clearest title/body alignment to the bug.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43656", + "right": "pull_request:43842", + "accept": true, + "reason": "Both patch the same `serve.py` annotation-evaluation NameError around `TypeAdapter`/CLI import time. They use different implementations (`from __future__ import annotations` vs quoted annotations), but they are solving the same underlying bug and could plausibly be merged into one fix." + }, + { + "left": "pull_request:43836", + "right": "pull_request:43842", + "accept": true, + "reason": "Both target the same `transformers cli serve` type-annotation NameError in the same file. `43836` is broader and more refactor-like, but its quoted-TypeAdapter workaround is the same underlying fix class as `43842`." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet: all three PRs touch `src/transformers/cli/serve.py` and are clearly about the same CLI `TypeAdapter`/annotation NameError, with `43842` the narrowest fix. The soft-edge verdicts are also conservative enough; they describe the same underlying bug without claiming identical implementations." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 13, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "cache", + "cache_hit": true, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43779-2", + "nodes": [ + "pull_request:43779", + "pull_request:43816" + ], + "soft_pairs": [ + "pull_request:43779|pull_request:43816" + ], + "prepared_review_unit_hash": "e7c674adcc031aa8883109a5dfc678b4407fb20b10764302092e384d031ba83a", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4362, + "estimated_input_tokens": 1091, + "estimated_eval_tokens": 2438 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4362, + "estimated_input_tokens": 1091, + "estimated_eval_tokens": 2438 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both pull requests appear to implement the same SwanLab integration change: adding run id/resume support in `integration_utils.py` for issue #43698, with one variant also expanding the docs. 
They are strong duplicate PR candidates.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #43816 is the cleaner representative: it carries the same SwanLab id/resume support plus the accompanying documentation updates, making it the more complete version of the shared change.", + "best_issue_reason": null, + "best_pr_reason": "PR #43816 is the best global representative because it appears to include the full feature change and docs for SwanLab run id/resume support, while #43779 is the narrower implementation-focused variant.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43779", + "right": "pull_request:43816", + "accept": true, + "reason": "Same underlying SwanLab callback enhancement for id/resume support in the same file and same issue target; the doc differences do not change the core code-path change, so these are duplicate PRs." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet: both PRs target issue #43698, touch the same file, and the previews indicate the same SwanLab id/resume support with #43816 adding docs. The duplicate/mergeable verdict is conservative enough given the shared target and overlapping code change." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 14, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43996", + "pull_request:43997", + "pull_request:44007", + "pull_request:44010", + "pull_request:44013", + "pull_request:44017", + "pull_request:44018", + "pull_request:44019", + "pull_request:44044", + "pull_request:44056", + "pull_request:44059", + "pull_request:44066", + "pull_request:44068", + "pull_request:44071", + "pull_request:44072", + "pull_request:44073", + "pull_request:44074", + "pull_request:44076", + "pull_request:44085", + "pull_request:44086", + "pull_request:44098", + "pull_request:44101", + "pull_request:44116", + "pull_request:44129", + "pull_request:44154", + "pull_request:44161", + "pull_request:44722" + ], + "soft_pairs": [ + "pull_request:43996|pull_request:44085", + "pull_request:43996|pull_request:44044", + "pull_request:44066|pull_request:44085", + "pull_request:44007|pull_request:44072", + "pull_request:44072|pull_request:44722", + "pull_request:44066|pull_request:44072", + "pull_request:44013|pull_request:44044", + "pull_request:44066|pull_request:44086", + "pull_request:44066|pull_request:44071", + "pull_request:44018|pull_request:44068", + "pull_request:44066|pull_request:44068", + "pull_request:44018|pull_request:44066", + "pull_request:44019|pull_request:44722", + "pull_request:44086|pull_request:44722", + "pull_request:44019|pull_request:44085", + "pull_request:44018|pull_request:44019", + "pull_request:44019|pull_request:44071", + "pull_request:44068|pull_request:44116", + "pull_request:44068|pull_request:44722", + "pull_request:44071|pull_request:44722", + 
"pull_request:44018|pull_request:44116", + "pull_request:44018|pull_request:44086", + "pull_request:44018|pull_request:44085", + "pull_request:44068|pull_request:44086", + "pull_request:44071|pull_request:44085", + "pull_request:44059|pull_request:44086", + "pull_request:44085|pull_request:44086", + "pull_request:44098|pull_request:44101", + "pull_request:44059|pull_request:44722", + "pull_request:44018|pull_request:44071", + "pull_request:44017|pull_request:44722", + "pull_request:44017|pull_request:44161", + "pull_request:44017|pull_request:44073", + "pull_request:44068|pull_request:44071", + "pull_request:44071|pull_request:44086", + "pull_request:43997|pull_request:44073", + "pull_request:44017|pull_request:44071", + "pull_request:44073|pull_request:44101", + "pull_request:44059|pull_request:44071", + "pull_request:44059|pull_request:44068", + "pull_request:44056|pull_request:44101", + "pull_request:44018|pull_request:44059", + "pull_request:44073|pull_request:44161", + "pull_request:44101|pull_request:44161", + "pull_request:43997|pull_request:44017", + "pull_request:43997|pull_request:44056", + "pull_request:44071|pull_request:44076", + "pull_request:44074|pull_request:44101", + "pull_request:44056|pull_request:44161", + "pull_request:43997|pull_request:44018", + "pull_request:43997|pull_request:44161", + "pull_request:44074|pull_request:44161", + "pull_request:44018|pull_request:44161", + "pull_request:43997|pull_request:44071", + "pull_request:44010|pull_request:44071", + "pull_request:44010|pull_request:44073", + "pull_request:44018|pull_request:44129", + "pull_request:43997|pull_request:44074", + "pull_request:43997|pull_request:44129", + "pull_request:44010|pull_request:44018", + "pull_request:44071|pull_request:44129", + "pull_request:43996|pull_request:44161", + "pull_request:43996|pull_request:44017", + "pull_request:44010|pull_request:44044", + "pull_request:44044|pull_request:44074", + "pull_request:44066|pull_request:44722", + 
"pull_request:44085|pull_request:44722", + "pull_request:44085|pull_request:44129", + "pull_request:43996|pull_request:44007", + "pull_request:43996|pull_request:44129", + "pull_request:44044|pull_request:44129", + "pull_request:44044|pull_request:44154" + ], + "prepared_review_unit_hash": "0e5808184fb4085c52130d8e1120ad4fe868c91b59dabfafc01381b907f7ad61", + "component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 78, + "serialized_chars": 84884, + "estimated_input_tokens": 21221, + "estimated_eval_tokens": 42698 + }, + "budget": { + "node_count": 27, + "item_count": 27, + "soft_pair_count": 72, + "serialized_chars": 75082, + "estimated_input_tokens": 18771, + "estimated_eval_tokens": 37798 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of model-specific output-tracing refactors under the same umbrella issue, but the concrete code paths are usually different. The only clear duplicates are within GPTJ/CodeGen and GPT-Neo; the rest should stay separate.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44066 is the best canonical representative: it has an explicit tracking issue, touches the core GPTJ/CodeGen output-tracing refactor, and has a true duplicate-style overlap with 44722.", + "best_issue_reason": null, + "best_pr_reason": "PR 44066 is the strongest global representative for this cluster because it is the most complete concrete implementation of the shared GPTJ/CodeGen output-tracing change and cleanly anchors the duplicate set.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43996", + "right": "pull_request:44085", + "accept": false, + "reason": "Same umbrella refactor theme, but different models and code paths (CVT/FNet vs GPTJ); not one mergeable change." 
+ }, + { + "left": "pull_request:43996", + "right": "pull_request:44044", + "accept": false, + "reason": "Different architectures and files; only the shared issue label matches, not the underlying bug/change." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44085", + "accept": true, + "reason": "Both are GPTJ output-tracing refactors in the same file; 44085 looks like the GPTJ slice of 44066's broader GPTJ/CodeGen change." + }, + { + "left": "pull_request:44007", + "right": "pull_request:44072", + "accept": false, + "reason": "Both are standardized output-tracing refactors, but for different vision models and separate code paths." + }, + { + "left": "pull_request:44072", + "right": "pull_request:44722", + "accept": false, + "reason": "EfficientNet output tracing and GPTJ/CodeGen output tracing are unrelated model-specific changes." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44072", + "accept": false, + "reason": "GPTJ/CodeGen vs EfficientNet; no shared concrete code-path problem." + }, + { + "left": "pull_request:44013", + "right": "pull_request:44044", + "accept": false, + "reason": "MobileNetV2 and DeBERTa are unrelated implementations despite similar refactor wording." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44086", + "accept": false, + "reason": "GPTJ/CodeGen and MGP-STR are different model families and different forward paths." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44071", + "accept": false, + "reason": "MPT output tracing is a separate model-specific refactor, not the same change as GPTJ/CodeGen." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44068", + "accept": true, + "reason": "Both PRs target GPT-Neo's output-tracing refactor in the same file and appear to overlap on the same concrete implementation." 
+ }, + { + "left": "pull_request:44066", + "right": "pull_request:44068", + "accept": false, + "reason": "GPTJ/CodeGen vs GPT-Neo; same umbrella pattern, different model-specific code paths." + }, + { + "left": "pull_request:44019", + "right": "pull_request:44722", + "accept": false, + "reason": "ResNet output tracing and GPTJ/CodeGen output tracing are unrelated changes." + }, + { + "left": "pull_request:44086", + "right": "pull_request:44722", + "accept": false, + "reason": "MGP-STR and GPTJ/CodeGen touch different models and different forward logic." + }, + { + "left": "pull_request:44019", + "right": "pull_request:44085", + "accept": false, + "reason": "ResNet and GPTJ are separate model implementations; no shared underlying bug." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44019", + "accept": false, + "reason": "GPT-Neo and ResNet are unrelated code paths; only the refactor pattern is similar." + }, + { + "left": "pull_request:44019", + "right": "pull_request:44071", + "accept": false, + "reason": "ResNet and MPT are different model families with different output handling." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44116", + "accept": false, + "reason": "GPT-Neo and Flaubert/XLM are unrelated model-specific refactors." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44722", + "accept": false, + "reason": "GPT-Neo output tracing is separate from GPTJ/CodeGen; similar decorator migration only." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44722", + "accept": false, + "reason": "MPT and GPTJ/CodeGen are different code paths and should not be merged as duplicates." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44116", + "accept": false, + "reason": "GPT-Neo and Flaubert/XLM are unrelated model implementations." 
+ }, + { + "left": "pull_request:44018", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT-Neo and MGP-STR are different model families; no same underlying bug." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44085", + "accept": false, + "reason": "GPT-Neo and GPTJ are different models, so this is not the same change." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44071", + "accept": false, + "reason": "GPT-Neo and MPT are separate model-specific output tracing refactors." + }, + { + "left": "pull_request:44059", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT2 and MGP-STR are unrelated code paths despite similar refactor language." + }, + { + "left": "pull_request:44085", + "right": "pull_request:44086", + "accept": false, + "reason": "GPTJ and MGP-STR are different model families; not a duplicate change." + }, + { + "left": "pull_request:44098", + "right": "pull_request:44101", + "accept": false, + "reason": "ViLT and Flaubert/XLM are different architectures and do not share the same concrete fix." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44017", + "accept": false, + "reason": "RegNet and Segformer are different model-specific refactors." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44056", + "accept": false, + "reason": "RegNet and MPNet are unrelated implementations; only the umbrella issue is shared." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44161", + "accept": false, + "reason": "MPT and LongT5 are different model families with separate output-tracing code." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44018", + "accept": false, + "reason": "RegNet and GPT-Neo are unrelated model code paths." + }, + { + "left": "pull_request:44074", + "right": "pull_request:44101", + "accept": false, + "reason": "TextNet and Flaubert/XLM are separate model-specific changes." 
+ }, + { + "left": "pull_request:44056", + "right": "pull_request:44161", + "accept": false, + "reason": "MPNet and LongT5 do not share the same concrete bug or fix." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44071", + "accept": false, + "reason": "RegNet and MPT are different model implementations." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44071", + "accept": false, + "reason": "SqueezeBert and MPT are unrelated code paths." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44073", + "accept": false, + "reason": "SqueezeBert and VisualBert are different model-specific output handling changes." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44722", + "accept": false, + "reason": "Segformer and GPTJ/CodeGen are unrelated changes." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44161", + "accept": false, + "reason": "Segformer and LongT5 do not share the same underlying code-path problem." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44073", + "accept": false, + "reason": "Segformer and VisualBert are separate model refactors." + }, + { + "left": "pull_request:44073", + "right": "pull_request:44101", + "accept": false, + "reason": "VisualBert and Flaubert/XLM are different model families and fixes." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44101", + "accept": false, + "reason": "MPNet and Flaubert/XLM are unrelated code paths." + }, + { + "left": "pull_request:44074", + "right": "pull_request:44161", + "accept": false, + "reason": "TextNet and LongT5 are different model-specific output-tracing changes." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44074", + "accept": false, + "reason": "GPT-Neo and TextNet are separate model implementations." 
+ }, + { + "left": "pull_request:44071", + "right": "pull_request:44129", + "accept": false, + "reason": "MPT and SpeechT5 are different model families; shared issue target is just umbrella grouping." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44161", + "accept": false, + "reason": "CVT/FNet and LongT5 are unrelated model-specific refactors." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44017", + "accept": false, + "reason": "CVT/FNet and Segformer are separate code paths." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44044", + "accept": false, + "reason": "SqueezeBert and DeBERTa are unrelated implementations." + }, + { + "left": "pull_request:44044", + "right": "pull_request:44074", + "accept": false, + "reason": "DeBERTa and TextNet are different model-specific refactors." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "Grounded overall and appropriately conservative. The canonical/representative choices are supported by the packet, and the only accepted soft edges are the two same-model overlaps with shared files/issue context (GPTJ/CodeGen and GPT-Neo). The cross-model rejections are cautious and consistent with the evidence." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 14, + "component_count": 25, + "review_unit_index": 2, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43997", + "pull_request:44010", + "pull_request:44024", + "pull_request:44056", + "pull_request:44073", + "pull_request:44074", + "pull_request:44076", + "pull_request:44129" + ], + "soft_pairs": [ + "pull_request:44024|pull_request:44076", + "pull_request:44073|pull_request:44074", + "pull_request:44056|pull_request:44073", + "pull_request:44056|pull_request:44074", + "pull_request:43997|pull_request:44010", + "pull_request:44010|pull_request:44129" + ], + "prepared_review_unit_hash": "1a8ec5328f39833263149c348bfd3c2e8bf9f749560d265e188fd9a0fc235bc6", + "component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 78, + "serialized_chars": 84884, + "estimated_input_tokens": 21221, + "estimated_eval_tokens": 42698 + }, + "budget": { + "node_count": 8, + "item_count": 8, + "soft_pair_count": 6, + "serialized_chars": 17123, + "estimated_input_tokens": 4281, + "estimated_eval_tokens": 8818 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These PRs are all part of the same broad migration pattern to standardized output tracing, but they touch different model implementations and different code paths, so they should not be merged as duplicates.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #44129 is the strongest representative of the set because it has the clearest scoped refactor, an explicit issue target, and a more concrete implementation 
change in SpeechT5.", + "best_issue_reason": null, + "best_pr_reason": "PR #44129 is the best standalone PR to track this work because it is the most explicitly tied to a specific task and appears more complete than the other similarly named refactors.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44024", + "right": "pull_request:44076", + "accept": false, + "reason": "Both are output-tracing refactors, but one is FocalNet and the other is ImageGPT; they modify different model internals and are not the same underlying bug/change." + }, + { + "left": "pull_request:44073", + "right": "pull_request:44074", + "accept": false, + "reason": "VisualBert vs TextNet are different model code paths; these are related migration style changes, not duplicate fixes for one concrete issue." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44073", + "accept": false, + "reason": "MPNet and VisualBert each implement separate output-capture plumbing; same theme, but not the same change or mergeable duplicate." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44074", + "accept": false, + "reason": "MPNet and TextNet touch unrelated model implementations, so this is not the same underlying bug or code-path fix." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44010", + "accept": false, + "reason": "RegNet and SqueezeBert are distinct architectures with separate forward/output handling; these are not duplicates." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44129", + "accept": false, + "reason": "SqueezeBert and SpeechT5 differ in model family and behavior; both refactor output tracing, but they do not fix the same concrete problem." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 15, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43998-13", + "nodes": [ + "pull_request:43998", + "pull_request:43999", + "pull_request:44000", + "pull_request:44001", + "pull_request:44002", + "pull_request:44003", + "pull_request:44004", + "pull_request:44025", + "pull_request:44026", + "pull_request:44027", + "pull_request:44028", + "pull_request:44029", + "pull_request:44030" + ], + "soft_pairs": [ + "pull_request:43998|pull_request:44002", + "pull_request:43998|pull_request:44000", + "pull_request:43998|pull_request:44001", + "pull_request:43998|pull_request:44003", + "pull_request:43998|pull_request:44029", + "pull_request:43999|pull_request:44001", + "pull_request:43999|pull_request:44002", + "pull_request:43999|pull_request:44025", + "pull_request:43999|pull_request:44026", + "pull_request:43999|pull_request:44027", + "pull_request:44000|pull_request:44001", + "pull_request:44000|pull_request:44002", + "pull_request:44000|pull_request:44025", + "pull_request:44000|pull_request:44026", + "pull_request:44001|pull_request:44002", + "pull_request:44001|pull_request:44025", + "pull_request:44001|pull_request:44026", + "pull_request:44002|pull_request:44029", + "pull_request:44002|pull_request:44025", + "pull_request:44001|pull_request:44003", + "pull_request:44002|pull_request:44003", + "pull_request:44000|pull_request:44003", + "pull_request:44003|pull_request:44028", + "pull_request:44003|pull_request:44004", + "pull_request:43998|pull_request:44004", + "pull_request:44001|pull_request:44004", + "pull_request:44002|pull_request:44004", + 
"pull_request:44000|pull_request:44004", + "pull_request:44025|pull_request:44026", + "pull_request:44025|pull_request:44027", + "pull_request:44025|pull_request:44028", + "pull_request:44025|pull_request:44029", + "pull_request:44025|pull_request:44030", + "pull_request:44026|pull_request:44029", + "pull_request:44026|pull_request:44027", + "pull_request:44026|pull_request:44028", + "pull_request:44026|pull_request:44030", + "pull_request:44027|pull_request:44028", + "pull_request:44027|pull_request:44029", + "pull_request:44027|pull_request:44030", + "pull_request:44028|pull_request:44029", + "pull_request:44028|pull_request:44030", + "pull_request:44029|pull_request:44030" + ], + "prepared_review_unit_hash": "44d2b9eb696568a2592cd3e762d9a47a4aac3e40ab679849cc3d945b8560ca69", + "component_budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 43, + "serialized_chars": 41331, + "estimated_input_tokens": 10333, + "estimated_eval_tokens": 20922 + }, + "budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 43, + "serialized_chars": 41331, + "estimated_input_tokens": 10333, + "estimated_eval_tokens": 20922 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "This cluster is a set of similar-but-separate model-specific output-tracing refactors. 
The edits follow the same general pattern, but they target different architectures and code paths, so they should not be merged as duplicates.", + "confidence": 0.92, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44003 is the broadest representative: it spans two related model files and introduces the shared tracing helpers, making it the most central exemplar of the cluster theme.", + "best_issue_reason": null, + "best_pr_reason": "PR 44003 is the best single representative of this cluster because it is a substantive, cross-file implementation of the common output-tracing refactor pattern.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43998", + "right": "pull_request:44002", + "accept": false, + "reason": "Same refactor theme, but timm_backbone and upernet are different model code paths." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44000", + "accept": false, + "reason": "Both are output-tracing refactors, but they affect unrelated model implementations." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44001", + "accept": false, + "reason": "Different architectures and forward paths; not the same underlying bug/change." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44003", + "accept": false, + "reason": "Mamba/falcon_mamba tracing changes are unrelated to timm_backbone's output handling." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44029", + "accept": false, + "reason": "Different model families and separate tracing logic; similarity is only at the refactor pattern level." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44001", + "accept": false, + "reason": "MobileNetV1 and UnivNet are distinct models with separate output semantics." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44002", + "accept": false, + "reason": "Shared refactor style only; no shared concrete code path." 
+ }, + { + "left": "pull_request:43999", + "right": "pull_request:44025", + "accept": false, + "reason": "Different architectures and model files; not the same bug or change." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44026", + "accept": false, + "reason": "MobileNetV1 output tracing is unrelated to vision_encoder_decoder." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44027", + "accept": false, + "reason": "Separate seq2seq wrapper and image model refactors; not duplicates." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44001", + "accept": false, + "reason": "Vision-text dual encoder and UnivNet are unrelated implementations." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44002", + "accept": false, + "reason": "Different model code paths; same broad theme only." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44025", + "accept": false, + "reason": "These are separate model-specific tracing changes, not one fix." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44026", + "accept": false, + "reason": "Distinct architectures and file-level changes; not mergeable as one PR." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44002", + "accept": false, + "reason": "UnivNet and UperNet are unrelated models with different forward behavior." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44025", + "accept": false, + "reason": "No shared concrete code path; only a common refactor motif." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44026", + "accept": false, + "reason": "Seq2seq wrapper vs audio model; separate changes." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44029", + "accept": false, + "reason": "UperNet and RWKV are unrelated model implementations." 
+ }, + { + "left": "pull_request:44002", + "right": "pull_request:44025", + "accept": false, + "reason": "Different models and distinct output-tracing code." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44003", + "accept": false, + "reason": "UnivNet audio tracing is unrelated to Mamba/FalconMamba output tracing." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44003", + "accept": false, + "reason": "UperNet and Mamba touch unrelated forward paths." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44003", + "accept": false, + "reason": "Vision-text dual encoder and Mamba are separate subsystems." + }, + { + "left": "pull_request:44003", + "right": "pull_request:44028", + "accept": false, + "reason": "Mamba tracing changes are unrelated to SuperPoint's output handling." + }, + { + "left": "pull_request:44003", + "right": "pull_request:44004", + "accept": false, + "reason": "CodeGen cache/output refactor is a different concrete code-path change." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44004", + "accept": false, + "reason": "Timm backbone and CodeGen are unrelated models." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44004", + "accept": false, + "reason": "UnivNet and CodeGen do not share the same underlying fix." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44004", + "accept": false, + "reason": "UperNet segmentation and CodeGen generation paths are different." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44004", + "accept": false, + "reason": "Different model families and separate output semantics." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44026", + "accept": false, + "reason": "DepthAnything and VisionEncoderDecoder are distinct model implementations." 
+ }, + { + "left": "pull_request:44025", + "right": "pull_request:44027", + "accept": false, + "reason": "DepthAnything and SpeechEncoderDecoder are not the same code path." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44028", + "accept": false, + "reason": "DepthAnything and SuperPoint are unrelated changes." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44029", + "accept": false, + "reason": "Different models; shared refactor pattern only." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44030", + "accept": false, + "reason": "DepthAnything and DPR are separate architectures with separate forward logic." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44029", + "accept": false, + "reason": "VisionEncoderDecoder and RWKV are unrelated model paths." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44027", + "accept": false, + "reason": "VisionEncoderDecoder and SpeechEncoderDecoder are different wrappers with different internals." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44028", + "accept": false, + "reason": "Different models and distinct output-tracing implementations." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44030", + "accept": false, + "reason": "No shared concrete bug/change; only similar refactor style." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44028", + "accept": false, + "reason": "SpeechEncoderDecoder and SuperPoint are unrelated." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44029", + "accept": false, + "reason": "Different architectures and output semantics." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44030", + "accept": false, + "reason": "SpeechEncoderDecoder and DPR are separate model-specific changes." 
+ }, + { + "left": "pull_request:44028", + "right": "pull_request:44029", + "accept": false, + "reason": "SuperPoint and RWKV do not share a concrete fix." + }, + { + "left": "pull_request:44028", + "right": "pull_request:44030", + "accept": false, + "reason": "Different model families; not duplicates." + }, + { + "left": "pull_request:44029", + "right": "pull_request:44030", + "accept": false, + "reason": "RWKV and DPR have unrelated forward/output code paths." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 16, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44662-3", + "nodes": [ + "pull_request:44662", + "pull_request:45075", + "pull_request:45401" + ], + "soft_pairs": [ + "pull_request:44662|pull_request:45401", + "pull_request:44662|pull_request:45075", + "pull_request:45075|pull_request:45401" + ], + "prepared_review_unit_hash": "5d88ba93d54992148acbb17f2093420b331e115b35efa2afb23ff60ba4453316", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 10913, + "estimated_input_tokens": 2729, + "estimated_eval_tokens": 5714 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 10913, + "estimated_input_tokens": 2729, + "estimated_eval_tokens": 5714 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are three unrelated model-addition PRs (PenguinVL, DeepSeek-OCR-2, Voxtral TTS). 
They share only common Transformers boilerplate like auto-registration and docs, not the same underlying bug or change.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "No PR is a true canonical duplicate here; the three changes target different models and code paths. If one had to be chosen as the most self-contained representative, PR 45075 is the most complete, but it is not a duplicate of the others.", + "best_issue_reason": null, + "best_pr_reason": "PR 45075 is the strongest standalone PR in this set because it has the most complete model integration and an explicit issue target, but it still does not represent the others' changes.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44662", + "right": "pull_request:45401", + "accept": false, + "reason": "Different model integrations for PenguinVL vs Voxtral TTS; shared auto/docs files are boilerplate, not the same fix." + }, + { + "left": "pull_request:44662", + "right": "pull_request:45075", + "accept": false, + "reason": "Separate model additions (PenguinVL vs DeepSeek-OCR-2) with different architectures and files beyond shared registry/docs plumbing." + }, + { + "left": "pull_request:45075", + "right": "pull_request:45401", + "accept": false, + "reason": "DeepSeek-OCR-2 and Voxtral TTS are unrelated model ports; overlap is limited to standard Transformers registration and docs." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 17, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44979-2", + "nodes": [ + "pull_request:44979", + "pull_request:45363" + ], + "soft_pairs": [ + "pull_request:44979|pull_request:45363" + ], + "prepared_review_unit_hash": "c64539ac5d04838d43edc2791a7547d2585d478efe4be471449ecf0d4ab1195a", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4407, + "estimated_input_tokens": 1102, + "estimated_eval_tokens": 2460 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4407, + "estimated_input_tokens": 1102, + "estimated_eval_tokens": 2460 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two PRs in the same broad fusion area, but they target different layers: one adds a generic module-fusion API, the other wires in a specific KernelConfig-based n-to-1 fusion path. They are related, but not duplicates.", + "confidence": 0.84, + "canonical_issue_reason": null, + "canonical_pr_reason": "No issue artifacts here. 
If choosing a canonical PR, #45363 is the more concrete feature implementation, while #44979 is the lower-level API scaffolding.", + "best_issue_reason": null, + "best_pr_reason": "#45363 is the best representative of the cluster because it implements a concrete kernel-fusion workflow across runtime codepaths, whereas #44979 mainly introduces the reusable module-fusion abstraction.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44979", + "right": "pull_request:45363", + "accept": false, + "reason": "Related but not the same change: #44979 adds a generic Module Fusion API, while #45363 adds KernelConfig-driven n-to-1 kernel fusion and integration plumbing. They could coexist in separate PRs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 18, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45086-2", + "nodes": [ + "pull_request:45086", + "pull_request:45317" + ], + "soft_pairs": [ + "pull_request:45086|pull_request:45317" + ], + "prepared_review_unit_hash": "56bec37046a24cac0f3aef64095c14cf4c286c9776e739bdc106c73a8eab4d9b", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4414, + "estimated_input_tokens": 1104, + "estimated_eval_tokens": 2464 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4414, + "estimated_input_tokens": 1104, + "estimated_eval_tokens": 2464 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs fix the same AttributeError in `_patch_mistral_regex` by 
switching from `tokenizer.backend_tokenizer.pre_tokenizer` to `tokenizer.pre_tokenizer`; the newer PR also adds test coverage.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #45317 is the better canonical PR: it fixes the same bug as #45086 but is more complete, with test coverage and a clearer title/reproduction context.", + "best_issue_reason": null, + "best_pr_reason": "PR #45317 is the best overall representative because it addresses the concrete code-path bug directly and includes validation, while #45086 is the earlier minimal patch.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45086", + "right": "pull_request:45317", + "accept": true, + "reason": "Same underlying AttributeError fix in the same file and code path; the second PR is essentially the same patch plus tests, so they are mergeable as one change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 19, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45115-2", + "nodes": [ + "pull_request:45115", + "pull_request:45569" + ], + "soft_pairs": [ + "pull_request:45115|pull_request:45569" + ], + "prepared_review_unit_hash": "fc9b1583a87af08f923334243e05b6bce86a9a238eb9eba85a845f313a5efb98", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6066, + "estimated_input_tokens": 1517, + "estimated_eval_tokens": 3290 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6066, + "estimated_input_tokens": 1517, + "estimated_eval_tokens": 3290 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": 
false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs touch NemotronH, but they are not the same change: #45115 is a narrow refactor to inherit from GraniteMoeHybrid and update conversion mappings, while #45569 is a larger architectural split into NemotronHDense/NemotronHSparse with docs and test updates. Related area, but not a duplicate change.", + "confidence": 0.88, + "canonical_issue_reason": null, + "canonical_pr_reason": "#45569 is the broader end-state refactor and more complete implementation; #45115 is a narrower precursor focused on inheritance/conversion mapping.", + "best_issue_reason": null, + "best_pr_reason": "#45569 is the most representative and comprehensive PR in the cluster, covering the model split plus docs/tests, whereas #45115 is a limited structural refactor.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45115", + "right": "pull_request:45569", + "accept": false, + "reason": "Same model family, but different concrete change sets and scopes: one refactors NemotronH toward GraniteMoeHybrid, the other splits NemotronH into dense/sparse variants with broader API/docs/test updates. Not mergeable as one duplicate PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 20, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45332-2", + "nodes": [ + "pull_request:45332", + "pull_request:45333" + ], + "soft_pairs": [ + "pull_request:45332|pull_request:45333" + ], + "prepared_review_unit_hash": "d98028daaa0886f8fd66e32aa2fbb8a413e577b7e328cc0f6c96b9734fd00554", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5397, + "estimated_input_tokens": 1350, + "estimated_eval_tokens": 2956 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5397, + "estimated_input_tokens": 1350, + "estimated_eval_tokens": 2956 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two related but distinct PRs: one adds heterogeneous per-layer configuration support, the other adds broader heterogeneous model/runtime support that likely builds on it. They are not the same underlying change, so this is not a duplicate pair.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45332 is the broader end-to-end change, touching cache/modeling/masking plus tests for heterogeneous behavior, so it is the better representative PR for the cluster. PR 45333 is a narrower foundational config-only piece.", + "best_issue_reason": null, + "best_pr_reason": "PR 45332 is the most complete implementation of the heterogeneous feature set in this cluster and best captures the overall work. 
PR 45333 is useful groundwork but not the main user-facing change.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45332", + "right": "pull_request:45333", + "accept": false, + "reason": "Different scope and likely dependency chain: 45333 adds per-layer config plumbing, while 45332 adds broader modeling/cache support. They are related, but not the same concrete fix/change and would not plausibly be merged as one PR." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 21, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45415-2", + "nodes": [ + "pull_request:45415", + "pull_request:45425" + ], + "soft_pairs": [ + "pull_request:45415|pull_request:45425" + ], + "prepared_review_unit_hash": "4367f3fd7cdd6bb8bef75ea4b9aec4d4088df0e06db845e82338f83c18ce3cb1", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5514, + "estimated_input_tokens": 1379, + "estimated_eval_tokens": 3014 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5514, + "estimated_input_tokens": 1379, + "estimated_eval_tokens": 3014 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs are typing-related, but they target different changes: one is a broad repo-wide type-checking sweep, the other adds specific typing protocols/helpers for model utilities and PEFT integration. 
They are not the same underlying change.", + "confidence": 0.89, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #45425 is the more concrete and representative typing change: it adds shared typing helpers/protocols and updates specific call sites, whereas #45415 is a broad type-checking/config sweep across many files.", + "best_issue_reason": null, + "best_pr_reason": "PR #45425 is the better representative for this cluster because it makes a focused code change in the typing layer instead of a wide lint/type-checking pass.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45415", + "right": "pull_request:45425", + "accept": false, + "reason": "They share a typing theme and some files, but #45415 is a broad type-checking enablement sweep while #45425 adds specific typing protocols and related code updates. Different concrete changes, so not duplicates." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 22, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45487-2", + "nodes": [ + "pull_request:45487", + "pull_request:45615" + ], + "soft_pairs": [ + "pull_request:45487|pull_request:45615" + ], + "prepared_review_unit_hash": "ea9c697b5955024aa34eb08b9df3ca5e131cc0a0ee0e29bbb3b44ccab8f74acc", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5159, + "estimated_input_tokens": 1290, + "estimated_eval_tokens": 2836 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5159, + "estimated_input_tokens": 1290, + "estimated_eval_tokens": 2836 + }, + "overflow_policy": "truncate_then_skip", + 
"trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two PRs are unrelated: one fixes a real model-parallel device placement bug in AltCLIP/ChineseCLIP and related models, while the other only adds XPU test expectations for Qianfan OCR.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #45487 is the substantive code fix: it patches device placement in model-parallel forward paths across several model implementations, addressing a concrete runtime bug.", + "best_issue_reason": null, + "best_pr_reason": "PR #45487 is the best overall candidate because it changes production code to resolve the underlying bug; PR #45615 is tests-only and does not fix the code path itself.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45487", + "right": "pull_request:45615", + "accept": false, + "reason": "Reject: they target different problems. #45487 fixes token type id/device handling in model parallel execution, while #45615 only updates expected outputs for XPU integration tests in Qianfan OCR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 23, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45548-2", + "nodes": [ + "pull_request:45548", + "pull_request:45662" + ], + "soft_pairs": [ + "pull_request:45548|pull_request:45662" + ], + "prepared_review_unit_hash": "b54b89bf068b785ef6230723022e4f4a5b2d4a3f9d8bb3c05c68eb3741039059", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4508, + "estimated_input_tokens": 1127, + "estimated_eval_tokens": 2510 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4508, + "estimated_input_tokens": 1127, + "estimated_eval_tokens": 2510 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are related only at a broad EP/model-loading level, but they fix different bugs: PR 45548 preserves distributed_config during config serialization and adds a has_ep flag for DeepSpeed ZeRO-3/accelerate launch loading, while PR 45662 prevents expert parameters from being overwritten by rank-0 broadcast in EP + FSDP2 loading.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "pull_request:45548", + "right": "pull_request:45662", + "accept": false, + "reason": "Different concrete code-paths and failure modes: 45548 is about config serialization and EP detection during loading, while 45662 is about post-shard wrapping to avoid expert 
weights being overwritten under FSDP2. They are not mergeable as one PR." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 24, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45639-2", + "nodes": [ + "pull_request:45639", + "pull_request:45645" + ], + "soft_pairs": [ + "pull_request:45639|pull_request:45645" + ], + "prepared_review_unit_hash": "c2e5e57c354c65043464bee80ceb77bf902a280202a053c433a06bc497ca7bda", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4590, + "estimated_input_tokens": 1148, + "estimated_eval_tokens": 2552 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4590, + "estimated_input_tokens": 1148, + "estimated_eval_tokens": 2552 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs address the same xdist/captured_info collision in testing debug logs. 
PR 45639 fixes the file handling in `testing_utils.py`; PR 45645 includes that same fix and also updates CI log collection to read multiple `captured_info*.txt` files, making it the broader and more complete change.", + "confidence": 0.88, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45645 is the better canonical PR because it contains the core xdist-safe fix in `testing_utils.py` and also updates the workflow so CI can collect the new per-worker log files.", + "best_issue_reason": null, + "best_pr_reason": "PR 45645 is the best overall match: it addresses the same underlying xdist collision bug as 45639, but also covers the downstream CI artifact handling needed for the fix to work end-to-end.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45639", + "right": "pull_request:45645", + "accept": true, + "reason": "Same underlying bug: xdist collisions when writing `captured_info` debug logs. 45645 builds on the same code-path fix in `testing_utils.py` and adds workflow support for multiple output files, so they could plausibly be merged as one PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 25, + "component_count": 25, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45683-2", + "nodes": [ + "pull_request:45683", + "pull_request:45734" + ], + "soft_pairs": [ + "pull_request:45683|pull_request:45734" + ], + "prepared_review_unit_hash": "b52ca6e04dd943e7cbb6fcbfa4fd68de7103de43e930c9387db6e618a3dfe6df", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 3891, + "estimated_input_tokens": 973, + "estimated_eval_tokens": 2202 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 3891, + "estimated_input_tokens": 973, + "estimated_eval_tokens": 2202 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two open PRs touch the same quantizer helper, but they fix different bugs: one excludes audio tower/embed_audio modules from quantization, while the other makes user-supplied skip_modules additive with the default auto-detected skips and adds tests.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45734 is the better cluster representative because it fixes the core skip_modules regression in the quantizer flow and includes test coverage; PR 45683 is a separate audio-module exclusion change.", + "best_issue_reason": null, + "best_pr_reason": "PR 45734 best captures the main quantizer behavior bug and is more complete due to the added regression test; PR 45683 addresses a different model-specific audio-path issue.", + "soft_edge_verdicts": 
[ + { + "left": "pull_request:45683", + "right": "pull_request:45734", + "accept": false, + "reason": "Different underlying bugs: 45683 skips audio modules for multimodal models, while 45734 changes how skip_modules combines with default protected modules to prevent lm_head quantization. Shared file/issue target is not enough to treat them as duplicates." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + } + ] +} diff --git a/snapshots/20260501T113108Z/analysis-runs/hybrid-model-20260501t113108z/manifest.json b/snapshots/20260501T113108Z/analysis-runs/hybrid-model-20260501t113108z/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..75dd6bb632456ab6903228eebc635998f855dd6b --- /dev/null +++ b/snapshots/20260501T113108Z/analysis-runs/hybrid-model-20260501t113108z/manifest.json @@ -0,0 +1,14 @@ +{ + "analysis_id": "hybrid-model-20260501t113108z", + "artifacts": { + "hybrid": "snapshots/20260501T113108Z/analysis-runs/hybrid-model-20260501t113108z/analysis-report-hybrid.json", + "hybrid_reviews": "snapshots/20260501T113108Z/analysis-runs/hybrid-model-20260501t113108z/analysis-report-hybrid.llm-reviews.json" + }, + "channel": "canonical", + "model": null, + "published_at": "2026-05-01T11:39:27Z", + "repo": "huggingface/transformers", + "schema_version": 1, + "snapshot_id": "20260501T113108Z", + "variant": "hybrid" +} diff --git a/snapshots/20260501T113108Z/manifest.json b/snapshots/20260501T113108Z/manifest.json index 85d4405c8bb0c62c2d138648baf48930041ffdf8..8cf7d32e41f2b9ebf361405166a0cd9b94f62ff1 100644 --- a/snapshots/20260501T113108Z/manifest.json +++ b/snapshots/20260501T113108Z/manifest.json @@ -32,6 +32,24 @@ "timeline_events": 2316 }, "extracted_at": "2026-05-01T11:31:08Z", + "published_analysis": { + "canonical_analysis_id": "hybrid-model-20260501t113108z", + "runs": { + "hybrid-model-20260501t113108z": { 
+ "analysis_id": "hybrid-model-20260501t113108z", + "artifacts": { + "hybrid": "snapshots/20260501T113108Z/analysis-runs/hybrid-model-20260501t113108z/analysis-report-hybrid.json", + "hybrid_reviews": "snapshots/20260501T113108Z/analysis-runs/hybrid-model-20260501t113108z/analysis-report-hybrid.llm-reviews.json" + }, + "channel": "canonical", + "manifest_path": "snapshots/20260501T113108Z/analysis-runs/hybrid-model-20260501t113108z/manifest.json", + "model": null, + "published_at": "2026-05-01T11:39:27Z", + "variant": "hybrid" + } + }, + "schema_version": 1 + }, "repo": "huggingface/transformers", "snapshot_id": "20260501T113108Z", "watermark": {