diff --git a/analysis/current/analysis-report-hybrid.json b/analysis/current/analysis-report-hybrid.json index c049595e0ac61e0bf1dc9512b54f8ce83e85490d..74b79bb2efa0dd7bf8324c02f96c294342ff4a9d 100644 --- a/analysis/current/analysis-report-hybrid.json +++ b/analysis/current/analysis-report-hybrid.json @@ -1,8 +1,8 @@ { "schema_version": "1.0", "repo": "huggingface/transformers", - "snapshot_id": "20260425T060019Z", - "generated_at": "2026-04-25T06:10:11Z", + "snapshot_id": "20260425T120019Z", + "generated_at": "2026-04-25T12:06:34Z", "evidence_quality": "full", "llm_enrichment": true, "meta_bugs": [ @@ -1150,73 +1150,152 @@ ] }, { - "cluster_id": "cluster-43240-3", - "summary": "Cluster of 1 issues and 2 PRs centered on issue #43240.", + "cluster_id": "cluster-43656-4", + "summary": "Cluster of 1 issues and 3 PRs centered on issue #43824.", "status": "open", - "confidence": 0.75, - "canonical_issue_number": 43240, - "canonical_pr_number": 43251, + "confidence": 0.8, + "canonical_issue_number": 43824, + "canonical_pr_number": 43656, "issue_numbers": [ - 43240 + 43824 ], "pr_numbers": [ - 43251, - 43254 + 43656, + 43836, + 43842 ], "evidence_types": [ "closing_reference", - "shared_issue_target" + "shared_issue_target", + "soft_similarity" ], "pr_comparisons": [ { - "left_pr_number": 43251, - "right_pr_number": 43254, - "code_similarity": 0.794, - "size_similarity": 0.64, + "left_pr_number": 43656, + "right_pr_number": 43836, + "code_similarity": 0.36, + "size_similarity": 0.176, "file_overlap": 1.0, - "area_overlap": 0.667, - "patch_similarity": 0.882, + "area_overlap": 0.051, + "patch_similarity": 0.048, "shared_filenames": [ - "src/transformers/loss/loss_utils.py" + "src/transformers/cli/serve.py" ], "shared_file_areas": [ { - "filename": "src/transformers/loss/loss_utils.py", + "filename": "src/transformers/cli/serve.py", "left_ranges": [ + [ + 11, + 18 + ], [ 30, - 50 + 36 + ], + [ + 315, + 323 + ], + [ + 665, + 671 + ], + [ + 931, + 937 + ], + [ + 1843, + 1849 
+ ], + [ + 1868, + 1874 ] ], "right_ranges": [ + [ + 11, + 18 + ], + [ + 359, + 429 + ], + [ + 584, + 590 + ], + [ + 1892, + 1910 + ], + [ + 1917, + 1923 + ] + ] + } + ] + }, + { + "left_pr_number": 43656, + "right_pr_number": 43842, + "code_similarity": 0.405, + "size_similarity": 0.5, + "file_overlap": 1.0, + "area_overlap": 0.0, + "patch_similarity": 0.036, + "shared_filenames": [ + "src/transformers/cli/serve.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/cli/serve.py", + "left_ranges": [ + [ + 11, + 18 + ], [ 30, - 43 + 36 + ], + [ + 315, + 323 + ], + [ + 665, + 671 + ], + [ + 931, + 937 + ], + [ + 1843, + 1849 + ], + [ + 1868, + 1874 + ] + ], + "right_ranges": [ + [ + 54, + 61 + ], + [ + 587, + 593 ] ] } ] - } - ] - }, - { - "cluster_id": "cluster-43824-3", - "summary": "Cluster of 1 issues and 2 PRs centered on issue #43824.", - "status": "open", - "confidence": 0.55, - "canonical_issue_number": 43824, - "canonical_pr_number": 43836, - "issue_numbers": [ - 43824 - ], - "pr_numbers": [ - 43836, - 43842 - ], - "evidence_types": [ - "closing_reference" - ], - "pr_comparisons": [ + }, { "left_pr_number": 43836, "right_pr_number": 43842, @@ -1268,6 +1347,56 @@ } ] }, + { + "cluster_id": "cluster-43240-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #43240.", + "status": "open", + "confidence": 0.75, + "canonical_issue_number": 43240, + "canonical_pr_number": 43251, + "issue_numbers": [ + 43240 + ], + "pr_numbers": [ + 43251, + 43254 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target" + ], + "pr_comparisons": [ + { + "left_pr_number": 43251, + "right_pr_number": 43254, + "code_similarity": 0.794, + "size_similarity": 0.64, + "file_overlap": 1.0, + "area_overlap": 0.667, + "patch_similarity": 0.882, + "shared_filenames": [ + "src/transformers/loss/loss_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/loss/loss_utils.py", + "left_ranges": [ + [ + 30, + 50 + ] + ], + 
"right_ranges": [ + [ + 30, + 43 + ] + ] + } + ] + } + ] + }, { "cluster_id": "cluster-45081-3", "summary": "Cluster of 1 issues and 2 PRs centered on issue #45081.", @@ -1385,6 +1514,81 @@ } ] }, + { + "cluster_id": "cluster-45561-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #45561.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 45561, + "canonical_pr_number": 45639, + "issue_numbers": [ + 45561 + ], + "pr_numbers": [ + 45639, + 45645 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 45639, + "right_pr_number": 45645, + "code_similarity": 0.532, + "size_similarity": 0.619, + "file_overlap": 0.5, + "area_overlap": 0.543, + "patch_similarity": 0.451, + "shared_filenames": [ + "src/transformers/testing_utils.py", + "tests/utils/test_testing_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/testing_utils.py", + "left_ranges": [ + [ + 3529, + 3536 + ], + [ + 3753, + 3787 + ] + ], + "right_ranges": [ + [ + 3525, + 3558 + ], + [ + 3782, + 3788 + ] + ] + }, + { + "filename": "tests/utils/test_testing_utils.py", + "left_ranges": [ + [ + 1, + 86 + ] + ], + "right_ranges": [ + [ + 1, + 114 + ] + ] + } + ] + } + ] + }, { "cluster_id": "cluster-44018-2", "summary": "Cluster of 2 related pull requests linked by soft_similarity.", @@ -1681,6 +1885,16 @@ "target_issue_number": 43240, "reason": "PRs in cluster-43240-3 are treated as duplicates because they converge on issue #43240 with closing_reference, shared_issue_target evidence." }, + { + "cluster_id": "cluster-43656-4", + "canonical_pr_number": 43656, + "duplicate_pr_numbers": [ + 43836, + 43842 + ], + "target_issue_number": 43824, + "reason": "PRs in cluster-43656-4 are treated as duplicates because they converge on issue #43824 with closing_reference, shared_issue_target, soft_similarity evidence." 
+ }, { "cluster_id": "cluster-43698-3", "canonical_pr_number": 43779, @@ -1690,15 +1904,6 @@ "target_issue_number": 43698, "reason": "PRs in cluster-43698-3 are treated as duplicates because they converge on issue #43698 with closing_reference, shared_issue_target evidence." }, - { - "cluster_id": "cluster-43824-3", - "canonical_pr_number": 43836, - "duplicate_pr_numbers": [ - 43842 - ], - "target_issue_number": 43824, - "reason": "PRs in cluster-43824-3 are treated as duplicates because they converge on issue #43824 with closing_reference evidence." - }, { "cluster_id": "cluster-43979-11", "canonical_pr_number": 44007, @@ -1733,6 +1938,15 @@ ], "target_issue_number": 45081, "reason": "PRs in cluster-45081-3 are treated as duplicates because they converge on issue #45081 with closing_reference, shared_issue_target, soft_similarity evidence." + }, + { + "cluster_id": "cluster-45561-3", + "canonical_pr_number": 45639, + "duplicate_pr_numbers": [ + 45645 + ], + "target_issue_number": 45561, + "reason": "PRs in cluster-45561-3 are treated as duplicates because they converge on issue #45561 with closing_reference, shared_issue_target, soft_similarity evidence." 
} ], "best_issue": { diff --git a/analysis/current/analysis-report-hybrid.llm-reviews.json b/analysis/current/analysis-report-hybrid.llm-reviews.json index 715dc1a474709dfd6884f6651904c566f7b1f83c..70a202f8787d9d6c96a588ac653317f1aaf799fd 100644 --- a/analysis/current/analysis-report-hybrid.llm-reviews.json +++ b/analysis/current/analysis-report-hybrid.llm-reviews.json @@ -1,8 +1,8 @@ { "schema_version": "1.0", "repo": "huggingface/transformers", - "snapshot_id": "20260425T060019Z", - "generated_at": "2026-04-25T06:10:11Z", + "snapshot_id": "20260425T120019Z", + "generated_at": "2026-04-25T12:06:34Z", "model": "gpt-5.4-mini?service_tier=flex", "reviews": [ { @@ -26,12 +26,12 @@ "issue:13244|issue:16618", "issue:15980|issue:16618" ], - "prepared_review_unit_hash": "9c0637be12a32d10044e3b0909db2a845cb6aa29108062979602cbe64c140d1f", + "prepared_review_unit_hash": "5261026724ccac7c81c613ac10f734b77b78697cd1065111eb9eb96e35682f9a", "component_budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 2, - "serialized_chars": 2477, + "serialized_chars": 2479, "estimated_input_tokens": 620, "estimated_eval_tokens": 1496 }, @@ -39,7 +39,7 @@ "node_count": 3, "item_count": 3, "soft_pair_count": 2, - "serialized_chars": 2477, + "serialized_chars": 2479, "estimated_input_tokens": 620, "estimated_eval_tokens": 1496 }, @@ -48,24 +48,24 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These three issues are not duplicates of one another: one is about Tapas tokenization mismatch, one about a bad auth/error message for private models, and one about failing to load bert-base-uncased. 
The cluster looks spurious rather than a single underlying bug.", - "confidence": 0.98, - "canonical_issue_reason": "No single canonical issue fits here; the reports describe unrelated problems.", + "summary": "These issues are not a cohesive duplicate set: one is about Tapas tokenization, one about a private-model auth/download error, and one about a generic model-loading failure. The only somewhat related pair is the two loading-related issues, but they still appear to describe different failure modes.", + "confidence": 0.35, + "canonical_issue_reason": "Issue #16618 is the best representative only because it is the broadest and most central to model-loading behavior, with the strongest discussion signal among the three.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 16618 is the broadest and most active report in the set, but it still does not represent the other two bugs.", + "best_issue_reason": "#16618 is the clearest cluster anchor: it is the most general model-loading report and is more representative than the highly specific tokenization issue or the private-download auth error.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:13244", "right": "issue:16618", "accept": false, - "reason": "Tapas tokenization mismatch vs. model loading failure for bert-base-uncased are different code-path problems." + "reason": "Tapas tokenization mismatch is a different problem from a failure to load bert-base-uncased; they do not look like the same underlying bug." }, { "left": "issue:15980", "right": "issue:16618", "accept": false, - "reason": "Private-model login/error-message handling is unrelated to the general 'can't load bert-base-uncased' report." + "reason": "Both involve model loading/download, but one is specifically about unauthenticated access to private models and the other is a generic 'can't load the model' report; too different to treat as the same bug." 
} ] }, @@ -203,14 +203,14 @@ "issue:41093|issue:41762", "issue:43296|issue:43366" ], - "prepared_review_unit_hash": "0803d07dfe8d664e5b8b7f87a0c8722b9cd99c34a2bcdf649913c3fa521b2477", + "prepared_review_unit_hash": "be3ae08a2bf4057898a5d5072f602548ad6ce94141ebb4d78562a0c18088b8bb", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, @@ -225,336 +225,348 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly heterogeneous, but there is a small clear duplicate family around DeepSpeed ZeRO-3 loading failures that produce \"index 0 is out of bounds for dimension 0 with size 0\". Most other soft pairs are only loosely similar and should stay separate.", - "confidence": 0.76, - "canonical_issue_reason": "Issue 43596 is the clearest representative of the ZeRO-3 index-out-of-bounds loading bug and matches the strongest near-duplicate reports in this set.", + "summary": "This cluster is mostly heterogeneous: many unrelated model/config/docs regressions were grouped by loose textual similarity. 
The only clear duplicate pair is the DeepSpeed ZeRO-3 BERT index-out-of-bounds report (43596/43638).", + "confidence": 0.91, + "canonical_issue_reason": "issue:43596 is the best canonical issue because it is the clearest representative of the only true duplicate pair in the set, with issue:43638 describing the same DeepSpeed ZeRO-3 BERT index-0-out-of-bounds failure.", "canonical_pr_reason": null, - "best_issue_reason": "43596 is the most central issue to anchor the duplicate family because it names the concrete DeepSpeed ZeRO-3 failure mode without being overly model-specific.", + "best_issue_reason": "issue:43596 is the most suitable global issue representative: it is concrete, reproducible, and its duplicate (43638) confirms the same underlying bug. The rest of the cluster is too mixed to serve as a single canonical topic.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44322", "right": "issue:45464", "accept": false, - "reason": "Both are Qwen3.5-related, but they describe different failures in different entrypoints and are not the same concrete bug." + "reason": "Both are Qwen3.5-related, but one is a missing config field and the other is a streaming chat/completions failure; different bugs." }, { "left": "issue:43716", "right": "issue:45237", "accept": false, - "reason": "One is a dtype mismatch in image preprocessing, the other is a GPU/AMD compatibility problem; too different." + "reason": "Different models and different failure contexts; both mention model/GPU issues but not the same underlying bug." }, { "left": "issue:43828", "right": "issue:45237", "accept": false, - "reason": "Autocast dtype mismatch is not the same underlying issue as model failing on AMD GPUs." + "reason": "Both are runtime dtype/GPU-adjacent errors, but on different model families and code paths." }, { "left": "issue:45237", "right": "issue:45507", "accept": false, - "reason": "Different model families and different failure modes; no clear shared code-path bug." 
+ "reason": "Different problems: AMD GPU execution failure vs invalid method call in GraniteMoEHybrid." }, { "left": "issue:44292", "right": "issue:45237", "accept": false, - "reason": "Both mention Qwen-3 variants, but the reported errors are unrelated." + "reason": "Qwen-3-8B-NVFP4 error and GPT-OSS AMD GPU failure are unrelated." }, { "left": "issue:30277", "right": "issue:35545", "accept": false, - "reason": "DeepSpeed collective mismatch and ONNX export failure are unrelated." + "reason": "DeepSpeed collective mismatch vs ONNX export error; no shared concrete bug." }, { "left": "issue:35545", "right": "issue:42915", "accept": false, - "reason": "Different models and different export/runtime problems; not the same bug." + "reason": "Different model/export failure modes: ONNX export vs FineGrainedFP8Config load failure." }, { "left": "issue:35545", "right": "issue:41720", "accept": false, - "reason": "ONNX export error and CUDA assert under auto device mapping are different issues." + "reason": "ONNX export error and auto device mapping cudaErrorAssert are unrelated." }, { "left": "issue:44509", "right": "issue:44512", "accept": false, - "reason": "Both are docs regressions, but they cover different removed commands/tasks and do not look like one change." + "reason": "Both are docs issues for v5 removals, but they reference different removed features and not the same concrete change." }, { "left": "issue:35545", "right": "issue:36010", "accept": false, - "reason": "Export to ONNX and missing GenerationMixin import are unrelated." + "reason": "ONNX export and GenerationMixin import error are separate regressions." }, { "left": "issue:43638", "right": "issue:44805", "accept": false, - "reason": "Both are index/shape errors, but the titles point to different concrete failure patterns." + "reason": "Both are index/shape errors, but one is DeepSpeed ZeRO-3 BERT loading and the other is a mask-shape mismatch; too different to merge." 
}, { "left": "issue:43596", "right": "issue:44805", "accept": false, - "reason": "Zero-3 init index-out-of-bounds and mask-shape mismatch are not the same bug." + "reason": "Different error signatures and code paths; only the DeepSpeed BERT pair is a true duplicate." }, { "left": "issue:44805", "right": "issue:45161", "accept": false, - "reason": "Different error signatures and different subsystems." + "reason": "Mask/tensor shape mismatch vs GPT-OSS tensor-parallel failure are not the same bug." }, { "left": "issue:44805", "right": "issue:44918", "accept": false, - "reason": "These are separate training/runtime issues with different error types." + "reason": "Both involve tensor-shape-like failures, but the Qwen3.5 SFT issue is distinct from the mask index error." }, { "left": "issue:44661", "right": "issue:44805", "accept": false, - "reason": "Tokenizer mapping logic and mask/tensor shape mismatch are unrelated." + "reason": "Tokenizer mapping/name-generation bug vs mask shape mismatch are unrelated." }, { "left": "issue:30277", "right": "issue:43638", "accept": false, - "reason": "Both involve DeepSpeed, but one is a collective mismatch and the other is a size-0 index error." + "reason": "Different DeepSpeed-related failures with different models and symptoms." }, { "left": "issue:34634", "right": "issue:35545", "accept": false, - "reason": "Processor voice_preset and ONNX export problems are unrelated." + "reason": "BarkProcessor voice preset issue is unrelated to ONNX export." }, { "left": "issue:43596", "right": "issue:43638", "accept": true, - "reason": "These describe the same ZeRO-3 index-0-out-of-bounds loading failure with only model-specific wording differences." + "reason": "Same concrete DeepSpeed ZeRO-3 bug: index 0 out of bounds during BERT loading/non-pretrained BERT initialization." 
}, { "left": "issue:43975", "right": "issue:44457", "accept": false, - "reason": "Different language/model-specific inference behavior; no evidence they are the same bug." + "reason": "Both mention detokenization / LoRA, but they are distinct language/model-weight issues." }, { "left": "issue:43039", "right": "issue:45161", "accept": false, - "reason": "Liger Kernel cross-entropy invocation and GPT-OSS tensor-parallel failure are different problems." + "reason": "Liger Kernel cross_entropy routing and GPT-OSS tensor-parallel support are unrelated." }, { "left": "issue:44292", "right": "issue:44457", "accept": false, - "reason": "Different Qwen3.5/LoRA-related reports with different symptoms." + "reason": "Qwen-3-8B-NVFP4 runtime failure and LoRA merge/save mismatch are different bugs." }, { "left": "issue:34928", "right": "issue:44805", "accept": false, - "reason": "Activation-checkpointing/FSDP recomputation mismatch is distinct from the mask index error." + "reason": "Activation-checkpointing/FSDP recompute mismatch is not the same as a mask-shape indexing error." }, { "left": "issue:24643", "right": "issue:30277", "accept": false, - "reason": "Both mention DeepSpeed, but the training weight-dimension error and collective mismatch are different bugs." + "reason": "Both involve DeepSpeed, but the failures are different: weight dimensionality vs collective mismatch." }, { "left": "issue:43975", "right": "issue:45237", "accept": false, - "reason": "Unrelated model detokenization vs AMD GPU runtime issue." + "reason": "Unrelated problems: detokenization bug vs AMD GPU model loading failure." }, { "left": "issue:43638", "right": "issue:45161", "accept": false, - "reason": "Zero-3/BERT load failure is unrelated to GPT-OSS tensor-parallel behavior." + "reason": "DeepSpeed BERT index error and GPT-OSS tensor-parallel issue are unrelated." 
}, { "left": "issue:34928", "right": "issue:36331", "accept": false, - "reason": "FSDP checkpointing mismatch and CustomTrainer signature change are unrelated." + "reason": "FSDP activation checkpointing tensor mismatch vs CustomTrainer API signature change are unrelated." }, { "left": "issue:43596", "right": "issue:45161", "accept": false, - "reason": "Different model-loading bug versus tensor-parallel runtime issue." + "reason": "Different models and different failure modes." }, { "left": "issue:43716", - "right": "issue:44292", + "right": "issue:45507", "accept": false, - "reason": "Different models and different error contexts; not the same underlying defect." + "reason": "Both are model-related, but one is dtype mismatch and the other is an invalid method call." }, { "left": "issue:39290", "right": "issue:45081", "accept": false, - "reason": "Gemma3/vLLM attribute error and Mistral tokenizer patch crash are unrelated." + "reason": "Qwen3/Gemma sliding-window config problems and tokenizer regex patch crash are different bugs." }, { "left": "issue:43828", "right": "issue:45161", "accept": false, - "reason": "Autocast dtype mismatch and GPT-OSS TP failure do not match." - }, - { - "left": "issue:30277", - "right": "issue:36331", - "accept": false, - "reason": "Collective mismatch in DeepSpeed and trainer kwarg incompatibility are separate issues." + "reason": "Autocast dtype mismatch in Phi-tiny-MoE is unrelated to GPT-OSS tensor-parallel failure." }, { - "left": "issue:43039", - "right": "issue:44805", + "left": "issue:44918", + "right": "issue:45161", "accept": false, - "reason": "Liger Kernel cross-entropy invocation is unrelated to mask/tensor indexing errors." + "reason": "TRL SFT embedding unpacking failure and GPT-OSS TP issue are different code paths." }, { "left": "issue:34928", - "right": "issue:36010", + "right": "issue:41720", "accept": false, - "reason": "Activation checkpointing/FSDP issue does not match the import error." 
- }, - { - "left": "issue:41762", - "right": "issue:43596", - "accept": true, - "reason": "Both are the same DeepSpeed ZeRO-3 index-0-out-of-bounds failure appearing during model loading." + "reason": "FSDP/activation-checkpointing recompute mismatch is unrelated to device-mapping cudaErrorAssert." }, { "left": "issue:43827", "right": "issue:44512", "accept": false, - "reason": "Both are docs regressions, but they target different removed guidance and do not look like one duplicate." + "reason": "Both are docs regressions from v5 removals, but they concern different removed items and should stay separate." }, { "left": "issue:43716", "right": "issue:44560", "accept": false, - "reason": "Different model families and different symptom classes." + "reason": "Different models and different symptoms: image-preprocessor dtype mismatch vs video StopIteration." }, { "left": "issue:43716", "right": "issue:44918", "accept": false, - "reason": "Image-preprocessor dtype mismatch and Qwen3.5 SFT embedding unpacking are unrelated." + "reason": "Dtype mismatch and embedding unpacking failure are not the same underlying bug." }, { "left": "issue:43039", "right": "issue:43828", "accept": false, - "reason": "Both touch loss/dtype behavior, but they are not the same concrete bug." + "reason": "Liger Kernel cross-entropy issue and autocast dtype mismatch are separate regressions." }, { "left": "issue:30277", "right": "issue:36331", "accept": false, - "reason": "DeepSpeed rank collective mismatch and CustomTrainer API mismatch are unrelated." + "reason": "DeepSpeed collective mismatch and trainer API change are unrelated." }, { "left": "issue:43039", - "right": "issue:44560", + "right": "issue:44805", + "accept": false, + "reason": "Liger Kernel cross_entropy call and mask-shape indexing error are unrelated." + }, + { + "left": "issue:34928", + "right": "issue:36010", + "accept": false, + "reason": "FSDP checkpointing mismatch vs GenerationMixin import failure are unrelated." 
+ }, + { + "left": "issue:41762", + "right": "issue:43596", "accept": false, - "reason": "Liger Kernel cross-entropy usage and Qwen3-vl embedding video StopIteration are different failures." + "reason": "Gemma3 DeepSpeed ZeRO-3 load failure and BERT ZeRO-3 index error are different model-specific bugs." + }, + { + "left": "issue:43901", + "right": "issue:44512", + "accept": false, + "reason": "Docs about return_all_scores and docs about the removed run command are different documentation issues." + }, + { + "left": "issue:39290", + "right": "issue:41720", + "accept": false, + "reason": "Different model families and different runtime failures." }, { "left": "issue:43866", "right": "issue:44863", "accept": false, - "reason": "Corrupted checkpoint and checkpoint loading failure are too vague and not clearly the same artifact." + "reason": "Corrupted checkpoint vs model implementation inability to load checkpoints are not the same issue." }, { "left": "issue:43854", "right": "issue:43866", "accept": false, - "reason": "Unit test load failure and corrupted checkpoint are different problems." + "reason": "Unit-test load failure for GLM-4.7-Flash is unrelated to Ovis2 checkpoint corruption." }, { "left": "issue:43425", - "right": "issue:45070", + "right": "issue:44292", "accept": false, - "reason": "Torch compatibility and pydantic model field regression are unrelated." + "reason": "Torch version compatibility and Qwen NVFP4 runtime failure are unrelated." }, { "left": "issue:41093", "right": "issue:43638", "accept": false, - "reason": "Mask shape mismatch and ZeRO-3 zero-size index error are different error signatures." + "reason": "Both have index/shape errors, but the masked-tensor mismatch and the DeepSpeed BERT zero-size index error are distinct." }, { "left": "issue:39290", "right": "issue:43531", "accept": false, - "reason": "Both involve recent Qwen/Gemma-style models, but the reported missing attribute differs and there is no clear same bug." 
+ "reason": "Different sliding-window-related config bugs on different model families; not the same concrete defect." }, { "left": "issue:43404", "right": "issue:43866", "accept": false, - "reason": "Untied lm_head weights and corrupted checkpoint are unrelated." + "reason": "Untied lm_head weights and checkpoint corruption are unrelated." }, { "left": "issue:45362", "right": "issue:45464", "accept": false, - "reason": "Both are Qwen3.5, but different sizes and different runtime paths make this too weak to merge." + "reason": "Both are Qwen3.5 issues, but one is chat-template crash and the other is streaming chat/completions failure." }, { "left": "issue:43792", "right": "issue:45070", "accept": false, - "reason": "Whisper inference failure and pydantic field regression are unrelated." + "reason": "Whisper model runtime failure and pydantic PretrainedConfig field breakage are unrelated." }, { "left": "issue:43541", "right": "issue:43828", "accept": false, - "reason": "Grouped_mm tracing error and autocast dtype mismatch are different runtime issues." + "reason": "Grouped_mm tracing failure in Mixtral and autocast dtype mismatch in Phi-tiny-MoE are different code paths." }, { "left": "issue:36010", "right": "issue:39290", "accept": false, - "reason": "ImportError for GenerationMixin and Gemma3 config attribute error are unrelated." + "reason": "GenerationMixin import regression and Gemma3 sliding-window attribute error are unrelated." }, { "left": "issue:44841", "right": "issue:45084", "accept": false, - "reason": "Processor failure and template compilation error are not the same bug." + "reason": "Processor failure for Voxtral and template-node compilation error are different bugs." }, { "left": "issue:41093", "right": "issue:41762", "accept": false, - "reason": "Both are index-related, but one is a mask-size mismatch and the other is a zero-size load failure." + "reason": "Similar IndexError wording, but different models and likely different tensor shapes/causes." 
}, { "left": "issue:43296", "right": "issue:43366", "accept": false, - "reason": "Different model-support requests/errors; not the same concrete issue." + "reason": "PaddleOCR-VL load failure in vLLM and GGUF gpt-oss support are distinct issues." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is grounded in the packet and stays conservative. The two accepted ZeRO-3 index-out-of-bounds pairs are well supported by the titles, and the rest of the soft-edge rejections do not overclaim duplication from the available evidence." + "feedback": "The summary is well grounded in the packet and stays conservative. The only accepted soft edge (43596/43638) is clearly supported by matching DeepSpeed ZeRO-3 index-out-of-bounds reports on BERT, and the other pairings are appropriately rejected without overclaiming. The heterogeneity assessment is also consistent with the titles and available evidence." }, "evaluator_used": true, "retried": false, @@ -657,23 +669,23 @@ "issue:43828|issue:44928", "issue:41553|issue:43883", "issue:43866|issue:44403", - "issue:36331|issue:41762", - "issue:44560|issue:45507" + "issue:44560|issue:45507", + "issue:36331|issue:41762" ], - "prepared_review_unit_hash": "8e4a76b427a830fbff24d9f89ece1457b2939ffc99ee7f27ae39ec5dec96a71e", + "prepared_review_unit_hash": "d9988ee5495cddd227294a48716725dc65d6bb305c7964d95ea3544d053aa32b", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 34, - "serialized_chars": 37569, + "serialized_chars": 37571, "estimated_input_tokens": 9393, "estimated_eval_tokens": 19042 }, @@ -682,222 +694,222 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Most pairs are not true 
duplicates; the only clear duplicate cluster is the non-persistent buffer corruption regression in Transformers v5 (`43644`, `43950`, `44534`).", - "confidence": 0.84, - "canonical_issue_reason": "`issue:43950` is the clearest and most specific statement of the underlying bug (`from_pretrained` corrupting `register_buffer(persistent=False)` buffers), and it matches the other buffer-corruption reports.", + "summary": "This cluster is mostly a set of unrelated issue reports. The only clear duplicate group is the Transformers 5.x non-persistent-buffer corruption regression: #43644, #43950, and #44534 are the same underlying bug. The rest are only loosely similar by model family, API area, or v5 regression theme and should stay separate.", + "confidence": 0.91, + "canonical_issue_reason": "#43950 is the clearest representative of the shared non-persistent-buffer regression: it names the failure mode directly and matches the duplicate chain with #43644 and #44534.", "canonical_pr_reason": null, - "best_issue_reason": "`issue:43950` is the most precise, actionable representative of the duplicate cluster and best captures the concrete regression.", + "best_issue_reason": "#43950 is the best overall issue to anchor this cluster because it most precisely describes the shared bug and is the most explicit of the duplicate reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43572", "right": "issue:45070", "accept": false, - "reason": "Both are v5 config-related, but one is a missing `pad_token_idx` field in StableLM and the other is a pydantic `PretrainedConfig` regression; not the same bug." + "reason": "Both are v5/config regressions, but one is StableLm pad_token_idx and the other is a generic PretrainedConfig/pydantic field break; different bugs." 
}, { "left": "issue:44387", "right": "issue:45005", "accept": false, - "reason": "Both mention v5/quantization or weights, but the symptoms and code paths differ: int4 memory/OOM vs tied weights in translation models." + "reason": "Different failures: int4 quantization/OOM versus tied-weights behavior on translation models." }, { "left": "issue:43541", "right": "issue:45161", "accept": false, - "reason": "Both involve MoE models, but one is a torch dynamo grouped_mm failure and the other is a TP-only support issue; not the same underlying problem." + "reason": "Both mention MoE/GPT-OSS-style models, but one is a grouped_mm tracing error and the other is a tensor-parallel setup issue." }, { "left": "issue:43828", "right": "issue:45464", "accept": false, - "reason": "Different failures and subsystems: autocast dtype mismatch vs chat/completions streaming inference." + "reason": "Unrelated symptoms and code paths: autocast dtype mismatch versus streaming chat/completions failure." }, { "left": "issue:44291", "right": "issue:45070", "accept": false, - "reason": "`init_empty_weights` unexpected-arg error is a constructor/init path bug, while 45070 is a pydantic config-field regression." + "reason": "Both touch v5 initialization/config internals, but one is init_empty_weights/_is_hf_initialized and the other is a pydantic PretrainedConfig field regression." }, { "left": "issue:43366", "right": "issue:45464", "accept": false, - "reason": "GGUF gpt-oss support and Qwen3.5 streaming API failures are unrelated changes with different targets." + "reason": "Different architectures and failures; one is GGUF gpt-oss support, the other is Qwen3.5 streaming inference." }, { "left": "issue:43531", "right": "issue:45070", "accept": false, - "reason": "Qwen3-MoE sliding-window handling and pydantic `PretrainedConfig` field issues are not the same bug." + "reason": "Qwen3-MoE sliding_window behavior is a different bug from the PretrainedConfig/pydantic regression." 
}, { "left": "issue:44661", "right": "issue:45464", "accept": false, - "reason": "Tokenizer mapping/add-new-model-like logic is unrelated to the Qwen3.5 streaming inference failure." + "reason": "Tokenizer mapping/add-new-model-like issue is unrelated to the Qwen3.5 streaming API failure." }, { "left": "issue:39290", "right": "issue:43742", "accept": false, - "reason": "Gemma3/vLLM missing attribute and MobileLLM loading key error are different model-specific loading bugs." + "reason": "Different models and stack traces; Gemma3 vLLM attribute error is not the same as MobileLLM key error." }, { "left": "issue:43054", "right": "issue:43994", "accept": false, - "reason": "Both are about SigLIP2, but one reports embedding quality degradation and the other a pipeline/AutoModel output issue; not clearly the same concrete bug." + "reason": "Both are SigLIP2-related, but one reports degraded embeddings and the other reports nonsensical AutoModel/pipeline outputs; too little evidence of the same root cause." }, { "left": "issue:43541", "right": "issue:44589", "accept": false, - "reason": "Dynamo tracing on Mixtral float32 and missing Float8 storage are different runtime failures." + "reason": "Different MoE/quantization/runtime failures; no shared concrete code path." }, { "left": "issue:41093", "right": "issue:43596", "accept": false, - "reason": "Both are index errors, but they arise in different paths: masking mismatch vs DeepSpeed ZeRO-3/BertModel init." + "reason": "Both are IndexErrors, but they occur in different model-loading paths and don't indicate the same bug." }, { "left": "issue:42915", "right": "issue:43541", "accept": false, - "reason": "Qwen3Moe FP8 failure and Mixtral torch dynamo grouped_mm failure are unrelated model/runtime bugs." + "reason": "Different MoE issues: FineGrainedFP8Config versus torch dynamo grouped_mm tracing failure." 
}, { "left": "issue:40990", "right": "issue:44960", "accept": false, - "reason": "Perplexity regression on gpt-oss-20b and the generic `GLM5` issue do not appear to describe the same bug." + "reason": "Unrelated model/perplexity report versus a generic GLM5 issue title." }, { "left": "issue:39186", "right": "issue:44918", "accept": false, - "reason": "FSDP `'weight' must be 2-D` and Qwen3.5 embedding unpacking with TRL SFT trainer are different training/loading issues." + "reason": "FSDP weight-shape error is a different training-time failure from TRL unpacking embeddings." }, { "left": "issue:43404", "right": "issue:45005", "accept": false, - "reason": "Both mention tied weights, but one is a Mistral3 lm_head tying bug and the other is a broader translation-model v5 report; not the same concrete failure." + "reason": "Both involve tied weights, but the affected models and failure modes are different; not clearly the same bug." }, { "left": "issue:43645", "right": "issue:45399", "accept": false, - "reason": "Jupyter custom-model initialization and flash-attn2 fallback checks are unrelated regressions." + "reason": "Notebook custom-model initialization and flash-attn fallback blocking are unrelated." }, { "left": "issue:44530", "right": "issue:45005", "accept": false, - "reason": "PagedAttentionCache/linear_attention on Qwen3.5 is unrelated to tied-weights handling in translation models." + "reason": "PagedAttentionCache linear_attention crash is unrelated to tied-weight regressions." }, { "left": "issue:44863", "right": "issue:45005", "accept": false, - "reason": "NemotronH checkpoint loading and tied-weights translation-model issues are different model-specific bugs." + "reason": "NemotronH checkpoint loading and tied-weights translation regression are different issues." }, { "left": "issue:43873", "right": "issue:45005", "accept": false, - "reason": "Quantization/offloading behavior and tied-weights handling are different failure modes." 
+ "reason": "Quantization/offloading behavior is not the same bug as tied weights." }, { "left": "issue:44360", "right": "issue:44512", "accept": false, - "reason": "DSA indexer behavior and removed-run-command docs are unrelated." + "reason": "A DSA indexer bug and a docs typo about the removed run command are unrelated." }, { "left": "issue:42915", "right": "issue:45399", "accept": false, - "reason": "Qwen3Moe FP8 support and flash-attn2 fallback gating are different code paths." + "reason": "FineGrainedFP8Config on Qwen3-MoE is a different problem from flash-attn2 fallback logic." }, { "left": "issue:44918", "right": "issue:45507", "accept": false, - "reason": "TRL SFT unpacking and GraniteMoEHybrid invalid method calls are unrelated trainer/model bugs." + "reason": "Different model families and failures: Qwen3.5 TRL unpacking versus GraniteMoEHybrid invalid method call." }, { "left": "issue:44492", "right": "issue:44512", "accept": false, - "reason": "Cache-strategy typo and docs about a removed command are both documentation-level but not the same issue." + "reason": "Both are documentation-ish, but they target different content and do not represent the same change." }, { "left": "issue:43975", "right": "issue:44292", "accept": false, - "reason": "Detokenization on DeepSeek Coder and Qwen-3-8B-NVFP4 runtime errors are different model problems." + "reason": "Different models and different symptoms; detokenization bug versus NVFP4 runtime error." }, { "left": "issue:43950", "right": "issue:44534", "accept": true, - "reason": "Same regression: both report v5 corrupting non-persistent buffers / `register_buffer(persistent=False)` state during loading." + "reason": "Same Transformers 5.x non-persistent-buffer corruption regression, just described with different wording." }, { "left": "issue:43644", "right": "issue:43950", "accept": true, - "reason": "Same underlying bug: Transformers v5/from_pretrained is filling or corrupting non-persistent buffers with junk." 
+ "reason": "Same underlying bug: Transformers 5.x corrupts non-persistent buffers during load/save behavior." }, { "left": "issue:44292", "right": "issue:45507", "accept": false, - "reason": "Qwen-3-8B-NVFP4 runtime error and GraniteMoEHybrid invalid method are unrelated model/runtime failures." + "reason": "Qwen-3-8B-NVFP4 runtime error and GraniteMoEHybrid invalid-method call are unrelated." }, { "left": "issue:39692", "right": "issue:43054", "accept": false, - "reason": "Both involve SigLIP2, but one is docs-example/quantization issues and the other is a text-embedding quality complaint; not the same bug." + "reason": "Both mention SigLIP2, but one is a broken documentation example and the other is a model quality complaint." }, { "left": "issue:43828", "right": "issue:44928", "accept": false, - "reason": "Autocast dtype mismatch and RLHF gradient explosion from 3D position ids are distinct failures." + "reason": "Different models and different failures; dtype mismatch under autocast versus Qwen3.5 RLHF gradient explosion." }, { "left": "issue:41553", "right": "issue:43883", "accept": false, - "reason": "Bad AutoTokenizer error messaging for Voxtral and missing `all_tied_weights_keys` on MolmoForCausalLM are unrelated." + "reason": "Bad AutoTokenizer error messaging for Voxtral is unrelated to Molmo tied-weights attribute failure." }, { "left": "issue:43866", "right": "issue:44403", "accept": false, - "reason": "Corrupted Ovis2 checkpoint and generic loading noise are not the same concrete bug." + "reason": "Checkpoint corruption and noisy loading output are not the same bug." }, { - "left": "issue:36331", - "right": "issue:41762", + "left": "issue:44560", + "right": "issue:45507", "accept": false, - "reason": "CustomTrainer `compute_loss` signature mismatch and Gemma3 ZeRO-3 loading index error are unrelated training/loading bugs." + "reason": "Qwen3-vl video StopIteration and GraniteMoEHybrid invalid method call are different code paths." 
}, { - "left": "issue:44560", - "right": "issue:45507", + "left": "issue:36331", + "right": "issue:41762", "accept": false, - "reason": "Qwen3-vl-embedding video StopIteration and GraniteMoEHybrid invalid method calls are different errors." + "reason": "CustomTrainer unexpected-argument TypeError and Gemma3 ZeRO-3 IndexError are unrelated." } ] }, "evaluator_result": { "accept": true, - "feedback": "The cluster summary is grounded in the packet, and the accepted soft-edge links are conservative: both accepted pairs clearly point to the same Transformers v5 non-persistent buffer corruption regression. The rejections are also generally cautious and the reasoning does not overstate equivalence." + "feedback": "The cluster summary is conservative and grounded in the packet. The only clearly supported duplicate set is the Transformers 5.x non-persistent-buffer regression around #43644, #43950, and #44534, and the other pairwise rejections stay on the safe side of the evidence. No overclaiming stands out." 
}, "evaluator_used": true, "retried": false, @@ -978,8 +990,8 @@ "issue:43975|issue:45520", "issue:43643|issue:45020", "issue:42947|issue:43856", - "issue:44403|issue:44488", "issue:43782|issue:44960", + "issue:44403|issue:44488", "issue:42491|issue:44368", "issue:41762|issue:42915", "issue:43975|issue:44451", @@ -1005,256 +1017,256 @@ "issue:42915|issue:43872", "issue:43257|issue:45305" ], - "prepared_review_unit_hash": "cd8879a6f1e6dd337c20e23631136f7b305c5486979dda1591d027ee5aeb276f", + "prepared_review_unit_hash": "429445cb02b3bf45d9726756c2d41c585d01862f6dd72e4c4bd0dc4ac17d42b3", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 36, - "serialized_chars": 38601, - "estimated_input_tokens": 9651, - "estimated_eval_tokens": 19558 + "serialized_chars": 38599, + "estimated_input_tokens": 9650, + "estimated_eval_tokens": 19556 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly heterogeneous, but a few soft pairs are true duplicates: SigLIP2 implementation mismatch, stale v5 docs, Whisper fbank extraction, Qwen3 ZeRO-3 weight loading, and the _is_hf_initialized/init_empty_weights TypeError. Most other pairs are only broadly related by subsystem, not the same bug.", - "confidence": 0.89, - "canonical_issue_reason": "issue:45020 is the broadest representative of the cluster\u2019s main theme: recent Transformers v5 loading/tokenizer regressions, especially around remote_code and model initialization.", + "summary": "This cluster is mostly a loose set of Transformers 5.x regressions around model/tokenizer loading and a few documentation/reporting issues. 
The only clear duplicate subgroups are the SigLIP2 discrepancy reports and the pipeline-docs regression; most other pairs are only superficially similar and should stay separate.", + "confidence": 0.46, + "canonical_issue_reason": "issue:45020 is the broadest and most representative open issue in the set: it captures the recurring theme of recent-version regressions affecting remote_code/model loading.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45020 is the best single representative because it is broad, current, and central to the model-loading regression theme spanning several items in this cluster.", + "best_issue_reason": "issue:45020 best represents the cluster\u2019s main user-facing failure mode and is the most useful umbrella issue to anchor related reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:41553", "right": "issue:43881", "accept": false, - "reason": "Different models and failures: Voxtral AutoTokenizer error-message issue vs GLM-4v-9b loading failure." + "reason": "Both are loading failures, but they involve different models and different failure modes; not the same bug." }, { "left": "issue:43872", "right": "issue:44589", "accept": false, - "reason": "Both are quantization/storage-related, but the root causes differ: bitsandbytes _is_hf_initialized vs missing Float8 storage type." + "reason": "Different root causes: bitsandbytes init kwargs vs missing torch storage class." }, { "left": "issue:38175", "right": "issue:43054", - "accept": false, - "reason": "Same model family, but zero probabilities vs degraded text embeddings are not clearly the same underlying bug." + "accept": true, + "reason": "Both report SigLIP2 output/quality regressions and point to the same underlying implementation discrepancy." }, { "left": "issue:42947", "right": "issue:44387", "accept": false, - "reason": "Both concern training/memory behavior, but gradient checkpointing/LoRA and int4 CUDA reserved memory are distinct issues." 
+ "reason": "Both are memory-related, but one is about LoRA/gradient checkpointing behavior and the other about int4 quantization OOM." }, { "left": "issue:43054", "right": "issue:43493", "accept": true, - "reason": "Both point to a SigLIP2 implementation mismatch versus reference behavior, with degraded embeddings as a likely symptom of the same bug." + "reason": "Both concern SigLIP2 behaving differently from the expected/original implementation, so they look like the same underlying bug." }, { "left": "issue:43881", "right": "issue:43883", "accept": false, - "reason": "Unrelated failures: GLM-4v loading vs Molmo missing all_tied_weights_keys." + "reason": "Different model families and different crashes; no shared concrete bug." }, { "left": "issue:44403", "right": "issue:44451", "accept": false, - "reason": "Generic loading noise is not the same as a specific ScandiBERT loading regression." + "reason": "Generic loading noise vs a specific model load failure are not the same issue." }, { "left": "issue:43975", "right": "issue:45520", "accept": false, - "reason": "Different areas: tokenizer detokenization regression vs flash_attn import KeyError on Python 3.13." + "reason": "Tokenizer detokenization regression and a Python 3.13 import_utils KeyError are unrelated." }, { "left": "issue:43643", "right": "issue:45020", "accept": false, - "reason": "Both involve remote_code/config handling, but one is missing fields in AutoConfig and the other is broader model breakage; not the same concrete bug." + "reason": "43643 is a specific AutoConfig trust_remote_code field-loss bug; 45020 is a broad umbrella for remote_code breakages, not the same concrete defect." }, { "left": "issue:42947", "right": "issue:43856", "accept": false, - "reason": "LoRA gradient checkpointing and Qwen3 MoE memory usage are different training bugs." + "reason": "Different Qwen3-related training/memory problems with different symptoms and likely different code paths." 
}, { - "left": "issue:44403", - "right": "issue:44488", + "left": "issue:43782", + "right": "issue:44960", "accept": false, - "reason": "Both are loading-related, but the underlying models and symptoms differ." + "reason": "Different model families and different loading failures; no concrete overlap." }, { - "left": "issue:43782", - "right": "issue:44960", + "left": "issue:44403", + "right": "issue:44488", "accept": false, - "reason": "Qwen3VL weight_only loading error and GLM5 loading are unrelated model-specific problems." + "reason": "One is a generic log-noise complaint, the other is a specific model loading failure." }, { "left": "issue:42491", "right": "issue:44368", "accept": false, - "reason": "LoRA model compatibility on hf4.x/hf5.x is not the same as a tie_word_embeddings warning." + "reason": "The LoRA compatibility regression and the tie_word_embeddings warning are related to Qwen3 fine-tuning, but not the same bug." }, { "left": "issue:41762", "right": "issue:42915", "accept": false, - "reason": "Both mention DeepSpeed, but Gemma3 ZeRO-3 index error and Qwen3 MoE FP8 failure are different code paths." + "reason": "Different models and different DeepSpeed/FP8 failure modes." }, { "left": "issue:43975", "right": "issue:44451", "accept": false, - "reason": "Different tokenizer/load failures for different models." + "reason": "Different user-visible regressions: detokenization vs model loading." }, { "left": "issue:45020", "right": "issue:45356", "accept": false, - "reason": "Broad remote_code regressions and Kimi-K2.5 tokenizer codec/fix_mistral_regex regression are distinct bugs." + "reason": "Broad remote_code regressions vs a specific tokenizer codec/fix_mistral_regex regression are not the same defect." }, { "left": "issue:44661", "right": "issue:45020", "accept": false, - "reason": "add-new-model-like failure in tokenizer mapping is a separate codegen/tooling bug, not the same as remote_code model breakage." 
+ "reason": "add-new-model-like / TOKENIZER_MAPPING_NAMES failure is a separate registry bug, not the same as remote_code loading breakage." }, { "left": "issue:43054", "right": "issue:43278", "accept": false, - "reason": "SigLIP2 quality discrepancy and BF16-to-FP32 dtype change are different symptoms with no clear shared root cause." + "reason": "SigLIP2 embedding quality and BF16->FP32 dtype drift are different symptoms with different likely causes." }, { "left": "issue:43824", "right": "issue:43872", "accept": false, - "reason": "Qwen2.5-VL import error and bitsandbytes init_empty_weights incompatibility are unrelated." + "reason": "Importing Qwen2.5-VL and bitsandbytes Int8Params initialization are unrelated." }, { "left": "issue:44991", "right": "issue:45399", "accept": false, - "reason": "Tokenizer loading failure for est-roberta and flash-attn2 fallback blocking are unrelated." + "reason": "Tokenizer loading for est-roberta and flash-attn fallback gating are unrelated." }, { "left": "issue:43827", "right": "issue:44509", "accept": true, - "reason": "Same documentation regression: stale docs still reference pipeline tasks removed in v5." + "reason": "Both are the same documentation regression: stale pipeline task references after v5 removals." }, { "left": "issue:43792", "right": "issue:44220", - "accept": true, - "reason": "Both describe the same Whisper audio-feature path; the fbank extraction helper is the likely concrete cause of Whisper failing to run." + "accept": false, + "reason": "Whisper failing to run and a low-level fbank feature helper issue are not clearly the same bug from the available evidence." }, { "left": "issue:45081", "right": "issue:45399", "accept": false, - "reason": "Mistral regex patch crash and flash-attn2 fallback blocking are separate issues." + "reason": "Mistral regex patch crash and flash-attn fallback gating are different code paths." 
}, { "left": "issue:43404", "right": "issue:43950", "accept": false, - "reason": "lm_head tying in Mistral3 and silent corruption of non-persistent buffers are unrelated model-loading bugs." + "reason": "Tied lm_head weights and non-persistent buffer corruption are distinct model state issues." }, { "left": "issue:43257", "right": "issue:43866", "accept": false, - "reason": "Qwen3 MoE weight conversion with accelerate+deepspeed and Ovis2 checkpoint corruption are different problems." + "reason": "Qwen3 MoE weight conversion under accelerate/deepspeed and an Ovis2 checkpoint corruption report are different problems." }, { "left": "issue:42915", "right": "issue:45020", "accept": false, - "reason": "Qwen3 MoE FP8 training failure is narrower and different from the broad remote_code loading regressions." + "reason": "FineGrainedFP8Config failure is a specific Qwen3 MoE training issue, not the same as broad remote_code regressions." }, { "left": "issue:43742", "right": "issue:45399", "accept": false, - "reason": "MobileLLM loading failure and flash-attn2 fallback checks are unrelated." + "reason": "MobileLLM config/key loading and flash-attn fallback gating are unrelated." }, { "left": "issue:41553", "right": "issue:44991", "accept": false, - "reason": "Both are tokenizer-loading issues, but they affect different models and likely different root causes." + "reason": "Tokenizer error messaging and est-roberta tokenizer loading failure are different bugs." }, { "left": "issue:43742", "right": "issue:45020", "accept": false, - "reason": "MobileLLM KeyError is a specific model load bug, not the same as the broad remote_code regression bucket." + "reason": "Different failure classes: config/key error vs broad remote_code breakage." }, { "left": "issue:43940", "right": "issue:45313", - "accept": true, - "reason": "Same concrete failure class: Qwen3-family DeepSpeed ZeRO-3 weight loading leaves parameters missing." 
+ "accept": false, + "reason": "Both are Qwen3 ZeRO-3 loading failures, but they affect different model variants and the available evidence is not enough to treat them as the same concrete bug." }, { "left": "issue:43872", "right": "issue:44291", - "accept": true, - "reason": "These are the same TypeError class: loading with init_empty_weights hits unexpected _is_hf_initialized handling." + "accept": false, + "reason": "Both mention _is_hf_initialized / init_empty_weights style loading, but the reported failure modes differ enough to keep them separate." }, { "left": "issue:43854", "right": "issue:44863", "accept": false, - "reason": "GLM-4.7-Flash unit-test loading problem and NemotronH checkpoint loading failure are distinct model-specific bugs." + "reason": "GLM-4.7 test loading and NemotronH checkpoint loading are distinct model-specific failures." }, { "left": "issue:44928", "right": "issue:45588", "accept": false, - "reason": "RLHF NaN from 3D position_ids/SDPA fallback and flash_attention s_aux=None crash are different failures." + "reason": "RLHF gradient explosion from SDPA/BF16 behavior and a flash_attention.py s_aux crash are unrelated." }, { "left": "issue:41553", "right": "issue:44488", "accept": false, - "reason": "Different tokenizer/model-loading failures for Voxtral and sleng-bert." + "reason": "Different models, different tokenizer/loading failures." }, { "left": "issue:42915", "right": "issue:43872", "accept": false, - "reason": "Qwen3 MoE FP8 training failure and bitsandbytes init_empty_weights incompatibility are unrelated." + "reason": "DeepSpeed/FP8 Qwen3 training failure and bitsandbytes Int8Params construction error are unrelated." }, { "left": "issue:43257", "right": "issue:45305", "accept": false, - "reason": "Qwen3 MoE weight conversion/loading and gradient averaging under DeepSpeed are different bugs." + "reason": "Both involve Qwen3 and distributed training, but one is MoE weight loading and the other is gradient averaging with GAS." 
} ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is grounded in the packet and stays conservative on the soft edges. The accepted pairs are plausibly the same bug class, and the rejected pairs are mostly only subsystem-level matches rather than clear duplicates. No overstatement stands out." + "feedback": "The summary is grounded in the packet, and the soft-edge calls are generally conservative. The accepted duplicate groupings (SigLIP2 reports and the pipeline-docs regression) are reasonably supported, while the rejections avoid over-claiming shared root cause. No PR-mergeability issue applies here since these are issue pairs." }, "evaluator_used": true, "retried": false, @@ -1355,220 +1367,220 @@ "issue:42491|issue:43720", "issue:44960|issue:44977" ], - "prepared_review_unit_hash": "aa7006a996d29ff284f956a94d66ee4d595c3f1c16fbf2747192deb628227d94", + "prepared_review_unit_hash": "bef9b2e3f24c8f0879e639a11fadb4042e980950897c97ce9c0dea0fa2042104", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 30, - "serialized_chars": 36151, - "estimated_input_tokens": 9038, - "estimated_eval_tokens": 18332 + "serialized_chars": 36154, + "estimated_input_tokens": 9039, + "estimated_eval_tokens": 18334 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This set is mostly a bag of unrelated issues, with only two clear duplicate pairs: the Transformers 5.0 custom model initialization regression, and the non-persistent buffer corruption regression. 
The rest are only superficially similar by title and should stay separate.", - "confidence": 0.86, - "canonical_issue_reason": "No single canonical issue exists for the full set because the items span many unrelated bugs; if a duplicate anchor is required, issue 43644 is the best representative of the non-persistent-buffer regression and is essentially duplicated by 44534.", + "summary": "Mostly unrelated issues with two real duplicate candidates: the Transformers 5.0 custom model initialization regression, and the non-persistent buffer corruption regression. The rest are only broadly similar by subsystem or error shape and should stay separate.", + "confidence": 0.88, + "canonical_issue_reason": "Issue 43644 is the cleanest canonical issue in this set because it captures a concrete regression directly duplicated by 44534, with a clear symptom and the strongest duplicate signal.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 43644 is the strongest issue candidate because it is the clearest, earliest report of the non-persistent buffer junk regression and has a near-identical duplicate in 44534.", + "best_issue_reason": "Issue 43644 is the best representative issue here: it is the most exact, self-contained bug report among the true duplicates and can anchor triage better than the more specific or loosely related reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:36331", "right": "issue:41093", "accept": false, - "reason": "Different failures: trainer API mismatch vs mask/tensor shape IndexError." + "reason": "Different failures: custom trainer loss signature mismatch vs. a mask/tensor shape IndexError. Same training area, but not the same bug." }, { "left": "issue:41093", "right": "issue:44805", "accept": false, - "reason": "Only the error wording overlaps; the reported shapes/models and likely root causes differ." 
+ "reason": "Both mention a generic mask-shape mismatch, but the reports are too underspecified to confirm the same underlying code path or model/repro." }, { "left": "issue:43872", "right": "issue:45538", "accept": false, - "reason": "Unrelated bugs: bitsandbytes initialization error vs tokenizer model_max_length behavior." + "reason": "Completely different problems: bitsandbytes Int8Params initialization vs. CLIPTokenizer max_length handling." }, { "left": "issue:44479", "right": "issue:44560", "accept": false, - "reason": "Same broad Qwen3/VL area, but different concrete bugs and code paths (video regression vs embedding/tracks issue)." + "reason": "Both are Qwen video-related regressions, but they affect different model families and have different failure modes (video input regression vs StopIteration)." }, { "left": "issue:44530", "right": "issue:44863", "accept": false, - "reason": "Different subsystems and failures: cache/group-type crash vs checkpoint loading for a different model family." + "reason": "Unrelated: PagedAttentionCache linear_attention crash vs. NemotronH checkpoint loading failure." }, { "left": "issue:43366", "right": "issue:43828", "accept": false, - "reason": "Unrelated: GGUF gpt-oss architecture support vs autocast dtype mismatch." + "reason": "Different areas and symptoms: GGUF/gpt-oss support vs. autocast dtype mismatch in Phi-tiny-MoE." }, { "left": "issue:24643", "right": "issue:30064", "accept": false, - "reason": "DeepSpeed training weight-shape error is not the same bug as void segmentation map processing." + "reason": "Unrelated: DeepSpeed weight dimensionality error vs. image processor void segmentation map handling." }, { "left": "issue:44155", "right": "issue:45584", "accept": false, - "reason": "Different models and mechanics: batched AudioFlamingo track leakage vs Whisper empty-transcription handling." + "reason": "Different models and code paths: AudioFlamingo3 batch leakage vs. 
Whisper empty-transcription generation after token alignment." }, { "left": "issue:43366", "right": "issue:43531", "accept": false, - "reason": "Both mention Qwen-related topics, but one is GGUF support and the other is a sliding_window issue; not the same bug." + "reason": "Both touch Qwen/large-model support, but one is GGUF gpt-oss support and the other is a sliding_window bug in Qwen3-MoE." }, { "left": "issue:43550", "right": "issue:44910", "accept": false, - "reason": "Different attention/backend crashes with different models and error modes." + "reason": "Different concrete bugs: torch.compile+SDPA on Bamba vs. Flash Attention illegal memory access from 3D position_ids on Qwen3.5." }, { "left": "issue:43645", "right": "issue:43646", "accept": true, - "reason": "Same underlying Transformers 5.0 custom model initialization regression; the notebook report is a context-specific manifestation of the general bug." + "reason": "Same underlying regression: custom model initialization breaks in Transformers 5.0.0; the notebook report is just a specific repro of the broader custom-model init failure." }, { "left": "issue:43950", "right": "issue:45325", "accept": false, - "reason": "Non-persistent buffer corruption regression vs Qwen2.5-VL rope scaling bug; different code paths." + "reason": "Different bugs: non-persistent buffer corruption in from_pretrained vs. Qwen2.5-VL rope index scaling for still images." }, { "left": "issue:44560", "right": "issue:44918", "accept": false, - "reason": "Both involve Qwen models, but one is a video StopIteration regression and the other is a TRL embedding unpacking failure." + "reason": "Both involve Qwen3.5/video or embeddings, but one is a video StopIteration regression and the other is a TRL SFT embedding-unpacking failure." }, { "left": "issue:43366", "right": "issue:44589", "accept": false, - "reason": "No shared bug: GGUF support request vs missing Float8 storage type." + "reason": "Unrelated: GGUF gpt-oss support vs. 
missing Float8 storage class error." }, { "left": "issue:43824", "right": "issue:43957", "accept": false, - "reason": "ImportError for Qwen2.5-VL is unrelated to meta-device loading regressions." + "reason": "Both are loading/import failures, but one is a missing Qwen2.5-VL symbol and the other is a meta-device loading regression for some models." }, { "left": "issue:42915", "right": "issue:43957", "accept": false, - "reason": "Qwen3Moe FP8 failure and meta-device loading problems are different issues." + "reason": "Different problems: Qwen3Moe FineGrainedFP8Config failure vs. meta-device loading regression." }, { "left": "issue:42617", "right": "issue:43366", "accept": false, - "reason": "Different problems: 3d_parallel.py execution vs GGUF architecture support." + "reason": "Not the same bug: 3D parallel script execution failure vs. GGUF gpt-oss support." }, { "left": "issue:38175", "right": "issue:43994", "accept": false, - "reason": "Same model family, but one is zero-probabilities behavior and the other is an AutoModel/pipeline nonsensical-results report with different root cause." + "reason": "Same model family, but the reports are not specific enough to prove the same underlying issue; one is zero probabilities, the other is nonsensical outputs with AutoModel/pipeline." }, { "left": "issue:43646", "right": "issue:43950", "accept": false, - "reason": "Custom model init regression is distinct from the non-persistent buffer corruption regression." + "reason": "Both are Transformers 5 regressions, but one is custom model initialization and the other is non-persistent buffer corruption; different code paths." }, { "left": "issue:44560", "right": "issue:44805", "accept": false, - "reason": "Different Qwen video issues; the reported failures and symptoms do not line up." + "reason": "Different bugs despite similar-looking error text: Qwen3-vl-embedding video StopIteration vs. a generic mask-shape IndexError report." 
}, { "left": "issue:36010", "right": "issue:42915", "accept": false, - "reason": "ImportError for GenerationMixin is unrelated to Qwen3Moe FineGrainedFP8 loading." + "reason": "Unrelated: GenerationMixin import failure vs. Qwen3Moe FP8 config failure." }, { "left": "issue:43638", "right": "issue:44661", "accept": false, - "reason": "DeepSpeed/Bert index error vs tokenizer mapping-name constraint are unrelated." + "reason": "Different loading/config issues: deepspeed zero3 Bert index error vs. add-new-model-like failing with TOKENIZER_MAPPING_NAMES." }, { "left": "issue:44877", "right": "issue:45030", "accept": false, - "reason": "Both are config-loading regressions, but the models and failure mechanisms differ." + "reason": "Both involve config validation, but they affect different model families and different failure conditions." }, { "left": "issue:43596", "right": "issue:44661", "accept": false, - "reason": "Different bugs: ZeRO-3 init index error vs add-new-model-like failing in TOKENIZER_MAPPING_NAMES." + "reason": "Unrelated: zero3 Bert index-out-of-bounds vs. tokenizer mapping/add-new-model-like logic." }, { "left": "issue:43317", "right": "issue:44756", "accept": false, - "reason": "device_map/offload loading failure is not the same as mmap/OOM on Strix Halo." + "reason": "Different symptoms and likely root causes: gpu/cpu offload loading failure vs. Strix Halo mmap OOM avoidance." }, { "left": "issue:43644", "right": "issue:44534", "accept": true, - "reason": "Same underlying Transformers v5 regression: non-persistent buffers get corrupted/filled with junk." + "reason": "Same underlying regression with effectively identical symptom: Transformers v5 fills non-persistent buffers with junk; 44534 is a duplicate phrasing of the same bug." }, { "left": "issue:45313", "right": "issue:45357", "accept": false, - "reason": "Different Qwen3.5 regressions: ZeRO-3 language_model weight loading vs save_pretrained visual encoder keys." 
+ "reason": "Both are Qwen3.5 issues, but one is ZeRO-3 weight loading and the other is save_pretrained writing wrong visual encoder keys." }, { "left": "issue:38175", "right": "issue:39692", "accept": false, - "reason": "SigLIP2 output anomaly vs docs example/model-processor mismatch and quantization failure are not the same bug." + "reason": "Related SigLIP2 area, but not the same bug: zero probabilities vs. a documentation example with multiple errors including model/processor mismatch and quantization failure." }, { "left": "issue:42491", "right": "issue:43720", "accept": false, - "reason": "LoRA checkpoint compatibility on hf5.x is unrelated to packed-weight handling in BitNet AutoBitLinear." + "reason": "Different bugs: LoRA checkpoint compatibility across branches vs. packed-weight handling during accelerate loading." }, { "left": "issue:44960", "right": "issue:44977", "accept": false, - "reason": "GLM5 and Qwen3.5 flash-attention failures are different model-specific bugs." + "reason": "Unrelated model families and failures: GLM5 vs. Qwen3.5 flash-attention generation issues." } ] }, "evaluator_result": { "accept": true, - "feedback": "The analyst output is grounded in the packet and stays conservative overall. The two accepted duplicate pairs are well supported by the titles, and the rejected soft-edge pairs mostly hinge on clearly different model/subsystem behaviors. No mergeability issues apply here because there are no PR pairs." + "feedback": "The conclusions are conservative and supported by the packet. The two accepted pairs are strongly title-aligned, especially 43644\u219444534, and the rejections avoid overclaiming where the evidence is only thematic or underspecified. The canonical/best-issue choice is also grounded in the clearest duplicate signal." 
}, "evaluator_used": true, "retried": false, @@ -1677,232 +1689,268 @@ "issue:43646|issue:43708", "issue:45292|issue:45335" ], - "prepared_review_unit_hash": "9c84d8d354c202417f84c58d5517d2e55f9e2a68aaff7647ce8319de72f6fe11", + "prepared_review_unit_hash": "555da00e5d82820ebe83c154f72b0d6b407336d7d68e61822877c6e3c9384e56", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 38, - "serialized_chars": 38392, - "estimated_input_tokens": 9598, - "estimated_eval_tokens": 19452 + "serialized_chars": 38401, + "estimated_input_tokens": 9601, + "estimated_eval_tokens": 19458 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is mostly unrelated. The only clear duplicate set is the `resize_token_embeddings` regression around model-specific embeddings/output heads; the rest are separate model-loading, quantization, checkpointing, or runtime bugs that only overlap at a broad subsystem level.", - "confidence": 0.92, - "canonical_issue_reason": "Issue 45276 is the best canonical issue: it is the broadest and most representative report for the `resize_token_embeddings` propagation bug, and it predates the closest follow-up reports.", + "summary": "This cluster is mostly a collection of unrelated Transformers issues. 
The only clear duplicate subclusters are the resize_token_embeddings/output_embeddings reports and the missing lm_head weight-tying reports.", + "confidence": 0.63, + "canonical_issue_reason": "issue:45292 is the best representative because it states the underlying resize_token_embeddings/output_embeddings bug in the most general way and is directly supported by two very similar follow-on reports.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 45276 is the strongest global issue representative because it captures the core regression most generally and matches the other accepted duplicate reports.", + "best_issue_reason": "issue:45292 is the strongest global issue candidate: it is general, centrally positioned in the only obvious duplicate subcluster, and its title matches the shared failure mode more broadly than the model-specific variants.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44451", "right": "issue:44488", - "accept": false, - "reason": "Both are model-loading complaints, but they involve different models and no evidence of the same root cause." + "accept": true, + "reason": "Both report the same model-loading regression: current/latest Transformers versions fail to load specific BERT models." }, { "left": "issue:43708", "right": "issue:44075", "accept": false, - "reason": "Different bugs: checkpoint resume step \u0917\u0923culation vs optimizer arguments being ignored." + "reason": "Completely different bugs: checkpoint resumption/max_steps vs SGD optimizer arguments." }, { "left": "issue:43854", "right": "issue:43931", "accept": false, - "reason": "Different models and different failure modes; not the same underlying defect." + "reason": "Both are model-loading failures, but for different models and different failure modes; no clear same underlying bug." 
}, { "left": "issue:43856", "right": "issue:43873", "accept": false, - "reason": "One is training memory usage, the other is quantization/offloading behavior; too different to be duplicates." + "reason": "One is MoE training memory usage, the other is quantization/offloading behavior; too broad to be the same bug." }, { "left": "issue:42491", "right": "issue:45276", "accept": false, - "reason": "LoRA hf4.x/hf5.x compatibility is unrelated to the `resize_token_embeddings` regression." + "reason": "Different problems: a qwen3_moe LoRA compatibility issue vs resize_token_embeddings not updating outputs." }, { "left": "issue:41720", "right": "issue:42915", "accept": false, - "reason": "Both mention Qwen3/MoE, but the concrete failures are different code paths and symptoms." + "reason": "Both mention Qwen3, but the failures are different: device mapping CUDA assert vs FineGrainedFP8 loading." }, { "left": "issue:43257", "right": "issue:43854", "accept": false, - "reason": "Accelerate/deepspeed conversion issue vs model-load test failure; not the same bug." + "reason": "Unrelated model families and failure modes." }, { "left": "issue:39692", "right": "issue:43994", "accept": false, - "reason": "Same model family, but one is a docs/example problem and the other is inference quality; insufficient to merge." + "reason": "Related model family, but one is a docs example with multiple issues and the other is a pipeline/AutoModel inference regression." }, { "left": "issue:44488", "right": "issue:44991", "accept": false, - "reason": "Different affected models and different tokenizer/load regressions." + "reason": "Both are tokenizer/model loading complaints, but for different models and likely different root causes." }, { "left": "issue:45362", "right": "issue:45406", "accept": false, - "reason": "Different products and failures: chat crash vs processor attribute error." + "reason": "Different subsystems and symptoms: chat crash vs transformers serve processor attribute error." 
}, { "left": "issue:43856", "right": "issue:44387", "accept": false, - "reason": "Both are memory-related, but the reported causes are different (training footprint vs int4 OOM regression)." + "reason": "Both involve memory, but one is training inefficiency and the other is int4 quantization OOM." }, { "left": "issue:43644", "right": "issue:43645", "accept": false, - "reason": "Both affect Transformers 5 custom-model scenarios, but the specific defects are different." + "reason": "Both are Transformers 5.0 regressions, but the specific bugs are distinct." }, { "left": "issue:42915", "right": "issue:44756", "accept": false, - "reason": "Unrelated: Qwen3 MoE FP8 failure vs mmap OOM mitigation on Strix Halo." + "reason": "Different issues: Qwen3Moe FP8 loading vs Strix Halo mmap OOM." }, { "left": "issue:44661", "right": "issue:44683", "accept": false, - "reason": "Different areas entirely: tokenizer mapping validation vs compiled flex_attention on newer torch." + "reason": "No shared concrete bug; one is tokenizer mapping validation, the other is compiled flex_attention on new torch." }, { "left": "issue:42491", "right": "issue:43931", "accept": false, - "reason": "Different models and unrelated failures." + "reason": "Different model-specific load/compatibility problems." }, { "left": "issue:44910", "right": "issue:45588", "accept": false, - "reason": "Both touch flash attention, but one is a packed-sequence/position_ids bug and the other is an s_aux None AttributeError." + "reason": "Both hit flash_attention, but one is a Qwen3.5 packed-sequence misparse and the other is an s_aux=None crash for sink-less models." }, { "left": "issue:43404", "right": "issue:43454", + "accept": true, + "reason": "Both describe the same underlying issue class: lm_head weights are not tied, causing bad generation in multimodal models." 
+ }, + { + "left": "issue:42491", + "right": "issue:43856", "accept": false, - "reason": "Both mention untied lm_head weights, but the model families and reported behavior differ enough that this is not clearly the same bug." + "reason": "LoRA compatibility on a model-family/version boundary is unrelated to Qwen3 MoE memory usage." }, { "left": "issue:45276", "right": "issue:45335", "accept": true, - "reason": "Same underlying bug: `resize_token_embeddings` does not propagate to model-specific embedding/output modules." + "reason": "Same resize_token_embeddings bug, just reported for different model variants and output paths." }, { "left": "issue:36010", "right": "issue:39404", "accept": false, - "reason": "Unrelated import error vs Whisper pipeline regression." + "reason": "ImportError for GenerationMixin is unrelated to Whisper return_language pipeline behavior." }, { "left": "issue:44164", "right": "issue:44291", "accept": false, - "reason": "Different save/load initialization failures with different triggers." + "reason": "Both touch loading/saving, but one is extra_state handling and the other is init_empty_weights argument compatibility." }, { "left": "issue:44402", "right": "issue:44488", "accept": false, - "reason": "Different models and different tokenizer/vocab symptoms; not a duplicate pair." + "reason": "Tokenizer vocab-size mismatch vs a model-loading failure; not the same bug." }, { "left": "issue:41720", "right": "issue:44155", "accept": false, - "reason": "Different domains: Qwen3 device-mapping crash vs AudioFlamingo3 batched inference leak." + "reason": "Different model paths and symptoms: Qwen3 device mapping vs AudioFlamingo3 batched inference leakage." }, { "left": "issue:43873", "right": "issue:44387", "accept": false, - "reason": "Both are quantization/memory issues, but they describe different regressions and code paths." 
+ "reason": "Related to quantization and memory, but offloading failure is not the same as increased reserved memory/OOM." }, { "left": "issue:39692", "right": "issue:44960", "accept": false, - "reason": "SigLIP2 docs/inference issue is unrelated to GLM5." + "reason": "Different model families and unrelated symptoms." }, { "left": "issue:43425", "right": "issue:43957", "accept": false, - "reason": "Torch version incompatibility is unrelated to meta-device loading failures." + "reason": "Torch version incompatibility is not the same as meta-device loading breakage." }, { "left": "issue:44387", "right": "issue:44756", "accept": false, - "reason": "Different OOM scenarios with different proposed fixes; not the same defect." + "reason": "Different performance/memory regressions with no shared concrete code path." }, { "left": "issue:43854", "right": "issue:45216", "accept": false, - "reason": "Different models and different regressions: load failure in tests vs incorrect save_pretrained checkpoint." + "reason": "Different bugs: test-time model loading failure vs incorrect checkpoint saving in Qwen3.5." }, { "left": "issue:42947", "right": "issue:43381", "accept": false, - "reason": "Both mention gradient checkpointing, but one is inefficacy under LoRA and the other is eval-mode prohibition." + "reason": "Both mention gradient checkpointing, but one is ineffectiveness with PEFT LoRA and the other is an eval-mode restriction." }, { "left": "issue:44292", "right": "issue:44912", "accept": false, - "reason": "Different quantization formats and different loading failures; not mergeable as one bug." + "reason": "Different quantization formats and different models; not the same underlying defect." }, { "left": "issue:44291", "right": "issue:44756", "accept": false, - "reason": "Unexpected `_is_hf_initialized` argument is unrelated to mmap/OOM on Strix Halo." + "reason": "init_empty_weights TypeError and mmap OOM are unrelated." 
}, { "left": "issue:44743", "right": "issue:44960", "accept": false, - "reason": "Recurrent-state cache bug vs GLM5 issue; unrelated." + "reason": "Different model implementations and failure modes." }, { "left": "issue:45276", "right": "issue:45292", "accept": true, - "reason": "Same `resize_token_embeddings` propagation bug, with `output_embeddings` called out in both reports." + "reason": "Same resize_token_embeddings/output_embeddings bug; 45292 is the generic formulation." + }, + { + "left": "issue:43381", + "right": "issue:44928", + "accept": false, + "reason": "Both involve training behavior, but gradient checkpointing in eval mode is unrelated to the Qwen3.5 SDPA/BF16 crash." + }, + { + "left": "issue:43957", + "right": "issue:44756", + "accept": false, + "reason": "Meta-device loading breakage vs mmap OOM are unrelated." + }, + { + "left": "issue:44164", + "right": "issue:44756", + "accept": false, + "reason": "save/from_pretrained extra_state handling is unrelated to Strix Halo mmap behavior." + }, + { + "left": "issue:43646", + "right": "issue:43708", + "accept": false, + "reason": "Custom-model init regression vs trainer resume/max_steps bug are not the same issue." + }, + { + "left": "issue:45292", + "right": "issue:45335", + "accept": true, + "reason": "Both are the same resize_token_embeddings bug manifested in different model components." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is grounded in the packet and stays conservative on the soft edges. The two accepted duplicates around `resize_token_embeddings` are well-supported by the issue titles and similarity, and the other pairwise rejections do not overclaim shared root cause. No mergeability problems for PR pairs here." + "feedback": "The summary is grounded in the packet and stays conservative: the only clearly supported duplicate subclusters are the resize_token_embeddings/output_embeddings reports and the lm_head weight-tying reports. 
The accepted soft-edge pairs are plausible, and the rejected pairs avoid overstating shared root causes." }, "evaluator_used": true, "retried": false, @@ -2010,244 +2058,244 @@ "issue:43425|issue:45161", "issue:43611|issue:43644" ], - "prepared_review_unit_hash": "5b5edd9395ff00742810924d51557e560100c9789e579dbe13df3b617f3446d0", + "prepared_review_unit_hash": "682631a4c02b9fd01fbb98ddbef4d9956219e81f8ca02cefef009821ccfb29e5", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 36, - "serialized_chars": 38484, - "estimated_input_tokens": 9621, - "estimated_eval_tokens": 19498 + "serialized_chars": 38485, + "estimated_input_tokens": 9622, + "estimated_eval_tokens": 19500 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is a loose grab-bag of Transformers v5 regressions, mostly around model loading, tokenizers, and a few unrelated runtime/docs bugs. The most representative issue is the broad v5 model-loading regression around `base_model_prefix`.", - "confidence": 0.61, - "canonical_issue_reason": "Issue 43611 is the broadest, most representative v5 regression in the set: it directly summarizes a general model-loading breakage that overlaps the cluster\u2019s dominant theme.", + "summary": "This cluster is mostly loose similarity around Transformers v5 regressions, tokenizer/model-loading failures, and unrelated doc/runtime issues. 
I found no soft-edge pair that is clearly the same underlying bug/change under a strict duplicate standard.", + "confidence": 0.86, + "canonical_issue_reason": "issue:43611 is the clearest broad, concrete v5 regression in core model loading and is more central/actionable than the more model-specific or documentation-only reports.", "canonical_pr_reason": null, - "best_issue_reason": "43611 is the best cluster anchor because its scope is general enough to represent the recurring Transformers v5 loading failures without being tied to one specific model family.", + "best_issue_reason": "issue:43611 is the best representative issue for the cluster: it describes a specific loading regression with wide potential impact and a clear code-path compared with the other, narrower reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44479", "right": "issue:44877", "accept": false, - "reason": "Both are v5 regressions, but one is a video-input bug for Qwen VL models and the other is a config-loading issue for granite_speech; different code paths." + "reason": "Different models and failures: Qwen video-input regression vs granite_speech config strictness." }, { "left": "issue:44402", "right": "issue:44451", "accept": false, - "reason": "Both involve tokenizer/model loading failures, but the symptoms and likely causes differ: vocab-size mismatch vs inability to load a specific tokenizer." + "reason": "Both are loading-related, but they describe different model-specific tokenizer/config problems." }, { "left": "issue:43645", "right": "issue:44164", "accept": false, - "reason": "Custom-model notebook initialization and save/from_pretrained extra_state handling are different loading/saving problems." + "reason": "Custom model init in notebooks and extra_state save/load handling are different bugs." 
}, { "left": "issue:43257", "right": "issue:44863", "accept": false, - "reason": "Different model families and failure modes: Qwen3 MoE conversion with accelerate/deepspeed vs NemotronH checkpoint loading." + "reason": "Different architectures and failure modes: Qwen3 MoE conversion vs NemotronH checkpoint loading." }, { "left": "issue:43425", "right": "issue:43828", "accept": false, - "reason": "Torch-version incompatibility is not the same as an autocast dtype mismatch in a specific model." + "reason": "General Torch 2.10 incompatibility is not the same as Phi-tiny-MoE autocast dtype mismatch." }, { "left": "issue:43828", "right": "issue:43957", "accept": false, - "reason": "One is an autocast dtype mismatch; the other is meta-device loading breakage. Related only at a high level." + "reason": "Autocast dtype mismatch and meta-device loading are distinct code paths." }, { "left": "issue:44589", "right": "issue:44756", "accept": false, - "reason": "Storage lookup failure and mmap/OOM behavior are unrelated bugs." + "reason": "Float8 storage lookup error is unrelated to Strix Halo mmap OOM mitigation." }, { "left": "issue:43931", "right": "issue:44863", "accept": false, - "reason": "Both are model-loading errors, but they concern different architectures and likely different conversion/shape issues." + "reason": "Different model families and problems: Qwen3-VL shape mismatch vs NemotronH checkpoint loading." }, { "left": "issue:43906", "right": "issue:44530", "accept": false, - "reason": "A reproduction of an older issue and a PagedAttentionCache crash are not the same underlying bug." + "reason": "No clear same bug; isolated reproduction of another issue vs PagedAttentionCache linear_attention crash." }, { "left": "issue:43611", "right": "issue:43645", "accept": false, - "reason": "Both are v5 regressions, but one is about `base_model_prefix` loading and the other about notebook-defined custom models." 
+ "reason": "Both involve loading/init behavior, but one is base_model_prefix handling and the other is notebook-defined custom models." }, { "left": "issue:39404", "right": "issue:44912", "accept": false, - "reason": "Whisper pipeline language return and GPT-OSS MXFP4 quantization fallback are unrelated." + "reason": "Whisper pipeline language return regression and GPT-OSS MXFP4 quantization fallback are unrelated." }, { "left": "issue:43425", "right": "issue:43716", "accept": false, - "reason": "Torch compatibility and image-preprocessor/model dtype mismatch are distinct issues." + "reason": "Torch version compatibility and Mistral-3 image dtype mismatch are different issues." }, { "left": "issue:44530", "right": "issue:45084", "accept": false, - "reason": "PagedAttentionCache group-type handling and template-node compilation errors are different failures." + "reason": "PagedAttentionCache Qwen3.5 crash and template-node compilation error are unrelated." }, { "left": "issue:43901", "right": "issue:44509", "accept": false, - "reason": "Both are docs-related, but they target different APIs and removals." + "reason": "Both are docs complaints, but they cover different removed/changed pipeline behaviors." }, { "left": "issue:43299", "right": "issue:43931", "accept": false, - "reason": "Both mention Qwen3-VL loading, but one is about MoE weight conversion and the other about weight-shape mismatch; not enough to treat as the same bug." + "reason": "Same broad Qwen3-VL family, but not the same concrete failure: MoE loading vs weight-shape mismatch." }, { "left": "issue:44265", "right": "issue:44898", "accept": false, - "reason": "Torch export with `torch_compilable_check` and Perceiver non-default resolution loading are different code paths." + "reason": "Different model classes and failures: torch.export/torch_compilable_check vs Perceiver interpolation." 
}, { "left": "issue:44479", "right": "issue:44805", "accept": false, - "reason": "A video-input regression and a mask shape IndexError are unrelated." + "reason": "Video-input regression and mask shape mismatch are not the same bug." }, { "left": "issue:43994", "right": "issue:44079", "accept": false, - "reason": "Incorrect SigLIP2 outputs and `ModelOutput` key assignment are different problems." + "reason": "Incorrect Siglip2 outputs and ModelOutput None-key assignment are unrelated." }, { "left": "issue:44671", "right": "issue:44991", "accept": false, - "reason": "CamemBERT masked-LM prediction errors and tokenizer loading failures for a different model are not the same bug." + "reason": "CamemBERT prediction regression and tokenizer load failure for est-roberta are separate issues." }, { "left": "issue:43856", "right": "issue:44756", "accept": false, - "reason": "Memory inefficiency in Qwen3 MoE training is unrelated to Strix Halo mmap OOM avoidance." + "reason": "Training memory usage and AppleSilicon mmap OOM are different performance/runtime problems." }, { "left": "issue:43425", "right": "issue:44918", "accept": false, - "reason": "Torch version incompatibility and TRL SFT embedding unpacking are different failures." + "reason": "Torch compatibility and Qwen3.5 embedding unpacking in TRL are unrelated." }, { "left": "issue:39404", "right": "issue:44671", "accept": false, - "reason": "Whisper return_language and CamemBERT masked-LM regressions are unrelated model-specific bugs." + "reason": "Whisper pipeline behavior and CamemBERT masked LM predictions do not match." }, { "left": "issue:40990", "right": "issue:42947", "accept": false, - "reason": "High perplexity on GPT-OSS and ineffective gradient checkpointing with LoRA are not the same issue." + "reason": "Perplexity regression and gradient checkpointing/LoRA effectiveness are different bugs." 
}, { "left": "issue:43596", "right": "issue:44683", "accept": false, - "reason": "DeepSpeed ZeRO-3 initialization IndexError and compiled flex_attention on torch>=2.9 are unrelated." - }, - { - "left": "issue:43883", - "right": "issue:44488", - "accept": false, - "reason": "Missing `all_tied_weights_keys` on Molmo and loading `cjvt/sleng-bert` are different model-loading problems." + "reason": "Zero3 init indexing error and compiled flex_attention torch-version failure are unrelated." }, { - "left": "issue:44488", - "right": "issue:45081", + "left": "issue:44589", + "right": "issue:45084", "accept": false, - "reason": "Tokenizer load failure for `sleng-bert` and the Mistral regex patch crash are distinct tokenizer bugs." + "reason": "Float8 storage lookup and template compilation errors are different failures." }, { "left": "issue:36032", "right": "issue:43452", "accept": false, - "reason": "T5 tokenizer special-token naming conflict and `gguf_file` loading breakage are unrelated." + "reason": "Tokenizer method name conflict and gguf_file from_pretrained breakage are unrelated." }, { "left": "issue:44991", "right": "issue:45081", "accept": false, - "reason": "Both are tokenizer regressions, but they affect different tokenizers and likely different fix paths." + "reason": "Tokenizer loading failure for est-roberta and Mistral regex patch crash are different code paths." }, { "left": "issue:43582", "right": "issue:43632", "accept": false, - "reason": "Apple Silicon allocator warmup TypeError and `_is_hf_initialized` breakage are different runtime issues." + "reason": "AppleSilicon warmup TypeError and _is_hf_initialized flag regression are unrelated." }, { "left": "issue:32090", "right": "issue:39404", "accept": false, - "reason": "Trainer GPU broadcast TypeError and Whisper pipeline language return are unrelated." + "reason": "Trainer GPU broadcast TypeError and Whisper return_language regression are different bugs." 
}, { "left": "issue:43957", "right": "issue:44164", "accept": false, - "reason": "Meta-device loading issues and extra_state save/load handling are distinct code paths." + "reason": "Meta-device loading breakage and extra_state save/from_pretrained handling are separate issues." }, { "left": "issue:40990", "right": "issue:43421", "accept": false, - "reason": "Perplexity anomaly and runtime special-token post-processor updates are unrelated." + "reason": "Perplexity regression and runtime special-token post-processor update are unrelated." }, { "left": "issue:43883", "right": "issue:44488", "accept": false, - "reason": "These are separate tokenizer/model-loading failures with no clear shared root cause." + "reason": "Missing all_tied_weights_keys and sleng-bert load failure are different model-loading problems." }, { - "left": "issue:44589", - "right": "issue:45084", + "left": "issue:44479", + "right": "issue:45081", + "accept": false, + "reason": "Qwen video-input regression and Mistral regex patch crash are unrelated." + }, + { + "left": "issue:44991", + "right": "issue:45081", "accept": false, - "reason": "Float8 storage lookup and template-node compilation errors are unrelated." + "reason": "Tokenizer load failure and backend_tokenizer attribute crash are different failures." }, { "left": "issue:43611", "right": "issue:43644", "accept": false, - "reason": "Both are broad v5 regressions, but one is about `base_model_prefix` and the other about non-persistent buffer initialization; not the same bug." + "reason": "Base_model_prefix loading breakage and non-persistent buffer junk are different regressions." 
} ] }, @@ -2333,11 +2381,11 @@ "issue:44451|issue:44987", "issue:43452|issue:43482", "issue:43317|issue:43482", - "issue:36032|issue:42915", "issue:43541|issue:45084", + "issue:36032|issue:42915", "issue:39404|issue:43825", - "issue:44849|issue:45325", "issue:42915|issue:44530", + "issue:44849|issue:45325", "issue:43404|issue:44466", "issue:43262|issue:43377", "issue:36032|issue:43645", @@ -2354,232 +2402,232 @@ "issue:41553|issue:43582", "issue:44220|issue:44683" ], - "prepared_review_unit_hash": "a5b18d7e3de6320ee75afcbb76ef1a71fb1ecb2dd09f23756acea42d923f91e2", + "prepared_review_unit_hash": "340d289e745eb0d1d6a0b500eb6cbaf8e0f0147f18325fb46bb007f2630876be", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 32, - "serialized_chars": 37198, - "estimated_input_tokens": 9300, - "estimated_eval_tokens": 18856 + "serialized_chars": 37207, + "estimated_input_tokens": 9302, + "estimated_eval_tokens": 18860 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This set is mostly heterogeneous, with many unrelated Transformer bugs. The only clear duplicate-like pair is the Qwen3.5 save_pretrained regression (#45216 / #45357); the rest are too model- or symptom-specific to merge together.", + "summary": "This cluster is mostly heterogeneous noise: many items are only loosely related by subsystem/model family, not by the same underlying defect. 
The only strong duplicate-like pair is the Qwen3.5 `save_pretrained` regression pair (45216/45357), which appears to describe the same checkpoint-serialization bug with different symptom detail.", "confidence": 0.82, - "canonical_issue_reason": "No single canonical issue across the full set: most items describe unrelated bugs. If a duplicate anchor is needed, #45216 is the best one because #45357 is a later, narrower follow-up of the same Qwen3.5 save_pretrained regression.", + "canonical_issue_reason": "No single canonical issue for the whole cluster; the items span many unrelated bugs and regressions across different models and APIs.", "canonical_pr_reason": null, - "best_issue_reason": "#45216 is the best representative issue for the only strong duplicate pair, since it is earlier and describes the broader regression; #45357 is a narrower persistence of the same problem.", + "best_issue_reason": "If a representative is needed, issue 45216 is the broader regression report; 45357 reads like a narrower follow-up about the same Qwen3.5 `save_pretrained` serialization problem.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44811", "right": "issue:45584", "accept": false, - "reason": "Both are Whisper-related, but one is batch_decode skip-special-token behavior and the other is empty-transcription generation after align_special_tokens; different failure modes." + "reason": "Both involve Whisper, but one is about `batch_decode(skip_special_tokens)` and the other about empty-generation behavior after `align_special_tokens`; different code paths and symptoms." }, { "left": "issue:44206", "right": "issue:44220", "accept": false, - "reason": "Both touch audio feature extraction, but one is an unsupported `center` arg in `LasrFeatureExtractor` and the other is a low-level `_torch_extract_fbank_features()` issue; not the same bug." + "reason": "Both are audio-feature related, but the titles point to different failures (`center` arg regression vs. 
a generic `_torch_extract_fbank_features()` problem) without enough evidence they are the same bug." }, { "left": "issue:44488", "right": "issue:44987", "accept": false, - "reason": "Both are loading regressions, but they involve different models and likely different code paths (`cjvt/sleng-bert` vs `physical-intelligence/fast`)." + "reason": "Both are loading failures, but they concern different models and likely different tokenizer/model compatibility issues; too broad to treat as one duplicate." }, { "left": "issue:43716", "right": "issue:45084", "accept": false, - "reason": "Completely different problems: Mistral-3 image preprocessor dtype mismatch vs a template compilation TypeError." + "reason": "Completely different problems: Mistral-3 image dtype mismatch vs. a template compilation error." }, { "left": "issue:34567", "right": "issue:40444", "accept": false, - "reason": "Unrelated: Trainer token accounting vs Qwen2.5-VL iterable dataset multi-image loading failure." + "reason": "Trainer token accounting and Qwen2.5-VL multi-image IterableDataset failure are unrelated." }, { "left": "issue:43883", "right": "issue:44991", "accept": false, - "reason": "Both are loading-related, but one is a missing `all_tied_weights_keys` attribute and the other is a tokenizer load failure for a different model; not the same underlying bug." + "reason": "Different model-loading/tokenizer issues for different models; same broad area, but not the same concrete bug." }, { "left": "issue:42947", "right": "issue:44756", "accept": false, - "reason": "Different subsystems and symptoms: LoRA gradient checkpointing vs Apple Silicon mmap/OOM handling." + "reason": "LoRA gradient-checkpointing behavior and Strix Halo mmap OOM are unrelated." }, { "left": "issue:43644", "right": "issue:43646", "accept": false, - "reason": "Both are Transformers 5 breakages, but one is non-persistent buffer initialization and the other is custom model initialization; too broad and not the same defect." 
+ "reason": "Both touch Transformers v5 initialization behavior, but one is about non-persistent buffers and the other about custom model initialization; not the same underlying defect." }, { "left": "issue:43643", "right": "issue:44530", "accept": false, - "reason": "Different issues: missing fields from `AutoConfig.from_pretrained(trust_remote_code=True)` vs PagedAttentionCache group-type handling on Qwen3.5." + "reason": "Missing fields from `AutoConfig.from_pretrained(trust_remote_code=True)` is unrelated to Qwen3.5 PagedAttentionCache group-type crashes." }, { - "left": "issue:44451", - "right": "issue:44987", + "left": "issue:39404", + "right": "issue:43825", "accept": false, - "reason": "Both fail on model loading, but they are different models with no evidence of a shared code-path bug." + "reason": "Whisper `return_language` pipeline behavior and a bad pipeline error message are different issues." }, { "left": "issue:43452", "right": "issue:43482", "accept": false, - "reason": "Both mention GGUF, but one is a broad `gguf_file` API break and the other is a model-specific v5 loading failure; not enough evidence they are the same bug." + "reason": "Both involve GGUF loading, but one is a generic `gguf_file` regression and the other is a specific Qwen2.5-GGUF failure; not enough evidence of one bug." }, { "left": "issue:43317", "right": "issue:43482", "accept": false, - "reason": "Different loading paths: `device_map=auto` offload/dequantization vs GGUF model loading." + "reason": "Different loading paths and failures: dequantized device-map offload vs. Qwen2.5-GGUF/v5 loading." }, { - "left": "issue:36032", - "right": "issue:42915", + "left": "issue:43541", + "right": "issue:45084", "accept": false, - "reason": "Unrelated: T5 tokenizer method conflict vs Qwen3Moe FP8 configuration failure." + "reason": "Torch dynamo/grouped_mm tracing failure is unrelated to template-node compilation errors." 
}, { - "left": "issue:43541", - "right": "issue:45084", + "left": "issue:36032", + "right": "issue:42915", "accept": false, - "reason": "Different compilation failures with no shared code path." + "reason": "T5 tokenizer method-name conflict and Qwen3MoE FP8 failure are unrelated." }, { "left": "issue:39404", "right": "issue:43825", "accept": false, - "reason": "Both involve pipeline/Whisper wording, but one is a functional `return_language` regression and the other is an error-message issue about unsupported translation tasks." + "reason": "Same as above: different Whisper/pipeline bug versus error-message wording issue." }, { - "left": "issue:44849", - "right": "issue:45325", + "left": "issue:42915", + "right": "issue:44530", "accept": false, - "reason": "Different Qwen variants and different bugs: hidden-states behavior vs image temporal position-id scaling." + "reason": "Fine-grained FP8 config failure and Qwen3.5 linear_attention cache crash are unrelated." }, { - "left": "issue:42915", - "right": "issue:44530", + "left": "issue:44849", + "right": "issue:45325", "accept": false, - "reason": "Both mention Qwen, but one is FP8 config failure and the other is an invalid cache group type; not the same concrete bug." + "reason": "Qwen3.5 hidden-states output bug and Qwen2.5-VL rope-index scaling bug affect different models and code paths." }, { "left": "issue:43404", "right": "issue:44466", "accept": false, - "reason": "Related to tied weights, but one is a missing tie in `Mistral3ForConditionalGeneration` and the other is inconsistent serialization by device; separate issues." + "reason": "Both mention tied weights/lm_head, but one is a missing tie in a model class and the other is inconsistent serialization behavior; not the same bug." }, { "left": "issue:43262", "right": "issue:43377", "accept": false, - "reason": "Both are audio-processing issues, but one is chat-template sampling-rate defaults and the other is missing padding-mask support in MIMI batching." 
+ "reason": "Audio chat-template sampling-rate defaulting and MIMI padding-mask batch discrepancy are different issues." }, { "left": "issue:36032", "right": "issue:43645", "accept": false, - "reason": "Different Transformers 5 regressions: tokenizer method name collision vs custom model initialization in notebooks." + "reason": "Tokenizer method conflict and notebook custom-model initialization are unrelated." }, { "left": "issue:44987", "right": "issue:44991", "accept": false, - "reason": "Both are v5 loading regressions, but they concern different models and different failure modes; no shared underlying bug is evident." + "reason": "Both are model-loading regressions, but for different models and likely different failure causes." }, { "left": "issue:45084", "right": "issue:45507", "accept": false, - "reason": "Different template/model-method failures with no concrete overlap." + "reason": "Template compilation error and GraniteMoEHybrid invalid-method call are unrelated." }, { "left": "issue:43646", "right": "issue:44534", "accept": false, - "reason": "Both mention custom model/init behavior, but one is generic custom model initialization and the other is non-persistent buffer serialization junk." + "reason": "Both concern Transformers v5 custom model behavior, but one is about initialization and the other about non-persistent buffer values; insufficient evidence of one concrete bug." }, { "left": "issue:43994", "right": "issue:44610", "accept": false, - "reason": "Both are vision-related, but one is bad predictions from SigLIP2 and the other is a processor size mismatch for OmDet-Turbo." + "reason": "SigLIP2 bad outputs and OmDet-Turbo image-size mismatch are different model/preprocessor problems." }, { "left": "issue:43645", "right": "issue:44534", "accept": false, - "reason": "Different regressions: notebook custom-model initialization vs junk-filled non-persistent buffers." 
+ "reason": "Notebook custom-model initialization and junk-filled buffers are separate v5 regressions." }, { "left": "issue:45216", "right": "issue:45357", "accept": true, - "reason": "Same underlying Qwen3.5 `save_pretrained` regression; #45357 is a later, narrower report that still saves incorrect visual encoder keys." + "reason": "Same model family and same `save_pretrained` checkpoint-serialization regression; the later issue just narrows the symptom to incorrect visual encoder keys." }, { "left": "issue:43322", "right": "issue:43540", "accept": false, - "reason": "Both affect multimodal loading/processing, but one is a Llava Next segfault and the other is a Qwen3OmniMoe video-input ValueError." + "reason": "Segfault on Llava Next model loading and Qwen3OmniMoe video-processing ValueError are unrelated multimodal failures." }, { "left": "issue:43377", "right": "issue:43819", "accept": false, - "reason": "Different audio-model bugs: missing padding-mask support in MIMI encoder vs DAC latent/forward mismatch without STE." + "reason": "Both are codec/model inference bugs, but MIMI padding-mask batching and DAC latent/STE mismatch are different code paths." }, { "left": "issue:44403", "right": "issue:45081", "accept": false, - "reason": "One is generic noisy model-loading output, the other is a specific Mistral tokenizer regex patch crash." + "reason": "Unnecessary loading noise and Mistral regex-patching crash are unrelated." }, { "left": "issue:44451", "right": "issue:45081", "accept": false, - "reason": "Both are tokenizer/model-loading failures, but for different models and different root causes." + "reason": "Different tokenizer-loading failures for different models; not the same defect." }, { "left": "issue:41553", "right": "issue:43582", "accept": false, - "reason": "Unrelated: bad AutoTokenizer error message for Voxtral vs Apple Silicon caching_allocator_warmup TypeError." 
+ "reason": "Bad AutoTokenizer error message for Voxtral and Apple Silicon allocator TypeError are unrelated." }, { "left": "issue:44220", "right": "issue:44683", "accept": false, - "reason": "Different low-level failures: audio fbank extraction vs compiled flex_attention on newer torch." + "reason": "Audio fbank-feature extraction and compiled flex_attention on torch 2.9 are unrelated." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is grounded in the packet and stays appropriately conservative. The only accepted duplicate-like pair, #45216/#45357, is plausibly the same Qwen3.5 save_pretrained regression and looks mergeable into one bugfix PR. The other soft-edge pairs are rejected with cautious, title-based reasoning rather than overclaiming evidence." + "feedback": "The summary is grounded in the packet and stays conservative about weak links. The only accepted soft-edge pair, 45216/45357, is plausibly the same Qwen3.5 save_pretrained serialization regression, while the rest of the pairwise rejections are appropriately cautious. No obvious overstatement or unsupported canonicalization." 
}, "evaluator_used": true, "retried": false, @@ -2654,15 +2702,15 @@ "issue:43404|issue:45127", "issue:42915|issue:45084", "issue:44987|issue:45081", - "issue:44479|issue:45325", "issue:43296|issue:43531", + "issue:44479|issue:45325", "issue:44062|issue:44987", "issue:42915|issue:44291", "issue:40990|issue:44811", "issue:43611|issue:44534", "issue:43317|issue:43856", - "issue:42175|issue:45542", "issue:36032|issue:43742", + "issue:42175|issue:45542", "issue:40444|issue:45325", "issue:43632|issue:44534", "issue:43421|issue:44568", @@ -2680,214 +2728,214 @@ "issue:44079|issue:44534", "issue:35707|issue:40444" ], - "prepared_review_unit_hash": "a28d5f7018859e92f8f51d63cf5adfe56345c1daa2ed96da09f5699a5c13ee10", + "prepared_review_unit_hash": "f822eae9c530972cd221112cf845f84ff76f09b56602ccad09de2cd5f669e661", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 30, - "serialized_chars": 36162, - "estimated_input_tokens": 9041, - "estimated_eval_tokens": 18338 + "serialized_chars": 36158, + "estimated_input_tokens": 9040, + "estimated_eval_tokens": 18336 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is heterogeneous: the soft-pair candidates mostly share only broad subsystem or version context, not the same concrete bug or fix path. 
I reject all soft edges and see no clear canonical duplicate representative.", - "confidence": 0.97, - "canonical_issue_reason": "No single issue clearly represents the cluster; the items span unrelated bugs across tokenizers, Qwen/VL, packaging, loading, and attention behavior.", + "summary": "These items are mostly unrelated Transformer regressions across different models/subsystems; I do not see any true duplicate pairs to merge. If one issue must represent the cluster, #45020 is the broadest umbrella for the repeated recent-version breakage theme.", + "confidence": 0.89, + "canonical_issue_reason": "Issue #45020 is the broadest umbrella for the repeated 'recent transformers versions break models' theme, though the cluster overall is not a true duplicate set.", "canonical_pr_reason": null, - "best_issue_reason": "No issue is a strong global representative. The cluster does not appear to center on one underlying defect, so a canonical issue choice would be arbitrary.", + "best_issue_reason": "#45020 is the most representative and open issue here: it captures broad version-regression breakage across models, even if it does not subsume the other reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43632", "right": "issue:44756", "accept": false, - "reason": "Unrelated: HF init flag regression vs mmap/OOM on Strix Halo." + "reason": "Different failures: `_is_hf_initialized` init handling vs Strix Halo mmap/OOM behavior." }, { "left": "issue:44265", "right": "issue:44610", "accept": false, - "reason": "Different bugs: torch export/check utility failure vs OmDet-Turbo processor size mismatch." + "reason": "torch.export/torch_compilable_check is unrelated to a processor input-size mismatch in OmDet-Turbo." }, { "left": "issue:43404", "right": "issue:45127", "accept": false, - "reason": "Distinct causes: lm_head weight tying bug vs LoRA merge with extended vocabulary collapse." 
+ "reason": "One is a tied lm_head bug; the other is LoRA merge collapse with extended vocab. Not the same bug." }, { "left": "issue:42915", "right": "issue:45084", "accept": false, - "reason": "FP8 config failure and template-node compilation error are unrelated code paths." + "reason": "Qwen3MoE FP8 config failure and a non-template-node compile error are different code paths." }, { "left": "issue:44987", "right": "issue:45081", "accept": false, - "reason": "Both are loading-related, but one is remote-code/model loading and the other is a Mistral regex patch crash." + "reason": "Tokenizer loading failure for one model is not the same as the Mistral regex patch crash." }, { - "left": "issue:44479", - "right": "issue:45325", + "left": "issue:43296", + "right": "issue:43531", "accept": false, - "reason": "Same model family, but video-input regression and still-image rope-index scaling are different symptoms and likely different fixes." + "reason": "PaddleOCR-VL loading and Qwen3-MoE sliding_window issues are unrelated." }, { - "left": "issue:43296", - "right": "issue:43531", + "left": "issue:44479", + "right": "issue:45325", "accept": false, - "reason": "Different model families and failure modes: PaddleOCR-VL loading vs Qwen3-MoE sliding_window behavior." + "reason": "Both are Qwen VL-temporal topics, but one is a broad video-input regression and the other is a still-image RoPE scaling bug." }, { "left": "issue:44062", "right": "issue:44987", "accept": false, - "reason": "Tokenizer AddedToken argument conflict is unrelated to loading a remote-code model." + "reason": "AddedToken special-arg duplication is unrelated to a specific model loading regression." }, { "left": "issue:42915", "right": "issue:44291", "accept": false, - "reason": "Both touch transformers v5-era loading, but FP8 config failure and _is_hf_initialized/init_empty_weights mismatch are different bugs." 
+ "reason": "FP8 config handling and unexpected `_is_hf_initialized` argument errors are distinct problems." }, { "left": "issue:40990", "right": "issue:44811", "accept": false, - "reason": "Perplexity/eval issue vs Whisper batch_decode special-token handling; no shared bug." + "reason": "Perplexity regression on GPT-OSS is unrelated to Whisper batch_decode special-token handling." }, { "left": "issue:43611", "right": "issue:44534", "accept": false, - "reason": "base_model_prefix loading breakage is separate from non-persistent buffer initialization junk." + "reason": "`base_model_prefix` loading breakage and non-persistent buffer corruption are different failures." }, { "left": "issue:43317", "right": "issue:43856", "accept": false, - "reason": "Loading with gpu+cpu offload is unrelated to training-time memory inefficiency." + "reason": "Device-map/offload loading failure is not the same as MoE training memory inefficiency." }, { - "left": "issue:42175", - "right": "issue:45542", + "left": "issue:36032", + "right": "issue:43742", "accept": false, - "reason": "Both are TensorFlow packaging/backend issues, but one is missing TF in the torch extra and the other is an undefined backend error with tensorboard-only install." + "reason": "T5Tokenizer special-token method conflict and MobileLLM key errors do not share the same root cause." }, { - "left": "issue:36032", - "right": "issue:43742", + "left": "issue:42175", + "right": "issue:45542", "accept": false, - "reason": "Tokenizer special-token conflict vs model key error; different underlying failures." + "reason": "Packaging omission of TensorFlow is different from the undefined tf backend error caused by tensorboard-only installs." }, { "left": "issue:40444", "right": "issue:45325", "accept": false, - "reason": "Both involve Qwen VL, but iterable-dataset multi-image finetuning and rope-index scaling for still images are not the same bug." 
+ "reason": "Multi-image iterable fine-tuning failure is not the same as RoPE temporal position scaling in Qwen2.5-VL." }, { "left": "issue:43632", "right": "issue:44534", "accept": false, - "reason": "Unrelated: _is_hf_initialized flag regression vs non-persistent buffer junk in v5." + "reason": "`_is_hf_initialized` flag behavior and non-persistent buffer filling are separate regressions." }, { "left": "issue:43421", "right": "issue:44568", "accept": false, - "reason": "Shared special-token theme, but runtime post-processor updates and add_special_tokens=True regression are different surfaces and not clearly one fix." + "reason": "Runtime post-processor updates and BOS/EOS insertion on tokenization are related only broadly, not the same bug." }, { "left": "issue:43818", "right": "issue:45357", "accept": false, - "reason": "Video-LLaVA tower/temporal-attention issue is unrelated to Qwen3.5 save_pretrained visual-key regression." + "reason": "Video-LLaVA temporal-attention/weight-sharing bug and Qwen3.5 save_pretrained key bug are unrelated." }, { "left": "issue:43531", "right": "issue:44877", "accept": false, - "reason": "Qwen3-MoE sliding_window behavior and granite_speech strict config loading are unrelated." + "reason": "Qwen3-MoE sliding_window behavior and granite_speech strict-config loading are different issues." }, { "left": "issue:42915", "right": "issue:44589", "accept": false, - "reason": "FineGrainedFP8Config failure is unrelated to missing Float8 storage type lookup." + "reason": "FP8 config failure and missing Float8 storage type are distinct float8-related problems." }, { "left": "issue:36032", "right": "issue:41720", "accept": false, - "reason": "Tokenizer load conflict vs CUDA assert during auto device mapping are different problem classes." + "reason": "Tokenizer special-token conflict is unrelated to Qwen3 auto device-mapping CUDA asserts." 
}, { "left": "issue:43425", "right": "issue:44403", "accept": false, - "reason": "Torch compatibility issue is unrelated to noisy transformer loading logs." + "reason": "Torch version incompatibility and noisy loading output are not the same underlying bug." }, { "left": "issue:44683", "right": "issue:45020", "accept": false, - "reason": "Compiled flex_attention on new torch versions is unrelated to remote_code model breakage." + "reason": "Compiled flex_attention on torch>=2.9 and remote_code loading regressions are separate compatibility problems." }, { "left": "issue:43482", "right": "issue:45538", "accept": false, - "reason": "GGUF loading with transformers v5 and CLIPTokenizer model_max_length are different defects." + "reason": "GGUF loading failure and CLIPTokenizer max-length behavior are unrelated." }, { "left": "issue:44829", "right": "issue:45479", "accept": false, - "reason": "Both affect training loss, but flash_attention_3 degeneration and num_labels=1 zero-loss behavior have different root causes." + "reason": "Flash-attention-induced degenerate training and single-label zero-loss behavior are different classification bugs." }, { "left": "issue:33666", "right": "issue:40444", "accept": false, - "reason": "Both mention Qwen VL training, but multi-GPU training and multi-image iterable dataset failures are not the same bug." + "reason": "Qwen2-VL multi-GPU training and multi-image-per-prompt iterable-dataset failure are not the same bug." }, { "left": "issue:44403", "right": "issue:44991", "accept": false, - "reason": "Generic loading noise is unrelated to tokenizer loading failure for EMBEDDIA/est-roberta." + "reason": "A general loading-noise complaint does not match a concrete tokenizer loading failure." }, { "left": "issue:43792", "right": "issue:44589", "accept": false, - "reason": "Whisper run failure and Float8 storage lookup error are unrelated." + "reason": "Whisper runtime failure is unrelated to the Float8 storage lookup issue." 
}, { "left": "issue:44079", "right": "issue:44534", "accept": false, - "reason": "ModelOutput key assignment and non-persistent buffer junk are separate internal behaviors." + "reason": "ModelOutput key assignment and non-persistent buffer junk are different subsystems." }, { "left": "issue:35707", "right": "issue:40444", "accept": false, - "reason": "Progressive generation with inputs_embeds/past_key_values is unrelated to Qwen2.5-VL multi-image finetuning." + "reason": "Progressive generation with inputs_embeds/past_key_values is unrelated to Qwen2.5-VL multi-image fine-tuning failure." } ] }, @@ -2987,219 +3035,222 @@ "issue:43262|issue:43746", "issue:43632|issue:45563", "issue:43881|issue:44991", - "issue:43317|issue:43873", "issue:40990|issue:45584", + "issue:43317|issue:43873", "issue:34634|issue:35707" ], - "prepared_review_unit_hash": "8036223205b231423116aa0b4e79a355b114956e81d139dea9a4dd8db108e853", + "prepared_review_unit_hash": "600b3bd80c17b76c461ab3af243a1e58c629366bd461739fdf24a77d56978737", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 29, - "serialized_chars": 36388, - "estimated_input_tokens": 9097, - "estimated_eval_tokens": 18450 + "serialized_chars": 36393, + "estimated_input_tokens": 9099, + "estimated_eval_tokens": 18454 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This is a heterogeneous set of mostly unrelated issues, with only a few superficial overlaps around Transformers v5 regressions, quantization/offloading, and a couple of model-family-specific bugs. 
None of the soft edges look like true duplicates.", - "confidence": 0.91, - "canonical_issue_reason": "No single canonical issue is safe here: the items span many distinct bugs and model families, so picking one representative would be misleading.", - "canonical_pr_reason": "No PRs are present in this cluster.", - "best_issue_reason": "There is no strong global best issue; the closest broad items are still too narrow or too different from the rest of the set to serve as a reliable representative.", - "best_pr_reason": "No PRs are present in this cluster.", + "summary": "This cluster is mostly heterogeneous, but it is dominated by Transformers v5 model-loading/initialization regressions. Only one soft pair looks like the same concrete bug family (Qwen2.5-VL position-id generation).", + "confidence": 0.72, + "canonical_issue_reason": "issue:43646 is the broadest representative of the dominant theme: Transformers 5.0.0 breaking custom model initialization and related loading paths.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43646 is the best cluster representative because it is the most generic, central v5 regression title and subsumes several nearby load/init failures.", + "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43377", "right": "issue:45491", "accept": false, - "reason": "Different models and failure modes: batched-vs-single padding-mask mismatch vs mixed-length sliding-window NaNs." + "reason": "Different models and symptoms; one is MIMI batching/padding-mask behavior, the other is Gemma3 sliding-window NaNs." }, { "left": "issue:43482", "right": "issue:43525", "accept": false, - "reason": "Both are load failures, but one is a Qwen2.5-GGUF/v5 loading issue and the other is a Llama4Config missing `pad_token_id`." + "reason": "Unrelated failures: Qwen2.5-GGUF loading vs Llama4Config missing pad_token_id." 
}, { "left": "issue:44291", "right": "issue:44589", "accept": false, - "reason": "Different code paths: unexpected `_is_hf_initialized` argument vs missing Float8 storage object." + "reason": "Both are Transformers v5 regressions, but one is init_empty_weights/_is_hf_initialized handling and the other is an fp8 storage lookup error." }, { "left": "issue:43749", "right": "issue:44079", "accept": false, - "reason": "FSDP CPU RAM efficient loading is unrelated to `ModelOutput` key assignment behavior." + "reason": "Different subsystems: FSDP CPU RAM-efficient loading vs ModelOutput key assignment." }, { "left": "issue:44079", "right": "issue:45072", "accept": false, - "reason": "`ModelOutput` key handling and bf16 dtype mismatches are unrelated bugs." + "reason": "ModelOutput key bookkeeping is unrelated to dtype mismatches in SwitchTransformers/TimmWrapperModel." }, { "left": "issue:43644", "right": "issue:44079", "accept": false, - "reason": "Non-persistent buffer serialization/junk is not the same bug as `ModelOutput` key assignment." + "reason": "Non-persistent buffer serialization junk is a different bug from ModelOutput key assignment." }, { "left": "issue:40444", "right": "issue:45381", "accept": false, - "reason": "Different modalities and symptoms: multi-image finetuning failure vs Qwen2.5-VL video position-id bug." + "reason": "Both involve Qwen2.5-VL, but one is finetuning with multiple images and the other is video position ids; not the same concrete bug." }, { "left": "issue:43632", "right": "issue:45072", "accept": false, - "reason": "`_is_hf_initialized` regression and bfloat16 inference dtype mismatch are separate issues." + "reason": "_is_hf_initialized flag regression is unrelated to bfloat16 dtype mismatch issues." }, { "left": "issue:44265", "right": "issue:45072", "accept": false, - "reason": "`torch.export`/`torch_compilable_check` failure is not the same as a bfloat16 dtype mismatch." 
+ "reason": "torch.export/torch_compilable_check failure is a different code path than dtype mismatch inference bugs." }, { "left": "issue:43881", "right": "issue:44488", "accept": false, - "reason": "Different models and likely different loader failures; no shared concrete bug." + "reason": "Different model-loading failures affecting different models and likely different root causes." }, { "left": "issue:45325", "right": "issue:45381", - "accept": false, - "reason": "Same model family, but still-image temporal scaling and video vision-position-id errors are different code paths." + "accept": true, + "reason": "Same Qwen2.5-VL position-id / rope-index code path: one reports still-image temporal position scaling, the other video vision_position_ids being wrong." }, { "left": "issue:43531", "right": "issue:44291", "accept": false, - "reason": "Qwen3-MoE sliding-window behavior is unrelated to the `init_empty_weights` argument regression." + "reason": "Qwen3-MoE sliding_window behavior is unrelated to init_empty_weights argument handling." }, { "left": "issue:43611", "right": "issue:43646", "accept": false, - "reason": "Both are Transformers v5 regressions, but one is about `base_model_prefix` loading and the other about custom model initialization; not the same concrete bug." + "reason": "Both are v5 loading regressions, but base_model_prefix handling and custom model initialization are not the same concrete bug." }, { "left": "issue:41720", "right": "issue:45081", "accept": false, - "reason": "Qwen3 auto device-map CUDA assert and Mistral tokenizer regex crash are unrelated." + "reason": "Qwen3 auto device mapping CUDA assert and Mistral tokenizer regex patch crash are unrelated." }, { "left": "issue:42175", "right": "issue:43296", "accept": false, - "reason": "Packaging/backend selection issue is unrelated to PaddleOCR-VL loading failure." + "reason": "Packaging/backend dependency issue is unrelated to PaddleOCR-VL model loading." 
}, { "left": "issue:43454", "right": "issue:44466", "accept": false, - "reason": "Both mention tied `lm_head` weights, but one is a generation-quality/init issue and the other is device-dependent serialization; not the same fix." + "reason": "Missing lm_head weight tying and device-dependent serialization are different tied-weight bugs." }, { "left": "issue:43994", "right": "issue:44898", "accept": false, - "reason": "Different vision-model failures with different symptoms and likely different root causes." + "reason": "Different vision-model inference failures with different models and different failure modes." }, { "left": "issue:44387", "right": "issue:44617", "accept": false, - "reason": "Both mention OOM, but one is an int4 quantization memory regression and the other is a model-specific video OOM." + "reason": "Int4 quantization CUDA memory growth is unrelated to Sam3Video OOM." }, { "left": "issue:43828", "right": "issue:44683", "accept": false, - "reason": "Autocast dtype mismatch and compiled flex_attention failure on torch >= 2.9 are different problems." + "reason": "Autocast dtype mismatch and compiled flex_attention on torch>=2.9 are different issues." }, { "left": "issue:42915", "right": "issue:43482", "accept": false, - "reason": "Qwen3Moe FP8 config failure and Qwen2.5-GGUF loading failure are unrelated." + "reason": "Qwen3MoE FP8 config failure is unrelated to Qwen2.5-GGUF loading." }, { "left": "issue:43526", "right": "issue:43883", "accept": false, - "reason": "Image processor `reduce_labels` bug is unrelated to missing `all_tied_weights_keys` on a model class." + "reason": "BeitImageProcessorFast label reduction bug is unrelated to Molmo tied-weight attribute errors." }, { "left": "issue:43792", "right": "issue:43957", "accept": false, - "reason": "Whisper runtime failure and meta-device model loading regression do not share the same root cause." + "reason": "Whisper runtime failure and meta-device loading regressions are different problems." 
}, { "left": "issue:43701", "right": "issue:45325", "accept": false, - "reason": "Checkpoint resume key mismatch is unrelated to Qwen2.5-VL rope/position-id scaling." + "reason": "Checkpoint resume key mismatch is unrelated to Qwen2.5-VL position-id generation." }, { "left": "issue:43262", "right": "issue:43746", "accept": false, - "reason": "Audio chat-template sampling-rate default and PEFT local-checkpoint loading are different bugs." + "reason": "Audio chat-template sample-rate defaulting is unrelated to PEFT checkpoint loading." }, { "left": "issue:43632", "right": "issue:45563", "accept": false, - "reason": "An initialization flag regression is unrelated to a stale generate warning." + "reason": "_is_hf_initialized regression is unrelated to a stale warning in paged generate()." }, { "left": "issue:43881", "right": "issue:44991", "accept": false, - "reason": "Different model/tokenizer loading failures with no shared code path." + "reason": "Different tokenizer/model-loading failures for different models." }, { - "left": "issue:43317", - "right": "issue:43873", + "left": "issue:40990", + "right": "issue:45584", "accept": false, - "reason": "Both concern offloading/quantization, but one is a dequantized-model load failure with gpu+cpu offload and the other is a broader offloading complaint; not the same concrete bug." + "reason": "High perplexity on GPT-OSS and Whisper empty-transcription generation failure are unrelated." }, { - "left": "issue:40990", - "right": "issue:45584", + "left": "issue:43317", + "right": "issue:43873", "accept": false, - "reason": "Perplexity/evaluation anomaly and Whisper empty-transcription generation failure are unrelated." + "reason": "Both mention offloading/quantization, but they are not clearly the same concrete failure mode." }, { "left": "issue:34634", "right": "issue:35707", "accept": false, - "reason": "Bark voice_preset support and progressive generation with inputs_embeds/past_key_values are unrelated." 
+ "reason": "Completely unrelated: BarkProcessor voice_preset vs progressive generation with inputs_embeds/past_key_values." } ] }, - "evaluator_result": null, - "evaluator_used": false, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays appropriately conservative. The cluster is indeed heterogeneous, with a visible v5 loading/initialization theme, and the one accepted soft pair (issue:45325 vs issue:45381) is a plausible same-family position-id/rope-index match. The rejected soft pairs are also phrased cautiously and do not overclaim shared root causes." + }, + "evaluator_used": true, "retried": false, - "accepted_nontrivial_soft_edge": false, + "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, @@ -3304,262 +3355,262 @@ "issue:43550|issue:43606", "issue:41720|issue:42617" ], - "prepared_review_unit_hash": "7852e5b2837c4dbfd48907421944cee58f7022d1c59336beb3d907bd0f0dae02", + "prepared_review_unit_hash": "4e2e513f44155689223314896d0f7d4953d76890b4b43ae9228e4b9da9f10695", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 38, - "serialized_chars": 38640, - "estimated_input_tokens": 9660, - "estimated_eval_tokens": 19576 + "serialized_chars": 38633, + "estimated_input_tokens": 9659, + "estimated_eval_tokens": 19574 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly heterogeneous: the soft links point to broad thematic overlap around loading/offloading, quantization, serialization, and multimodal preprocessing, but the paired issues do not appear to describe the same 
underlying bug. I would not merge any of the soft-edge candidates.", - "confidence": 0.76, - "canonical_issue_reason": "issue:43873 is the broadest representative of the recurring loading/offloading theme, but the cluster is not actually a duplicate set and most items have distinct root causes.", + "summary": "This cluster is very heterogeneous: the pairs mostly share broad themes like loading, quantization, offloading, multimodal models, or config APIs, but not the same concrete bug or code path. I would reject every soft edge here; none looks safe to merge as a duplicate/one-fix pair.", + "confidence": 0.94, + "canonical_issue_reason": "issue:43873 is the broadest representative of the set (offloading + quantization + loading), but it is only a weak cluster anchor because the surrounding issues are not true duplicates.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43873 is the closest umbrella issue and has the strongest cross-cutting theme among the set, but it still does not subsume the model-specific and API-specific reports.", + "best_issue_reason": "issue:43873 is the most generic and central issue to use as a representative triage anchor, but only because the cluster lacks a real duplicate nucleus.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43606", "right": "issue:44265", "accept": false, - "reason": "CPU-offload device mismatch vs torch.export failure on torch_compilable_check; different bugs." + "reason": "One is a CPU offload device-mismatch failure in a specific model; the other is a torch.export/torch_compilable_check export failure. Different code paths." }, { "left": "issue:44291", "right": "issue:44877", "accept": false, - "reason": "Unexpected _is_hf_initialized arg during init_empty_weights vs strict config loading; unrelated root causes." + "reason": "Unexpected `_is_hf_initialized` argument during init_empty_weights is unrelated to strict config blocking granite_speech loading." 
}, { "left": "issue:44186", "right": "issue:44265", "accept": false, - "reason": "Tokenizer padding/NER crash vs export-time check failure; not the same issue." + "reason": "Tokenizer crashes on NER/padding are unrelated to export failures from torch_compilable_check." }, { "left": "issue:43606", "right": "issue:43819", "accept": false, - "reason": "CPU offload mismatch vs DAC STE/latents mismatch; different code paths." + "reason": "CPU offload device mismatch and DAC.from_latents STE mismatch are different model/runtime bugs." }, { "left": "issue:44610", "right": "issue:45325", "accept": false, - "reason": "Processor/model input-size mismatch vs rope-index temporal scaling regression; different defects." + "reason": "Processor resolution mismatch for OmDet-Turbo is unrelated to Qwen2.5-VL temporal rope indexing." }, { "left": "issue:35707", "right": "issue:41720", "accept": false, - "reason": "Progressive generation with inputs_embeds/past_key_values vs auto device-map cuda assert; unrelated." + "reason": "Progressive generation with inputs_embeds/past_key_values is unrelated to Qwen3 auto device-map cuda asserts." }, { "left": "issue:43377", "right": "issue:44898", "accept": false, - "reason": "MIMI padding-mask batching bug vs Perceiver resolution handling bug; different model paths." + "reason": "MIMI padding-mask batching bug and Perceiver non-default-resolution failure are different model components and symptoms." }, { "left": "issue:43454", "right": "issue:43550", "accept": false, - "reason": "lm_head tying/serialization bug vs torch.compile SDPA failure; not the same change." + "reason": "Tied lm_head weights bug and Bamba torch.compile/SDPA failure are unrelated." }, { "left": "issue:43377", "right": "issue:44610", "accept": false, - "reason": "Padding-mask batching issue vs OmDet image-size mismatch; different preprocessing bugs." 
+ "reason": "Both involve model inputs, but one is MIMI batching behavior and the other is OmDet processor output size; not the same bug." }, { "left": "issue:43454", "right": "issue:45072", "accept": false, - "reason": "Weight-tying serialization issue vs dtype mismatch in CI; different symptoms and fixes." + "reason": "lm_head tying/serialization issues do not match dtype mismatches in SwitchTransformers/TimmWrapperModel." }, { "left": "issue:39692", "right": "issue:40444", "accept": false, - "reason": "Doc example errors vs finetuning failure with multiple images; different reports." + "reason": "SigLIP2 documentation errors and Qwen2.5-VL iterable-dataset finetuning failure are unrelated." }, { "left": "issue:44466", "right": "issue:45357", "accept": false, - "reason": "Generic tied-weight serialization inconsistency vs Qwen3.5 visual-key save_pretrained regression; not the same bug." + "reason": "Inconsistent lm_head serialization by device is not the same as incorrect visual encoder keys being saved." }, { "left": "issue:43749", "right": "issue:44534", "accept": false, - "reason": "FSDP RAM-efficient loading failure vs non-persistent buffer junk-initialization; different mechanisms." + "reason": "FSDP CPU RAM efficient loading and non-persistent buffer junk are both v5/loading-adjacent, but not the same concrete bug." }, { "left": "issue:43296", "right": "issue:44991", "accept": false, - "reason": "PaddleOCR-VL load failure vs tokenizer load regression for est-roberta; unrelated." + "reason": "PaddleOCR-VL load failure in vLLM and tokenizer loading failure for est-roberta are separate model/tokenizer issues." }, { "left": "issue:43792", "right": "issue:44683", "accept": false, - "reason": "Whisper load/run issue vs compiled flex_attention incompatibility; different subsystems." + "reason": "Whisper model runtime failure and compiled flex_attention on torch>=2.9 are unrelated." 
}, { "left": "issue:43296", "right": "issue:44877", "accept": false, - "reason": "Model loading failure vs strict config rejection; no shared underlying bug." + "reason": "vLLM/PaddleOCR-VL loading failure is unrelated to strict config rejection of granite_speech." }, { "left": "issue:41720", "right": "issue:43322", "accept": false, - "reason": "Qwen3 device-map cuda assert vs Llava Next segfault; different models and failures." + "reason": "Qwen3 auto device mapping cudaErrorAssert and Llava Next segfault are both loading/runtime crashes but not the same underlying issue." }, { "left": "issue:43454", "right": "issue:43720", "accept": false, - "reason": "lm_head tying bug vs packed-weight unpacking during accelerate loading; different code paths." + "reason": "lm_head tying is unrelated to BitNet packed-weight unpacking during accelerate loading." }, { "left": "issue:43404", "right": "issue:45357", "accept": false, - "reason": "Mistral3 lm_head tying bug vs Qwen3.5 visual-encoder key serialization bug; not duplicates." + "reason": "Mistral3 lm_head weight tying and Qwen3.5 visual encoder key serialization are different model families and fixes." }, { "left": "issue:41720", "right": "issue:44977", "accept": false, - "reason": "Device-mapping crash vs flash-attention generation regression; different Qwen3.5 issues." + "reason": "Auto device-map cuda assert for Qwen3 and flash-attention generation regression for Qwen3.5 are distinct failures." }, { "left": "issue:44423", "right": "issue:45406", "accept": false, - "reason": "Continuous-batching multimodal crash vs Gemma4Processor missing _tokenizer; similar area, different bug." + "reason": "Continuous-batching multimodal crash with 'str' object has no attribute 'to' is unrelated to Gemma4Processor missing `_tokenizer`." }, { "left": "issue:43526", "right": "issue:43881", "accept": false, - "reason": "BeitImageProcessorFast label reduction bug vs glm-4v loading failure; unrelated." 
+ "reason": "BEiT fast processor label reduction bug and glm-4v-9b loading failure are unrelated." }, { "left": "issue:44186", "right": "issue:45072", "accept": false, - "reason": "Tokenizer crash vs dtype mismatch in inference; no common root cause." + "reason": "Tokenizer crash on NER/padding has nothing to do with dtype mismatches in inference." }, { "left": "issue:43873", "right": "issue:44756", "accept": false, - "reason": "Quantization/offloading issue vs mmap OOM workaround; different failure modes." + "reason": "Both mention memory/offload concerns, but one is quantization offloading behavior and the other is mmap OOM on Strix Halo." }, { "left": "issue:43577", "right": "issue:43708", "accept": false, - "reason": "dtype preservation bug vs resume_from_checkpoint max_steps miscalculation; unrelated." + "reason": "Blip2 dtype not propagating and Trainer resume/max_steps miscalculation are unrelated." }, { "left": "issue:42617", "right": "issue:42915", "accept": false, - "reason": "3d_parallel startup failure vs Qwen3Moe FineGrainedFP8Config failure; different models and errors." + "reason": "3d_parallel.py execution failure and Qwen3Moe FineGrainedFP8Config failure are different problems." }, { "left": "issue:44877", "right": "issue:44991", "accept": false, - "reason": "Config-loading strictness vs tokenizer-loading regression; distinct issues." + "reason": "Strict config blocking granite_speech and tokenizer loading for est-roberta are unrelated." }, { "left": "issue:44610", "right": "issue:44898", "accept": false, - "reason": "Processor output size mismatch vs Perceiver non-default resolution failure; not the same bug." + "reason": "Processor/model resolution mismatch for OmDet-Turbo is not the same as Perceiver interpolation failure." }, { "left": "issue:43550", "right": "issue:43720", "accept": false, - "reason": "torch.compile SDPA failure vs accelerate packed-weight loading; different code paths." 
+ "reason": "torch.compile with SDPA on Bamba and BitNet packed-weight loading are separate issues." }, { "left": "issue:44479", "right": "issue:44918", "accept": false, - "reason": "Video-input regression vs SFT input-embedding unpacking failure; unrelated." + "reason": "Video-input regression in several Qwen models and TRL SFT embedding unpacking failure are different code paths." }, { "left": "issue:44186", "right": "issue:44610", "accept": false, - "reason": "Tokenizer batching crash vs image processor resolution mismatch; different defects." + "reason": "Tokenizer NER/padding crash and OmDet processor size mismatch are unrelated." }, { "left": "issue:36331", "right": "issue:42915", "accept": false, - "reason": "Trainer compute_loss signature breakage vs Qwen3Moe FP8 failure; no overlap." + "reason": "CustomTrainer.compute_loss signature mismatch is unrelated to Qwen3Moe FP8 loading failure." }, { "left": "issue:36331", "right": "issue:42617", "accept": false, - "reason": "CustomTrainer API break vs 3d_parallel startup failure; unrelated." + "reason": "Trainer compute_loss API mismatch is unrelated to 3d_parallel.py runtime failure." }, { "left": "issue:43316", "right": "issue:44913", "accept": false, - "reason": "Gemma3TextConfig API discrepancy vs GPTNeoX rotary_pct not persisting on reload; different config bugs." + "reason": "Gemma3TextConfig API discrepancy and GPTNeoX rotary_pct reload regression are both config issues but not the same bug." }, { "left": "issue:44671", "right": "issue:44743", "accept": false, - "reason": "CamemBERT masked-LM prediction regression vs qwen3_5 recurrent-state reset; different models and logic." + "reason": "CamemBERT masked-LM prediction regression and Qwen3.5 recurrent-state reset are unrelated model behaviors." }, { "left": "issue:42915", "right": "issue:43317", "accept": false, - "reason": "Qwen3Moe FP8 failure vs device_map=auto dequantized-load offload failure; different bugs." 
+ "reason": "Qwen3Moe FineGrainedFP8Config failure and dequantized model offload loading failure are different quantization/load paths." }, { "left": "issue:43550", "right": "issue:43606", "accept": false, - "reason": "Bamba torch.compile/SDPA bug vs CPU offload device mismatch; not the same issue." + "reason": "Bamba torch.compile/SDPA failure and CPU-offload device mismatch are unrelated." }, { "left": "issue:41720", "right": "issue:42617", "accept": false, - "reason": "Qwen3 auto device-map assert vs 3d_parallel startup failure; unrelated." + "reason": "Qwen3 auto device-map assert and 3d_parallel.py failure do not describe the same underlying bug." } ] }, @@ -3639,8 +3690,8 @@ "issue:43540|issue:43782", "issue:43388|issue:45059", "issue:44186|issue:44361", - "issue:42915|issue:43322", "issue:41720|issue:43873", + "issue:42915|issue:43322", "issue:36010|issue:42617", "issue:45406|issue:45464", "issue:44514|issue:44545", @@ -3664,220 +3715,220 @@ "issue:44451|issue:44991", "issue:44062|issue:44451" ], - "prepared_review_unit_hash": "23ca7a7f42bc48e7ebde864b67fa218895228b3b0500112b06b5c0abd3128e50", + "prepared_review_unit_hash": "41c71546ee2fec9f153479152e8c3fc77f69fad772f36512eb02bf3403515694", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 30, - "serialized_chars": 36737, - "estimated_input_tokens": 9185, - "estimated_eval_tokens": 18626 + "serialized_chars": 36742, + "estimated_input_tokens": 9186, + "estimated_eval_tokens": 18628 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Only two soft edges look like true duplicates: the Qwen2_5_VLProcessor batched-padding 
crash and the HF_HUB_OFFLINE tokenizer-cache failure. The remaining pairs share broad areas but not the same concrete bug/change.", - "confidence": 0.96, - "canonical_issue_reason": "issue:44514 is the cleanest canonical issue for its pair: it is the older report and has an exact title match with 44545, so it best serves as the duplicate anchor.", + "summary": "This set is mostly heterogeneous, but it contains a couple of clear duplicate pairs around tokenizer/offline loading. The strongest duplicate anchors are 44514\u219444545 and 42886\u219444843.", + "confidence": 0.63, + "canonical_issue_reason": "There isn\u2019t a single coherent cluster-wide bug here; if one issue must serve as the anchor, 44545 is the clearest canonical because it is an exact duplicate of 44514 and has slightly more activity/inbound references.", "canonical_pr_reason": null, - "best_issue_reason": "issue:44843 is the strongest standalone issue overall because it states the offline-mode failure clearly and points to the likely root cause (`_patch_mistral_regex` calling `model_info()` unconditionally), making it the most actionable anchor.", + "best_issue_reason": "44545 is the best issue anchor in this set: it is an exact title-match duplicate target for 44514 and the most clearly representative of a concrete bug among the near-duplicate pairs.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43531", "right": "issue:43572", "accept": false, - "reason": "Different models and failure modes: Qwen3-MoE sliding-window behavior vs StableLmConfig pad_token_idx missing after v5 update." + "reason": "Different bugs: Qwen3-MoE sliding-window behavior vs StableLmConfig pad_token_idx regression." }, { "left": "issue:44292", "right": "issue:44479", "accept": false, - "reason": "Both involve Qwen models, but one is an NVFP4 loading error and the other is a video-input regression in Qwen VL/MoE paths." 
+ "reason": "Both are Qwen-related, but one is NVFP4 runtime failure and the other is a video-input regression for different models." }, { "left": "issue:41720", "right": "issue:45161", "accept": false, - "reason": "Different concrete problems: auto device mapping CUDA assert on A800 vs GPT-OSS MoE tensor-parallelism failure." + "reason": "Different model/runtime failures: cudaErrorAssert on Qwen3 auto device mapping vs TP-only issue for GPT-OSS MoE." }, { "left": "issue:43540", "right": "issue:43782", "accept": false, - "reason": "One is a Qwen3OmniMoe video-input processing ValueError; the other is a Qwen3VL from_pretrained weight_only loading error." + "reason": "Different code paths: Qwen3OmniMoe video processing vs Qwen3VL from_pretrained weight_only loading." }, { "left": "issue:43388", "right": "issue:45059", "accept": false, - "reason": "Metric gathering label truncation in Trainer/Mask2Former is not the same bug as SAM3 text/bbox weird behavior." + "reason": "Unrelated symptoms: metric gathering last-batch label truncation vs SAM3 text/bbox behavior." }, { "left": "issue:44186", "right": "issue:44361", "accept": false, - "reason": "Both are tokenizer crashes, but they affect different models and appear to stem from different code paths and inputs." + "reason": "Different tokenizers and failure modes: LayoutLMv2 batching/NER crash vs MLuke task AttributeError." }, { - "left": "issue:42915", - "right": "issue:43322", + "left": "issue:41720", + "right": "issue:43873", "accept": false, - "reason": "Qwen3Moe FineGrainedFP8Config failure and Llava Next segfault are unrelated concrete bugs." + "reason": "Both mention model execution/offloading, but the concrete failures are unrelated." }, { - "left": "issue:41720", - "right": "issue:43873", + "left": "issue:42915", + "right": "issue:43322", "accept": false, - "reason": "Both relate loosely to loading/offloading, but the reported failures and triggers are different enough to keep separate." 
+ "reason": "Different models and failure classes: FineGrainedFP8Config vs segmentation fault in Llava Next loading." }, { "left": "issue:36010", "right": "issue:42617", "accept": false, - "reason": "GenerationMixin import error and a 3d_parallel script failure are unrelated." + "reason": "Import error for GenerationMixin is unrelated to not being able to run 3d_parallel.py." }, { "left": "issue:45406", "right": "issue:45464", "accept": false, - "reason": "Gemma4Processor missing _tokenizer in serve is a different issue from Qwen3.5 streaming inference failure." + "reason": "Different features and failures: Gemma4Processor serve crash vs Qwen3.5 streaming chat/completions bug." }, { "left": "issue:44514", "right": "issue:44545", "accept": true, - "reason": "Exact same title and same crash scenario; 44514 is the older canonical report." + "reason": "Exact same bug title and same batched-input/padding=False crash path in Qwen2_5_VLProcessor.apply_chat_template." }, { "left": "issue:36331", "right": "issue:41720", "accept": false, - "reason": "Trainer compute_loss signature mismatch is unrelated to Qwen3 auto device-mapping CUDA asserts." + "reason": "CustomTrainer.compute_loss signature breakage is unrelated to Qwen3 auto device mapping." }, { "left": "issue:33357", "right": "issue:35707", "accept": false, - "reason": "MacOS bus error with a CLIP model is not the same underlying bug as progressive generation with inputs_embeds/past_key_values." + "reason": "MacOS bus error with CLIP model is unrelated to progressive generation with inputs_embeds/past_key_values." }, { "left": "issue:41720", "right": "issue:43746", "accept": false, - "reason": "Different failures: device-mapping CUDA assert vs PEFT-adapter local checkpoint loading for GraniteSpeech." + "reason": "Different problems: Qwen3 device-mapping assert vs GraniteSpeech PEFT checkpoint loading." 
}, { "left": "issue:30064", "right": "issue:34634", "accept": false, - "reason": "Image Processor void segmentation maps and BarkProcessor voice_preset are entirely different subsystems and bugs." + "reason": "ImageProcessor segmentation-map handling and BarkProcessor voice_preset are unrelated." }, { "left": "issue:43606", "right": "issue:43720", "accept": false, - "reason": "Both touch accelerate/offloading/loading, but one is a CPU-offload device mismatch and the other is unpacking packed weights in BitNet." + "reason": "CPU offload device mismatch and BitNet packed-weight unpacking are different bugs." }, { "left": "issue:44492", "right": "issue:44509", "accept": false, - "reason": "A cache-strategy typo and docs mentioning removed pipeline tasks are unrelated documentation/config issues." + "reason": "A typo in cache strategies is unrelated to stale docs mentioning removed pipeline tasks." }, { "left": "issue:43550", "right": "issue:43653", "accept": false, - "reason": "torch.compile/SDPA failure in Bamba-9B-v2 is unrelated to BigBirdTokenizer special-token registration." + "reason": "Bamba torch.compile/SDPA failure is unrelated to BigBirdTokenizer mask-token registration." }, { "left": "issue:44368", "right": "issue:45357", "accept": false, - "reason": "A tie_word_embeddings warning during LoRA fine-tuning is not the same as incorrect visual-encoder keys being saved." + "reason": "A warning about tie_word_embeddings is not the same as incorrect visual-encoder keys on save_pretrained." }, { "left": "issue:44625", "right": "issue:45479", "accept": false, - "reason": "num_labels propagation in Qwen3.5 config is different from the cross-model zero-loss classification bug." + "reason": "num_labels propagation in Qwen3.5 is not the same as zero-loss behavior for num_labels=1 across sequence-classification models." 
}, { "left": "issue:43421", "right": "issue:43927", "accept": false, - "reason": "Runtime post-processor updates for special tokens and save/load loss of custom token IDs are related broadly, but not the same concrete bug." + "reason": "Runtime special-token post-processing update and config save/load custom token IDs are different issues." }, { "left": "issue:43606", "right": "issue:43653", "accept": false, - "reason": "Device mismatch during CPU offload and mask-token registration in a tokenizer are unrelated." + "reason": "Different tokenizer/model bugs: offload device mismatch vs missing BigBird mask special-token registration." }, { "left": "issue:43782", "right": "issue:45381", "accept": false, - "reason": "from_pretrained weight_only loading error is distinct from wrong vision_position_ids in Qwen2.5-VL video input." + "reason": "Different Qwen vision bugs: weight_only load failure vs incorrect vision_position_ids for video input." }, { "left": "issue:34567", "right": "issue:35707", "accept": false, - "reason": "TrainerState token counting and progressive generation with past_key_values are different issues." + "reason": "TrainerState token-count tracking is unrelated to progressive generation." }, { "left": "issue:34634", "right": "issue:41720", "accept": false, - "reason": "BarkProcessor voice_preset and Qwen3 auto device mapping are unrelated." + "reason": "Bark voice_preset and Qwen3 auto device mapping are unrelated." }, { "left": "issue:44898", "right": "issue:45072", "accept": false, - "reason": "Perceiver interpolation at non-default resolution and dtype mismatches in bfloat16 inference do not describe the same bug." + "reason": "Perceiver interpolation failure and dtype mismatch in SwitchTransformers/TimmWrapperModel are different failures." 
}, { "left": "issue:42886", "right": "issue:44843", "accept": true, - "reason": "Both describe the same HF_HUB_OFFLINE tokenizer-loading failure; 44843 supplies the likely root cause via an unconditional `model_info()` call." + "reason": "Same offline-loading bug path: unconditional model_info() / Mistral regex patch breaks HF_HUB_OFFLINE cache loading." }, { "left": "issue:43421", "right": "issue:43653", "accept": false, - "reason": "TokenizersBackend post-processor refresh and BigBirdTokenizer special-token registration are different tokenization bugs." + "reason": "TokenizersBackend special-token refresh and BigBirdTokenizer mask-token registration are different bugs." }, { "left": "issue:44451", "right": "issue:44991", "accept": false, - "reason": "Both are tokenizer-loading regressions, but for different models and not enough evidence of a shared underlying cause." + "reason": "Both are tokenizer load failures, but they involve different models and no shared concrete code path is evident." }, { "left": "issue:44062", "right": "issue:44451", "accept": false, - "reason": "AddedToken constructor argument handling is unrelated to ScandiBERT tokenizer loading." + "reason": "AddedToken keyword-argument conflict is unrelated to tokenizer loading failure for ScandiBERT." } ] }, "evaluator_result": { "accept": true, - "feedback": "Grounded and conservative overall. The two accepted duplicate pairs are well supported by the packet: 44514/44545 have the same title and scenario, and 42886/44843 describe the same HF_HUB_OFFLINE tokenizer-loading failure with a plausible shared root cause. The remaining soft edges are rejected cautiously without overclaiming similarity." + "feedback": "Grounded overall: the packet is heterogeneous, the two accepted pairs are the clear same-bug matches, and the rejected pairs are conservatively separated. 
Minor wording nit: 44514/44545 are near-identical rather than literally exact title matches, but the duplicate judgment is still well supported." }, "evaluator_used": true, "retried": false, @@ -3961,8 +4012,8 @@ "issue:43746|issue:45381", "issue:45183|issue:45479", "issue:39692|issue:43873", - "issue:43618|issue:43697", "issue:43540|issue:44008", + "issue:43618|issue:43697", "issue:40444|issue:43746", "issue:43746|issue:43873", "issue:44220|issue:44661", @@ -3975,23 +4026,23 @@ "issue:44843|issue:45081", "issue:44062|issue:44488", "issue:44186|issue:44898", - "issue:43540|issue:45381", - "issue:43550|issue:45072" + "issue:43550|issue:45072", + "issue:43540|issue:45381" ], - "prepared_review_unit_hash": "f309753de1960f2da45be8c380ea47234b7a5dcd7928486e16379c9f0cb9bae2", + "prepared_review_unit_hash": "184acd7bec9d8df12b5679a0c0b94ee968f19599f440bfd79ebd2161d7475d9c", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 30, - "serialized_chars": 36405, + "serialized_chars": 36407, "estimated_input_tokens": 9102, "estimated_eval_tokens": 18460 }, @@ -4000,114 +4051,114 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is highly heterogeneous; the soft pairs are mostly broad-topic similarities rather than true duplicates, so I rejected all of them. 
The closest-looking pair is the Mistral tokenizer regex set, but even there the failures are different enough to keep separate.", - "confidence": 0.95, - "canonical_issue_reason": "issue:45081 is the most concrete, code-path-specific bug in the set (a direct tokenizer loading crash with a clear trigger and traceback), so it is the best representative issue if one must be chosen.", + "summary": "This set is largely a heterogeneous mix of unrelated bug reports; there is no strong duplicate cluster. The closest shared theme is tokenizer/loading regressions, but even those are mostly distinct code paths.", + "confidence": 0.21, + "canonical_issue_reason": "Issue 45081 is the most concrete, code-path-specific bug in the set and sits near the only small tokenizer-regex subcluster.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45081 has the strongest actionable reproduction and the most precisely scoped failure mode among these unrelated issues.", + "best_issue_reason": "45081 is open, reproducible, and narrowly scoped, making it the best representative issue for triage despite the overall heterogeneity.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43381", "right": "issue:43844", "accept": false, - "reason": "Different subsystems and failure modes: eval-mode gradient checkpointing vs ZeRO-3 gradient explosion during training." + "reason": "Different failures: eval-mode gradient checkpointing vs DeepSpeed ZeRO-3 gradient explosion." }, { "left": "issue:44918", "right": "issue:44936", "accept": false, - "reason": "One is an embeddings/SFT trainer failure, the other is evaluate() breaking after train(); not the same bug." + "reason": "Both trainer-related, but one is an SFT input-embedding unpacking bug and the other is an evaluate-after-train failure." }, { "left": "issue:44077", "right": "issue:44625", "accept": false, - "reason": "Unrelated model/config problems: patchtsmixer post_init policy vs Qwen3.5 num_labels propagation." 
+ "reason": "Unrelated models and code paths: patchtsmixer post_init policy vs Qwen3.5 num_labels propagation." }, { "left": "issue:44987", "right": "issue:45042", "accept": false, - "reason": "Different bugs and surfaces: model loading regression vs PIL image processor torchvision dependency issue." + "reason": "A model-loading regression vs a PIL image processor torchvision dependency issue." }, { "left": "issue:43531", "right": "issue:44589", "accept": false, - "reason": "Qwen3-MoE sliding_window bug is unrelated to the Float8 storage lookup error." + "reason": "Sliding-window logic in Qwen3-MoE is unrelated to a Float8 storage lookup error." }, { "left": "issue:43994", "right": "issue:44671", "accept": false, - "reason": "Both are model-output regressions, but they involve different models and distinct code paths." + "reason": "Different models, different symptoms, and different fixes: SigLIP2 inference mismatch vs CamemBERT masked LM regression." }, { "left": "issue:39692", "right": "issue:44610", "accept": false, - "reason": "Doc/example errors for SigLIP2 do not match an OmDet-Turbo processor/model input-size mismatch." + "reason": "Documentation/example errors do not match the OmDet-Turbo processor/model resolution mismatch." }, { "left": "issue:43421", "right": "issue:44186", "accept": false, - "reason": "Tokenizer post-processor update behavior is not the same as LayoutLMv2 NER padding/truncation crashes." + "reason": "TokenizersBackend post-processor updates and LayoutLMv2 NER padding crashes are separate tokenizer bugs." }, { "left": "issue:45081", "right": "issue:45356", "accept": false, - "reason": "Both mention fix_mistral_regex, but one is a loading crash and the other is a tokenizer regression/warning issue; not the same underlying defect." + "reason": "Both touch Mistral regex handling, but one crashes on backend_tokenizer access while the other reports codec handling and warning regressions." 
}, { "left": "issue:43525", "right": "issue:43531", "accept": false, - "reason": "Llama4Config pad_token_id absence is unrelated to Qwen3-MoE sliding_window handling." + "reason": "Llama4Config pad_token_id absence is unrelated to Qwen3-MoE sliding_window behavior." }, { "left": "issue:43596", "right": "issue:44220", "accept": false, - "reason": "BertModel/ZeRO-3 initialization failure is unrelated to audio feature extraction." + "reason": "BertModel ZeRO-3 init index error and audio fbank feature extraction are unrelated failures." }, { "left": "issue:43746", "right": "issue:45381", "accept": false, - "reason": "PEFT checkpoint loading for GraniteSpeech is unrelated to Qwen2.5-VL video vision_position_ids." + "reason": "GraniteSpeech PEFT checkpoint loading and Qwen2.5-VL video vision_position_ids are different model-specific bugs." }, { "left": "issue:45183", "right": "issue:45479", "accept": false, - "reason": "Input validation error messages are not the same bug as the degenerate zero-loss classification behavior." + "reason": "General input validation messaging bug vs a single-label classification zero-loss issue." }, { "left": "issue:39692", "right": "issue:43873", "accept": false, - "reason": "A documentation example bug is not the same as an offloading/quantization runtime bug." + "reason": "A SigLIP2 docs/example problem is not the same as quantization offloading not working." }, { - "left": "issue:43618", - "right": "issue:43697", + "left": "issue:43540", + "right": "issue:44008", "accept": false, - "reason": "Missing CLIP attentions assignment and RTDetrV2 output differences are different regressions." + "reason": "Different multimodal models and causes: Qwen3OmniMoe video processing vs Gemma 3n variable-name collision." }, { - "left": "issue:43540", - "right": "issue:44008", + "left": "issue:43618", + "right": "issue:43697", "accept": false, - "reason": "Different models and failure types: Qwen3OmniMoe video processing vs Gemma 3n variable collision." 
+ "reason": "CLIPOutput attentions regression and RTDetrV2 output drift are not the same code path." }, { "left": "issue:40444", "right": "issue:43746", "accept": false, - "reason": "Multi-image Qwen2.5-VL finetuning failure is unrelated to GraniteSpeech PEFT checkpoint loading." + "reason": "Qwen2.5-VL iterable-dataset finetuning failure is unrelated to GraniteSpeech PEFT checkpoint loading." }, { "left": "issue:43746", @@ -4119,73 +4170,73 @@ "left": "issue:44220", "right": "issue:44661", "accept": false, - "reason": "Audio feature extraction and add-new-model-like/tokenizer mapping failure are unrelated." + "reason": "Audio fbank feature extraction and tokenizer mapping registration are distinct subsystems." }, { "left": "issue:43716", "right": "issue:44008", "accept": false, - "reason": "Mistral-3 image dtype mismatch is unrelated to Gemma 3n audio tensor name collision." + "reason": "Mistral image-preprocessor dtype mismatch is unrelated to Gemma 3n forward variable collision." }, { "left": "issue:43335", "right": "issue:43756", "accept": false, - "reason": "Sparse-layer creation bug and RoPE-layer dropping are different model/config regressions." + "reason": "SwitchTransformers sparse-layer creation and Smollm3 RoPE-layer dropping are different configuration bugs." }, { "left": "issue:43720", "right": "issue:43819", "accept": false, - "reason": "Packed-weight loading in BitNet is unrelated to DAC forward/from_latents mismatch." + "reason": "BitNet packed-weight loading and DAC.from_latents/STE mismatch are unrelated." }, { "left": "issue:43653", "right": "issue:43720", "accept": false, - "reason": "BigBirdTokenizer special-token decoding bug is unrelated to BitNet packed-weight loading." + "reason": "BigBirdTokenizer special-token registration and BitNet weight unpacking do not share a code path." 
}, { "left": "issue:43421", "right": "issue:44811", "accept": false, - "reason": "Both involve tokenization/decoding, but one is runtime post-processor updates and the other is Whisper batch_decode skip_special_tokens behavior." + "reason": "Post-processor mutation at runtime is not the same as Whisper batch_decode ignoring skip_special_tokens." }, { "left": "issue:30064", "right": "issue:36010", "accept": false, - "reason": "Image processor segmentation-map handling is unrelated to a GenerationMixin import error." + "reason": "Void segmentation map processing and a GenerationMixin import error are unrelated." }, { "left": "issue:44843", "right": "issue:45081", "accept": false, - "reason": "Both touch _patch_mistral_regex, but one is unconditional hub metadata access/offline failure and the other is an attribute error on tokenizer loading." + "reason": "Both involve _patch_mistral_regex, but one breaks offline mode via model_info() and the other crashes on backend_tokenizer access; distinct bugs." }, { "left": "issue:44062", "right": "issue:44488", "accept": false, - "reason": "AddedToken special-argument duplication is unrelated to the model-loading problem for cjvt/sleng-bert." + "reason": "AddedToken special-argument duplication is unrelated to loading cjvt/sleng-bert." }, { "left": "issue:44186", "right": "issue:44898", "accept": false, - "reason": "Tokenizer padding/truncation crashes are not the same as Perceiver image resolution interpolation failures." + "reason": "LayoutLMv2 tokenizer crashes and Perceiver non-default-resolution failures are different issues." }, { - "left": "issue:43540", - "right": "issue:45381", + "left": "issue:43550", + "right": "issue:45072", "accept": false, - "reason": "Both are video-related, but they affect different models and different internal bugs." + "reason": "torch.compile+SDPA failure in Bamba is unrelated to bfloat16 dtype mismatches in SwitchTransformers/TimmWrapperModel." 
}, { - "left": "issue:43550", - "right": "issue:45072", + "left": "issue:43540", + "right": "issue:45381", "accept": false, - "reason": "Bamba torch.compile SDPA failure is unrelated to dtype mismatches in SwitchTransformers/TimmWrapperModel inference." + "reason": "Qwen3OmniMoe video-input processing and Qwen2.5-VL vision_position_ids are separate multimodal bugs." } ] }, @@ -4277,8 +4328,8 @@ "issue:43650|issue:43906", "issue:41720|issue:44991", "issue:44805|issue:44936", - "issue:30064|issue:36331", "issue:44485|issue:44509", + "issue:30064|issue:36331", "issue:43606|issue:45072", "issue:42175|issue:45081", "issue:43782|issue:45325", @@ -4290,208 +4341,214 @@ "issue:43262|issue:43819", "issue:43653|issue:44671" ], - "prepared_review_unit_hash": "a40f6d534b3accdb894f805fe61485b9afab9d7fbb8d4c540563e4fd505556c9", + "prepared_review_unit_hash": "da508ecdaf7b4fbc1a498091bf421465dedd5a03f4e6bb5b5374fc74678e4552", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 30, - "serialized_chars": 36866, - "estimated_input_tokens": 9217, - "estimated_eval_tokens": 18690 + "serialized_chars": 36862, + "estimated_input_tokens": 9216, + "estimated_eval_tokens": 18688 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is mostly a set of unrelated regression reports that share broad Transformers-version or model-family vocabulary rather than the same underlying bug. 
I found no soft-edge pair that is clearly the same concrete issue/change, so all candidate links are rejected.", - "confidence": 0.71, - "canonical_issue_reason": "issue:45020 is the broadest active report in the set and best fits a loose umbrella role for recent-version remote_code regressions, though the cluster overall is too heterogeneous for a strong canonical choice.", + "summary": "The items are a heterogeneous mix of unrelated bugs and regressions; the soft-similarity links mostly connect broad theme matches (v5 regressions, tokenizer/config loading, model-specific inference issues) rather than the same concrete underlying defect. No soft edge should be merged.", + "confidence": 0.93, + "canonical_issue_reason": "No single canonical duplicate exists because the cluster is not a true duplicate set. If one representative issue is needed, issue:45020 is the broadest umbrella for the remote_code/v5 regression theme.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45020 has the widest scope, is still open, and can plausibly serve as the representative tracking issue for the cluster\u2019s loose theme of recent-version breakages.", + "best_issue_reason": "issue:45020 is the best umbrella-style issue in this set because it groups the broad 'recent transformers versions break models using remote_code' regression theme more generally than the other, more specific reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43577", "right": "issue:43818", "accept": false, - "reason": "Different problems: one is a dtype-loading issue in BLIP2, the other is a Video-LLaVA model-structure/weight-sharing bug." + "reason": "Different bugs: BLIP2 dtype handling vs Video-LLaVA missing temporal attention/shared weights." 
}, { "left": "issue:43643", "right": "issue:44877", "accept": false, - "reason": "Both involve config loading, but one is about missing fields from trust_remote_code and the other is a strict-config failure for granite_speech; not the same bug." + "reason": "Both involve config loading, but the failures are different: missing fields with trust_remote_code vs strict config blocking granite_speech." }, { "left": "issue:43550", "right": "issue:43927", "accept": false, - "reason": "Unrelated: torch.compile/SDPA failure for Bamba versus DiaConfig losing custom token IDs on save/load." + "reason": "Unrelated code paths: torch.compile/SDPA failure in Bamba vs DiaConfig token-ID persistence bug." }, { "left": "issue:43782", "right": "issue:44977", "accept": false, - "reason": "Different model and failure mode: Qwen3VL weight_only loading error versus Qwen3.5 flash-attention generation issue." + "reason": "Different Qwen models and different failure modes: weight_only load error vs flash-attention generation issue." }, { "left": "issue:43531", "right": "issue:43643", "accept": false, - "reason": "No concrete overlap beyond being Transformer model/config issues; sliding_window behavior and missing config fields are distinct." + "reason": "Sliding-window logic in Qwen3-MoE is unrelated to trust_remote_code config deserialization." }, { "left": "issue:43577", "right": "issue:43582", "accept": false, - "reason": "BLIP2 dtype mismatch and AppleSilicon caching_allocator_warmup TypeError are unrelated." + "reason": "BLIP2 dtype mismatch and Apple Silicon allocator warmup TypeError are unrelated." }, { "left": "issue:44448", "right": "issue:44671", "accept": false, - "reason": "Both are v5 output regressions, but for different models and different code paths; too broad to merge." + "reason": "Both are v5 behavior regressions, but they affect different models and different failure surfaces (Pegasus generation output vs CamemBERT masked LM predictions)." 
}, { "left": "issue:43644", "right": "issue:43749", "accept": false, - "reason": "Non-persistent buffer initialization bug and FSDP CPU RAM efficient loading breakage are separate loading-path problems." + "reason": "Non-persistent buffer initialization bug is not the same as broken FSDP CPU RAM efficient loading." }, { "left": "issue:43653", "right": "issue:43819", "accept": false, - "reason": "Tokenizer special-token registration and DAC latent reconstruction are unrelated." + "reason": "Tokenizer special-token registration bug vs DAC latent reconstruction/STE mismatch." }, { "left": "issue:44561", "right": "issue:45020", "accept": false, - "reason": "45020 is a broad umbrella report, while 44561 is a specific v5 regression from removing is_torch_fx_available; not clearly the same concrete issue." + "reason": "44561 is a specific missing-symbol regression; 45020 is a broad umbrella issue about remote_code breakage, not the same concrete bug." }, { "left": "issue:43825", "right": "issue:43994", "accept": false, - "reason": "Pipeline error-message regression and SigLIP2 nonsensical outputs are different issues." + "reason": "Pipeline error-message wording is unrelated to SigLIP2 producing bad outputs." + }, + { + "left": "issue:43550", + "right": "issue:43819", + "accept": false, + "reason": "Different models and failures: Bamba compile/SDPA vs DAC latent conversion mismatch." }, { "left": "issue:43825", "right": "issue:43827", "accept": false, - "reason": "Related only at the documentation/pipeline-removal theme; one is an error message bug, the other is stale docs." + "reason": "Related to v5 pipeline removals, but one is an error-message bug and the other is docs lag; not the same defect." }, { "left": "issue:44186", "right": "issue:44442", "accept": false, - "reason": "LayoutLMv2Tokenizer NER/padding crash and FastSpeech2ConformerTokenizer loading failure are unrelated tokenizer issues." 
+ "reason": "LayoutLMv2 NER/padding crash and FastSpeech2Conformer tokenizer load failure are distinct tokenizer issues." }, { "left": "issue:44821", "right": "issue:45084", "accept": false, - "reason": "AutoImageProcessor URL loading and template-node compilation errors do not share the same failure mode." + "reason": "AutoImageProcessor URL loading and template-node compile errors are unrelated." }, { "left": "issue:43650", "right": "issue:43906", "accept": false, - "reason": "A low-content placeholder issue and an isolated reproduction of another issue are not duplicates." + "reason": "Placeholder-style title vs an isolated reproduction of another issue; no shared underlying bug is evident." }, { "left": "issue:41720", "right": "issue:44991", "accept": false, - "reason": "Different symptoms and subsystems: Qwen3 device-mapping CUDA assert versus tokenizer loading for EMBEDDIA/est-roberta." + "reason": "CUDA assert with Qwen3 device mapping is unrelated to tokenizer loading failure for est-roberta." }, { "left": "issue:44805", "right": "issue:44936", "accept": false, - "reason": "Mask-shape IndexError and trainer.evaluate after train are not the same bug." + "reason": "Mask-shape IndexError and trainer.evaluate() failing after train() are different problems." }, { - "left": "issue:30064", - "right": "issue:36331", + "left": "issue:44485", + "right": "issue:44509", "accept": false, - "reason": "Image processor void segmentation map handling is unrelated to CustomTrainer.compute_loss keyword-arg incompatibility." + "reason": "GLM-5 RoPE implementation discussion is unrelated to docs still referencing removed pipeline tasks." }, { - "left": "issue:44485", - "right": "issue:44509", + "left": "issue:30064", + "right": "issue:36331", "accept": false, - "reason": "GLM-5 RoPE discussion and docs mentioning removed pipeline tasks are different topics." + "reason": "ImageProcessor segmentation-map handling and CustomTrainer compute_loss signature break are unrelated." 
}, { "left": "issue:43606", "right": "issue:45072", "accept": false, - "reason": "CPU offload device mismatch and bfloat16 dtype mismatch are separate inference bugs." + "reason": "CPU offload device mismatch in Bark-small is not the same as bfloat16 dtype mismatches in SwitchTransformers/TimmWrapperModel." }, { "left": "issue:42175", "right": "issue:45081", "accept": false, - "reason": "TensorFlow extra installation issue and Mistral regex patch crash are unrelated." + "reason": "Packaging backend omission and Mistral regex patch crash are unrelated." }, { "left": "issue:43782", "right": "issue:45325", "accept": false, - "reason": "Both are Qwen VL issues, but one is a weight_only loading error and the other is a rope-index scaling bug; not the same change." + "reason": "Different Qwen2.5-VL rope-index bug and Qwen3VL weight_only load error are not the same issue." }, { "left": "issue:44610", "right": "issue:45072", "accept": false, - "reason": "Processor image-size mismatch and dtype mismatch in inference are different defects." + "reason": "OmDet-Turbo input-size mismatch is unrelated to bfloat16 dtype mismatch bugs." }, { "left": "issue:34634", "right": "issue:36010", "accept": false, - "reason": "BarkProcessor voice_preset and GenerationMixin import failure are unrelated." + "reason": "BarkProcessor voice_preset and GenerationMixin import failure are different breakages." }, { "left": "issue:44060", "right": "issue:44368", "accept": false, - "reason": "Both mention tied-weights warnings around Qwen models, but one is about an incorrect internal weight tie and the other is a user-visible warning during LoRA fine-tuning." + "reason": "Both mention tied-weights warnings in Qwen-family models, but they describe different configs and symptoms; not the same concrete bug." 
}, { "left": "issue:45081", "right": "issue:45406", "accept": false, - "reason": "Mistral regex patching crash and Gemma4Processor missing _tokenizer in transformers serve are different processor bugs." + "reason": "Mistral tokenizer regex patch crash and Gemma4Processor missing _tokenizer are different processor/tokenizer failures." }, { "left": "issue:43720", "right": "issue:45072", "accept": false, - "reason": "BitNet packed-weight unpacking and bfloat16 dtype mismatches are not the same underlying problem." + "reason": "BitNet packed-weight unpacking during accelerate loading is unrelated to dtype mismatch inference errors." }, { "left": "issue:43262", "right": "issue:43819", "accept": false, - "reason": "Audio sampling-rate defaulting and DAC latent reconstruction mismatch are unrelated." + "reason": "Audio processor chat-template sampling-rate default bug and DAC forward/from_latents mismatch are unrelated." }, { "left": "issue:43653", "right": "issue:44671", "accept": false, - "reason": "Tokenizer special-token registration and CamemBERT masked-LM prediction regression are different issues." + "reason": "BigBirdTokenizer special-token registration bug and CamemBERT masked-LM regression are different issues." 
} ] }, @@ -4593,23 +4650,23 @@ "issue:43531|issue:45042", "issue:43329|issue:44479", "issue:43257|issue:43931", - "issue:43257|issue:43756", - "issue:36010|issue:36331" + "issue:36010|issue:36331", + "issue:43257|issue:43756" ], - "prepared_review_unit_hash": "6b59f64fc4b00194160d74c2c96dc296206bbeb3c9a663b8a7af88ab77831f0e", + "prepared_review_unit_hash": "8b1ee8302727314f0200bfe372a68a8ecc6a177c3ad3327bc633eadf138afa46", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 31, - "serialized_chars": 36684, + "serialized_chars": 36681, "estimated_input_tokens": 9171, "estimated_eval_tokens": 18598 }, @@ -4618,208 +4675,181 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This set is mostly a grab bag of unrelated Transformers regressions across loading, tokenization, processors, and training. I\u2019d only merge one soft edge: the Qwen video-input regression pair (43329, 44479). Everything else looks like broad thematic similarity rather than the same underlying bug.", - "confidence": 0.37, - "canonical_issue_reason": "issue:43329 is the most concrete root-cause report here: it points to the broken video branch in `_get_num_multimodal_tokens` with undefined helpers/variables, which is the clearest diagnostic match for the only plausible duplicate pair.", + "summary": "This cluster is a mix of unrelated Transformers issues, mostly centered on model loading, tokenizers, processors, and training regressions. None of the proposed soft pairs look like the same underlying bug or change, so all soft edges are rejected. 
The most plausible umbrella issue is the FSDP loading regression because it is broad and well-discussed.", + "confidence": 0.61, + "canonical_issue_reason": "issue:43749 is the broadest, most central loading-regression report in the set and has the strongest discussion/inbound signal.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43329 is the best representative of the cluster\u2019s only real duplicate-like subcase because it is specific, actionable, and pinpoints the failing code path rather than just describing user-visible symptoms.", + "best_issue_reason": "issue:43749 is a good representative issue for the cluster because it is broad, high-impact, and closest to an umbrella regression rather than a narrow model-specific bug.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43749", "right": "issue:44291", "accept": false, - "reason": "Both involve model loading, but one is FSDP CPU RAM efficient loading and the other is an `init_empty_weights` argument mismatch; different failure points and fixes." + "reason": "Both are loading-related, but one is FSDP CPU RAM efficient loading and the other is init_empty_weights/_is_hf_initialized argument handling; different code paths and failure modes." }, { "left": "issue:43582", "right": "issue:44857", "accept": false, - "reason": "Apple Silicon allocator warmup TypeError vs CUDA float16 AMP loss crash; unrelated platform and subsystem." + "reason": "AppleSilicon caching_allocator_warmup TypeError is unrelated to LwDetrImageLoss crashing under float16 AMP/CUDA." }, { "left": "issue:43526", "right": "issue:43697", "accept": false, - "reason": "BEiT `reduce_labels` fast-processor bug and RTDetrV2 output drift are both vision-related, but they are different code paths and symptoms." + "reason": "BEiT Fast image processor label reduction is unrelated to RTDetrV2 output drift in v5." 
}, { "left": "issue:43526", "right": "issue:44554", "accept": false, - "reason": "BEiT image-label handling vs an MPS attention correctness issue; no shared bug." + "reason": "Image label reduction bug and MPS attention correctness issue are different subsystems and different symptoms." }, { "left": "issue:45084", "right": "issue:45538", "accept": false, - "reason": "Template-node compile failure and CLIP tokenizer max-length handling are unrelated." + "reason": "Template-node compilation error is unrelated to CLIPTokenizer model_max_length behavior." }, { "left": "issue:43329", "right": "issue:45600", "accept": false, - "reason": "Video multimodal token counting bug vs stale `auto_mappings.py` config references; different parts of the library." + "reason": "Undefined video-token helper bug is unrelated to stale auto_mappings references to removed configs." }, { "left": "issue:43653", "right": "issue:44912", "accept": false, - "reason": "BigBird special-token registration and GPT-oss MXFP4 loading behavior are unrelated." + "reason": "BigBird special-token decode bug is unrelated to MXFP4 quantization/load fallback in git-oss-20b." }, { "left": "issue:43454", "right": "issue:43577", "accept": false, - "reason": "AyaVision lm_head tying and BLIP-2 dtype propagation are different model-initialization bugs." - }, - { - "left": "issue:44568", - "right": "issue:45488", - "accept": false, - "reason": "Both are tokenizer regressions, but one is special-token/BOS-EOS handling and the other is pre-tokenizer override behavior." + "reason": "Both involve model loading, but AyaVision lm_head tying and BLIP2 dtype propagation are separate model-specific bugs." }, { "left": "issue:34634", "right": "issue:36331", "accept": false, - "reason": "BarkProcessor voice_preset and CustomTrainer `compute_loss` signature are unrelated." + "reason": "BarkProcessor voice_preset failure is unrelated to CustomTrainer.compute_loss signature changes." 
}, { "left": "issue:43638", "right": "issue:44936", "accept": false, - "reason": "Zero3 training index error vs evaluate-after-train failure; different trainer issues." + "reason": "DeepSpeed ZeRO-3 index error on a non-pretrained Bert model is not the same issue as trainer.evaluate() failing after training." }, { "left": "issue:43452", "right": "issue:44991", "accept": false, - "reason": "GGUF loading breaks for auto classes, while the other is a tokenizer-loading regression for a specific checkpoint; same broad area only." + "reason": "GGUF loading breakage for auto classes and tokenizer loading regression for est-roberta are different failures." }, { "left": "issue:43257", "right": "issue:43329", "accept": false, - "reason": "Qwen3 MoE weight conversion under accelerate/deepspeed is unrelated to the video multimodal token-count bug." + "reason": "Qwen3 MoE weight conversion under accelerate+deepspeed is unrelated to multimodal token counting video-branch undefined names." }, { "left": "issue:43746", "right": "issue:44811", "accept": false, - "reason": "PEFT checkpoint loading and Whisper batch decode skip-special-tokens are different audio/model-loading bugs." + "reason": "PEFT local checkpoint loading and Whisper batch_decode skip_special_tokens are unrelated bugs." }, { "left": "issue:38175", "right": "issue:44610", "accept": false, - "reason": "Unexpected zero probabilities in SigLIP2 vs OmDet-Turbo processor size mismatch; both vision issues, but not the same bug." + "reason": "Both are vision-related, but zero probabilities in SigLIP2 and processor/model resolution mismatch in OmDet-Turbo are not the same underlying defect." }, { "left": "issue:43854", "right": "issue:44977", "accept": false, - "reason": "GLM-4.7-Flash load test failure vs Qwen3.5 flash-attention generation regression; different models and code paths." + "reason": "GLM-4.7-Flash loading test failure is unrelated to Qwen3.5 flash-attention generation regression." 
}, { "left": "issue:44589", "right": "issue:45042", "accept": false, - "reason": "Float8 storage lookup during loading vs PIL backend requiring torchvision; unrelated." + "reason": "Float8 storage lookup failure is unrelated to PIL image processors incorrectly requiring torchvision." }, { "left": "issue:43257", "right": "issue:43696", "accept": false, - "reason": "Qwen3 MoE weight conversion and GPT-oss OOM are not the same failure." + "reason": "Qwen3 MoE weight conversion bug is unrelated to GPT-oss-20b CUDA OOM." }, { "left": "issue:39692", "right": "issue:44912", "accept": false, - "reason": "SigLIP2 doc-example/model-processor mismatch and GPT-oss MXFP4 fallback are different quantization/model-loading problems." + "reason": "SigLIP2 doc example errors/quantization failure are not the same as git-oss-20b MXFP4 loading fallback." }, { "left": "issue:44568", "right": "issue:45356", "accept": false, - "reason": "Special-token addition regression vs Kimi-K2.5 codec-handling regression; distinct tokenizer bugs." + "reason": "Both are tokenizer regressions, but BOS/EOS addition and Kimi-K2.5 codec/warning handling are different concrete bugs." }, { "left": "issue:36010", "right": "issue:43645", "accept": false, - "reason": "GenerationMixin import breakage and custom-model notebook initialization failure are related to v5 API changes but not the same bug." + "reason": "GenerationMixin import failure is unrelated to Jupyter custom-model initialization breakage." }, { "left": "issue:43819", "right": "issue:43927", "accept": false, - "reason": "DAC `from_latents` mismatch and DiaConfig save/load token-ID loss are unrelated." + "reason": "DAC latent reconstruction mismatch and DiaConfig token-ID persistence are different model families and unrelated bugs." }, { "left": "issue:43720", "right": "issue:44610", "accept": false, - "reason": "BitNet packed-weight loading and OmDet processor image-size mismatch are different problems." 
+ "reason": "Packed-weight unpacking during accelerate loading is unrelated to OmDet-Turbo processor resolution mismatch." }, { "left": "issue:43262", "right": "issue:44811", "accept": false, - "reason": "Audio chat-template sample-rate default and Whisper batch decode skip-special-tokens are distinct processor bugs." + "reason": "Audio chat-template sampling-rate defaulting and Whisper batch_decode special-token handling are separate processor bugs." }, { "left": "issue:43550", "right": "issue:44912", "accept": false, - "reason": "Bamba torch.compile/SDPA failure vs GPT-oss quantization fallback are unrelated." + "reason": "Bamba torch.compile/SDPA failure is unrelated to git-oss-20b quantization load fallback." }, { "left": "issue:43749", "right": "issue:44756", "accept": false, - "reason": "Both mention loading/OOM, but one is FSDP CPU RAM efficient loading and the other is mmap on Strix Halo; different root causes." + "reason": "FSDP CPU RAM efficient loading regression and mmap disablement on Strix Halo are different memory/loading issues." }, { "left": "issue:43531", "right": "issue:45042", "accept": false, - "reason": "Qwen3-MoE sliding window behavior and PIL torchvision dependency handling are unrelated." + "reason": "Qwen3-MoE sliding_window behavior is unrelated to PIL backend torchvision requirements." }, { "left": "issue:43329", "right": "issue:44479", - "accept": true, - "reason": "These look like the same video-input regression in Qwen VLM processors; 43329 identifies the concrete broken video token-counting path, while 44479 reports the broader symptom across affected models." - }, - { - "left": "issue:43257", - "right": "issue:43931", "accept": false, - "reason": "Qwen3 MoE weight conversion and Qwen3-VL shape mismatch are different loading bugs." - }, - { - "left": "issue:43257", - "right": "issue:43756", - "accept": false, - "reason": "Qwen3 MoE weight conversion is unrelated to Smollm3 RoPE-layer dropping." 
- }, - { - "left": "issue:36010", - "right": "issue:36331", - "accept": false, - "reason": "GenerationMixin import failure and `compute_loss` signature mismatch are unrelated." + "reason": "Both touch video/multimodal paths, but one is undefined helper usage in token counting and the other is a broader v5.3.0 video-input regression; not the same concrete bug." } ] }, - "evaluator_result": { - "accept": true, - "feedback": "The summary is grounded in the packet and stays conservative. The single accepted soft edge (43329, 44479) is plausibly the same video-input regression, and the rejected pairs are reasonably distinguished by different failure modes or code paths. No clear overstatement of evidence." - }, - "evaluator_used": true, + "evaluator_result": null, + "evaluator_used": false, "retried": false, - "accepted_nontrivial_soft_edge": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -4886,26 +4916,26 @@ ], "soft_pairs": [ "issue:34567|issue:40990", - "issue:43697|issue:45357", "issue:43329|issue:43450", + "issue:43697|issue:45357", "issue:43577|issue:45072", - "issue:44479|issue:45381", "issue:41720|issue:43540", + "issue:44479|issue:45381", "issue:43452|issue:43582", "issue:44987|issue:45538", "issue:42175|issue:44938", "issue:43317|issue:45538", "issue:43927|issue:44912", "issue:44291|issue:45042", + "issue:33357|issue:34567", "issue:43906|issue:45381", "issue:43540|issue:44560", - "issue:33357|issue:34567", "issue:43540|issue:44479", - "issue:42915|issue:45538", "issue:42915|issue:43742", + "issue:42915|issue:45538", "issue:42175|issue:43824", - "issue:43526|issue:45479", "issue:42915|issue:44821", + "issue:43526|issue:45479", "issue:44062|issue:44991", "issue:44737|issue:45588", "issue:42175|issue:44488", @@ -4916,220 +4946,220 @@ "issue:28282|issue:34689", "issue:36010|issue:42175" ], - "prepared_review_unit_hash": "f39cab3844e68597ebb04904a026187ed22343b431e2f29d68f78eb151c6ea55", + 
"prepared_review_unit_hash": "bf7ca8419556ccd38580200633b6e0a2b73bd09c7b1627c155c9fd2d1a2b70ff", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 30, - "serialized_chars": 36212, - "estimated_input_tokens": 9053, - "estimated_eval_tokens": 18362 + "serialized_chars": 36213, + "estimated_input_tokens": 9054, + "estimated_eval_tokens": 18364 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly unrelated issue reports with one plausible duplicate pair around the Qwen video-input regression; no PRs are present.", - "confidence": 0.44, - "canonical_issue_reason": "Issue 44479 is the broadest and clearest report: it describes a v5.3.0 video-input regression across multiple Qwen model families, which likely subsumes the more specific qwen2.5-vl manifestation.", + "summary": "This cluster is mostly heterogeneous: many unrelated model- or backend-specific issues, with one plausible duplicate-style link around Qwen video-input regressions. 
No PRs are present.", + "confidence": 0.41, + "canonical_issue_reason": "issue:44479 is the broadest and most central report here: it describes a v5.3.0 video-input regression across multiple Qwen VL variants, and it likely subsumes the narrower Qwen2.5-VL symptom in issue:45381.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 44479 is the best representative issue because it captures the shared regression surface and is broad enough to anchor the related Qwen video-input discussion.", + "best_issue_reason": "issue:44479 is the best representative issue because it states the regression clearly, spans multiple affected models, and matches the only plausible soft duplicate in the set.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:34567", "right": "issue:40990", "accept": false, - "reason": "Unrelated: training-state metric update vs perplexity evaluation behavior." + "reason": "Different problems: token-count tracking vs perplexity quality on a specific model." }, { - "left": "issue:43697", - "right": "issue:45357", + "left": "issue:43329", + "right": "issue:43450", "accept": false, - "reason": "Different bugs and model paths: RTDetr output drift vs Qwen3.5 save_pretrained key regression." + "reason": "Both involve video paths, but one is a missing helper/undefined variable bug and the other is a batched-shape processor issue; not the same code-path bug." }, { - "left": "issue:43329", - "right": "issue:43450", + "left": "issue:43697", + "right": "issue:45357", "accept": false, - "reason": "Both involve video/multimodal code, but one is an undefined-variable token-counting bug and the other is a batched processor shape issue." + "reason": "Different model families and failure modes: RTDetrV2 output drift vs Qwen3.5 save_pretrained key regression." }, { "left": "issue:43577", "right": "issue:45072", "accept": false, - "reason": "Both mention dtype, but they affect different models and different failure modes." 
- }, - { - "left": "issue:44479", - "right": "issue:45381", - "accept": true, - "reason": "Likely same underlying Qwen video-input regression: 44479 is the umbrella report, while 45381 is a specific qwen2.5-vl symptom around video position IDs." + "reason": "Both mention dtype, but they affect different components and symptoms; not clearly one underlying bug." }, { "left": "issue:41720", "right": "issue:43540", "accept": false, - "reason": "Different concrete failures: auto device mapping CUDA assert vs video-processing ValueError in a different Qwen model." + "reason": "Qwen3 auto device-map CUDA assert is unrelated to Qwen3OmniMoe video-input ValueError." + }, + { + "left": "issue:44479", + "right": "issue:45381", + "accept": true, + "reason": "Both point to the same Qwen2.5-VL video-input regression; 45381 looks like a narrower symptom of the broader v5.3.0 issue in 44479." }, { "left": "issue:43452", "right": "issue:43582", "accept": false, - "reason": "No shared code path: gguf loading regression vs Apple Silicon allocator warmup type error." + "reason": "Unrelated topics: gguf_file loading path vs Apple Silicon allocator warmup TypeError." }, { "left": "issue:44987", "right": "issue:45538", "accept": false, - "reason": "Unrelated: model-loading failure for a specific repo vs CLIP tokenizer max-length semantics." + "reason": "Loading a specific model regression is unrelated to CLIPTokenizer's model_max_length behavior." }, { "left": "issue:42175", "right": "issue:44938", "accept": false, - "reason": "Packaging/backend availability issue vs a Python 3.14 loading failure; too generic to treat as duplicates." + "reason": "Missing TensorFlow in a torch extra is a packaging/backend issue, not the same as Python 3.14 load failure." }, { "left": "issue:43317", "right": "issue:45538", "accept": false, - "reason": "Different areas: device_map/offload loading bug vs tokenizer max-length constant." 
+ "reason": "Device-map/offload failure is unrelated to tokenizer max-length configuration." }, { "left": "issue:43927", "right": "issue:44912", "accept": false, - "reason": "Different model/config bugs: DiaConfig persistence vs MXFP4 quantization fallback." + "reason": "Config save/load token-id loss is unrelated to MXFP4 quantization fallback." }, { "left": "issue:44291", "right": "issue:45042", "accept": false, - "reason": "Different load-time API mismatch vs image-processor torchvision dependency regression." + "reason": "Different loading/initialization regressions in different subsystems." + }, + { + "left": "issue:33357", + "right": "issue:34567", + "accept": false, + "reason": "MacOS bus error on CLIP model loading is unrelated to TrainerState token counting." }, { "left": "issue:43906", "right": "issue:45381", "accept": false, - "reason": "A reproduction of another issue, but not the same concrete bug as the qwen2.5-vl video-position-ID regression." + "reason": "An isolated reproduction of a prior issue is not the same as the Qwen2.5-VL video-position-id bug." }, { "left": "issue:43540", "right": "issue:44560", "accept": false, - "reason": "Both are video-related, but they describe different models and distinct observed failures." - }, - { - "left": "issue:33357", - "right": "issue:34567", - "accept": false, - "reason": "Unrelated Mac bus error in CLIP vs TrainerState token-count tracking." + "reason": "Both are video-related, but they hit different Qwen models and different error mechanisms." }, { "left": "issue:43540", "right": "issue:44479", "accept": false, - "reason": "Related theme, but not enough to confirm the same code-path bug; one is a specific model ValueError and the other is a broader regression report." + "reason": "Same broad video area, but the failures differ: Qwen3OmniMoe video processing vs a multi-model Qwen VL regression." 
}, { "left": "issue:42915", - "right": "issue:45538", + "right": "issue:43742", "accept": false, - "reason": "Different categories: FP8 config loading vs tokenizer max-length behavior." + "reason": "FineGrainedFP8Config failure is unrelated to a key error loading MobileLLM-125M." }, { "left": "issue:42915", - "right": "issue:43742", + "right": "issue:45538", "accept": false, - "reason": "Different failures in different model-loading paths." + "reason": "Unrelated model quantization/config failure vs tokenizer max-length behavior." }, { "left": "issue:42175", "right": "issue:43824", "accept": false, - "reason": "Packaging/backend issue vs missing Qwen2.5-VL class import." + "reason": "Packaging/backend inclusion issue is unrelated to missing Qwen2_5_VL export." }, { - "left": "issue:43526", - "right": "issue:45479", + "left": "issue:42915", + "right": "issue:44821", "accept": false, - "reason": "Different model families and different functional bugs." + "reason": "FP8 config failure is unrelated to AutoImageProcessor-from-URL loading." }, { - "left": "issue:42915", - "right": "issue:44821", + "left": "issue:43526", + "right": "issue:45479", "accept": false, - "reason": "Unrelated: FP8 Qwen3Moe failure vs AutoImageProcessor URL loading." + "reason": "Different bug classes: image-processor label reduction vs sequence-classification loss degeneracy." }, { "left": "issue:44062", "right": "issue:44991", "accept": false, - "reason": "Tokenizer-added-token argument bug vs tokenizer-loading regression for a specific model." + "reason": "Tokenizer constructor argument conflict is not the same as a specific tokenizer load regression." }, { "left": "issue:44737", "right": "issue:45588", "accept": false, - "reason": "XLNet CPU placement bug vs flash-attention sinkless-model AttributeError." + "reason": "XLNet CPU device placement bug is unrelated to flash_attention.py crashing on sink-less models." 
}, { "left": "issue:42175", "right": "issue:44488", "accept": false, - "reason": "Backend/install packaging issue vs model-loading failure for cjvt/sleng-bert." + "reason": "Both are load-related at a high level, but the titles indicate different root causes." }, { "left": "issue:45137", "right": "issue:45405", "accept": false, - "reason": "DeepSpeed ZeRO3 runtime error vs dependency version bump; unrelated." + "reason": "DeepSpeed ZeRO3 deque error is unrelated to a PEFT version bump." }, { "left": "issue:38175", "right": "issue:44898", "accept": false, - "reason": "Different vision-model bugs with different symptoms and code paths." + "reason": "Different image-model correctness issues with no clear shared root cause." }, { "left": "issue:43646", "right": "issue:43749", "accept": false, - "reason": "Transformers 5 custom init breakage vs FSDP CPU RAM efficient loading regression." + "reason": "Custom model init breakage in v5 is unrelated to FSDP CPU RAM efficient loading." }, { "left": "issue:43582", "right": "issue:43782", "accept": false, - "reason": "Apple Silicon allocator warmup type error vs Qwen3VL weight-only load error." + "reason": "Apple Silicon allocator TypeError is unrelated to Qwen3VL weight_only loading failure." }, { "left": "issue:28282", "right": "issue:34689", "accept": false, - "reason": "Missing PyTorch import vs Llama 3.2 Vision loading breakage; not the same issue." + "reason": "Generic PyTorch ImportError is unrelated to Llama 3.2 Vision model loading regression." }, { "left": "issue:36010", "right": "issue:42175", "accept": false, - "reason": "Import path regression for GenerationMixin vs missing TensorFlow backend in an install extra." + "reason": "Missing GenerationMixin export and missing TensorFlow backend are separate problems." 
} ] }, "evaluator_result": { "accept": true, - "feedback": "Grounded overall: the canonical issue choice is supported by the packet, no PRs are present, and the one accepted soft-edge pair (44479/45381) is stated cautiously. The rejected pairs are generally conservative and do not overclaim duplication." + "feedback": "The summary is grounded in the packet and stays conservative overall. The one accepted soft edge (44479/45381) is well supported by the titles and scope, and the rest of the rejections avoid overclaiming. No PR-pair issue applies here since there are no PRs present." }, "evaluator_used": true, "retried": false, @@ -5202,8 +5232,8 @@ "issue:43643|issue:45375", "issue:43756|issue:45325", "issue:43742|issue:44291", - "issue:44484|issue:44704", "issue:43493|issue:43994", + "issue:44484|issue:44704", "issue:28282|issue:29127", "issue:34567|issue:36331", "issue:44062|issue:45081", @@ -5217,8 +5247,8 @@ "issue:44514|issue:45381", "issue:43697|issue:45216", "issue:43452|issue:43577", - "issue:45042|issue:45081", "issue:43742|issue:44589", + "issue:45042|issue:45081", "issue:43531|issue:44821", "issue:43688|issue:45440", "issue:43452|issue:44843", @@ -5228,202 +5258,202 @@ "issue:43819|issue:44811", "issue:41762|issue:44936" ], - "prepared_review_unit_hash": "497a926599b4d3624f159cbaefa2e132eacd11606be0e0cf46edecd1917a48a5", + "prepared_review_unit_hash": "c7cb16ae802ef39d553e72c2e43faf5a5abf4d3b16ca03a3b4bcb15780bfdf61", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 28, - "serialized_chars": 35845, - "estimated_input_tokens": 8962, - "estimated_eval_tokens": 18180 + "serialized_chars": 35851, + "estimated_input_tokens": 8963, + "estimated_eval_tokens": 
18182 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is mostly a loose set of unrelated Transformers bug reports that only share high-level loading/API vocabulary. I rejected all soft edges because the paired items do not describe the same concrete underlying bug or change.", - "confidence": 0.62, - "canonical_issue_reason": "issue:43452 is the most broadly representative loading-related report in the set, spanning core `from_pretrained` behavior for multiple auto classes and fitting the loose thematic center of the cluster better than the more specialized reports.", + "summary": "These candidate pairs mostly share only broad subsystem overlap (loading, tokenizers, configs, multimodal models) and do not appear to be the same concrete bug/change. I would not merge any of the soft edges as duplicates.", + "confidence": 0.89, + "canonical_issue_reason": "Issue 44561 is the strongest representative to anchor a cluster: it describes a broad, regression-style breakage affecting trust_remote_code models and has the clearest cross-cutting impact. That said, the overall set is heterogeneous, so there is no true duplicate hub here.", "canonical_pr_reason": null, - "best_issue_reason": "It is a clear, user-facing regression in a core loading path and is broad enough to act as the best single representative issue for this heterogeneous cluster.", + "best_issue_reason": "Issue 44561 is the best global issue candidate because it is the most broadly impactful and clearly framed regression among the set; the rest are narrower, model-specific, or unrelated.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43643", "right": "issue:45375", "accept": false, - "reason": "Both involve missing config fields, but they are different mechanisms on different models: `trust_remote_code` returning incomplete config vs a strict config dropping `deepstack_visual_indexes`." 
+ "reason": "Both involve config fields and trust_remote_code, but one is a generic AutoConfig missing-fields report while the other is a specific Qwen3.5MoeVisionConfig strict-field omission; different code paths." }, { "left": "issue:43756", "right": "issue:45325", "accept": false, - "reason": "Both mention RoPE/position IDs, but the bugs are model-specific and distinct: Smollm3 dropping RoPE layers vs Qwen2.5-VL temporal position scaling." + "reason": "Both mention RoPE, but they describe different models and different bugs: SmolLM3 layer dropping vs Qwen2.5-VL temporal position scaling." }, { "left": "issue:43742", "right": "issue:44291", "accept": false, - "reason": "One is a key error when loading MobileLLM, the other is an `init_empty_weights`/`_is_hf_initialized` TypeError. Different failure modes and code paths." + "reason": "Both are model loading failures, but the symptoms and root causes differ: a KeyError on MobileLLM loading versus an unexpected _is_hf_initialized argument with init_empty_weights." }, { - "left": "issue:44484", - "right": "issue:44704", + "left": "issue:43493", + "right": "issue:43994", "accept": false, - "reason": "A save-pretrained sharding-size question is unrelated to `AutoProcessor` failing to forward kwargs to `cached_file`." + "reason": "Both are about SigLIP2, but one reports HF/JAX implementation divergence and the other reports nonsensical AutoModel/pipeline outputs; not enough evidence of the same bug." }, { - "left": "issue:43493", - "right": "issue:43994", + "left": "issue:44484", + "right": "issue:44704", "accept": false, - "reason": "Both are about SigLIP2, but one is a HF-vs-JAX implementation discrepancy and the other is bad outputs with AutoModel/pipeline; not the same concrete bug." + "reason": "Unrelated API issues: save_pretrained max_shard_size behavior versus AutoProcessor not forwarding kwargs to cached_file." 
}, { "left": "issue:28282", "right": "issue:29127", "accept": false, - "reason": "Torch import failure for AutoModel is unrelated to unclear error messaging for insufficient boxes in LayoutLMv3." + "reason": "These are entirely different errors: missing PyTorch for AutoModel versus a LayoutLMv3 box-information error message." }, { "left": "issue:34567", "right": "issue:36331", "accept": false, - "reason": "Trainer state tracking and a custom `compute_loss` signature break are separate trainer issues with different symptoms and fixes." + "reason": "Different trainer issues: num_input_tokens_seen not updating versus compute_loss receiving an unexpected num_items_in_batch argument." }, { "left": "issue:44062", "right": "issue:45081", "accept": false, - "reason": "One is an `AddedToken` constructor conflict, the other is a Mistral regex patch crashing on tokenizer internals; they are different tokenizer bugs." + "reason": "Both involve tokenizers, but one is an AddedToken constructor conflict and the other is a backend_tokenizer AttributeError in _patch_mistral_regex." }, { "left": "issue:44291", "right": "issue:44821", "accept": false, - "reason": "`init_empty_weights` loading breakage and URL loading for `AutoImageProcessor` are unrelated." + "reason": "Different loading paths and failures: init_empty_weights/_is_hf_initialized versus loading an AutoImageProcessor from URL." }, { "left": "issue:43452", "right": "issue:44488", "accept": false, - "reason": "Both are loading-related, but one is a GGUF-specific breakage in auto classes and the other is a model-specific inability to load `cjvt/sleng-bert`." + "reason": "One is about gguf_file breaking AutoTokenizer/AutoModelForCausalLM; the other is a specific model that no longer loads. Too vague to call the same bug." }, { "left": "issue:44792", "right": "issue:44964", "accept": false, - "reason": "Different multimodal models and different reports; no evidence they share the same underlying defect." 
+ "reason": "Both are multimodal/model-loading problems, but they concern different models and likely different code paths." }, { "left": "issue:44514", "right": "issue:45290", "accept": false, - "reason": "Both crash in `apply_chat_template`, but the triggering conditions differ materially: batched input with `padding=False` vs assistant messages with tool calls and no content." + "reason": "Both hit apply_chat_template, but one is a batched padding=False crash and the other is tool-call assistant messages with no content." }, { "left": "issue:45405", "right": "issue:45600", "accept": false, - "reason": "A PEFT version bump issue is unrelated to stale `auto_mappings` references to removed configs." + "reason": "Completely different concerns: unreleased PEFT version bump versus stale auto_mappings references breaking CI." }, { "left": "issue:43643", "right": "issue:44561", "accept": false, - "reason": "Both involve config/remote-code compatibility, but one is missing fields from `AutoConfig`, while the other is removal of `is_torch_fx_available` breaking remote-code models." + "reason": "Both are config/loading regressions, but one is missing fields under trust_remote_code while the other is a removed helper breaking trust_remote_code models." }, { "left": "issue:43688", "right": "issue:43749", "accept": false, - "reason": "Auxiliary-loss normalization and FSDP CPU RAM efficient loading are unrelated subsystems and failure modes." + "reason": "Different subsystems and failures: auxiliary-loss normalization in MoE models versus FSDP_CPU_RAM_EFFICIENT_LOADING breakage." }, { "left": "issue:44514", "right": "issue:45381", "accept": false, - "reason": "Both concern Qwen2.5-VL chat/template behavior, but one is a template crash on batched padding and the other is incorrect `vision_position_ids`; not the same bug." + "reason": "Both involve Qwen2.5-VL vision/chat processing, but one is a batching/padding crash and the other is wrong vision_position_ids for video input." 
}, { "left": "issue:43697", "right": "issue:45216", "accept": false, - "reason": "RTDetrV2 output mismatch and Qwen3.5 checkpoint save corruption are different regression classes with different code paths." + "reason": "Different model families and bugs: RTDetrV2 output changes versus Qwen3.5 save_pretrained checkpoint correctness." }, { "left": "issue:43452", "right": "issue:43577", "accept": false, - "reason": "`gguf_file`/auto-class loading issues are unrelated to BLIP2 dtype propagation for `qformer`." + "reason": "One concerns gguf/model loading, the other dtype propagation for Blip2/Qformer; no shared concrete bug." }, { - "left": "issue:45042", - "right": "issue:45081", + "left": "issue:43742", + "right": "issue:44589", "accept": false, - "reason": "Torchvision being incorrectly required for PIL backend image processors is unrelated to the Mistral regex/tokenizer crash." + "reason": "Both are load-time TypeErrors, but the failure signatures and mechanisms are unrelated." }, { - "left": "issue:43742", - "right": "issue:44589", + "left": "issue:45042", + "right": "issue:45081", "accept": false, - "reason": "MobileLLM key error and a missing `Float8_e4m3fnStorage` type are separate loading/runtime failures." + "reason": "Different areas: PIL backend image processors requiring torchvision versus a Mistral tokenizer regex patch crash." }, { "left": "issue:43531", "right": "issue:44821", "accept": false, - "reason": "Qwen3-MoE sliding-window behavior and image processor loading from URL are unrelated." + "reason": "Unrelated: Qwen3-MoE sliding_window behavior versus loading AutoImageProcessor from URL." }, { "left": "issue:43688", "right": "issue:45440", "accept": false, - "reason": "Both involve MoE-style models, but the reported defects are distinct: auxiliary-loss normalization vs implementation divergence from a remote model." 
+ "reason": "Both touch MoE-like models, but one is auxiliary-loss normalization and the other is DeepSeekV3 implementation divergence." }, { "left": "issue:43452", "right": "issue:44843", "accept": false, - "reason": "GGUF/auto-class loading and offline `model_info()` calls in `_patch_mistral_regex` are different loading regressions." + "reason": "gguf_file loading breakage is unrelated to the offline-mode model_info call in _patch_mistral_regex." }, { "left": "issue:44625", "right": "issue:44877", "accept": false, - "reason": "Qwen3.5 `num_labels` propagation and strict config preventing `granite_speech` loading are separate config bugs." + "reason": "Both mention config handling, but one is num_labels propagation and the other is strict config blocking granite_speech loading." }, { "left": "issue:43653", "right": "issue:44568", "accept": false, - "reason": "Special-token registration causing empty decode output is different from `add_special_tokens=True` failing to add BOS/EOS." + "reason": "Both are tokenizer special-token issues, but they affect different tokenizers and different symptoms; not clearly the same defect." }, { "left": "issue:43720", "right": "issue:44898", "accept": false, - "reason": "BitNet packed-weight loading and Perceiver non-default-resolution failure are unrelated." + "reason": "Different components and bug classes: BitNet packed-weight loading versus Perceiver positional-encoding interpolation." }, { "left": "issue:43819", "right": "issue:44811", "accept": false, - "reason": "DAC latent decoding mismatch and Whisper `batch_decode` ignoring `skip_special_tokens` are different bugs in different components." + "reason": "Different decoding/computation bugs: DAC.from_latents mismatch versus Whisper processor.batch_decode ignoring skip_special_tokens." 
}, { "left": "issue:41762", "right": "issue:44936", "accept": false, - "reason": "DeepSpeed ZeRO-3 loading for Gemma3 and `trainer.evaluate()` after `train()` are unrelated trainer/model-loading issues." + "reason": "These are unrelated training/loading issues: ZeRO-3 Gemma3 loading IndexError versus trainer.evaluate failing after train." } ] }, @@ -5500,8 +5530,8 @@ "issue:43295|issue:45356", "issue:44466|issue:45005", "issue:44464|issue:44743", - "issue:41720|issue:43906", "issue:24643|issue:29127", + "issue:41720|issue:43906", "issue:43525|issue:43643", "issue:43577|issue:43697", "issue:38175|issue:43295", @@ -5532,30 +5562,30 @@ "issue:32090|issue:34567", "issue:32090|issue:33666" ], - "prepared_review_unit_hash": "2fd41c04be8e77d6954e741c61678b11eb23dbf55502cc321b32cf5f9dd76f89", + "prepared_review_unit_hash": "ee07b8b04ff05b25964f34d48053205140eb56dd808c22f20b3d171632451b96", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 34, - "serialized_chars": 38189, - "estimated_input_tokens": 9548, - "estimated_eval_tokens": 19352 + "serialized_chars": 38183, + "estimated_input_tokens": 9546, + "estimated_eval_tokens": 19348 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This set is heterogeneous: the soft links are mostly broad subsystem similarity (tokenizers, Qwen, tied weights, generation) rather than the same underlying bug. I would not merge any of these pairs as duplicates.", - "confidence": 0.95, + "summary": "This cluster is heterogeneous: the issues span unrelated tokenizer, trainer, multimodal, generation, loading, and serialization problems. 
None of the soft pairs look like the same concrete bug/change, so I would not merge any of them as duplicates and there is no clear canonical representative.", + "confidence": 0.97, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, @@ -5565,205 +5595,205 @@ "left": "issue:43295", "right": "issue:45356", "accept": false, - "reason": "Both are tokenizer/processor regressions, but they affect different code paths: processor.tokenizer/images handling vs Kimi-K2.5 codec and warning behavior." + "reason": "Different regressions in different tokenizer/processor flows; same broad area, not the same bug." }, { "left": "issue:44466", "right": "issue:45005", "accept": false, - "reason": "Both mention tied weights, but one is a device-dependent lm_head serialization bug and the other is a broader translation-model tied-weights issue." + "reason": "Both mention tied weights, but one is device-dependent lm_head serialization and the other is a broader translation-model v5 issue." }, { "left": "issue:44464", "right": "issue:44743", "accept": false, - "reason": "Different generation bugs: compiled-forward inconsistency vs recurrent state reset with cache and seq_len>1." + "reason": "One is chunked generation with compiled forward; the other is recurrent state reset with cache. Different code paths." }, { - "left": "issue:41720", - "right": "issue:43906", + "left": "issue:24643", + "right": "issue:29127", "accept": false, - "reason": "No clear match; one is a Qwen3 auto-device-map cuda assert, the other is an isolated reproduction of a different issue." + "reason": "Training/DeepSpeed weight-shape error vs layoutlmv3 box validation message. Unrelated bugs." }, { - "left": "issue:24643", - "right": "issue:29127", + "left": "issue:41720", + "right": "issue:43906", "accept": false, - "reason": "Completely different failures: DeepSpeed training weight-shape error vs LayoutLMv3 box-validation error message." 
+ "reason": "Qwen3 auto device mapping crash vs an isolated reproduction of another issue; no evidence they are the same defect." }, { "left": "issue:43525", "right": "issue:43643", "accept": false, - "reason": "Both are config/field problems, but they concern different models and different missing-field behavior." + "reason": "Missing pad_token_id in Llama4Config vs missing fields from trust_remote_code AutoConfig. Similar symptom, different issue." }, { "left": "issue:43577", "right": "issue:43697", "accept": false, - "reason": "Unrelated bugs: BLIP2 dtype not propagated vs RTDetrV2 output mismatch in v5." + "reason": "Blip2 dtype propagation bug vs RTDetrV2 output mismatch. Different model families and failure modes." }, { "left": "issue:38175", "right": "issue:43295", "accept": false, - "reason": "Different models and symptoms: zero probabilities in SigLIP2 vs processor.tokenizer/images regression." + "reason": "Siglip2 zero probabilities vs processor.tokenizer/image-handling regression. Not the same underlying bug." }, { "left": "issue:44361", "right": "issue:44442", "accept": false, - "reason": "Both are tokenizer-loading failures, but they are different models and different error modes." + "reason": "Tokenizer failures, but for different tokenizers and likely different loading paths." }, { "left": "issue:44843", "right": "issue:44991", "accept": false, - "reason": "Both involve tokenizer loading, but one is an offline model_info call bug and the other is a model-specific v5 tokenizer load failure." + "reason": "Both are tokenizer-loading regressions, but one is offline model_info patching and the other is a specific model load failure." }, { "left": "issue:43653", "right": "issue:44361", "accept": false, - "reason": "Different tokenizers and failures: BigBird special-token registration vs MLukeTokenizer attribute error." + "reason": "BigBird special-token decode issue vs MLukeTokenizer task AttributeError. Unrelated." 
}, { "left": "issue:44479", "right": "issue:44936", "accept": false, - "reason": "Different areas entirely: Qwen3.5 video regression vs Trainer evaluate-after-train behavior." + "reason": "Video-input regression in Qwen VLMs vs Trainer evaluate-after-train failure. Different subsystems." }, { "left": "issue:43122", "right": "issue:43906", "accept": false, - "reason": "Only loosely related through tokenization/versioning; the reported failures and targets are not the same bug." + "reason": "Tokenization behavior change vs a reproduction of another issue. No shared concrete bug." }, { "left": "issue:44977", "right": "issue:45356", "accept": false, - "reason": "Different regressions: Qwen3.5 flash-attention generation vs Kimi-K2.5 tokenizer codec handling." + "reason": "Flash-attention generation bug vs tokenizer codec/regex regression. Different components." }, { "left": "issue:28282", "right": "issue:32090", "accept": false, - "reason": "ImportError for missing PyTorch is unrelated to Trainer NoneType broadcast during distributed loading." + "reason": "PyTorch import error vs Trainer GPU broadcast NoneType error. Not duplicates." }, { "left": "issue:28282", "right": "issue:33357", "accept": false, - "reason": "Different problems: missing PyTorch dependency vs MacOS bus error with a CLIP model." + "reason": "ImportError vs MacOS bus error. Completely different failures." }, { "left": "issue:28282", "right": "issue:35141", "accept": false, - "reason": "No shared bug; dependency import failure is unrelated to embedding resize/reinit behavior." + "reason": "Import-time PyTorch missing error vs tied-embedding reinitialization bug. Unrelated." }, { "left": "issue:43577", "right": "issue:45588", "accept": false, - "reason": "Unrelated: BLIP2 dtype propagation vs flash_attention crash on s_aux=None." + "reason": "Blip2 dtype issue vs flash_attention AttributeError on s_aux=None. Different code paths." 
}, { "left": "issue:30064", "right": "issue:33666", "accept": false, - "reason": "Different subsystems and failures: image processor void segmentation maps vs Qwen2-VL multi-GPU training." + "reason": "Void segmentation map processing vs Qwen2-VL multi-GPU training. Unrelated." }, { "left": "issue:43577", "right": "issue:44554", "accept": false, - "reason": "Different code paths: BLIP2 dtype issue vs MPS attention correctness issue when value head dim differs." + "reason": "Model dtype propagation vs MPS attention correctness issue. Not the same bug." }, { "left": "issue:43653", "right": "issue:43927", "accept": false, - "reason": "Both are tokenizer/config correctness issues, but the underlying bugs are different and not mergeable as one fix." + "reason": "BigBird mask-token decode issue vs DiaConfig custom token IDs lost on save/load. Different token/config bugs." }, { "left": "issue:44464", "right": "issue:44977", "accept": false, - "reason": "Different generation regressions: compiled forward chunking vs flash-attention generation on Qwen3.5." + "reason": "Compiled-forward generation inconsistency vs flash-attention generation failure. Similar symptom only, not same defect." }, { "left": "issue:43531", "right": "issue:43742", "accept": false, - "reason": "No overlap beyond being model-loading/runtime issues; sliding_window bug and MobileLLM key error are distinct." + "reason": "Qwen3-MoE sliding_window issue vs MobileLLM loading key error. Different areas." }, { "left": "issue:43329", "right": "issue:45325", "accept": false, - "reason": "Both are Qwen multimodal/video-related, but one is an undefined-symbol video branch bug and the other is rope/position-id scaling." + "reason": "Undefined helpers in multimodal video token counting vs rope-index scaling for still images. Different multimodal bugs." }, { "left": "issue:44479", "right": "issue:44625", "accept": false, - "reason": "Different bugs in Qwen3.5: video input regression vs num_labels propagation." 
+ "reason": "Qwen VL video regression vs num_labels propagation bug. Unrelated." }, { "left": "issue:43550", "right": "issue:44361", "accept": false, - "reason": "Different components and failures: torch.compile/SDPA on Bamba vs tokenizer attribute error on MLuke." + "reason": "torch.compile/SDPA model failure vs tokenizer AttributeError. Different code paths." }, { "left": "issue:43452", "right": "issue:44451", "accept": false, - "reason": "Both are loading issues, but one is gguf_file handling in Auto* loaders and the other is a model-specific tokenizer load failure." + "reason": "gguf_file loading breakage vs a specific model-load failure. No evidence of same root cause." }, { "left": "issue:43504", "right": "issue:43720", "accept": false, - "reason": "Unrelated model-loading bugs: legacy field in BEiT vs packed-weight unpacking in BitNet/Aircelerate loading." + "reason": "Legacy-field preload bug vs packed-weight unpacking bug. Different loading mechanisms." }, { "left": "issue:41720", "right": "issue:44464", "accept": false, - "reason": "Different failures and execution paths: auto device mapping cuda assert vs chunked generation with compiled forward." + "reason": "CUDA assert on auto device mapping vs chunked generation inconsistency. Unrelated." }, { "left": "issue:43504", "right": "issue:43550", "accept": false, - "reason": "Different models and issues; shared only at a very broad \u201cruntime bug\u201d level." + "reason": "Pretrained-model legacy field issue vs compile/SDPA failure. Different subsystems." }, { "left": "issue:43262", "right": "issue:45084", "accept": false, - "reason": "Audio chat-template sampling-rate defaulting is unrelated to compilation failing on non-template nodes." + "reason": "Audio chat template sampling-rate default vs template compilation TypeError. Not the same bug." 
}, { "left": "issue:44849", "right": "issue:45357", "accept": false, - "reason": "Different regressions in Qwen3.5: hidden-states behavior vs incorrect visual-encoder keys on save_pretrained." + "reason": "Qwen3.5 hidden-states bug vs incorrect visual encoder keys on save_pretrained. Different regressions." }, { "left": "issue:32090", "right": "issue:34567", "accept": false, - "reason": "Trainer distributed broadcast failure is unrelated to TrainerState.num_input_tokens_seen not updating." + "reason": "GPU broadcast NoneType error vs TrainerState token counter not updating. Different issues." }, { "left": "issue:32090", "right": "issue:33666", "accept": false, - "reason": "Different training-time problems: NoneType broadcast vs multi-GPU training for Qwen2-VL." + "reason": "Trainer broadcast error vs Qwen2-VL multi-GPU training request. Unrelated." } ] }, @@ -5852,8 +5882,8 @@ "issue:43688|issue:43873", "issue:43257|issue:43299", "issue:43278|issue:43844", - "issue:44488|issue:44843", "issue:43540|issue:44977", + "issue:44488|issue:44843", "issue:43720|issue:44361", "issue:39692|issue:43493", "issue:44964|issue:45405", @@ -5869,20 +5899,20 @@ "issue:43688|issue:43746", "issue:39692|issue:43295" ], - "prepared_review_unit_hash": "0a8f87fa71a06c139253751805aa1355af77bb1c9044c8e53a7a11b1ec9f1f3b", + "prepared_review_unit_hash": "76411339043bb094699a04b3fa22b9fd48ef5bb9bbbe73c6dd42e3c6c61d1cfa", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 31, - "serialized_chars": 37135, + "serialized_chars": 37136, "estimated_input_tokens": 9284, "estimated_eval_tokens": 18824 }, @@ -5891,156 +5921,156 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - 
"summary": "The cluster is mostly a grab-bag of distinct model-loading, config, tokenizer, multimodal, and training regressions that only look similar at the title level. I would not merge any of the soft-edge pairs; they describe different concrete bugs. The broadest representative issue is the generic v5/custom-initialization regression.", - "confidence": 0.56, - "canonical_issue_reason": "issue:43646 is the broadest umbrella item in the set: a generic Transformers 5.0 custom model initialization regression that can represent the wider class of load/init failures here.", + "summary": "This cluster is highly heterogeneous: most items are separate model-specific bugs, regressions, or docs issues rather than duplicates. The soft-similarity pairs mostly share broad themes like loading, config fields, multimodal processing, or v5 regressions, but not the same concrete code-path failure.", + "confidence": 0.19, + "canonical_issue_reason": "issue:43873 is the most broadly scoped and active item in the set, with multiple inbound references and a general offloading/quantization theme that is closest to the few related loading/runtime issues here.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43646 is the most representative anchor for this cluster because it is broader than the model-specific loading/config bugs and best matches the recurring v5 regression theme.", + "best_issue_reason": "issue:43873 is the best representative issue because it is broader than the highly model-specific reports and sits near the main runtime/offload cluster, even though the overall set does not form a tight duplicate group.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43504", "right": "issue:44610", "accept": false, - "reason": "Different failures: BEiT legacy-field loading vs OmDet processor/model image-size mismatch." + "reason": "Different models and failures: legacy field loading vs processor/model input-size mismatch." 
}, { "left": "issue:43278", "right": "issue:43688", "accept": false, - "reason": "Both mention dtype/normalization issues, but one is an eval dtype regression and the other is auxiliary-loss normalization in MoE models." + "reason": "One is a dtype regression in evaluation; the other is auxiliary-loss normalization. Same training stack, different bug." }, { "left": "issue:43334", "right": "issue:43572", "accept": false, - "reason": "Different config fields and models: Qwen3-VL pad_token_id vs StableLm pad_token_idx." + "reason": "Both are missing config attributes, but on different models and different fields, so not the same bug." }, { "left": "issue:43525", "right": "issue:43572", "accept": false, - "reason": "Both are missing config attributes, but they affect different model families and different fields." + "reason": "Same broad symptom class, but different config attributes and model families." }, { "left": "issue:43756", "right": "issue:44913", "accept": false, - "reason": "Smollm3 RoPE-layer dropping is unrelated to GPTNeoX rotary_pct reload behavior." + "reason": "Both involve config persistence, but one is RoPE layer drops and the other is rotary_pct reload; different code paths." }, { "left": "issue:43540", "right": "issue:43716", "accept": false, - "reason": "Qwen3OmniMoe video-input processing error is not the same as Mistral-3 image-preprocessor dtype mismatch." + "reason": "Different modalities and root causes: video processing error vs image preprocessor/model dtype mismatch." }, { "left": "issue:43493", "right": "issue:43825", "accept": false, - "reason": "SigLIP2 implementation mismatch is unrelated to pipeline translation-task messaging in v5." + "reason": "SigLIP2 implementation discrepancy is unrelated to a pipeline error-message regression." }, { "left": "issue:43742", "right": "issue:44821", "accept": false, - "reason": "MobileLLM key error and AutoImageProcessor URL loading failure are different code paths and model types." 
+ "reason": "Both are loading-related, but one is a model key error and the other is URL image-processor loading." }, { "left": "issue:45200", "right": "issue:45229", "accept": false, - "reason": "Gemma 4 text-only fine-tuning defaults are unrelated to Gemma 4 multi-GPU inference OOM." + "reason": "Same Gemma4 family, but one is a training label/mask config issue and the other is multi-GPU inference OOM." }, { "left": "issue:33666", "right": "issue:36010", "accept": false, - "reason": "Multi-GPU training for Qwen2-VL is unrelated to an import error for GenerationMixin." + "reason": "Multi-GPU training and a GenerationMixin import failure are unrelated." }, { "left": "issue:43720", "right": "issue:43927", "accept": false, - "reason": "Packed-weight unpacking during accelerate loading is not the same bug as DiaConfig losing custom token IDs on save/load." + "reason": "Packed-weight unpacking during accelerate loading is unrelated to DiaConfig token-ID persistence." }, { "left": "issue:43606", "right": "issue:43927", "accept": false, - "reason": "CPU offload device mismatch and config token-ID loss are separate failures." + "reason": "CPU offload device mismatch and config token-ID loss are different failure modes." }, { "left": "issue:43688", "right": "issue:43873", "accept": false, - "reason": "Aux-loss normalization in MoE models is unrelated to quantization/offloading behavior." + "reason": "Aux-loss normalization and quantized offloading are separate bugs." }, { "left": "issue:43257", "right": "issue:43299", "accept": false, - "reason": "Both involve Qwen3 MoE loading, but one is accelerate+deepspeed conversion and the other is a v5 loading regression; not the same concrete bug." + "reason": "Both mention Qwen3 MOE, but one is weight conversion during accelerated loading and the other is a dev-regression loading failure." 
}, { "left": "issue:43278", "right": "issue:43844", "accept": false, - "reason": "Embedding dtype drift in eval is unrelated to ZeRO-3 gradient inflation." + "reason": "Evaluation dtype change and ZeRO-3 gradient growth are different issues." }, { - "left": "issue:44488", - "right": "issue:44843", + "left": "issue:43540", + "right": "issue:44977", "accept": false, - "reason": "Model-load failure for cjvt/sleng-bert is unrelated to offline tokenizer hub access in _patch_mistral_regex." + "reason": "Different Qwen variants and different symptoms: video input processing vs flash-attention generation behavior." }, { - "left": "issue:43540", - "right": "issue:44977", + "left": "issue:44488", + "right": "issue:44843", "accept": false, - "reason": "Video-input processing error in Qwen3OmniMoe is not the same as flash-attention generation issues in Qwen3.5." + "reason": "Model load failure and offline-network behavior in tokenizer patching are not the same bug." }, { "left": "issue:43720", "right": "issue:44361", "accept": false, - "reason": "Both are loader-related, but packed-weight unpacking and MLukeTokenizer task AttributeError are distinct bugs." + "reason": "Packed-weight loading issue vs tokenizer AttributeError on tasks; no shared code path." }, { "left": "issue:39692", "right": "issue:43493", "accept": false, - "reason": "SigLIP2 documentation issues are not the same as SigLIP2 implementation-vs-JAX discrepancies." + "reason": "Docs example errors are distinct from a backend implementation discrepancy." }, { "left": "issue:44964", "right": "issue:45405", "accept": false, - "reason": "Phi-4 multimodal load failure and PEFT minimum-version bump are unrelated." + "reason": "Model-loading regression is unrelated to the PEFT version bump issue." }, { "left": "issue:43493", "right": "issue:43844", "accept": false, - "reason": "SigLIP2 model mismatch and ZeRO-3 gradient instability are different problem classes." 
+ "reason": "SigLIP2 accuracy/discrepancy bug and ZeRO-3 gradient amplification are unrelated." }, { "left": "issue:33666", "right": "issue:36331", "accept": false, - "reason": "Multi-GPU training setup for Qwen2-VL is unrelated to the CustomTrainer.compute_loss signature break." + "reason": "Multi-GPU training and CustomTrainer signature breakage are different problems." }, { "left": "issue:43756", "right": "issue:44060", "accept": false, - "reason": "Smollm3 RoPE-layer count bug is unrelated to Qwen3-Next tied-weights warnings." + "reason": "RoPE-layer drop and tied-weights warning are distinct model-specific issues." }, { "left": "issue:44589", "right": "issue:45520", "accept": false, - "reason": "Float8 storage lookup failure and flash_attn KeyError on Python 3.13 are different import/runtime issues." + "reason": "Different runtime failures: missing Float8 storage type vs flash_attn import key error." }, { "left": "issue:44625", @@ -6052,37 +6082,37 @@ "left": "issue:44521", "right": "issue:45290", "accept": false, - "reason": "All-zero assistant masks for multimodal inputs is not the same as tool-call assistant-message crash." + "reason": "Both hit apply_chat_template, but one is assistant-mask generation and the other is tool-call handling; different bugs." }, { "left": "issue:43646", "right": "issue:44466", "accept": false, - "reason": "Generic custom model initialization regression is broader, but lm_head.weight serialization/device-dependent tied-weights behavior is a distinct bug." + "reason": "Custom model initialization regressions and lm_head serialization inconsistency are different v5 issues." }, { "left": "issue:44423", "right": "issue:44734", "accept": false, - "reason": "Continuous-batching multimodal crash and /v1/responses KV-cache indexing bug are different serving-path failures." + "reason": "Both are serving crashes, but one is multimodal tensor handling and the other is KV-cache continuation indexing." 
}, { "left": "issue:43824", "right": "issue:44589", "accept": false, - "reason": "Qwen2_5_VL import failure and Float8 storage lookup failure are unrelated." + "reason": "ImportError for a specific model class is unrelated to a Float8 storage lookup failure." }, { "left": "issue:43688", "right": "issue:43746", "accept": false, - "reason": "Aux-loss normalization in MoE models is unrelated to GraniteSpeech PEFT local-checkpoint loading." + "reason": "Aux-loss normalization and PEFT adapter checkpoint loading are unrelated." }, { "left": "issue:39692", "right": "issue:43295", "accept": false, - "reason": "SigLIP2 doc-example errors are unrelated to the v4.57.5 processor.tokenizer regression." + "reason": "Docs example mistakes do not match a regression in custom model code passing images to tokenizer." } ] }, @@ -6162,8 +6192,8 @@ "issue:43504|issue:43606", "issue:44464|issue:45584", "issue:43504|issue:43653", - "issue:43575|issue:45405", "issue:43927|issue:44361", + "issue:43575|issue:45405", "issue:43278|issue:43701", "issue:44792|issue:45405", "issue:43606|issue:44112", @@ -6184,203 +6214,206 @@ "issue:43295|issue:44610", "issue:45200|issue:45325" ], - "prepared_review_unit_hash": "af3b007793388969c289cd0c34340e224109487c74a3dc4865f23cb3ae9e06cf", + "prepared_review_unit_hash": "845590747411e7465b7b63f31889a8c9bd58757bd031ebabc45832c9547d6fcd", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 27, - "serialized_chars": 35862, - "estimated_input_tokens": 8966, - "estimated_eval_tokens": 18188 + "serialized_chars": 35860, + "estimated_input_tokens": 8965, + "estimated_eval_tokens": 18186 }, "overflow_policy": "truncate_then_skip", "trimmed": false, 
"aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "No soft pair looks like a true duplicate: the items mostly share only broad keywords such as model loading, tokenizer, generation, or dtype issues, but they describe different failure modes and code paths.", - "confidence": 0.62, - "canonical_issue_reason": "Issue 43957 is the broadest, most representative report in this set: it describes a general model-loading regression affecting multiple models under transformers 5.x.", + "summary": "This cluster is mostly a grab bag of unrelated Transformers bugs across loading, tokenization, generation, dtype/device handling, and CI failures. I found one likely duplicate family around missing `pad_token_id` in config/load paths; the rest do not look mergeable as the same underlying bug.", + "confidence": 0.41, + "canonical_issue_reason": "Issue 43957 is the broadest and most representative loading-regression report in the set, centered on model instantiation with `torch.device('meta')` and a class of failures that affects multiple models rather than a single model-specific edge case.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 43957 is the best standalone representative because it captures a wider cross-model regression than the more model-specific reports.", + "best_issue_reason": "Issue 43957 is the best overall issue candidate because it describes a general framework-level regression with wider applicability than the more model-specific reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:33357", "right": "issue:33666", "accept": false, - "reason": "Both mention model usage on specialized hardware/training, but one is a MacOS bus error for CLIP and the other is a Qwen2-VL multi-GPU training request; different bugs and workflows." + "reason": "Both are model-related failures, but one is a MacOS bus error for CLIP and the other is a Qwen2-VL multi-GPU training issue; different symptoms and code paths." 
}, { "left": "issue:43572", "right": "issue:44291", "accept": false, - "reason": "One is about missing pad_token_idx after a 5.0 update; the other is an init_empty_weights/_is_hf_initialized argument error. Related release context, but not the same bug." + "reason": "Both involve load/init regressions, but one is about `pad_token_idx` in `StableLmConfig` and the other about an unexpected `_is_hf_initialized` argument in `init_empty_weights`; different underlying bugs." }, { "left": "issue:43994", "right": "issue:44493", "accept": false, - "reason": "Both involve wrong outputs, but one is SigLIP2 nonsense results and the other is unexpected position-id keys across many models; different root causes." + "reason": "One is a SigLIP2 inference quality problem, the other is a position-id key warning across many models; too different to be the same bug." }, { "left": "issue:43504", "right": "issue:43606", "accept": false, - "reason": "Beit pretrained loading with a legacy field is unrelated to a Bark CPU offload device mismatch." + "reason": "A legacy-field pretrained-model load failure is not the same as a CPU offload device mismatch in `bark-small`." }, { "left": "issue:44464", "right": "issue:45584", "accept": false, - "reason": "Chunked generation with compiled forward is a generation-path consistency issue; Whisper empty transcription after align_special_tokens is a tokenizer/special-token edge case." + "reason": "Chunked generation with compiled forward and empty-transcription Whisper generation after token alignment are unrelated generation bugs." }, { "left": "issue:43504", "right": "issue:43653", "accept": false, - "reason": "A legacy-field load failure in BEiT is unrelated to BigBirdTokenizer mask-token registration and decode behavior." + "reason": "Legacy-field loading for BEiT semantic segmentation and BigBirdTokenizer special-token registration are different failure modes." 
}, { - "left": "issue:43575", - "right": "issue:45405", + "left": "issue:43927", + "right": "issue:44361", "accept": false, - "reason": "Qwen2-57B-A14B-Instruct TP OOM and MIN_PEFT_VERSION being bumped to an unreleased version are unrelated." + "reason": "DiaConfig save/load token-id corruption causing generation errors is not the same as MLukeTokenizer attribute failures on tasks." }, { - "left": "issue:43927", - "right": "issue:44361", + "left": "issue:43575", + "right": "issue:45405", "accept": false, - "reason": "DiaConfig losing custom token IDs after save/load is a config persistence bug; MLukeTokenizer failing on tasks is a tokenizer/task-specific AttributeError." + "reason": "TP OOM on a large Qwen2 model and an unreleased PEFT version bump are unrelated." }, { "left": "issue:43278", "right": "issue:43701", "accept": false, - "reason": "BF16-to-FP32 embedding dtype drift and resume_from_checkpoint key mismatch are different training-state issues." + "reason": "BF16-to-FP32 embedding dtype drift in evaluate is unrelated to resume checkpoint key mismatches." }, { "left": "issue:44792", "right": "issue:45405", "accept": false, - "reason": "A janus image-generation test failure is unrelated to a dependency version bump issue." + "reason": "A Janus image-generation test failure does not match a dependency-version issue." }, { "left": "issue:43606", "right": "issue:44112", "accept": false, - "reason": "Both are CI/device-related, but Bark CPU offload mismatch and GraniteSpeech stale device override are distinct bugs." + "reason": "Both mention device handling, but one is a real CPU offload mismatch and the other is a stale CI test in GraniteSpeech; not the same concrete bug." }, { "left": "issue:29942", "right": "issue:30333", "accept": false, - "reason": "Failing Flash Attention 2 tests and MLFlow not reporting failed jobs are unrelated." + "reason": "Flash Attention 2 test failures and MLFlow reporting on failed training jobs are unrelated." 
}, { "left": "issue:15354", "right": "issue:38175", "accept": false, - "reason": "GeneratorExp/JIT scripting incompatibility is unrelated to SigLIP2 zero-probability outputs." + "reason": "TorchScript export support for generator expressions and SigLIP2 zero-probability outputs are unrelated model issues." }, { "left": "issue:44554", "right": "issue:45446", "accept": false, - "reason": "An MPS attention correctness bug with mismatched head dims is unrelated to a PyTorch version check in flex_attention." + "reason": "An MPS attention correctness issue is not the same as a PyTorch version-check bug in flex_attention imports." }, { "left": "issue:43540", "right": "issue:45200", "accept": false, - "reason": "Qwen3OmniMoe video-input validation and Gemma 4 mm_token_type_ids defaults are separate multimodal input bugs." + "reason": "Both are multimodal-input problems, but one is video processing in Qwen3OmniMoe and the other is Gemma 4 text-only fine-tuning token-type defaults; different bugs." }, { "left": "issue:43232", "right": "issue:44849", "accept": false, - "reason": "A generation-state update bug after sync_gpus is not the same as Qwen3.5 output_hidden_states behavior." + "reason": "Generation kwargs syncing after `sync_gpus` and Qwen3.5 `output_hidden_states` behavior are separate generation-path issues." }, { "left": "issue:45084", "right": "issue:45520", "accept": false, - "reason": "Template-node compilation failure and flash_attn import KeyError on Python 3.13 are unrelated failures." + "reason": "Template-node compilation errors and Python 3.13 `flash_attn` import mapping errors are not the same bug." }, { "left": "issue:43844", "right": "issue:44964", "accept": false, - "reason": "ZeRO-3 gradient growth with DeepSpeed is unrelated to Phi-4 multimodal model loading failures." + "reason": "ZeRO-3 gradient growth during training and inability to load Phi-4 multimodal are unrelated." 
}, { "left": "issue:39401", "right": "issue:44568", "accept": false, - "reason": "Qwen3 tokenizer offset mapping is a tokenizer alignment bug; mdeberta add_special_tokens not adding BOS/EOS is a different special-token handling issue." + "reason": "Wrong offset mapping in Qwen3 tokenizer and missing BOS/EOS insertion in mDeBERTa are different tokenizer behaviors." }, { "left": "issue:43582", "right": "issue:44977", "accept": false, - "reason": "Apple Silicon caching_allocator_warmup TypeError and Qwen3.5 flash-attention generation are different runtime bugs." + "reason": "Apple Silicon allocator warmup TypeError and Qwen3.5 flash-attention generation problems are unrelated." }, { "left": "issue:45072", "right": "issue:45198", "accept": false, - "reason": "Both are inference issues, but dtype mismatches in bfloat16 models and Wav2Vec2 save_pretrained/tokenization failures are not the same code path." + "reason": "bfloat16 inference dtype mismatches in SwitchTransformers/TimmWrapperModel are not the same as Wav2Vec2 save/tokenization failures." }, { "left": "issue:44315", "right": "issue:44849", "accept": false, - "reason": "Liger Kernel not applied with model_init is about trainer/model initialization hooks, not Qwen3.5 hidden-state outputs." + "reason": "Liger Kernel application during `model_init` and Qwen3.5 hidden-state output behavior are different concerns." }, { "left": "issue:37428", "right": "issue:43957", "accept": false, - "reason": "Flash-support import failure and meta-device model-loading regressions are both loading-related but clearly separate errors." + "reason": "A missing flash-attention utility import and meta-device model loading failures are both loading-related, but they are distinct code paths and failure causes." }, { "left": "issue:44554", "right": "issue:45356", "accept": false, - "reason": "An MPS attention correctness issue is unrelated to Kimi-K2.5 tokenizer codec/reg-ex warnings." 
+ "reason": "MPS attention correctness and Kimi-K2.5 tokenizer codec regression are unrelated." }, { "left": "issue:43334", "right": "issue:43525", - "accept": false, - "reason": "Qwen3-VL missing pad_token_id and Llama4Config missing pad_token_id are similar symptomatically, but they affect different model families and likely different config paths." + "accept": true, + "reason": "Both report `AttributeError` for missing `pad_token_id` on model configs during loading, with the same underlying config-attribute regression pattern." }, { "left": "issue:43295", "right": "issue:44610", "accept": false, - "reason": "Processor.tokenizer regression with images is unrelated to OmDet-Turbo processor image-size mismatch." + "reason": "Processor/tokenizer API regression and OmDet-Turbo processor output-size mismatch are different multimodal issues." }, { "left": "issue:45200", "right": "issue:45325", "accept": false, - "reason": "Gemma 4 token-type-id defaults and Qwen2.5-VL rope-index scaling are different multimodal bookkeeping bugs." + "reason": "Gemma 4 token-type-id defaults and Qwen2.5-VL rope-index scaling are different multimodal modeling bugs." } ] }, - "evaluator_result": null, - "evaluator_used": false, + "evaluator_result": { + "accept": true, + "feedback": "The cluster summary is grounded in the packet, and the only accepted soft-edge pair (issue:43334 / issue:43525) is a conservative duplicate call on the same missing `pad_token_id` config-attribute regression. The other rejections stay within the evidence and do not overstate sameness across clearly different bug classes." 
+ }, + "evaluator_used": true, "retried": false, - "accepted_nontrivial_soft_edge": false, + "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, @@ -6462,30 +6495,30 @@ "issue:43295|issue:43450", "issue:43295|issue:43873", "issue:44442|issue:45198", - "issue:44360|issue:44561", "issue:38175|issue:45412", + "issue:44360|issue:44561", "issue:43295|issue:43994", "issue:43122|issue:44849", - "issue:44206|issue:45584", "issue:42915|issue:43824", + "issue:44206|issue:45584", "issue:43278|issue:43856", "issue:38175|issue:43493", "issue:43827|issue:43901", "issue:44568|issue:44991", - "issue:43450|issue:43493", "issue:43653|issue:44112", + "issue:43450|issue:43493", "issue:45290|issue:45381", "issue:45127|issue:45245", "issue:43819|issue:45083" ], - "prepared_review_unit_hash": "b47851ceb17d1d6c19f013525e704552c6d07a8d46652b1373a1677b13051bab", + "prepared_review_unit_hash": "ab5fea0c763f497cc2acd3e576eaf1bfb984d2ebd35c6de0b72f5ebcee5097cf", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, @@ -6500,192 +6533,192 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This set is a loose collection of unrelated Transformers issues rather than one duplicate cluster. The soft pairs mostly share broad subsystems or model families, but not the same concrete bug/change, so all soft edges should be rejected.", - "confidence": 0.97, - "canonical_issue_reason": "issue:43824 is the best canonical anchor here: it is a concrete, reproducible import regression with a clear failure mode and strong user-facing impact, and it has more discussion/reference signal than most others.", + "summary": "Most pairs are loose semantic matches, not true duplicates. 
The only near-family is around multimodal/chat-template regressions, but the concrete failure modes still differ, so I would not merge any of the soft edges.", + "confidence": 0.26, + "canonical_issue_reason": "Issue 44521 is the broadest and most central open report in the multimodal `apply_chat_template` area, with multiple inbound references; it is the best single representative if one issue must be chosen.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43824 is the strongest representative issue in this mixed set because it is self-contained, clearly actionable, and the most clearly described regression.", + "best_issue_reason": "Issue 44521 has the clearest user-facing regression, the widest scope within the closest issue family, and active references, so it is the strongest global issue to keep as the representative node.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44466", "right": "issue:45127", "accept": false, - "reason": "Different bugs: tied-weight serialization inconsistency vs LoRA merge collapse after vocab extension." + "reason": "Different bugs: serialization/tied-weights handling vs LoRA merge collapse after vocab extension." }, { "left": "issue:44898", "right": "issue:45412", "accept": false, - "reason": "Different model paths and symptoms: Perceiver resolution failure vs RT-DETR memory leak." + "reason": "Unrelated subsystems: Perceiver image resolution handling vs RT-DETR model memory release." }, { "left": "issue:43493", "right": "issue:43701", "accept": false, - "reason": "Same repo area but unrelated failures: SigLIP2 output mismatch vs resume checkpoint key mismatch." + "reason": "SigLIP2 implementation mismatch and a checkpoint key mismatch are distinct failures." }, { "left": "issue:43873", "right": "issue:44464", "accept": false, - "reason": "Offloading/quantization behavior is unrelated to compiled-forward chunked generation inconsistency." 
+ "reason": "Quantization/offloading behavior and compiled chunked generation inconsistency are different code paths." }, { "left": "issue:44792", "right": "issue:45563", "accept": false, - "reason": "Janus test failure and stale generate warning are different issues and likely different code paths." + "reason": "Janus image-generation test failure is not the same as a paged-generate warning regression." }, { "left": "issue:44514", "right": "issue:44521", "accept": false, - "reason": "Both touch apply_chat_template, but one crashes on batched padding=False while the other returns all-zero assistant masks." + "reason": "Both involve `apply_chat_template`, but one crashes on batched padding=False input while the other produces wrong assistant masks." }, { "left": "issue:37428", "right": "issue:43824", "accept": false, - "reason": "Both are ImportErrors, but for different missing symbols and different code paths." + "reason": "Both are import errors, but they expose different missing symbols and different model entry points." }, { "left": "issue:43388", "right": "issue:43526", "accept": false, - "reason": "Trainer metric gathering bug and BeitImageProcessorFast label reduction bug are distinct." + "reason": "Different label-handling bugs in training metrics vs image processor label reduction." }, { "left": "issue:43572", "right": "issue:44987", "accept": false, - "reason": "Missing config field after v5 update is not the same as a model-loading failure for a remote model." + "reason": "A missing config field after v5 update is not the same as a model load failure for a specific repo." }, { "left": "issue:43844", "right": "issue:44936", "accept": false, - "reason": "ZeRO-3 gradient growth and trainer.evaluate() failing after train are different training behaviors." + "reason": "Training instability with ZeRO-3 and post-train evaluate failure are unrelated symptoms." 
}, { "left": "issue:43572", "right": "issue:44589", "accept": false, - "reason": "Config API regression vs Float8 storage lookup error; no shared concrete bug." + "reason": "Missing `pad_token_idx` config and Float8 storage lookup errors are unrelated regressions." }, { "left": "issue:43824", "right": "issue:43975", "accept": false, - "reason": "Both involve v5 regressions, but one is an import failure and the other is incorrect detokenization." + "reason": "Different breakages: missing model export vs incorrect detokenization behavior." }, { "left": "issue:43295", "right": "issue:43450", "accept": false, - "reason": "Custom processor/tokenizer crash and batched video shape bug are different multimodal regressions." + "reason": "Processor/tokenizer regression and batched video shape bug are separate processor issues." }, { "left": "issue:43295", "right": "issue:43873", "accept": false, - "reason": "Processor API regression is unrelated to quantization/offloading behavior." + "reason": "Custom processor API regression is not the same as quantization offloading behavior." }, { "left": "issue:44442", "right": "issue:45198", "accept": false, - "reason": "Tokenizer load failure and save/tokenization failure are separate tokenizer issues." + "reason": "Tokenizer load failure and save_pretrained/tokenization failure are different concrete problems." }, { - "left": "issue:44360", - "right": "issue:44561", + "left": "issue:38175", + "right": "issue:45412", "accept": false, - "reason": "DSA indexing discussion is unrelated to removal of is_torch_fx_available breaking trust_remote_code." + "reason": "SigLIP2 zero probabilities and RT-DETR memory retention are unrelated model bugs." }, { - "left": "issue:38175", - "right": "issue:45412", + "left": "issue:44360", + "right": "issue:44561", "accept": false, - "reason": "SigLIP2 inference anomaly and RT-DETR memory reclamation bug are unrelated." 
+ "reason": "A discussion about DSA indexing math is not the same as a removed utility breaking trust_remote_code models." }, { "left": "issue:43295", "right": "issue:43994", "accept": false, - "reason": "Different models and failures: custom multimodal processor regression vs SigLIP2 nonsensical outputs." + "reason": "Processor/tokenizer regression vs SigLIP2 nonsensical outputs are different bugs." }, { "left": "issue:43122", "right": "issue:44849", "accept": false, - "reason": "Tokenizer version drift and Qwen3.5 hidden-state bug are unrelated." + "reason": "Tokenization drift across versions and Qwen3.5 hidden-state bug are unrelated." }, { - "left": "issue:44206", - "right": "issue:45584", + "left": "issue:42915", + "right": "issue:43824", "accept": false, - "reason": "Unsupported feature-extractor arg crash and Whisper empty-transcription failure are different bugs." + "reason": "Qwen3 MoE FP8 config failure is unrelated to a missing Qwen2.5-VL import export." }, { - "left": "issue:42915", - "right": "issue:43824", + "left": "issue:44206", + "right": "issue:45584", "accept": false, - "reason": "Qwen3Moe FP8 config failure is unrelated to a missing exported VL class." + "reason": "Unsupported feature-arg crash and Whisper empty-transcription generation failure are different paths." }, { "left": "issue:43278", "right": "issue:43856", "accept": false, - "reason": "Eval dtype change and MoE memory usage are different training-time issues." + "reason": "Dtype drift between train/eval and MoE memory usage are not the same bug." }, { "left": "issue:38175", "right": "issue:43493", "accept": false, - "reason": "Same model family, but zero probabilities and JAX parity discrepancy are not the same concrete failure." + "reason": "Both are SigLIP2-related but one is incorrect probabilities and the other is HF-vs-JAX discrepancy." 
}, { "left": "issue:43827", "right": "issue:43901", "accept": false, - "reason": "Both are docs-only regressions, but they refer to different pages and different outdated API mentions." + "reason": "Same general docs cleanup theme, but they target different pages and deprecated behaviors." }, { "left": "issue:44568", "right": "issue:44991", "accept": false, - "reason": "Special-token handling bug and tokenizer loading failure are distinct." + "reason": "Tokenizer special-token behavior and a specific tokenizer load failure are different issues." }, { - "left": "issue:43450", - "right": "issue:43493", + "left": "issue:43653", + "right": "issue:44112", "accept": false, - "reason": "Batched video shape bug is unrelated to SigLIP2 implementation discrepancy." + "reason": "BigBird tokenizer special-token registration and a GraniteSpeech CI test are unrelated." }, { - "left": "issue:43653", - "right": "issue:44112", + "left": "issue:43450", + "right": "issue:43493", "accept": false, - "reason": "BigBirdTokenizer special-token registration and GraniteSpeech CI device override are unrelated." + "reason": "Video batch shape handling and SigLIP2 output discrepancy are different subsystems and failures." }, { "left": "issue:45290", "right": "issue:45381", "accept": false, - "reason": "Chat-template crash with tool-call assistant messages is unrelated to Qwen2.5-VL video vision_position_ids." + "reason": "Chat-template tool-call crash and Qwen2.5-VL vision_position_ids bug are different multimodal regressions." }, { "left": "issue:45127", "right": "issue:45245", "accept": false, - "reason": "LoRA/tied-weights collapse is unrelated to the 2^24 category limit runtime error." + "reason": "LoRA/tied-weights collapse is unrelated to the category-count runtime error." }, { "left": "issue:43819", "right": "issue:45083", "accept": false, - "reason": "DAC latent/forward mismatch and qwen3_omni_moe feature-length helper behavior are different code paths." 
+ "reason": "DAC latent reconstruction mismatch and qwen3_omni_moe feature-length helper behavior are not the same bug." } ] }, @@ -6761,16 +6794,16 @@ "soft_pairs": [ "issue:44462|issue:44561", "issue:44561|issue:44779", - "issue:45200|issue:45588", "issue:43450|issue:43575", + "issue:45200|issue:45588", "issue:44610|issue:44857", - "issue:44857|issue:45593", "issue:37428|issue:43425", + "issue:44857|issue:45593", "issue:43262|issue:43450", "issue:44448|issue:44743", "issue:43819|issue:44112", - "issue:45310|issue:45478", "issue:43606|issue:45083", + "issue:45310|issue:45478", "issue:43278|issue:43825", "issue:29942|issue:32090", "issue:37428|issue:44351", @@ -6795,247 +6828,253 @@ "issue:43335|issue:45072", "issue:43723|issue:44554" ], - "prepared_review_unit_hash": "135b3d7fbdc3571120a3e7aefe95004d0a76a1e1ff2c72366e962c43fe381b34", + "prepared_review_unit_hash": "8e277924137ff59273149621282805002ad76bffcb7fe962568ea6d05abcbc21", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 35, - "serialized_chars": 38239, - "estimated_input_tokens": 9560, - "estimated_eval_tokens": 19376 + "serialized_chars": 38242, + "estimated_input_tokens": 9561, + "estimated_eval_tokens": 19378 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is mostly heterogeneous v5/model-specific regressions that should not be merged. I found one clear duplicate pair: the two identical Qwen3.5 MoE `from_pretrained` error reports. 
No PRs are present in the cluster.", - "confidence": 0.78, - "canonical_issue_reason": "issue:45310 is the clearest canonical issue because it is a concrete, well-scoped bug report and has an exact duplicate in the cluster (`issue:45478`) with the same title/failure mode.", + "summary": "The cluster is mostly a grab-bag of unrelated Transformers bug reports. The only likely duplicate pair is the Qwen3.5 MoE from_pretrained report (45310/45478), which has the same title and clearly the same failure class; the earlier report is the better canonical item.", + "confidence": 0.92, + "canonical_issue_reason": "issue:45310 is the best canonical issue: it is the earlier Qwen3.5 MoE from_pretrained report, has inbound reference activity, and issue:45478 appears to be a near-identical later duplicate based on the matching title and failure mode.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45310 is the best representative issue in this cluster: it is specific, reproducible, and duplicated verbatim by another issue, whereas most other items are unrelated regressions or generic v5 breakages.", + "best_issue_reason": "issue:45310 is the strongest representative because it is the earlier report of the likely duplicate pair and captures the core Qwen3.5 MoE from_pretrained regression.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44462", "right": "issue:44561", "accept": false, - "reason": "Both are v5-era failures, but one is tokenizer file selection and the other is a removed utility breaking trust_remote_code model loading; different root causes." + "reason": "Tokenizer.json loading behavior and a removed helper breaking trust_remote_code are different bugs." }, { "left": "issue:44561", "right": "issue:44779", "accept": false, - "reason": "Remote-code loading breakage and DeepSeek tokenizer output regression are separate tokenizer/model-loading bugs." 
+ "reason": "Both are v5 regressions, but one is an API removal and the other is a tokenizer correctness issue." }, { - "left": "issue:45200", - "right": "issue:45588", + "left": "issue:43450", + "right": "issue:43575", "accept": false, - "reason": "Gemma4 text-only token-id defaults and flash-attention sink-less crash are unrelated code paths." + "reason": "Batched video shape bug and Qwen2 TP load OOM are unrelated failures." }, { - "left": "issue:43450", - "right": "issue:43575", + "left": "issue:45200", + "right": "issue:45588", "accept": false, - "reason": "Video processor batching shape bug and Qwen TP OOM are different subsystems and failure modes." + "reason": "Gemma4 mm_token_type_ids defaults and flash_attention s_aux handling are different code paths." }, { "left": "issue:44610", "right": "issue:44857", "accept": false, - "reason": "OmDet preprocessing size mismatch and LwDetr AMP/CUDA loss crash are different vision bugs." + "reason": "OmDet-Turbo input-size mismatch is unrelated to the LwDetrImageLoss AMP/CUDA crash." }, { - "left": "issue:44857", - "right": "issue:45593", + "left": "issue:37428", + "right": "issue:43425", "accept": false, - "reason": "Float16 loss crash and missing auxiliary losses when denoising is off are unrelated training issues." + "reason": "Flash-attention import error and general Torch 2.10 incompatibility are too broad and distinct." }, { - "left": "issue:37428", - "right": "issue:43425", + "left": "issue:44857", + "right": "issue:45593", "accept": false, - "reason": "A missing flash-attention import symbol is not the same concrete bug as broad Torch 2.10 incompatibility." + "reason": "Different symptoms in different models: AMP/CUDA loss crash versus missing auxiliary losses." }, { "left": "issue:43262", "right": "issue:43450", "accept": false, - "reason": "Audio chat-template sampling-rate defaults and video batch-shape handling are different processor bugs." 
+ "reason": "Audio chat-template sampling-rate default and video batching shape bug are separate issues." }, { "left": "issue:44448", "right": "issue:44743", "accept": false, - "reason": "Pegasus v4/v5 output drift and Qwen3.5 recurrent-state reset are unrelated model-specific regressions." + "reason": "Pegasus output drift and Qwen3.5 recurrent-state reset are unrelated model behaviors." }, { "left": "issue:43819", "right": "issue:44112", "accept": false, - "reason": "DAC forward/from_latents mismatch and GraniteSpeech CI device-override test failure are not the same bug." - }, - { - "left": "issue:45310", - "right": "issue:45478", - "accept": true, - "reason": "Same title, same model family, same `from_pretrained` failure; this is a clear duplicate report." + "reason": "DAC latent mismatch and a GraniteSpeech CI device-override failure are not the same bug." }, { "left": "issue:43606", "right": "issue:45083", "accept": false, - "reason": "CPU offload device mismatch in Bark and `_get_feat_extract_output_lengths` behavior in qwen3_omni_moe are unrelated." + "reason": "CPU offload device mismatch and qwen3_omni_moe helper behavior are different problems." + }, + { + "left": "issue:45310", + "right": "issue:45478", + "accept": true, + "reason": "Near-identical Qwen3.5 MoE from_pretrained reports with the same title and failure mode; 45310 is earlier and the better duplicate target." }, { "left": "issue:43278", "right": "issue:43825", "accept": false, - "reason": "BF16-to-FP32 eval dtype drift and `pipeline()` translation-task messaging are different issues." + "reason": "Embedding dtype drift in evaluate and an inaccurate pipeline error message are different regressions." }, { "left": "issue:29942", "right": "issue:32090", "accept": false, - "reason": "Flash Attention 2 test failures and `_gpu_broadcast_one` NoneType TypeError are different failures." + "reason": "Flash Attention 2 test failures are not the same as a Trainer NoneType broadcast error." 
}, { "left": "issue:37428", "right": "issue:44351", "accept": false, - "reason": "These are both import errors, but for different missing symbols and different breakages." + "reason": "Both are import-related, but they concern different missing symbols and different regressions." }, { "left": "issue:39692", "right": "issue:43526", "accept": false, - "reason": "Documentation/example errors for SigLIP2 are unrelated to BeitImageProcessorFast `reduce_labels` behavior." + "reason": "SigLIP2 docs/example errors and BEiT reduce_labels behavior are unrelated." }, { "left": "issue:43611", "right": "issue:43931", "accept": false, - "reason": "`base_model_prefix` loading regression and Qwen3-VL weight-shape mismatch are distinct model-loading problems." + "reason": "Broad loading regression and a Qwen3-VL weight-shape mismatch are not the same issue." }, { "left": "issue:43335", "right": "issue:43550", "accept": false, - "reason": "SwitchTransformers sparse-layer config bug and Bamba `torch.compile`/SDPA failure are different code paths." + "reason": "SwitchTransformers sparse-layer config bug and Bamba torch.compile SDPA failure are different bugs." }, { "left": "issue:43819", "right": "issue:44493", "accept": false, - "reason": "DAC latent conversion mismatch and widespread unexpected position-id keys are unrelated." + "reason": "DAC latent reconstruction and unexpected position-id keys are unrelated." }, { "left": "issue:45200", "right": "issue:45381", "accept": false, - "reason": "Both concern multimodal ids, but Gemma4 `mm_token_type_ids` defaults and Qwen2.5-VL video position ids are separate bugs." + "reason": "Missing mm_token_type_ids defaults and wrong vision_position_ids for video input are separate multimodal bugs." }, { "left": "issue:43450", "right": "issue:43526", "accept": false, - "reason": "Video batching shape handling and Beit fast processor `reduce_labels` are different processor defects." 
+ "reason": "Video batching shape bug and BEiT label reduction bug affect different processor logic." }, { "left": "issue:39692", "right": "issue:44493", "accept": false, - "reason": "A SigLIP2 docs/example report is not the same as a generic position-id key regression across many models." + "reason": "SigLIP2 docs/example issues and widespread position-id key warnings are different regression families." }, { "left": "issue:44855", "right": "issue:45083", "accept": false, - "reason": "Python 3.13 `@torch.jit.script` parsing/indentation failure and qwen3_omni_moe helper behavior are unrelated." + "reason": "Python 3.13 import failure and qwen3_omni_moe feature-length behavior are unrelated." }, { "left": "issue:44448", "right": "issue:44779", "accept": false, - "reason": "Both mention v4/v5 output changes, but Pegasus generation drift and DeepSeek tokenizer regression are different underlying bugs." + "reason": "Pegasus output differences and Deepseek tokenizer regression are different model families and symptoms." }, { "left": "issue:29127", "right": "issue:32090", "accept": false, - "reason": "LayoutLMv3 error-message clarity and Trainer `_gpu_broadcast_one` NoneType crash are unrelated." + "reason": "LayoutLMv3 error-message clarity and Trainer GPU broadcast NoneType failure are unrelated." }, { "left": "issue:43335", "right": "issue:43653", "accept": false, - "reason": "SwitchTransformers config and BigBirdTokenizer special-token registration are different model/tokenizer bugs." + "reason": "SwitchTransformers sparse-layer bug and BigBirdTokenizer special-token registration bug are separate issues." }, { "left": "issue:38175", "right": "issue:44493", "accept": false, - "reason": "SigLIP2 zero-probabilities and position-id key warnings are different symptoms with different likely causes." + "reason": "SigLIP2 zero-probabilities and unexpected position-id keys are not the same bug." 
}, { "left": "issue:44977", "right": "issue:45229", "accept": false, - "reason": "Qwen3.5 flash-attention generation bug and Gemma4 multi-GPU OOM are unrelated." + "reason": "Qwen3.5 flash-attention generation bug and Gemma4 CUDA OOM are distinct failures." }, { "left": "issue:44292", "right": "issue:45520", "accept": false, - "reason": "Qwen-3-8B-NVFP4 runtime failure and Python 3.13 flash_attn import key error are different problems." + "reason": "Qwen-3-8B-NVFP4 runtime error and flash_attn import KeyError on Python 3.13 are unrelated." }, { "left": "issue:43335", "right": "issue:45083", "accept": false, - "reason": "SwitchTransformers sparse-layer config and qwen3_omni_moe feature-length helper behavior are not the same bug." + "reason": "SwitchTransformers config handling and qwen3_omni_moe helper behavior are different code paths." }, { "left": "issue:43335", "right": "issue:43504", "accept": false, - "reason": "Sparse-layer creation in SwitchTransformers and BEiT legacy-field loading failure are unrelated." + "reason": "SwitchTransformers sparse-layer bug and BEiT pretrained-loading legacy-field bug are unrelated." }, { "left": "issue:43550", "right": "issue:44112", "accept": false, - "reason": "Bamba SDPA/compile failure and GraniteSpeech stale CI test are not the same underlying issue." + "reason": "Bamba SDPA failure and GraniteSpeech CI override failure are different problems." }, { "left": "issue:44568", "right": "issue:45491", "accept": false, - "reason": "Tokenizer special-token insertion and Gemma3 NaN embeddings under sliding-window attention are different issues." + "reason": "Tokenizer BOS/EOS handling and Gemma3 mixed-length NaN embeddings are unrelated." + }, + { + "left": "issue:43335", + "right": "issue:45072", + "accept": false, + "reason": "SwitchTransformers config edge case and bfloat16 dtype mismatch in inference are not the same issue." 
}, { "left": "issue:43723", "right": "issue:44554", "accept": false, - "reason": "AutoTokenizer loading regression and MPS attention correctness issue are unrelated." + "reason": "AutoTokenizer loading in v5 and an MPS attention correctness bug are unrelated." } ] }, "evaluator_result": { - "accept": true, - "feedback": "The summary is grounded in the packet: the cluster is indeed issue-only, and the one duplicate pair (issue:45310 / issue:45478) is a clear match by title and failure mode. The soft-edge rejections are conservative and do not overclaim equivalence, so I don\u2019t see evidence of an overstated mergeability verdict." + "accept": false, + "feedback": "The cluster-level conclusion is mostly plausible, but the justification overstates the packet evidence. The 45310/45478 pair is supported as a likely duplicate because the titles are identical and 45310 is earlier with inbound references, but the response claims the two reports match \"exactly\" in problem description/body content, which is not visible in the packet. Please tone that down to a conservative \"near-identical / likely duplicate\" assessment. The other soft-edge rejections look appropriately conservative." 
}, "evaluator_used": true, - "retried": false, + "retried": true, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null @@ -7118,128 +7157,128 @@ "issue:43873|issue:44493", "issue:45372|issue:45406", "issue:43701|issue:45083", - "issue:43976|issue:45440", "issue:29942|issue:35141", + "issue:43976|issue:45440", "issue:45245|issue:45563", "issue:43550|issue:45083", "issue:41720|issue:45229", "issue:43278|issue:43329", "issue:32090|issue:33357", "issue:45372|issue:45459", - "issue:44451|issue:45520", "issue:44448|issue:44466", + "issue:44451|issue:45520", "issue:43065|issue:43994", "issue:43065|issue:45412", "issue:25251|issue:32090", "issue:43653|issue:44743", "issue:44857|issue:45588" ], - "prepared_review_unit_hash": "784d466bd7b168a4f8d2a829a66036c8814c72bd2546f9974e025738aeea3615", + "prepared_review_unit_hash": "e5c71020f8f662a85e731491b8c89e5900e3beb63b8605e0b0e60620bc6f9f50", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 31, - "serialized_chars": 36492, - "estimated_input_tokens": 9123, - "estimated_eval_tokens": 18502 + "serialized_chars": 36497, + "estimated_input_tokens": 9125, + "estimated_eval_tokens": 18506 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is highly heterogeneous: the soft pairs span unrelated bugs across loading, tokenization, multimodal processing, flash-attention, offloading, and CI. I rejected every soft edge as a duplicate false positive. 
With no PRs present, I selected the broadest active issue as the best issue representative.", - "confidence": 0.69, - "canonical_issue_reason": "Issue 43873 is the broadest and most active open report here, with multiple inbound references and discussion around offloading/quantization behavior, making it the least specific umbrella representative among these unrelated issues.", + "summary": "These items do not form a true duplicate cluster. The soft pairs mostly share only broad subsystem similarity (tokenizers, flash-attn, offloading, vision models, etc.) but describe different concrete bugs or regressions.", + "confidence": 0.94, + "canonical_issue_reason": "No single canonical issue stands out: the set spans many unrelated bugs, so a representative issue would be misleading.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 43873 is the best available issue candidate because it is open, actively discussed, and broadly scoped compared with the other more niche or closed reports.", + "best_issue_reason": "No global best issue candidate; none of the issues cleanly represents the others as the same underlying defect.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:40444", "right": "issue:43526", "accept": false, - "reason": "Different subsystems and failures: Qwen2.5-VL iterable dataset finetuning vs BEiT fast image processor label reduction." + "reason": "Both involve image processing, but one is a multimodal IterableDataset training failure and the other is a BEiT fast processor label-reduction bug." }, { "left": "issue:44448", "right": "issue:44977", "accept": false, - "reason": "Both mention output differences, but they concern different models and code paths: Pegasus v4/v5 mismatch vs Qwen3.5 flash-attention generation." + "reason": "Different models and symptoms: Pegasus v4/v5 output mismatch vs Qwen3.5 flash-attention generation failure." 
}, { "left": "issue:43335", "right": "issue:45593", "accept": false, - "reason": "Unrelated bugs in different models: SwitchTransformers sparse-layer config vs D-FINE auxiliary losses." + "reason": "Unrelated model bugs: SwitchTransformers sparse-layer config creation vs D-FINE auxiliary-loss handling." }, { "left": "issue:43720", "right": "issue:44112", "accept": false, - "reason": "Packed-weight loading in BitNet is unrelated to a stale GraniteSpeech CI device-override test." + "reason": "Both are CI failures, but the root causes differ: packed-weight loading vs a stale device-override test." }, { "left": "issue:43066", "right": "issue:44462", "accept": false, - "reason": "Both are tokenizer-related, but one is a decoder-type regression and the other is AutoTokenizer ignoring tokenizer.json; different underlying bugs." + "reason": "Both concern tokenizers, but one is decoder-type selection in v5 and the other is ignoring repository tokenizer.json." }, { "left": "issue:45372", "right": "issue:45588", "accept": false, - "reason": "Gemma 4 processor import failure and flash-attention s_aux=None crash are different failure modes in different code paths." + "reason": "Different Gemma/processor import issue versus a flash_attention.py None-handling crash." }, { "left": "issue:34689", "right": "issue:35141", "accept": false, - "reason": "Model loading failure for Llama 3.2 Vision is unrelated to post_init reinitializing embeddings after resize." + "reason": "Both are regressions, but one is model loading for Llama 3.2 Vision and the other is embedding reinitialization after resizing." }, { "left": "issue:44977", "right": "issue:45563", "accept": false, - "reason": "Qwen3.5 flash-attention generation bug is unrelated to a stale warning about num_return_sequences." + "reason": "Qwen3.5 flash-attention generation bug is unrelated to the stale num_return_sequences warning in paged generate()." 
}, { "left": "issue:44857", "right": "issue:45072", "accept": false, - "reason": "Different numeric/runtime issues in different models: LwDetrImageLoss AMP crash vs dtype mismatches in SwitchTransformers/TimmWrapperModel." + "reason": "Different dtype-related failures in different models/code paths: LwDetr AMP crash vs SwitchTransformers/TimmWrapper bfloat16 mismatch." }, { "left": "issue:43606", "right": "issue:44857", "accept": false, - "reason": "CPU offload device mismatch for bark-small is not the same bug as the LwDetrImageLoss float16 CUDA crash." + "reason": "One is a CPU-offload device mismatch in bark-small; the other is a float16 AMP crash in LwDetrImageLoss." }, { "left": "issue:43412", "right": "issue:45588", "accept": false, - "reason": "Gemma3n ExecuTorch export problems are unrelated to an s_aux=None flash-attention crash." + "reason": "ExecuTorch export issues for Gemma3n are unrelated to flash_attention.py crashing on s_aux=None." }, { "left": "issue:43873", "right": "issue:45229", "accept": false, - "reason": "Quantization/offloading behavior and Gemma4 multi-GPU OOM are different issues, not one concrete code-path bug." + "reason": "Both involve memory/device constraints, but one is quantization offloading and the other is multi-GPU CUDA OOM for Gemma4." }, { "left": "issue:37428", "right": "issue:44987", "accept": false, - "reason": "The missing internal flash-attention import and loading physical-intelligence/fast may be adjacent, but the reports do not describe the same concrete bug." + "reason": "Both mention loading/import paths, but the first is a missing flash-attention helper import and the second is a broader loading failure for a model." }, { "left": "issue:43873", @@ -7251,103 +7290,103 @@ "left": "issue:45372", "right": "issue:45406", "accept": false, - "reason": "Both involve Gemma 4, but one is an external import failure in processor loading and the other is a missing _tokenizer attribute in serve; different root causes." 
+ "reason": "Different Gemma4 failures: missing ReasoningEffort import vs transformers serve crashing on Gemma4Processor._tokenizer." }, { "left": "issue:43701", "right": "issue:45083", "accept": false, - "reason": "Checkpoint key mismatch is unrelated to the qwen3_omni_moe feature-extraction length helper bug." + "reason": "Resume-from-checkpoint key mismatch is unrelated to the qwen3_omni_moe feat-length helper bug." }, { - "left": "issue:43976", - "right": "issue:45440", + "left": "issue:29942", + "right": "issue:35141", "accept": false, - "reason": "Python version compatibility regression is unrelated to DeepseekV3MoE divergence from the remote implementation." + "reason": "Failing Flash Attention tests and embedding reinitialization after resizing are separate regressions." }, { - "left": "issue:29942", - "right": "issue:35141", + "left": "issue:43976", + "right": "issue:45440", "accept": false, - "reason": "Flash Attention 2 test failures are unrelated to embedding reinitialization after token resize." + "reason": "Python version compatibility problem vs DeepseekV3MoE implementation divergence; different defects." }, { "left": "issue:45245", "right": "issue:45563", "accept": false, - "reason": "Category-count runtime limit and a stale warning in paged generate() are not the same underlying problem." + "reason": "Category-count overflow and a stale generate warning are not the same bug." }, { "left": "issue:43550", "right": "issue:45083", "accept": false, - "reason": "Bamba SDPA torch.compile failure is unrelated to the qwen3_omni_moe feature-length helper behavior." + "reason": "torch.compile/SDPA failure in Bamba is unrelated to the qwen3_omni_moe helper-function issue." }, { "left": "issue:41720", "right": "issue:45229", "accept": false, - "reason": "Qwen3 auto device-mapping cudaErrorAssert and Gemma4 multi-GPU OOM are both large-model runtime issues, but not the same bug." 
+ "reason": "Qwen3 auto-device-map cuda assert and Gemma4 multi-GPU OOM are different model/runtime problems." }, { "left": "issue:43278", "right": "issue:43329", "accept": false, - "reason": "Embedding dtype drift during evaluation is unrelated to undefined variables in multimodal token counting." + "reason": "Embedding dtype drift in evaluate is unrelated to the multimodal token-count helper using undefined video-branch variables." }, { "left": "issue:32090", "right": "issue:33357", "accept": false, - "reason": "Trainer broadcast TypeError and MacOS bus error on CLIP are unrelated failures." + "reason": "Trainer broadcast NoneType error and MacOS bus error with CLIP are distinct failures." }, { "left": "issue:45372", "right": "issue:45459", "accept": false, - "reason": "Gemma 4 processor import failure and tokenizer error masking from protobuf absence are different issues." + "reason": "Gemma4 processor import failure is unrelated to tokenizer errors being hidden by except import_protobuf_decode_error()." }, { - "left": "issue:44451", - "right": "issue:45520", + "left": "issue:44448", + "right": "issue:44466", "accept": false, - "reason": "Loading a specific model and a Python 3.13 flash_attn KeyError are unrelated." + "reason": "Pegasus output regression and lm_head serialization inconsistency are separate v4/v5 issues." }, { - "left": "issue:44448", - "right": "issue:44466", + "left": "issue:44451", + "right": "issue:45520", "accept": false, - "reason": "Both involve version-dependent behavior, but Pegasus output differences and lm_head serialization inconsistency are different bugs." + "reason": "Loading a specific model and a Python 3.13 flash_attn import KeyError are unrelated." }, { "left": "issue:43065", "right": "issue:43994", "accept": false, - "reason": "Dummy Conv2d in Sam3PixelDecoder is unrelated to SigLIP2 nonsensical AutoModel/pipeline outputs." 
+ "reason": "Dummy Conv2d in Sam3PixelDecoder and SigLIP2 nonsensical AutoModel/pipeline output are different bugs." }, { "left": "issue:43065", "right": "issue:45412", "accept": false, - "reason": "A placeholder Conv2d in Sam3PixelDecoder is unrelated to RT-DETR memory not being released." + "reason": "Sam3PixelDecoder dummy layer issue is unrelated to RT-DETR memory not being released on deletion." }, { "left": "issue:25251", "right": "issue:32090", "accept": false, - "reason": "Pipeline top_k nesting behavior and a Trainer broadcast TypeError have no common underlying bug." + "reason": "Pipeline top_k nesting bug and Trainer _gpu_broadcast_one NoneType error are unrelated." }, { "left": "issue:43653", "right": "issue:44743", "accept": false, - "reason": "BigBirdTokenizer special-token decode bug is unrelated to recurrent state reset in qwen3_5 modular code." + "reason": "BigBirdTokenizer special-token registration issue and recurrent-state reset in qwen3_5 are different defects." }, { "left": "issue:44857", "right": "issue:45588", "accept": false, - "reason": "A CUDA AMP loss crash and an s_aux=None flash-attention crash are different model/runtime failures." + "reason": "Float16 AMP crash in LwDetrImageLoss and a flash_attention None-aux crash do not share the same code path." 
} ] }, @@ -7423,15 +7462,15 @@ "soft_pairs": [ "issue:44186|issue:45198", "issue:43421|issue:45491", - "issue:44265|issue:45198", "issue:43278|issue:43381", + "issue:44265|issue:45198", "issue:43994|issue:45412", - "issue:42915|issue:42994", "issue:44361|issue:45198", + "issue:42915|issue:42994", "issue:44206|issue:44479", "issue:44871|issue:44977", - "issue:43756|issue:45440", "issue:43257|issue:43278", + "issue:43756|issue:45440", "issue:44610|issue:45412", "issue:29942|issue:30064", "issue:45103|issue:45372", @@ -7444,223 +7483,229 @@ "issue:43653|issue:44442", "issue:43381|issue:43701", "issue:43526|issue:44186", - "issue:44464|issue:45491", "issue:43334|issue:43531", + "issue:44464|issue:45491", "issue:44977|issue:45245", - "issue:43262|issue:44206", "issue:29942|issue:36010", + "issue:43262|issue:44206", "issue:29127|issue:33357", - "issue:43761|issue:44079", - "issue:29127|issue:35141" + "issue:29127|issue:35141", + "issue:43761|issue:44079" ], - "prepared_review_unit_hash": "77edb973a1351d1e6c94b2b5aba3a2b1fac90dbb895e76241359e584b61db6d1", + "prepared_review_unit_hash": "5c33c4e9035d63135b213318a47a68cd8a5c470de04573f5ceefbdc48f80418a", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 31, - "serialized_chars": 37064, - "estimated_input_tokens": 9266, - "estimated_eval_tokens": 18788 + "serialized_chars": 37069, + "estimated_input_tokens": 9268, + "estimated_eval_tokens": 18792 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "These issues are largely unrelated Hugging Face Transformers bug reports across different subsystems; I don\u2019t see any convincing 
duplicate pairs, so no soft edges should be merged.", - "confidence": 0.99, - "canonical_issue_reason": "issue:43824 has the strongest impact signal in this set (6 inbound references) and is a clear, self-contained reproducible import failure.", + "summary": "This cluster is noisy and largely heterogeneous: the soft pairs span unrelated bugs across tokenizers, model loading, training, export, quantization, vision/audio processing, and docs. I don\u2019t see any true duplicate pair here, so there is no single canonical artifact to merge around.", + "confidence": 0.98, + "canonical_issue_reason": "No clear canonical issue: the items are thematically broad but do not describe the same underlying bug, so there is no safe duplicate target.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43824 is the best cluster anchor because it is concrete, externally referenced, and easy to identify as a distinct bug report.", + "best_issue_reason": "No issue cleanly represents the cluster as a duplicate hub; the set is too heterogeneous to nominate a meaningful global best issue.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44186", "right": "issue:45198", "accept": false, - "reason": "Different models and failure modes: LayoutLMv2 tokenizer padding/NER crash vs Wav2Vec2 save/tokenization failure." + "reason": "Different tokenizer/model families and different failure modes; not the same bug." }, { "left": "issue:43421", "right": "issue:45491", "accept": false, - "reason": "Tokenizer post-processor update behavior is unrelated to Gemma3 NaNs from sliding-window/all-padding attention." + "reason": "Special-token post-processor updates are unrelated to Gemma3 sliding-window NaN embeddings." }, { - "left": "issue:44265", - "right": "issue:45198", + "left": "issue:43278", + "right": "issue:43381", "accept": false, - "reason": "torch.export / torch_compilable_check failure is a different code path from Wav2Vec2 tokenization/save_pretrained issues." 
+ "reason": "Training/eval dtype mismatch is unrelated to gradient-checkpointing being disallowed in eval mode." }, { - "left": "issue:43278", - "right": "issue:43381", + "left": "issue:44265", + "right": "issue:45198", "accept": false, - "reason": "Embedding dtype drift in eval is unrelated to the gradient-checkpointing eval-mode limitation." + "reason": "torch.export / torch_compilable_check failure is unrelated to Wav2Vec2 save/tokenization issues." }, { "left": "issue:43994", "right": "issue:45412", "accept": false, - "reason": "SigLIP2 bad outputs and RT-DETR memory release are different bugs in different subsystems." + "reason": "SigLIP2 inference correctness and RT-DETR memory release are different problems." }, { - "left": "issue:42915", - "right": "issue:42994", + "left": "issue:44361", + "right": "issue:45198", "accept": false, - "reason": "Qwen3Moe FP8 config failure is not the same as generic quantized model saving failure." + "reason": "MLukeTokenizer task AttributeError is not the same as Wav2Vec2 save_pretrained/tokenization failure." }, { - "left": "issue:44361", - "right": "issue:45198", + "left": "issue:42915", + "right": "issue:42994", "accept": false, - "reason": "MLukeTokenizer AttributeError is unrelated to Wav2Vec2 save/tokenization behavior." + "reason": "Both touch quantization, but one is FP8 config failure and the other is quantized-model saving." }, { "left": "issue:44206", "right": "issue:44479", "accept": false, - "reason": "Unsupported feature arg in LasrFeatureExtractor is a different regression from Qwen video-input behavior." + "reason": "Feature extractor center-arg regression and Qwen video-input regression are unrelated." }, { "left": "issue:44871", "right": "issue:44977", "accept": false, - "reason": "Gemma eos-token config mismatch is unrelated to Qwen3.5 flash-attention generation bugs." + "reason": "EOS token config inconsistency is not the same as flash-attention generation failure." 
}, { - "left": "issue:43756", - "right": "issue:45440", + "left": "issue:43257", + "right": "issue:43278", "accept": false, - "reason": "Smollm3 RoPE-layer mismatch and DeepseekV3MoE divergence are separate model-implementation issues." + "reason": "Qwen3 MoE loading/conversion with accelerate+deepspeed is unrelated to eval dtype changes." }, { - "left": "issue:43257", - "right": "issue:43278", + "left": "issue:43756", + "right": "issue:45440", "accept": false, - "reason": "Qwen3 MoE weight conversion under accelerate+deepspeed is unrelated to training/eval embedding dtype changes." + "reason": "Different model families and different implementation mismatches; not one concrete bug." }, { "left": "issue:44610", "right": "issue:45412", "accept": false, - "reason": "Processor image-size mismatch is not the same as RT-DETR memory not being released on delete." + "reason": "Processor image-size mismatch is unrelated to a model memory leak on deletion." }, { "left": "issue:29942", "right": "issue:30064", "accept": false, - "reason": "Flash Attention 2 test failures and void segmentation map processing are different failures in different areas." + "reason": "Flash Attention 2 test failures and void segmentation-map processing are different subsystems." }, { "left": "issue:45103", "right": "issue:45372", "accept": false, - "reason": "auto_docstring future-annotations crash is unrelated to the mistral_common ReasoningEffort import error." + "reason": "Auto-docstring future-annotations crash is unrelated to Gemma 4 processor dependency import failure." }, { "left": "issue:43824", "right": "issue:44279", "accept": false, - "reason": "Missing Qwen2_5_VLForConditionalGeneration import is a specific API/export bug, not a generic dependency issue." + "reason": "Specific Qwen2.5-VL import error is not the same as a generic dependency issue." 
}, { "left": "issue:33357", "right": "issue:39692", "accept": false, - "reason": "MacOS CLIP bus error and SigLIP2 doc-example errors are different models and failure modes." + "reason": "MacOS CLIP bus error and SigLIP2 documentation mistakes are unrelated." }, { "left": "issue:45440", "right": "issue:45593", "accept": false, - "reason": "DeepseekV3MoE implementation divergence is unrelated to D-FINE auxiliary-loss behavior." + "reason": "DeepSeekV3 implementation divergence is unrelated to D-FINE auxiliary-loss behavior." }, { "left": "issue:29127", "right": "issue:34567", "accept": false, - "reason": "LayoutLMv3 error-message clarity and TrainerState token-count tracking are unrelated bugs." + "reason": "LayoutLMv3 error messaging and TrainerState token counters are unrelated." }, { "left": "issue:32090", "right": "issue:33290", "accept": false, - "reason": "Trainer NoneType broadcast failure is unrelated to DeepSpeed AdaFactor OOM." + "reason": "Trainer broadcast NoneType error and Adafactor+Deepspeed OOM are different runtime issues." }, { "left": "issue:45072", "right": "issue:45229", "accept": false, - "reason": "bfloat16 dtype mismatch in inference is not the same as Gemma4 multi-GPU OOM." + "reason": "BFloat16 dtype mismatch is not the same as multi-GPU CUDA OOM." }, { "left": "issue:43653", "right": "issue:44442", "accept": false, - "reason": "BigBirdTokenizer special-token registration and AutoTokenizer loading FastSpeech2ConformerTokenizer are different tokenizer issues." + "reason": "BigBird special-token registration and AutoTokenizer loading failures are different tokenizer bugs." }, { "left": "issue:43381", "right": "issue:43701", "accept": false, - "reason": "Gradient checkpointing in eval mode is unrelated to resume-from-checkpoint key mismatch." + "reason": "Eval-mode checkpointing restriction is unrelated to resume checkpoint key mismatch." 
}, { "left": "issue:43526", "right": "issue:44186", "accept": false, - "reason": "BeitImageProcessorFast reduce_labels bug is unrelated to LayoutLMv2Tokenizer crashes on padding/truncation." + "reason": "BEiT label reduction and LayoutLMv2 tokenizer crashes are different code paths." }, { - "left": "issue:44464", - "right": "issue:45491", + "left": "issue:43334", + "right": "issue:43531", "accept": false, - "reason": "Chunked generation with compiled forward is a different path from Gemma3 NaNs in mixed-length batching." + "reason": "Qwen3-VL pad_token_id import/load issue is unrelated to Qwen3-MoE sliding_window behavior." }, { - "left": "issue:43334", - "right": "issue:43531", + "left": "issue:44464", + "right": "issue:45491", "accept": false, - "reason": "Qwen3-VL pad_token_id loading error and Qwen3-MoE sliding_window behavior are separate issues." + "reason": "Compiled-forward chunked generation and Gemma3 NaN embeddings are not the same bug." }, { "left": "issue:44977", "right": "issue:45245", "accept": false, - "reason": "Flash-attention generation regression is unrelated to the category-count limit runtime error." + "reason": "Flash-attention generation failure is unrelated to the 2^24 categories runtime error." }, { - "left": "issue:43262", - "right": "issue:44206", + "left": "issue:29942", + "right": "issue:36010", "accept": false, - "reason": "Audio chat-template sampling-rate default is unrelated to LasrFeatureExtractor's unsupported center arg." + "reason": "Flash Attention 2 test breakage and GenerationMixin import error are distinct issues." }, { - "left": "issue:29942", - "right": "issue:36010", + "left": "issue:43262", + "right": "issue:44206", "accept": false, - "reason": "Flash Attention test failures are unrelated to the GenerationMixin import error." + "reason": "Audio chat-template sample-rate defaulting is unrelated to a feature-extractor center argument crash." 
}, { "left": "issue:29127", "right": "issue:33357", "accept": false, - "reason": "LayoutLMv3 error messaging and CLIP MacOS bus errors are not the same underlying bug." + "reason": "LayoutLMv3 box-validation messaging and CLIP bus error on MacOS are unrelated." }, { "left": "issue:29127", "right": "issue:35141", "accept": false, - "reason": "LayoutLMv3 error clarity and output-embedding reinitialization are different bugs in different code paths." + "reason": "LayoutLMv3 error clarity and output-embedding reinit after resize are different bugs." + }, + { + "left": "issue:43761", + "right": "issue:44079", + "accept": false, + "reason": "CLIPVisionModel hidden_states regression and ModelOutput key assignment are different layers of the stack." } ] }, @@ -7734,24 +7779,24 @@ "issue:45600" ], "soft_pairs": [ - "issue:43441|issue:45588", "issue:42915|issue:43931", + "issue:43441|issue:45588", "issue:43232|issue:43388", - "issue:43526|issue:44857", "issue:44442|issue:45072", + "issue:43526|issue:44857", "issue:43723|issue:45479", "issue:44479|issue:45290", - "issue:45003|issue:45440", "issue:43526|issue:44610", + "issue:45003|issue:45440", "issue:44743|issue:44811", "issue:43452|issue:45588", "issue:43931|issue:45216", "issue:44977|issue:45584", "issue:45588|issue:45600", - "issue:43316|issue:43525", "issue:43761|issue:44188", - "issue:43646|issue:44077", + "issue:43316|issue:43525", "issue:43575|issue:43701", + "issue:43646|issue:44077", "issue:43994|issue:44206", "issue:39692|issue:45491", "issue:43575|issue:44387", @@ -7763,20 +7808,20 @@ "issue:41628|issue:45372", "issue:43493|issue:43827" ], - "prepared_review_unit_hash": "d7b00247e999fcfa3808e320b6e13dcfc832c8fa7e45cc8eafbd386f301db5f3", + "prepared_review_unit_hash": "61f2a5d53bbb3014e6ae16299c3819484c6ee76fc21812ff5f2de4e0337608f9", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 
360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 28, - "serialized_chars": 36198, + "serialized_chars": 36197, "estimated_input_tokens": 9050, "estimated_eval_tokens": 18356 }, @@ -7785,180 +7830,180 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The soft pairs mostly span unrelated regressions in different subsystems or models, so I rejected all of them; the only plausible hub is the flash-attention-related issue around `s_aux=None`, but even that is not the same concrete bug as the paired reports.", - "confidence": 0.77, - "canonical_issue_reason": "issue:45588 is the best central anchor in this set: it is open and has multiple flash-attention-related soft links, making it the clearest hub among the provided issues.", + "summary": "This cluster is highly heterogeneous: it mixes unrelated bugs in loading, generation, quantization/memory, processors, docs, and config/API regressions. 
None of the soft edges look like true duplicates, and there are no PRs to assess.", + "confidence": 0.94, + "canonical_issue_reason": "No single canonical issue stands out; the items cover distinct subsystems and failure modes, so treating them as one duplicate cluster would be too aggressive.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45588 is the strongest issue candidate overall because it is the most connected and most general among the soft-linked flash-attention reports.", + "best_issue_reason": "Issue 43873 is the broadest and most central memory/quantization report in the set, with the most discussion, but it still does not represent the other unrelated bugs well.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:43441", - "right": "issue:45588", + "left": "issue:42915", + "right": "issue:43931", "accept": false, - "reason": "Both involve FlashAttention, but they describe different failure modes and likely different fixes: Ministral-3 RC regression vs `s_aux=None` on sink-less models." + "reason": "Both are model-loading/runtime failures, but one is Qwen3Moe + FineGrainedFP8Config and the other is Qwen3-VL weight-shape mismatch; different models and root causes." }, { - "left": "issue:42915", - "right": "issue:43931", + "left": "issue:43441", + "right": "issue:45588", "accept": false, - "reason": "Different models and different errors: FineGrainedFP8Config failure vs weight-shape mismatch on Qwen3-VL loading." + "reason": "Both mention flash attention, but they hit different code paths: Ministral-3 v5 RC compatibility vs an AttributeError in integrations/flash_attention.py for sink-less models." }, { "left": "issue:43232", "right": "issue:43388", "accept": false, - "reason": "Unrelated code paths: generation kwargs after `sync_gpus` vs metric gathering dropping tuple labels." + "reason": "Generation kwargs syncing and metric-gathering label truncation are unrelated behaviors in different parts of the trainer stack." 
}, { - "left": "issue:43526", - "right": "issue:44857", + "left": "issue:44442", + "right": "issue:45072", "accept": false, - "reason": "Different components and symptoms: BeitImageProcessorFast label reduction bug vs LwDetrImageLoss AMP crash." + "reason": "Tokenizer loading failure and bfloat16 dtype mismatch are separate bugs with no shared code path." }, { - "left": "issue:44442", - "right": "issue:45072", + "left": "issue:43526", + "right": "issue:44857", "accept": false, - "reason": "Tokenizer loading failure and dtype mismatch are unrelated bugs." + "reason": "BeitImageProcessorFast label reduction and LwDetrImageLoss AMP/CUDA crashes are different subsystems and failure modes." }, { "left": "issue:43723", "right": "issue:45479", "accept": false, - "reason": "Tokenizer loading in v5 and zero-loss classification behavior are not the same issue." + "reason": "AutoTokenizer loading in v5 and zero-loss sequence classification are unrelated issues." }, { "left": "issue:44479", "right": "issue:45290", "accept": false, - "reason": "Video-input regression for Qwen VL models is separate from chat-template tool-call handling." + "reason": "A video-input regression for Qwen VL models is not the same as chat template crashing on tool-call assistant messages." }, { - "left": "issue:45003", - "right": "issue:45440", + "left": "issue:43526", + "right": "issue:44610", "accept": false, - "reason": "`sys.modules` access in modeling utils is unrelated to DeepSeekV3MoE divergence from remote implementation." + "reason": "Label reduction in an image processor and a processor/model input-size mismatch are distinct bugs." }, { - "left": "issue:43526", - "right": "issue:44610", + "left": "issue:45003", + "right": "issue:45440", "accept": false, - "reason": "Both are model/processor-related, but one is label reduction and the other is input-size mismatch." + "reason": "Unsafe sys.modules access in modeling_utils is unrelated to DeepseekV3MoE divergence from remote implementation." 
}, { "left": "issue:44743", "right": "issue:44811", "accept": false, - "reason": "Cache/reset behavior in recurrent states is unrelated to Whisper batch decode special-token handling." + "reason": "Recurrent-state reset in Qwen3.5 cache handling and Whisper batch_decode skip_special_tokens behavior are unrelated." }, { "left": "issue:43452", "right": "issue:45588", "accept": false, - "reason": "Tokenizer/model loading with `gguf_file` is a different failure mode than FlashAttention `s_aux=None`." + "reason": "gguf_file loading breakage for tokenizer/model construction is not the same as flash_attention.py crashing on s_aux=None." }, { "left": "issue:43931", "right": "issue:45216", "accept": false, - "reason": "Both concern Qwen models, but one is a loading shape mismatch and the other is a broken `save_pretrained` checkpoint." + "reason": "Weight-shape mismatch during model loading and incorrect save_pretrained checkpoint serialization are different defects." }, { "left": "issue:44977", "right": "issue:45584", "accept": false, - "reason": "Qwen3.5 FlashAttention generation regression and Whisper empty-transcription generation failure are unrelated." + "reason": "Qwen3.5 flash-attention generation issues do not match Whisper's empty-transcription failure after align_special_tokens." }, { "left": "issue:45588", "right": "issue:45600", "accept": false, - "reason": "FlashAttention `s_aux=None` crash is unrelated to removed `Sam3LiteText` references in auto mappings." - }, - { - "left": "issue:43316", - "right": "issue:43525", - "accept": false, - "reason": "Different config API mismatches: Gemma3TextConfig discrepancy vs missing `pad_token_id` on Llama4Config." + "reason": "A flash-attention AttributeError and an auto_mappings stale-config CI break are unrelated." }, { "left": "issue:43761", "right": "issue:44188", "accept": false, - "reason": "CLIPVision hidden-state regression and attention-kernel divergence under `torch.compile` are separate bugs." 
+ "reason": "CLIPVisionModel returning hidden_states=None and attention-kernel divergence under torch.compile are different code paths and symptoms." }, { - "left": "issue:43646", - "right": "issue:44077", + "left": "issue:43316", + "right": "issue:43525", "accept": false, - "reason": "Generic custom-model init breakage is not the same as the `patchtsmixer` `post_init` constraint." + "reason": "Gemma3TextConfig API mismatch and Llama4Config missing pad_token_id are separate config-schema regressions." }, { "left": "issue:43575", "right": "issue:43701", "accept": false, - "reason": "Tensor-parallel OOM and checkpoint key mismatch are unrelated." + "reason": "Tensor-parallel OOM on a specific Qwen model and resume_from_checkpoint key mismatch are unrelated." + }, + { + "left": "issue:43646", + "right": "issue:44077", + "accept": false, + "reason": "Custom model initialization breakage and patchtsmixer post_init validation are not the same bug." }, { "left": "issue:43994", "right": "issue:44206", "accept": false, - "reason": "SigLIP2 wrong outputs and LastFeatureExtractor center-arg crash are different problems." + "reason": "SigLIP2 nonsensical outputs and a LaserFeatureExtractor center-arg crash are distinct issues." }, { "left": "issue:39692", "right": "issue:45491", "accept": false, - "reason": "SigLIP2 docs/example errors are unrelated to Gemma3 mixed-length NaN embeddings." + "reason": "SigLIP2 documentation example errors and Gemma3 mixed-length NaN embeddings are different concerns." }, { "left": "issue:43575", "right": "issue:44387", "accept": false, - "reason": "Both mention memory, but one is model loading OOM and the other is int4 reserved-memory growth." + "reason": "Both involve memory pressure, but one is TP loading OOM for a specific model and the other is a broader int4 reserved-memory regression; not the same concrete fix." 
}, { "left": "issue:43873", "right": "issue:45588", "accept": false, - "reason": "Quantization/offloading behavior is unrelated to the FlashAttention sink-less crash." + "reason": "Quantization/offloading behavior and a flash_attention AttributeError are separate subsystems." }, { "left": "issue:43673", "right": "issue:44743", "accept": false, - "reason": "Both involve cache/generation, but they affect different mechanisms and code paths." + "reason": "Missing GenerationMixin cache in chunked_prefill and recurrent-state reset in modular_qwen3_5 are different generation bugs." }, { "left": "issue:33290", "right": "issue:34567", "accept": false, - "reason": "Deepspeed Adafactor OOM and `num_input_tokens_seen` not updating are unrelated trainer issues." + "reason": "Adafactor/DeepSpeed OOM and TrainerState.num_input_tokens_seen not updating are unrelated trainer issues." }, { "left": "issue:44442", "right": "issue:44898", "accept": false, - "reason": "Tokenizer loading failure is unrelated to Perceiver interpolation/non-default resolution failure." + "reason": "Tokenizer loading failure and Perceiver non-default-resolution classification failure are different model families and code paths." }, { "left": "issue:34689", "right": "issue:43611", "accept": false, - "reason": "Different release regressions and different model-loading assumptions; not the same concrete bug." + "reason": "Llama 3.2 Vision loading regression and base_model_prefix loading breakage are both loading-related, but the concrete failures differ." }, { "left": "issue:41628", "right": "issue:45372", "accept": false, - "reason": "Both are import errors, but they involve different missing symbols and different loading flows." + "reason": "AutoImageProcessor import error and ReasoningEffort import error are separate missing-symbol regressions." 
}, { "left": "issue:43493", "right": "issue:43827", "accept": false, - "reason": "SigLIP2 implementation mismatch and docs still referencing removed `pipeline()` are unrelated." + "reason": "SigLIP2 HF/JAX implementation discrepancy is unrelated to docs still referencing pipeline() after removals." } ] }, @@ -8032,8 +8077,8 @@ ], "soft_pairs": [ "issue:43381|issue:43873", - "issue:33290|issue:34689", "issue:33290|issue:35141", + "issue:33290|issue:34689", "issue:44077|issue:44479", "issue:43720|issue:44442", "issue:43643|issue:44913", @@ -8042,9 +8087,9 @@ "issue:43873|issue:44871", "issue:43540|issue:44514", "issue:44162|issue:44792", - "issue:43316|issue:44877", "issue:43381|issue:43856", "issue:33290|issue:36331", + "issue:43316|issue:44877", "issue:34689|issue:36010", "issue:44855|issue:45137", "issue:42175|issue:43316", @@ -8052,210 +8097,210 @@ "issue:43388|issue:43688", "issue:43065|issue:44617", "issue:16998|issue:22355", + "issue:25251|issue:28282", "issue:43316|issue:43716", "issue:41950|issue:43475", - "issue:25251|issue:28282", "issue:45127|issue:45200", "issue:43761|issue:44479", "issue:43295|issue:43825", "issue:44871|issue:45584" ], - "prepared_review_unit_hash": "cd53b91dc9e07fd57b93d3e9e41a20c335c06cb91c1d332ce2067861b633f736", + "prepared_review_unit_hash": "7e656ab5c403fbda5b606cd305d44e38c17430309d8b8db28f580e8a025f8539", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 28, - "serialized_chars": 35511, - "estimated_input_tokens": 8878, - "estimated_eval_tokens": 18012 + "serialized_chars": 35508, + "estimated_input_tokens": 8877, + "estimated_eval_tokens": 18010 }, "overflow_policy": "truncate_then_skip", "trimmed": false, 
"aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is not a true duplicate set: the paired items span unrelated bugs across loading, training, tokenization, multimodal processors, and config semantics. I would not merge any soft edge.", - "confidence": 0.14, - "canonical_issue_reason": "No single issue cleanly represents the cluster; the items are too heterogeneous to treat one as canonical.", + "summary": "This cluster is heterogeneous: the soft pairs span unrelated bugs across loading, config, multimodal processing, training, and environment/setup issues. None of the soft edges look like the same underlying defect, so all are rejected and there is no clear canonical duplicate to choose.", + "confidence": 0.98, + "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "issue:43749 has the strongest discussion/inbound activity, but it still only represents one of many unrelated failure modes.", + "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43381", "right": "issue:43873", "accept": false, - "reason": "Gradient checkpointing in eval mode is unrelated to quantization/offloading behavior." + "reason": "Different problems: eval-mode gradient checkpointing vs quantization/offloading behavior." }, { "left": "issue:33290", - "right": "issue:34689", + "right": "issue:35141", "accept": false, - "reason": "Different failure modes: Deepspeed/Adafactor OOM vs Llama 3.2 Vision model-loading regression." + "reason": "Unrelated issues: DeepSpeed Adafactor OOM vs embedding reinit after resize/tied embeddings." }, { "left": "issue:33290", - "right": "issue:35141", + "right": "issue:34689", "accept": false, - "reason": "Training-memory OOM and token-embedding reinitialization are distinct bugs." + "reason": "OOM during optimizer/deepspeed use is not the same as a Llama model-loading regression." 
}, { "left": "issue:44077", "right": "issue:44479", "accept": false, - "reason": "Unrelated model-specific config validation vs video-input regression in Qwen VL models." + "reason": "PatchTSMixer post_init API restriction is unrelated to Qwen video-input regression." }, { "left": "issue:43720", "right": "issue:44442", "accept": false, - "reason": "BitNet packed-weight loading and tokenizer loading are different code paths." + "reason": "Different failure modes: BitNet packed-weight loading vs tokenizer loading for FastSpeech2Conformer." }, { "left": "issue:43643", "right": "issue:44913", "accept": false, - "reason": "Missing fields from trust_remote_code config loading is unrelated to GPTNeoX rotary_pct persistence." + "reason": "Both concern config loading, but one is missing fields with trust_remote_code and the other is rotary_pct not persisting on reload." }, { "left": "issue:44190", "right": "issue:44496", "accept": false, - "reason": "Local dataset loading in a training script is unrelated to unrecognized model/config loading." + "reason": "Dataset script loading failure is unrelated to config/model_type recognition failure." }, { "left": "issue:43749", "right": "issue:43931", "accept": false, - "reason": "FSDP CPU RAM-efficient loading and Qwen3-VL shape mismatch are separate load-time issues." + "reason": "Both are model-loading related, but one is FSDP CPU RAM-efficient loading and the other is a Qwen3-VL shape mismatch." }, { "left": "issue:43873", "right": "issue:44871", "accept": false, - "reason": "Quantization/offloading behavior is unrelated to Gemma-3 eos_token_id inconsistency." + "reason": "Quantization/offloading issue is unrelated to Gemma eos_token_id config inconsistency." }, { "left": "issue:43540", "right": "issue:44514", "accept": false, - "reason": "Both are video-related, but they affect different Qwen processors and different error conditions." 
+ "reason": "Both touch video/multimodal paths, but one is a video ValueError in Qwen3OmniMoe and the other is batched chat-template padding behavior in Qwen2_5_VLProcessor." }, { "left": "issue:44162", "right": "issue:44792", "accept": false, - "reason": "ESM2 breakage and janus image-generation test failure do not share the same underlying bug." - }, - { - "left": "issue:43316", - "right": "issue:44877", - "accept": false, - "reason": "Gemma3TextConfig API mismatch is unrelated to strict config loading for granite_speech." + "reason": "ESM2 model breakage is unrelated to a janus image-generation test failure." }, { "left": "issue:43381", "right": "issue:43856", "accept": false, - "reason": "Eval-mode gradient checkpointing is not the same as MoE training memory inefficiency." + "reason": "Gradient checkpointing in eval mode is not the same as Qwen3 MoE training memory usage." }, { "left": "issue:33290", "right": "issue:36331", "accept": false, - "reason": "Deepspeed OOM with Adafactor is unrelated to CustomTrainer.compute_loss signature breakage." + "reason": "DeepSpeed OOM and CustomTrainer compute_loss signature mismatch are different bugs." + }, + { + "left": "issue:43316", + "right": "issue:44877", + "accept": false, + "reason": "Both are config-related, but one is a Gemma3TextConfig API discrepancy and the other is strict config blocking granite_speech loading." }, { "left": "issue:34689", "right": "issue:36010", "accept": false, - "reason": "Llama 3.2 Vision model loading and GenerationMixin import failure are different regressions." + "reason": "Model-loading regression for Llama 3.2 vision is unrelated to a missing GenerationMixin import." }, { "left": "issue:44855", "right": "issue:45137", "accept": false, - "reason": "Python 3.13 import parsing error and DeepSpeed ZeRO3 deque error are unrelated." + "reason": "Python 3.13 torch.jit parsing/IndentationError is unrelated to DeepSpeed ZeRO3 deque underflow." 
}, { "left": "issue:42175", "right": "issue:43316", "accept": false, - "reason": "Backend packaging omission has no overlap with Gemma3TextConfig API discrepancy." + "reason": "Package extras/backend installation problem is unrelated to a model config API discrepancy." }, { "left": "issue:43404", "right": "issue:43441", "accept": false, - "reason": "Same model family, but weight tying and FlashAttention failures are distinct code-path bugs." + "reason": "Same general family name, but different concrete failures: tied lm_head weights vs FlashAttention runtime failure." }, { "left": "issue:43388", "right": "issue:43688", "accept": false, - "reason": "Metric gathering label truncation and auxiliary-loss normalization are unrelated training issues." + "reason": "Metric gathering drops tuple labels, while the other issue is auxiliary-loss normalization." }, { "left": "issue:43065", "right": "issue:44617", "accept": false, - "reason": "Dummy Conv2d in Sam3PixelDecoder is not the same as Sam3Video OOM behavior." + "reason": "Dummy Conv2d implementation in Sam3PixelDecoder is not the same issue as Sam3Video CUDA OOM." }, { "left": "issue:16998", "right": "issue:22355", "accept": false, - "reason": "Model_max_length behavior and missing transformers.onnx module are unrelated." + "reason": "DeBERTa model_max_length question is unrelated to the missing transformers.onnx import." + }, + { + "left": "issue:25251", + "right": "issue:28282", + "accept": false, + "reason": "Pipeline top_k output nesting is unrelated to AutoModel missing PyTorch." }, { "left": "issue:43316", "right": "issue:43716", "accept": false, - "reason": "Gemma3TextConfig API differences do not match Mistral-3 image preprocessor dtype mismatch." + "reason": "Gemma3TextConfig API discrepancy is unrelated to Mistral-3 image preprocessor dtype mismatch." 
}, { "left": "issue:41950", "right": "issue:43475", "accept": false, - "reason": "Video-classification processor lookup and missing Sam3VisionEncoderOutput attribute are separate issues." - }, - { - "left": "issue:25251", - "right": "issue:28282", - "accept": false, - "reason": "Pipeline output-shape behavior is unrelated to missing PyTorch import errors." + "reason": "Video-classification pipeline processor selection is unrelated to Sam3VisionEncoderOutput missing fpn_position_embeddings." }, { "left": "issue:45127", "right": "issue:45200", "accept": false, - "reason": "LoRA merge collapse with tied embeddings is not the same as missing mm_token_type_ids defaults." + "reason": "LoRA/extended-vocab collapse with tied embeddings is unrelated to Gemma 4 mm_token_type_ids defaults." }, { "left": "issue:43761", "right": "issue:44479", "accept": false, - "reason": "CLIPVisionModel hidden_states regression and Qwen video-input regression are different models and failures." + "reason": "Different regressions in different multimodal paths: CLIPVisionModel hidden_states vs Qwen video input regression." }, { "left": "issue:43295", "right": "issue:43825", "accept": false, - "reason": "Processor.tokenizer regression is unrelated to an inaccurate pipeline error message." + "reason": "Processor/tokenizer regression is unrelated to the pipeline error message claiming translation support." }, { "left": "issue:44871", "right": "issue:45584", "accept": false, - "reason": "Gemma-3 eos_token_id mismatch and Whisper empty-transcription handling are different generation bugs." + "reason": "Gemma eos_token_id config inconsistency is not the same as Whisper empty-transcription failure after token alignment." 
} ] }, @@ -8294,6 +8339,7 @@ "issue:43742", "issue:43761", "issue:43827", + "issue:43873", "issue:43906", "issue:43931", "issue:43994", @@ -8316,10 +8362,8 @@ "issue:44938", "issue:44964", "issue:44977", - "issue:45127", "issue:45245", "issue:45290", - "issue:45357", "issue:45362", "issue:45381", "issue:45406", @@ -8330,8 +8374,8 @@ ], "soft_pairs": [ "issue:44734|issue:45406", - "issue:44077|issue:44534", "issue:43295|issue:44871", + "issue:44077|issue:44534", "issue:43550|issue:43701", "issue:43606|issue:43701", "issue:43646|issue:44913", @@ -8346,7 +8390,7 @@ "issue:42617|issue:43575", "issue:44479|issue:44734", "issue:44933|issue:45362", - "issue:41720|issue:43575", + "issue:43575|issue:43873", "issue:32090|issue:35141", "issue:44464|issue:45563", "issue:43065|issue:44610", @@ -8357,229 +8401,210 @@ "issue:41720|issue:45245", "issue:44190|issue:44964", "issue:44938|issue:45542", - "issue:45127|issue:45357", "issue:44792|issue:45600" ], - "prepared_review_unit_hash": "f4c2c006e106aa45c9e0e87196e9f7f005ee4e470613b9f418ff392c8509ddb6", + "prepared_review_unit_hash": "071197f85d3bc66a93662ed6a6e14b28d395d26f3f95397f7200af717786a8db", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { - "node_count": 48, - "item_count": 48, - "soft_pair_count": 30, - "serialized_chars": 36583, - "estimated_input_tokens": 9146, - "estimated_eval_tokens": 18548 + "node_count": 47, + "item_count": 47, + "soft_pair_count": 29, + "serialized_chars": 35615, + "estimated_input_tokens": 8904, + "estimated_eval_tokens": 18064 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is heterogeneous: the soft-similarity pairs mostly share broad areas 
(serving, multimodal, generation, config loading) but do not describe the same concrete bug/change. I would reject all proposed merges; no PRs are present. If one issue must anchor the set, issue 44521 is the strongest open representative because it has the most inbound references and broad activity.", - "confidence": 0.91, - "canonical_issue_reason": "Issue 44521 is the best representative by current activity and inbound references, but the cluster has no single true duplicate center.", + "summary": "No soft pair looks like a true duplicate: the titles point to different models, code paths, or failure modes, so this set does not have a safe canonical issue/PR.", + "confidence": 0.97, + "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "Open, actively discussed, and the most referenced issue in the set, so it is the best practical anchor.", + "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44734", "right": "issue:45406", "accept": false, - "reason": "Both involve transformers serve, but one is KV-cache tensor indexing and the other is a missing processor attribute; different failures." + "reason": "Both involve transformers serve/chat-like crashes, but one is KV-cache tensor indexing and the other is a Gemma4Processor attribute regression; different bugs." }, { - "left": "issue:44077", - "right": "issue:44534", + "left": "issue:43295", + "right": "issue:44871", "accept": false, - "reason": "Unrelated bugs: patchtsmixer post_init validation vs non-persistent buffer corruption in v5." + "reason": "One is a processor/tokenizer API regression in custom code, the other is an eos_token_id config mismatch for Gemma-3; not the same defect." }, { - "left": "issue:43295", - "right": "issue:44871", + "left": "issue:44077", + "right": "issue:44534", "accept": false, - "reason": "Different regressions: processor/tokenizer access breakage vs eos_token_id config mismatch." 
+ "reason": "Unrelated issues: patchtsmixer post_init handling vs non-persistent buffer initialization garbage." }, { "left": "issue:43550", "right": "issue:43701", "accept": false, - "reason": "torch.compile/SDPA model failure is unrelated to resume_from_checkpoint key mismatches." + "reason": "Bamba torch.compile + SDPA failure is unrelated to resume_from_checkpoint key mismatch." }, { "left": "issue:43606", "right": "issue:43701", "accept": false, - "reason": "CPU offload device mismatch is a different code path from checkpoint key handling." + "reason": "CPU offload device mismatch is a different loading/runtime problem than checkpoint key mismatch." }, { "left": "issue:43646", "right": "issue:44913", "accept": false, - "reason": "Custom model init breakage is not the same as rotary_pct not persisting on reload." + "reason": "Custom model initialization breakage and GPTNeoX rotary_pct reload drift are separate config/init issues." }, { "left": "issue:43906", "right": "issue:45290", "accept": false, - "reason": "The reproduction target and failure mode differ: isolated prior issue reproduction vs chat template tool-call crash." + "reason": "A reproduction of an older issue is not the same as apply_chat_template crashing on tool-call assistant messages." }, { "left": "issue:44617", "right": "issue:45584", "accept": false, - "reason": "CUDA OOM in Sam3Video is unrelated to Whisper empty-transcription failure after align_special_tokens." + "reason": "Sam3Video CUDA OOM and Whisper empty-transcription failure are different models and failure modes." }, { "left": "issue:43761", "right": "issue:44242", "accept": false, - "reason": "CLIPVision hidden_states bug is different from MoE load-balancing loss omission." + "reason": "CLIPVision hidden_states regression is unrelated to MoE load-balancing loss omission." 
}, { "left": "issue:43931", "right": "issue:44060", "accept": false, - "reason": "Model weight shape mismatch is not the same as the incorrect tied-weights warning." + "reason": "Qwen3-VL weight-shape mismatch and Qwen3-Next tied-weight warning are distinct configuration/loading problems." }, { "left": "issue:43452", "right": "issue:44496", "accept": false, - "reason": "GGUF tokenizer/model loading breakage is a different issue from missing model_type in config.json." + "reason": "gguf_file load breakage is not the same as missing model_type in config.json." }, { "left": "issue:45563", "right": "issue:45584", "accept": false, - "reason": "Stale num_return_sequences warning is unrelated to Whisper generation crashing on empty output." + "reason": "A stale generate warning and Whisper empty-transcription crash are different issues." }, { "left": "issue:44521", "right": "issue:45381", "accept": false, - "reason": "Both are multimodal/VL-related, but assistant_mask generation and vision_position_ids are distinct bugs." + "reason": "assistant_masks all-zero for multimodal templates is separate from Qwen2.5-VL video vision_position_ids being wrong." }, { "left": "issue:43575", "right": "issue:43742", "accept": false, - "reason": "TP OOM on Qwen2-57B and key errors when loading MobileLLM are different load-time failures." + "reason": "Qwen2-57B tensor-parallel OOM and MobileLLM loading key error do not share a concrete code-path bug." }, { "left": "issue:42617", "right": "issue:43575", "accept": false, - "reason": "3d_parallel.py runtime failure does not match Qwen2 tensor-parallel OOM." + "reason": "3d_parallel.py execution failure is unrelated to Qwen2-57B TP memory exhaustion." }, { "left": "issue:44479", "right": "issue:44734", "accept": false, - "reason": "Video-input regression across Qwen VL models is unrelated to serve-side KV-cache indexing." 
+ "reason": "Video-input regression in Qwen VL models and serve KV-cache indexing crash are different subsystems and symptoms." }, { "left": "issue:44933", "right": "issue:45362", "accept": false, - "reason": "Missing image_utils import and Qwen3.5 chat crashes are different defects." + "reason": "Missing image_utils import and Qwen3.5 chat crash are unrelated." }, { - "left": "issue:41720", - "right": "issue:43575", + "left": "issue:43575", + "right": "issue:43873", "accept": false, - "reason": "Auto device mapping cudaErrorAssert is not the same as tensor-parallel OOM." + "reason": "Both mention memory/offload concerns, but one is TP OOM and the other is quantization/offloading behavior; not the same bug." }, { "left": "issue:32090", "right": "issue:35141", "accept": false, - "reason": "Trainer GPU broadcast NoneType error is unrelated to embedding reinitialization in post_init." + "reason": "Trainer _gpu_broadcast_one TypeError and output embedding reinitialization in post_init are separate problems." }, { "left": "issue:44464", "right": "issue:45563", "accept": false, - "reason": "Compiled-forward generation inconsistency is not the same as a stale warning." + "reason": "Compiled-forward generation inconsistency is unrelated to a stale num_return_sequences warning." }, { "left": "issue:43065", "right": "issue:44610", "accept": false, - "reason": "Dummy Conv2d in Sam3PixelDecoder and OmDet-Turbo input-size mismatch are separate model issues." + "reason": "Dummy Conv2d in Sam3PixelDecoder and OmDet-Turbo processor size mismatch are different model-specific issues." }, { "left": "issue:44464", "right": "issue:44871", "accept": false, - "reason": "Chunked generation inconsistency is unrelated to eos_token_id configuration drift." + "reason": "Chunked generation/compiled-forward inconsistency is unrelated to Gemma-3 eos_token_id config mismatch." 
}, { "left": "issue:44792", "right": "issue:44977", "accept": false, - "reason": "Janus image-generation test failure is not the same as Qwen3.5 flash-attention generation failure." + "reason": "Failed janus image-generation test and Qwen3.5 flash-attention generation issue are different models and failure paths." }, { "left": "issue:43827", "right": "issue:43994", "accept": false, - "reason": "Documentation references to removed pipeline() are not the same as SigLIP2 producing bad outputs." + "reason": "Docs still referencing pipeline() is not the same as a model producing nonsensical outputs under AutoModel/pipeline." }, { "left": "issue:44534", "right": "issue:44913", "accept": false, - "reason": "Non-persistent buffer junk in v5 is unrelated to GPTNeoX rotary_pct not persisting." + "reason": "Non-persistent buffer junk and GPTNeoX rotary_pct reload regression are unrelated." }, { "left": "issue:41720", "right": "issue:45245", "accept": false, - "reason": "Qwen3 auto-device-map CUDA assert is unrelated to the 2^24 categories limit error." + "reason": "Qwen3 auto-device-map cuda assert has nothing to do with the 2^24 category limit error." }, { "left": "issue:44190", "right": "issue:44964", "accept": false, - "reason": "Local dataset loading in an example script is a different problem from Phi-4 multimodal model loading." + "reason": "Local dataset loading in a no-trainer script is unrelated to loading a multimodal model with latest transformers." }, { "left": "issue:44938", "right": "issue:45542", "accept": false, - "reason": "Python 3.14 import/load failure is unrelated to the TensorBoard-without-TensorFlow backend error." - }, - { - "left": "issue:45127", - "right": "issue:45357", - "accept": false, - "reason": "LoRA merge collapse with extended vocab is a different bug from incorrect visual encoder keys being saved." 
- }, - { - "left": "issue:44242", - "right": "issue:44521", - "accept": false, - "reason": "MoE load-balancing loss omission is unrelated to assistant mask generation for multimodal chat templates." - }, - { - "left": "issue:44617", - "right": "issue:45584", - "accept": false, - "reason": "Sam3Video OOM and Whisper empty-transcription crash are unrelated generation failures." + "reason": "Python 3.14 import/load failure and undefined tf backend due to tensorboard-only install are different environment issues." }, { "left": "issue:44792", "right": "issue:45600", "accept": false, - "reason": "Janus image-generation test failure is unrelated to CI breakage from removed Sam3LiteText configs." + "reason": "Janus test failure and auto_mappings referencing removed Sam3LiteText configs are unrelated CI/model issues." } ] }, @@ -8627,23 +8652,23 @@ "issue:44077", "issue:44079", "issue:44112", + "issue:44186", "issue:44190", "issue:44220", - "issue:44246", "issue:44279", "issue:44355", "issue:44466", - "issue:44514", "issue:44561", "issue:44683", "issue:44734", "issue:44743", "issue:44855", "issue:44913", - "issue:45003", + "issue:45127", "issue:45216", "issue:45245", "issue:45276", + "issue:45357", "issue:45375", "issue:45406", "issue:45446", @@ -8653,6 +8678,7 @@ "issue:45632" ], "soft_pairs": [ + "issue:45127|issue:45357", "issue:43388|issue:43582", "issue:43761|issue:44743", "issue:44855|issue:45588", @@ -8662,243 +8688,242 @@ "issue:43644|issue:44913", "issue:45276|issue:45584", "issue:43976|issue:44855", - "issue:43404|issue:45216", "issue:44077|issue:44079", + "issue:43404|issue:45216", "issue:43550|issue:44190", "issue:43116|issue:44190", - "issue:44112|issue:45600", "issue:43295|issue:45276", + "issue:44112|issue:45600", "issue:45245|issue:45584", "issue:41950|issue:44016", "issue:39692|issue:44190", "issue:36246|issue:41628", "issue:44734|issue:45632", "issue:43479|issue:45375", - "issue:45406|issue:45632", "issue:36246|issue:38617", + "issue:45406|issue:45632", 
"issue:44561|issue:45632", - "issue:44514|issue:44734", - "issue:44016|issue:44220", + "issue:44186|issue:44734", "issue:38175|issue:43065", + "issue:44016|issue:44220", "issue:42175|issue:44279", "issue:44683|issue:45446", - "issue:44466|issue:44913", - "issue:44246|issue:45003" + "issue:44466|issue:44913" ], - "prepared_review_unit_hash": "35db967a3dfbef3fd5e89fa1b92ba62e26932da9cdb718fe8ab845ad2ee5b754", + "prepared_review_unit_hash": "4d2041ec799464ef48dd817daafb571a24bcd3e2d69ff3e1e88f0158e385a412", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 31, - "serialized_chars": 37130, - "estimated_input_tokens": 9283, - "estimated_eval_tokens": 18822 + "serialized_chars": 37249, + "estimated_input_tokens": 9313, + "estimated_eval_tokens": 18882 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is heterogeneous; the soft edges mostly connect unrelated bugs that only share broad subsystem vocabulary (imports, configs, cache, multimodal, CI, etc.). I would not merge any of the proposed pairs as duplicates.", - "confidence": 0.72, - "canonical_issue_reason": "issue:44561 is the best representative: it describes a clear, high-impact framework regression in `trust_remote_code` handling caused by a removed utility, with broad applicability beyond one model family.", + "summary": "All proposed soft edges look like false positives: the paired issues target different subsystems, models, or failure modes, so none should be merged as duplicates. 
There is no clear canonical issue in this mixed set.", + "confidence": 0.95, + "canonical_issue_reason": "No single issue is a good canonical here; the candidate pairs are heterogeneous and do not share one underlying bug or change.", "canonical_pr_reason": null, - "best_issue_reason": "issue:44561 has the strongest combination of clarity, scope, and downstream impact, so it is the best single issue anchor for this cluster.", + "best_issue_reason": "No issue stands out as a global representative because the set spans unrelated import errors, model regressions, docs problems, and pipeline/config bugs.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:45127", + "right": "issue:45357", + "accept": false, + "reason": "Different Qwen3.5-related symptoms: LoRA/weight-tying collapse vs incorrect visual encoder keys on save_pretrained." + }, { "left": "issue:43388", "right": "issue:43582", "accept": false, - "reason": "Different failures: metric gathering last-batch label handling vs Apple Silicon allocator warmup TypeError." + "reason": "Unrelated areas and failures: gather_for_metrics label dropping vs Apple Silicon caching_allocator_warmup TypeError." }, { "left": "issue:43761", "right": "issue:44743", "accept": false, - "reason": "Unrelated model bugs: CLIP vision hidden states regression vs Qwen3 recurrent state reset with cache." + "reason": "CLIPVisionModel hidden_states regression is unrelated to Qwen recurrent state reset with cache." }, { "left": "issue:44855", "right": "issue:45588", "accept": false, - "reason": "Different code paths and symptoms: Python 3.13 JIT parsing/import indentation issue vs flash-attention `s_aux=None` crash." + "reason": "Python 3.13 indentation/torch.jit parsing issue is not the same as flash_attention s_aux=None AttributeError." 
}, { "left": "issue:43650", "right": "issue:44220", "accept": false, - "reason": "No shared underlying bug; one is an unclear data-related issue, the other is a specific `_torch_extract_fbank_features()` problem." + "reason": "Generic placeholder issue vs audio feature extraction bug; no shared concrete code path." }, { "left": "issue:44355", "right": "issue:44855", "accept": false, - "reason": "Both involve import/compile-time errors, but the concrete root causes and affected paths are different." + "reason": "Compiled Python file errors are unrelated to the DebertaV2Model import parsing failure." }, { "left": "issue:43540", "right": "issue:44734", "accept": false, - "reason": "Different multimodal/runtime bugs: Qwen3OmniMoe video processing ValueError vs serve KV-cache tensor indexing crash." + "reason": "Different multimodal/video processing bug versus serve KV-cache tensor indexing crash." }, { "left": "issue:43644", "right": "issue:44913", "accept": false, - "reason": "One is about non-persistent buffer serialization junk; the other is GPTNeoX config reload losing `rotary_pct`." + "reason": "Non-persistent buffer junk in v5 is unrelated to GPTNeoX rotary_pct config reload behavior." }, { "left": "issue:45276", "right": "issue:45584", "accept": false, - "reason": "Gemma4 embedding resize mismatch is unrelated to Whisper empty-transcription generation after `align_special_tokens`." + "reason": "Gemma4 embedding resize mismatch and Whisper empty-transcription generation failure are separate bugs." }, { "left": "issue:43976", "right": "issue:44855", "accept": false, - "reason": "Different regressions: package Python-version compatibility vs a Python 3.13 TorchScript parsing/indentation failure." + "reason": "Python version packaging failure is not the same as a DebertaV2 Python 3.13 import parsing error." 
}, { - "left": "issue:43404", - "right": "issue:45216", + "left": "issue:44077", + "right": "issue:44079", "accept": false, - "reason": "Both mention weights/checkpointing, but they affect different models and concrete failure modes; not the same bug." + "reason": "Optional post_init enforcement and ModelOutput key assignment are different API/behavior issues." }, { - "left": "issue:44077", - "right": "issue:44079", + "left": "issue:43404", + "right": "issue:45216", "accept": false, - "reason": "Unrelated issues: `post_init` validation vs `ModelOutput` key assignment logic." + "reason": "Weight tying bug in Mistral3ForConditionalGeneration does not match Qwen3.5 checkpoint save_pretrained regression." }, { "left": "issue:43550", "right": "issue:44190", "accept": false, - "reason": "Different domains: `torch.compile` SDPA failure in Bamba vs local dataset loading in an image-classification example." + "reason": "torch.compile/SDPA model failure is unrelated to local dataset loading in the image-classification script." }, { "left": "issue:43116", "right": "issue:44190", "accept": false, - "reason": "Separate example-script bugs with different symptoms and likely different causes." + "reason": "Multi-label classification example returning empty results is a different script and failure mode from dataset loading." }, { - "left": "issue:44112", - "right": "issue:45600", + "left": "issue:43295", + "right": "issue:45276", "accept": false, - "reason": "Both are CI-related, but one is a stale device-override test and the other is a broken auto-mapping reference." + "reason": "Processor/tokenizer regression is unrelated to Gemma4 token embedding resize behavior." }, { - "left": "issue:43295", - "right": "issue:45276", + "left": "issue:44112", + "right": "issue:45600", "accept": false, - "reason": "Different processor/tokenizer regression vs Gemma4 embedding resize behavior; no shared concrete code-path." 
+ "reason": "Two different CI failures with different root causes and code paths." }, { "left": "issue:45245", "right": "issue:45584", "accept": false, - "reason": "Unrelated errors: category-cardinality limit vs Whisper empty transcription handling." + "reason": "Category-count runtime limit has nothing to do with Whisper empty transcription handling." }, { "left": "issue:41950", "right": "issue:44016", "accept": false, - "reason": "A video-classification pipeline bug and a notebook syntax error are not the same underlying issue." + "reason": "Video-classification pipeline processor lookup bug is unrelated to a notebook syntax error." }, { "left": "issue:39692", "right": "issue:44190", "accept": false, - "reason": "Documentation/example errors versus local dataset loading failure; only broad area overlaps." + "reason": "SigLIP2 docs example errors are not the same as local dataset loading failure in the no-trainer script." }, { "left": "issue:36246", "right": "issue:41628", "accept": false, - "reason": "Both are import errors, but they concern different symbols and different API surfaces; not the same bug." + "reason": "Both are import errors, but they concern different missing symbols and different modules; not the same bug." }, { "left": "issue:44734", "right": "issue:45632", "accept": false, - "reason": "Serve KV-cache continuation crash and trust-remote-code cache-path collision are unrelated." + "reason": "Serve KV-cache continuation crash is unrelated to trust_remote_code cache path collisions." }, { "left": "issue:43479", "right": "issue:45375", "accept": false, - "reason": "Different configuration bugs in different model families; one is default initialization, the other is a missing strict-serialization field." + "reason": "Phi4MultimodalConfig default-init issue differs from missing deepstack_visual_indexes in Qwen3_5MoeVisionConfig." 
}, { - "left": "issue:45406", - "right": "issue:45632", + "left": "issue:36246", + "right": "issue:38617", "accept": false, - "reason": "Gemma4Processor missing `_tokenizer` in serve is unrelated to trust_remote_code cache collisions." + "reason": "Different missing imports in different modules; no shared underlying regression." }, { - "left": "issue:36246", - "right": "issue:38617", + "left": "issue:45406", + "right": "issue:45632", "accept": false, - "reason": "Different import failures affecting different names/modules; no shared root cause is evident." + "reason": "Gemma4Processor _tokenizer crash is unrelated to cache-path collision for local trust_remote_code models." }, { "left": "issue:44561", "right": "issue:45632", "accept": false, - "reason": "One is a removed-API regression breaking trust_remote_code models; the other is a local cache-path collision bug." + "reason": "Removal of is_torch_fx_available breaking remote-code models is unrelated to cache path collisions." }, { - "left": "issue:44514", + "left": "issue:44186", "right": "issue:44734", "accept": false, - "reason": "Different Qwen2.5-VL chat-template batching issue vs serve KV-cache continuation indexing bug." + "reason": "Tokenizer padding/truncation crash and serve KV-cache indexing crash are distinct." }, { - "left": "issue:44016", - "right": "issue:44220", + "left": "issue:38175", + "right": "issue:43065", "accept": false, - "reason": "Notebook syntax error and audio feature-extraction issue are unrelated." + "reason": "SigLIP2 zero probabilities and Sam3PixelDecoder dummy Conv2d are unrelated model bugs." }, { - "left": "issue:38175", - "right": "issue:43065", + "left": "issue:44016", + "right": "issue:44220", "accept": false, - "reason": "Different model-specific behavior: SigLIP2 zero probabilities vs Sam3PixelDecoder dummy Conv2d." + "reason": "Notebook syntax error is unrelated to audio feature extraction failure." 
}, { "left": "issue:42175", "right": "issue:44279", "accept": false, - "reason": "Backend/dependency installation issue vs generic dependency issue; no concrete shared bug." + "reason": "Backend dependency packaging issue is too generic and does not match the other transformers dependency report as the same bug." }, { "left": "issue:44683", "right": "issue:45446", "accept": false, - "reason": "Both touch flex_attention/PyTorch versioning, but the failures are distinct and not mergeable as one fix." + "reason": "Both touch flex_attention, but one is a torch>=2.9 compiled-flex_attention failure and the other is an incorrect version check for AuxRequest import; different concrete defects." }, { "left": "issue:44466", "right": "issue:44913", "accept": false, - "reason": "Different state/serialization bugs: tied lm_head weight serialization vs GPTNeoX config reload behavior." - }, - { - "left": "issue:44246", - "right": "issue:45003", - "accept": false, - "reason": "Import performance complaint and unsafe `sys.modules[]` access are not the same underlying issue." + "reason": "Device-dependent lm_head serialization bug is unrelated to GPTNeoX rotary_pct config reload regression." 
} ] }, @@ -8944,19 +8969,19 @@ "issue:44075", "issue:44112", "issue:44242", + "issue:44246", "issue:44279", "issue:44336", "issue:44351", "issue:44368", - "issue:44462", "issue:44496", "issue:44521", "issue:44683", "issue:44933", "issue:44964", "issue:44987", + "issue:45003", "issue:45042", - "issue:45081", "issue:45200", "issue:45216", "issue:45278", @@ -8972,18 +8997,19 @@ "issue:45632" ], "soft_pairs": [ + "issue:44246|issue:45003", "issue:43299|issue:45216", - "issue:42907|issue:43441", "issue:43643|issue:44496", - "issue:44016|issue:44062", + "issue:42907|issue:43441", "issue:43295|issue:45335", + "issue:44016|issue:44062", "issue:44964|issue:45600", "issue:43976|issue:44987", "issue:43673|issue:44336", "issue:44112|issue:44964", - "issue:45561|issue:45632", "issue:43531|issue:45600", "issue:45335|issue:45584", + "issue:45561|issue:45632", "issue:43824|issue:44933", "issue:43976|issue:44279", "issue:44496|issue:45042", @@ -9000,211 +9026,216 @@ "issue:43901|issue:44496", "issue:43976|issue:45405", "issue:44683|issue:45542", - "issue:43479|issue:43525", - "issue:44462|issue:45081" + "issue:43479|issue:43525" ], - "prepared_review_unit_hash": "018d0195f305cc6745ec7b4e6ef6e3dbc8633d39a3e09947bdb40dcb132ce65c", + "prepared_review_unit_hash": "2bea3d432659b5501cbfa28aba1955383f9366b860bc588d078b17e6c936e2ab", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 30, - "serialized_chars": 36769, - "estimated_input_tokens": 9193, - "estimated_eval_tokens": 18642 + "serialized_chars": 36624, + "estimated_input_tokens": 9156, + "estimated_eval_tokens": 18568 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, 
"analyst_result": { - "summary": "This cluster is mostly heterogeneous; the soft links are driven by superficial similarity (same model family, same subsystem, or generic import/load regressions) rather than the same concrete bug. I would keep the cluster centered on the broad upgrade/import-regression report, but reject the proposed duplicate edges.", - "confidence": 0.8, - "canonical_issue_reason": "issue:45278 is the broadest and most representative report here: a user-facing breakage after upgrading to 5.5.0 that can plausibly absorb several related import-regression reports.", + "summary": "These are mostly unrelated Transformers bug reports; none of the suggested soft edges look like true duplicates, so I rejected all of them.", + "confidence": 0.97, + "canonical_issue_reason": "No single canonical issue stands out: the set spans many distinct regressions, model-specific loading/saving bugs, and unrelated CI/docs/import problems. If forced, issue 45278 is the broadest umbrella report, but it is not a duplicate target for the rest.", "canonical_pr_reason": null, - "best_issue_reason": "It is the best global issue representative because it is broad, central to the cluster\u2019s common theme (5.x upgrade/import failures), and more likely than the others to serve as an umbrella canonical issue.", + "best_issue_reason": "Issue 45278 is the broadest general report in the set and could serve as a coarse umbrella for migration/import breakage, but it is still too generic to canonicalize these heterogeneous issues.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:43299", - "right": "issue:45216", + "left": "issue:44246", + "right": "issue:45003", "accept": false, - "reason": "Both are Qwen3-family regressions, but one is a loading failure and the other is a save_pretrained checkpoint regression; different code paths and different symptoms." + "reason": "One is a slow-import complaint; the other is about unsafe sys.modules access. 
Related area, different bug." }, { - "left": "issue:42907", - "right": "issue:43441", + "left": "issue:43299", + "right": "issue:45216", "accept": false, - "reason": "Dequantized save failure vs FlashAttention load failure for Ministral-3 are distinct problems." + "reason": "Different Qwen versions and different failures: model loading vs save_pretrained checkpoint corruption." }, { "left": "issue:43643", "right": "issue:44496", "accept": false, - "reason": "Both involve config/model loading, but one is missing fields in AutoConfig with trust_remote_code and the other is an unrecognized model due to missing model_type; not the same bug." + "reason": "Both involve config/loading, but one is missing fields under trust_remote_code and the other is an unrecognized model missing model_type." }, { - "left": "issue:44016", - "right": "issue:44062", + "left": "issue:42907", + "right": "issue:43441", "accept": false, - "reason": "A notebook syntax error and an AddedToken argument conflict are unrelated." + "reason": "Same model family, but different code paths: saving dequantized weights vs FlashAttention load/runtime failure." }, { "left": "issue:43295", "right": "issue:45335", "accept": false, - "reason": "processor.tokenizer/image passing regression and t5gemma embedding resize behavior are separate issues." + "reason": "Completely different subsystems: processor/tokenizer regression versus t5gemma embedding resize behavior." + }, + { + "left": "issue:44016", + "right": "issue:44062", + "accept": false, + "reason": "Notebook syntax error versus AddedToken keyword-argument bug; no shared underlying defect." }, { "left": "issue:44964", "right": "issue:45600", "accept": false, - "reason": "Phi-4 multimodal loading and stale auto_mappings references to Sam3LiteText configs are different failures." + "reason": "One is a specific model-load failure; the other is stale auto_mappings references breaking CI. Different root causes." 
}, { "left": "issue:43976", "right": "issue:44987", "accept": false, - "reason": "Python version compatibility and loading physical-intelligence/fast are unrelated." + "reason": "Version/Python compatibility issue versus a specific model-loading regression." }, { "left": "issue:43673", "right": "issue:44336", "accept": false, - "reason": "GenerationMixin cache missing during chunked_prefill and ANSI codes in loading_report are different subsystems and bugs." + "reason": "Generation cache bug and ANSI output formatting bug are unrelated." }, { "left": "issue:44112", "right": "issue:44964", "accept": false, - "reason": "CI test flakiness in GraniteSpeech is unrelated to Phi-4 multimodal model loading." - }, - { - "left": "issue:45561", - "right": "issue:45632", - "accept": false, - "reason": "A pytest-xdist file race and a trust_remote_code cache-path collision are different concurrency/path issues." + "reason": "CI test flake in GraniteSpeech versus Phi-4 multimodal loading failure; unrelated." }, { "left": "issue:43531", "right": "issue:45600", "accept": false, - "reason": "Qwen3-MoE sliding_window behavior and removed Sam3LiteText references in auto_mappings are unrelated." + "reason": "Qwen3-MoE sliding_window behavior is unrelated to removed Sam3LiteText mappings." }, { "left": "issue:45335", "right": "issue:45584", "accept": false, - "reason": "Embedding resize propagation and Whisper empty-transcription handling are distinct bugs." + "reason": "Embedding resize bug versus Whisper empty-transcription generation failure; different code paths." + }, + { + "left": "issue:45561", + "right": "issue:45632", + "accept": false, + "reason": "Both mention file/path issues, but one is a pytest-xdist race and the other is a trust_remote_code cache collision." 
}, { "left": "issue:43824", "right": "issue:44933", "accept": false, - "reason": "Both are import-related, but one is a missing Qwen2.5-VL export and the other is a non-existent image_utils import; not the same symbol or code path." + "reason": "Both are import errors, but they concern different missing symbols and likely different breakages." }, { "left": "issue:43976", "right": "issue:44279", "accept": false, - "reason": "Python support/version mismatch and a generic dependency issue are not the same problem." + "reason": "Generic dependency complaint does not match the specific Python-version regression." }, { "left": "issue:44496", "right": "issue:45042", "accept": false, - "reason": "Missing model_type config loading and PIL image processors requiring torchvision are unrelated." + "reason": "Unrecognized model/config issue versus PIL backend incorrectly requiring torchvision." }, { "left": "issue:43854", "right": "issue:44964", "accept": false, - "reason": "Two different model-specific loading failures; same broad area, but not the same concrete bug." + "reason": "Different models and different failure surfaces; no evidence of the same bug." }, { "left": "issue:43577", "right": "issue:45216", "accept": false, - "reason": "BLIP2 dtype propagation and Qwen3.5 save_pretrained checkpoint correctness are unrelated." + "reason": "Blip2 dtype propagation bug versus Qwen3.5 save_pretrained regression; unrelated." }, { "left": "issue:43299", "right": "issue:45357", "accept": false, - "reason": "Qwen3VL loading and Qwen3.5 visual-encoder save_pretrained key layout are different regressions." + "reason": "Same family name, but one is a loading failure and the other is incorrect visual-encoder key serialization." }, { "left": "issue:43526", "right": "issue:43901", "accept": false, - "reason": "A BeitImageProcessorFast bug and a docs/behavior mismatch in TextClassificationPipeline are unrelated." 
+ "reason": "Image-processor label reduction bug versus pipeline docs mismatch." }, { "left": "issue:44075", "right": "issue:44368", "accept": false, - "reason": "Ignored SGD optimizer args and a Qwen3.5 tie_word_embeddings warning are not the same bug." + "reason": "Optimizer argument handling and a Qwen warning about tie_word_embeddings are unrelated." }, { "left": "issue:44521", "right": "issue:45325", "accept": false, - "reason": "All-zero assistant masks in multimodal chat templates and Qwen2.5-VL rope-index scaling are separate multimodal issues." + "reason": "Both are multimodal, but assistant-mask generation and rope-index scaling are distinct bugs." }, { "left": "issue:44242", "right": "issue:45200", "accept": false, - "reason": "Missing load-balancing loss and Gemma 4 mm_token_type_ids defaults are different model-training bugs." + "reason": "MoE load-balancing loss logic versus Gemma 4 multimodal token-id defaults; different implementations." }, { "left": "issue:43381", "right": "issue:45593", "accept": false, - "reason": "Gradient checkpointing in eval mode and D-FINE auxiliary-loss behavior are unrelated." + "reason": "Gradient checkpointing eval-mode restriction is unrelated to D-FINE auxiliary-loss behavior." }, { "left": "issue:44351", "right": "issue:45278", "accept": false, - "reason": "44351 is one specific missing import (HybridCache); 45278 is a broad complaint about many import errors after upgrade, not the same concrete failure." + "reason": "Missing HybridCache import may be one symptom of broader breakage, but the reports are not the same concrete bug." }, { "left": "issue:43901", "right": "issue:44496", "accept": false, - "reason": "A pipeline docs mismatch and an unrecognized model/config loading error are unrelated." + "reason": "Docs/behavior mismatch for a pipeline versus a model-loading configuration error." 
}, { - "left": "issue:43976", - "right": "issue:45405", + "left": "issue:43927", + "right": "issue:45632", "accept": false, - "reason": "Python compatibility and a PEFT minimum-version bump are different dependency/version issues." + "reason": "Save/load token-ID corruption and local trust_remote_code cache collisions are different bugs." + }, + { + "left": "issue:43901", + "right": "issue:44496", + "accept": false, + "reason": "Same as above: documentation mismatch is unrelated to model-type loading failure." }, { "left": "issue:44683", "right": "issue:45542", "accept": false, - "reason": "Compiled flex_attention on torch>=2.9 and a TensorFlow backend error from tensorboard installation are unrelated." + "reason": "Compiled flex_attention on new torch and missing TensorFlow backend are unrelated." }, { "left": "issue:43479", "right": "issue:43525", "accept": false, - "reason": "Different config attributes on different models; both are config bugs, but not the same one." - }, - { - "left": "issue:44462", - "right": "issue:45081", - "accept": false, - "reason": "AutoTokenizer ignoring tokenizer.json and _patch_mistral_regex crashing on backend_tokenizer access are distinct tokenizer-loading bugs." + "reason": "Different config bugs in different models: default subconfig initialization versus missing pad_token_id." 
} ] }, @@ -9269,6 +9300,7 @@ "issue:44898", "issue:44987", "issue:45020", + "issue:45081", "issue:45276", "issue:45290", "issue:45397", @@ -9277,14 +9309,15 @@ "issue:45593" ], "soft_pairs": [ + "issue:44462|issue:45081", "issue:41950|issue:45020", "issue:44297|issue:44462", "issue:44279|issue:45405", - "issue:41950|issue:43650", "issue:44496|issue:44877", + "issue:41950|issue:43650", "issue:43299|issue:43854", - "issue:44462|issue:44779", "issue:38175|issue:44617", + "issue:44462|issue:44779", "issue:43295|issue:44779", "issue:39692|issue:43901", "issue:43650|issue:45290", @@ -9306,202 +9339,208 @@ "issue:44987|issue:45542", "issue:43653|issue:45593" ], - "prepared_review_unit_hash": "3445cd7f5940f976dde3c5008537684e22c9204532e3ff04c3c50c2944243ef3", + "prepared_review_unit_hash": "6286cb9d9d49ca28b8cf2dd1d3ffeb62630fb73f8b0653d13a787f93e32e0c62", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { - "node_count": 47, - "item_count": 47, - "soft_pair_count": 28, - "serialized_chars": 35392, - "estimated_input_tokens": 8848, - "estimated_eval_tokens": 17952 + "node_count": 48, + "item_count": 48, + "soft_pair_count": 29, + "serialized_chars": 36365, + "estimated_input_tokens": 9092, + "estimated_eval_tokens": 18440 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This set is heterogeneous: most items are distinct regressions around tokenizers, processors, model loading, quantization, or docs, not one duplicate cluster.", + "summary": "This cluster is broadly mixed: several tokenizer/loading regressions, some multimodal processor/chat-template bugs, and a number of model-specific failures. 
None of the proposed soft pairs look like true duplicates of the same concrete bug.", "confidence": 0.34, - "canonical_issue_reason": "No single true duplicate stands out from the titles alone. If forced to pick one representative issue, 45020 is the broadest umbrella report about recent transformers versions breaking remote_code models.", + "canonical_issue_reason": "issue:44462 is the clearest representative of the tokenizer/loading regression theme and sits near several other tokenizer-related reports, even though the cluster as a whole is not a tight duplicate set.", "canonical_pr_reason": null, - "best_issue_reason": "45020 is the most general, user-facing regression report in the set and best represents the cluster\u2019s overall theme better than the narrower subsystem-specific issues.", + "best_issue_reason": "issue:44462 is the strongest standalone representative: concrete repro, broad enough to anchor the tokenizer-loading subgroup, and more central than the more model-specific reports.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:44462", + "right": "issue:45081", + "accept": false, + "reason": "Both involve tokenizers, but one is AutoTokenizer ignoring repository tokenizer.json and the other is a Mistral regex patch crash; different failure modes and code paths." + }, { "left": "issue:41950", "right": "issue:45020", "accept": false, - "reason": "Different bugs: video-classification processor selection vs. remote_code model-loading regressions." + "reason": "Video-classification processor lookup is unrelated to the broad remote_code loading regressions in v5; not the same bug." }, { "left": "issue:44297", "right": "issue:44462", "accept": false, - "reason": "Both concern tokenizers, but one is save_pretrained metadata and the other is repo loading behavior; not the same concrete bug." 
+ "reason": "save_pretrained writing the wrong tokenizer_class and AutoTokenizer ignoring tokenizer.json are related areas, but they are distinct serialization vs loading bugs." }, { "left": "issue:44279", "right": "issue:45405", "accept": false, - "reason": "Unrelated: a generic dependency issue vs. a version pin bump to an unreleased PEFT release." + "reason": "These are generic dependency/version issues affecting different packages and symptoms, not one underlying defect." }, { - "left": "issue:41950", - "right": "issue:43650", + "left": "issue:44496", + "right": "issue:44877", "accept": false, - "reason": "Completely unrelated: video pipeline processor lookup vs. placeholder issue text." + "reason": "Missing model_type in config.json and strict config rejection for granite_speech are different config-loading problems." }, { - "left": "issue:44496", - "right": "issue:44877", + "left": "issue:41950", + "right": "issue:43650", "accept": false, - "reason": "Both are config-loading failures, but for different models and different strictness checks; not one bug." + "reason": "Video pipeline processor mismatch and a placeholder 'ADD THE DATA' issue are clearly unrelated." }, { "left": "issue:43299", "right": "issue:43854", "accept": false, - "reason": "Different model-loading failures for different architectures; no evidence of a shared code-path bug." + "reason": "Both are model loading failures, but they concern different models and likely different root causes." }, { - "left": "issue:44462", - "right": "issue:44779", + "left": "issue:38175", + "right": "issue:44617", "accept": false, - "reason": "Tokenizer loading from repo vs. incorrect Deepseek tokenization are different regressions." + "reason": "Unexpected zero probabilities in SigLIP2 is unrelated to Sam3Video CUDA OOM." }, { - "left": "issue:38175", - "right": "issue:44617", + "left": "issue:44462", + "right": "issue:44779", "accept": false, - "reason": "Unrelated: SigLIP2 probability output vs. 
Sam3Video CUDA memory usage." + "reason": "Both are tokenizer regressions, but one is loading repository tokenizer.json and the other is incorrect Deepseek tokenization; not enough evidence of one shared bug." }, { "left": "issue:43295", "right": "issue:44779", "accept": false, - "reason": "Custom processor.tokenizer access regression vs. Deepseek tokenizer output regression; distinct failures." + "reason": "Custom code accessing processor.tokenizer and Deepseek tokenizer output regressions are separate issues." }, { "left": "issue:39692", "right": "issue:43901", "accept": false, - "reason": "Both are docs-related, but one is a model/processor mismatch and quantization example issue; the other is stale pipeline docs." + "reason": "Documentation/example errors in different sections and for different APIs; not the same change or bug." }, { "left": "issue:43650", "right": "issue:45290", "accept": false, - "reason": "Placeholder issue text vs. a specific apply_chat_template crash on tool-call assistant messages." + "reason": "A placeholder issue and an apply_chat_template crash with tool-call messages are unrelated." }, { "left": "issue:43723", "right": "issue:44162", "accept": false, - "reason": "Tokenizer loading regression vs. an ESM2 model breakage; unrelated." + "reason": "AutoTokenizer.from_pretrained loading regression and ESM2 model breakage are different failures." }, { "left": "issue:43450", "right": "issue:44279", "accept": false, - "reason": "Video processor shape bug vs. dependency/version issue; not the same underlying change." + "reason": "Video batch-shape bug and a dependency issue do not share a concrete code path." }, { "left": "issue:44016", "right": "issue:44291", "accept": false, - "reason": "Notebook syntax error vs. init_empty_weights loading TypeError; unrelated." + "reason": "Notebook syntax error and init_empty_weights argument mismatch are unrelated." 
}, { "left": "issue:43262", "right": "issue:44521", "accept": false, - "reason": "Both mention apply_chat_template, but one is audio sampling-rate defaulting and the other is multimodal assistant mask generation." + "reason": "Both mention apply_chat_template, but one is audio sampling-rate defaults and the other is assistant_mask generation for multimodal inputs." }, { "left": "issue:42994", "right": "issue:43317", "accept": false, - "reason": "Different quantization-related failures: saving quantized models vs. loading dequantized models with device_map=auto." + "reason": "Quantized model saving and dequantized model offload loading are different parts of the quantization workflow." }, { "left": "issue:44261", "right": "issue:44263", "accept": false, - "reason": "Precision/config issue in MLA layernorm vs. a torch.split return-value issue in a different model component." + "reason": "One is an MLA layernorm precision/config issue; the other is a torch.split return-value issue in a different model component." }, { "left": "issue:43441", "right": "issue:44683", "accept": false, - "reason": "Different attention-backend problems: FlashAttention failure for a model vs. compiled flex_attention failing on newer torch." + "reason": "FlashAttention failure on Ministral-3 and compiled flex_attention failure on torch>=2.9 are different attention stack problems." }, { "left": "issue:43065", "right": "issue:44898", "accept": false, - "reason": "Unrelated model/component bugs: dummy Conv2d in Sam3PixelDecoder vs. Perceiver image classification resize handling." + "reason": "A dummy Conv2d in Sam3PixelDecoder and a Perceiver resize/interpolation bug are not the same defect." }, { "left": "issue:44466", "right": "issue:45276", "accept": false, - "reason": "Serialization of lm_head.weight vs. resize_token_embeddings not propagating; related area but different concrete bugs." 
+ "reason": "Inconsistent lm_head.weight serialization and resize_token_embeddings not updating Gemma4 embeddings are related to weights, but not the same concrete bug." }, { "left": "issue:44623", "right": "issue:45397", "accept": false, - "reason": "processor.save_pretrained missing files vs. gemma-4 zero3 from_pretrained; different failure modes." + "reason": "processor.save_pretrained missing files and gemma-4 zero3 from_pretrained are separate save/load failures." }, { "left": "issue:43122", "right": "issue:43650", "accept": false, - "reason": "Tokenizer behavior regression vs. placeholder issue text; no overlap." + "reason": "Tokenizer regression across versions and a placeholder issue are unrelated." }, { "left": "issue:44623", "right": "issue:44749", "accept": false, - "reason": "Processor saving bug vs. performance regression during data filtering; unrelated." + "reason": "Missing processor save files and slow filtering after upgrade are not the same issue." }, { "left": "issue:43784", "right": "issue:45542", "accept": false, - "reason": "Import-time NameError from transformers integration vs. tensorboard/TensorFlow backend detection issue." + "reason": "Import-time NameError in sentence-transformers integration and tensorboard/TensorFlow backend mismatch are distinct environment/import problems." }, { "left": "issue:44671", "right": "issue:44779", "accept": false, - "reason": "Different regressions: CamemBERT masked LM predictions vs. Deepseek tokenizer output." + "reason": "CamemBERT masked LM predictions and Deepseek tokenizer outputs are different model-specific regressions." }, { "left": "issue:43673", "right": "issue:43761", "accept": false, - "reason": "Cache-missing generation regression vs. CLIP vision hidden_states regression; unrelated." + "reason": "Generation cache missing in chunked_prefill and CLIPVision hidden_states being None are unrelated model/runtime bugs." 
}, { "left": "issue:44987", "right": "issue:45542", "accept": false, - "reason": "Model loading under transformers>=5.1.0 vs. tensorboard without TensorFlow backend handling; different areas." + "reason": "A model loading regression for physical-intelligence/fast and a tensorboard backend issue are unrelated." }, { "left": "issue:43653", "right": "issue:45593", "accept": false, - "reason": "Tokenizer special-token registration bug vs. D-FINE auxiliary-loss behavior; unrelated." + "reason": "BigBirdTokenizer special-token registration and D-FINE auxiliary-loss behavior do not share a code path." } ] }, @@ -9583,8 +9622,8 @@ "issue:45561|issue:45563", "issue:43824|issue:43976", "issue:43295|issue:44617", - "issue:43525|issue:43881", "issue:43352|issue:45250", + "issue:43525|issue:43881", "issue:44589|issue:45362", "issue:44188|issue:44336", "issue:41628|issue:43976", @@ -9606,208 +9645,208 @@ "issue:43673|issue:44561", "issue:44162|issue:45200" ], - "prepared_review_unit_hash": "927a8378370efb0e1783a633764ddf08e8dce0b6a876aef02ed14a6fc2ddf28f", + "prepared_review_unit_hash": "f61dbba875ffbaf9e69a4aedb83506d2a76412f177601d95f986609f10d47c76", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 30, - "serialized_chars": 36334, - "estimated_input_tokens": 9084, - "estimated_eval_tokens": 18424 + "serialized_chars": 36332, + "estimated_input_tokens": 9083, + "estimated_eval_tokens": 18422 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is not a true duplicate set; it mixes many unrelated Transformers issues (imports, model loading, generation, tokenizer behavior, docs, 
tests, and backend/runtime problems). I would not merge any of the soft pairs.", - "confidence": 0.91, - "canonical_issue_reason": "issue:43824 is the strongest representative issue here: it has the highest discussion/inbound signal and a concrete, user-facing import regression. That said, the overall set is heterogeneous rather than one coherent bug family.", + "summary": "This cluster is a grab-bag of distinct Transformers issues (API import failures, model-specific load regressions, tokenizer bugs, docs/tests, and training-loss behavior). None of the soft pairs look like the same underlying defect, so all soft edges should be rejected.", + "confidence": 0.96, + "canonical_issue_reason": "issue:43824 is the strongest representative of the cluster by impact and discussion volume, but it is not a duplicate of the other reports.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43824 is the best single issue to anchor the cluster because it is specific, high-impact, and clearly described. It is still only a representative, not a duplicate target for the rest.", + "best_issue_reason": "issue:43824 has the clearest, widely visible user-facing regression and the most reference activity among mostly unrelated issues.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:42175", "right": "issue:43976", "accept": false, - "reason": "Packaging/backend dependency complaint vs a Python-version compatibility bug; different failure modes and fixes." + "reason": "Different problems: missing TensorFlow in the torch extra vs a Python-version import compatibility issue." }, { "left": "issue:43901", "right": "issue:43994", "accept": false, - "reason": "Docs mismatch in TextClassificationPipeline vs a model inference correctness issue; not the same underlying bug." + "reason": "Docs mismatch for `return_all_scores` is unrelated to SigLIP2 model output quality." 
}, { "left": "issue:44297", "right": "issue:45397", "accept": false, - "reason": "Tokenizer metadata saved wrong vs a zero3 from_pretrained loading failure; unrelated code paths." + "reason": "Tokenizer class metadata bug is unrelated to Gemma-4 ZeRO3 loading failure." }, { "left": "issue:44361", "right": "issue:45593", "accept": false, - "reason": "Tokenizer/task AttributeError vs missing auxiliary losses in D-FINE; different models and mechanisms." + "reason": "MLukeTokenizer task AttributeError and D-FINE auxiliary-loss behavior are different model/code paths." }, { "left": "issue:43475", "right": "issue:44016", "accept": false, - "reason": "SAM 3 video model attribute bug vs a notebook syntax error; clearly unrelated." + "reason": "SAM 3 Video attribute error is unrelated to a notebook syntax error." }, { "left": "issue:45561", "right": "issue:45563", "accept": false, - "reason": "pytest-xdist file race vs stale generation warning; testing infrastructure vs generation logic." + "reason": "pytest-xdist file race and stale generation warning are different testing/runtime bugs." }, { "left": "issue:43824", "right": "issue:43976", "accept": false, - "reason": "Import/export regression for a Qwen2.5 VL symbol vs a Python support/version issue; not the same bug." + "reason": "Import failure for a specific Qwen2.5-VL symbol is unrelated to Python version support." }, { "left": "issue:43295", "right": "issue:44617", "accept": false, - "reason": "Processor/tokenizer API regression vs CUDA OOM in SAM3Video; different symptoms and fixes." + "reason": "Processor/tokenizer regression is unrelated to SAM3 video OOM." }, { - "left": "issue:43525", - "right": "issue:43881", + "left": "issue:43352", + "right": "issue:45250", "accept": false, - "reason": "Missing config attribute in Llama4 vs a separate model-loading failure for GLM-4V; not one code path." + "reason": "A model-specific lack of Flash Attention 2 support is not the same as a generic Flash Attention 2 issue." 
}, { - "left": "issue:43352", - "right": "issue:45250", + "left": "issue:43525", + "right": "issue:43881", "accept": false, - "reason": "Model does not support Flash Attention 2.0 vs a broad Flash Attention topic issue; not the same concrete problem." + "reason": "Llama4Config missing pad_token_id and GLM-4V loading failure are distinct model-loading bugs." }, { "left": "issue:44589", "right": "issue:45362", "accept": false, - "reason": "Storage serialization/type error vs a Qwen3.5 chat crash; unrelated runtime failures." + "reason": "Float8 storage deserialization failure is unrelated to Qwen3.5 chat crashes." }, { "left": "issue:44188", "right": "issue:44336", "accept": false, - "reason": "torch.compile attention-kernel divergence vs ANSI code output formatting; no overlap in underlying bug." + "reason": "Torch.compile attention-kernel divergence and ANSI code output are unrelated." }, { "left": "issue:41628", "right": "issue:43976", "accept": false, - "reason": "AutoImageProcessor import failure vs Python-version incompatibility; different root causes." + "reason": "AutoImageProcessor import failure is unrelated to the Python 3.9/3.10 compatibility report." }, { "left": "issue:44295", "right": "issue:45397", "accept": false, - "reason": "Position_ids buffer access bug vs distributed loading issue; unrelated behavior." + "reason": "position_ids buffer access bug is unrelated to Gemma-4 ZeRO3 loading." }, { "left": "issue:43901", "right": "issue:44246", "accept": false, - "reason": "Pipeline documentation mismatch vs slow import performance; not the same issue." + "reason": "A docs issue and a slow-import performance issue do not share the same root cause." }, { "left": "issue:42491", "right": "issue:43299", "accept": false, - "reason": "Both involve Qwen MoE/version regressions, but they target different model families and failure points, so they are not safely mergeable." 
+ "reason": "Both mention Qwen3 MoE, but one is LoRA compatibility across branches and the other is a model-loading regression for Qwen3VL MoE; not the same bug." }, { "left": "issue:41950", "right": "issue:44220", "accept": false, - "reason": "Video pipeline selecting image processors vs audio fbank extraction; different subsystems." + "reason": "Video classification pipeline processor lookup and `_torch_extract_fbank_features()` are different subsystems." }, { "left": "issue:44336", "right": "issue:44373", "accept": false, - "reason": "Terminal ANSI formatting bug vs wrong docstring; docs and runtime output are separate concerns." + "reason": "ANSI color output in loading reports is unrelated to a docstring typo." }, { "left": "issue:44242", "right": "issue:45593", "accept": false, - "reason": "Missing load-balancing loss in MoE routing vs missing auxiliary losses in D-FINE; both are loss issues but not the same code path." + "reason": "Different loss paths and different models; both concern losses, but not the same concrete defect." }, { "left": "issue:44462", "right": "issue:45356", "accept": false, - "reason": "AutoTokenizer ignoring tokenizer.json vs tokenizer codec/regex regression; different tokenizer loading bugs." + "reason": "Tokenizer repository file selection and Kimi-K2.5 codec/regression handling are separate tokenizer issues." }, { "left": "issue:44661", "right": "issue:45542", "accept": false, - "reason": "Model-registration template failure vs missing TensorFlow backend dependency; unrelated setup problems." + "reason": "Model registration logic and missing TensorFlow backend installation are unrelated." }, { "left": "issue:44246", "right": "issue:44496", "accept": false, - "reason": "Slow import vs unrecognized model/config loading; different failure modes." + "reason": "Import-performance bug and unrecognized model config error are not the same issue." 
}, { "left": "issue:36246", "right": "issue:42757", "accept": false, - "reason": "Import error in transformers Qwen2.5 VL export vs missing huggingface_hub symbol; same symptom class, different package and symbol." + "reason": "Different missing symbols from different packages; both are import errors but not the same defect." }, { "left": "issue:36246", "right": "issue:44351", "accept": false, - "reason": "Different missing exports from transformers; superficial similarity only, not one underlying bug." + "reason": "Missing `Qwen2_5_VLImageProcessor` and missing `HybridCache` are unrelated exported-symbol regressions." }, { "left": "issue:43531", "right": "issue:45362", "accept": false, - "reason": "Qwen3-MoE sliding_window behavior vs a Qwen3.5 chat crash; not the same concrete issue." + "reason": "Qwen3-MoE sliding-window behavior and Qwen3.5 chat crashes are distinct failures." }, { "left": "issue:43526", "right": "issue:45003", "accept": false, - "reason": "BEiT label reduction logic bug vs unsafe sys.modules access in modeling_utils; unrelated code paths." + "reason": "A BeitImageProcessorFast label-reduction bug is unrelated to unsafe `sys.modules` access." }, { "left": "issue:44336", "right": "issue:44485", "accept": false, - "reason": "ANSI terminal output bug vs GLM-5 RoPE implementation discussion; no shared failure." + "reason": "ANSI codes in loading_report and GLM-5 RoPE implementation are unrelated." }, { "left": "issue:43673", "right": "issue:44561", "accept": false, - "reason": "Chunked-prefill cache regression vs removal of is_torch_fx_available affecting trust_remote_code; both are version regressions, but not the same fix." + "reason": "Chunked-prefill cache missing and removal of `is_torch_fx_available` break different code paths." }, { "left": "issue:44162", "right": "issue:45200", "accept": false, - "reason": "ESM2 regression impacting workflows vs Gemma 4 mm_token_type_ids defaulting; different models and failures." 
+ "reason": "ESM2 breakage and Gemma 4 mm-token defaults are unrelated model-specific issues." } ] }, @@ -9888,8 +9927,8 @@ "issue:43066|issue:44779", "issue:43441|issue:43540", "issue:45292|issue:45584", - "issue:44987|issue:45459", "issue:44779|issue:45292", + "issue:44987|issue:45459", "issue:41628|issue:41950", "issue:42886|issue:45229", "issue:43065|issue:43408", @@ -9901,210 +9940,210 @@ "issue:43295|issue:45292", "issue:44995|issue:45588", "issue:44060|issue:44077", - "issue:43653|issue:44162", "issue:43262|issue:44246", + "issue:43653|issue:44162", "issue:43475|issue:43540", "issue:44368|issue:45563", "issue:44466|issue:45335", "issue:44368|issue:44977", "issue:44933|issue:45459" ], - "prepared_review_unit_hash": "753cb1695756858fa3cc9a12133a8900da23d25a7a536affddd195d520694a54", + "prepared_review_unit_hash": "24561f8c8bcc96f3effbda4f24549cdd58a4546c0f67ef6b3a408b4788b6bc29", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 28, - "serialized_chars": 35635, - "estimated_input_tokens": 8909, - "estimated_eval_tokens": 18074 + "serialized_chars": 35643, + "estimated_input_tokens": 8911, + "estimated_eval_tokens": 18078 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly heterogeneous: the soft pairs share surface vocabulary (tokenizers, embeddings, generation, import/load paths) but usually describe different concrete bugs. 
I would not merge any of the suggested soft edges.", - "confidence": 0.89, - "canonical_issue_reason": "If a single issue must anchor the set, issue 45292 is the best fit because it is a concrete, reproducible API regression with broader applicability and external references; however, the overall cluster is not a true duplicate group.", + "summary": "This cluster is mostly heterogeneous: several unrelated Transformers regressions are grouped together, with only a few loose theme overlaps around config/loading, tokenizers, and embedding resizing. I do not see any soft edge that clearly represents the same underlying bug/change.", + "confidence": 0.9, + "canonical_issue_reason": "If a single issue must anchor the cluster, 44877 is the most concrete, self-contained bug report around config loading/strictness; it is the closest fit to the small config-related subset, though the cluster overall is not a true duplicate set.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 45292 is the strongest general-purpose bug report among the items and the most plausible long-term canonical reference for the resize_token_embeddings-related threads.", + "best_issue_reason": "44933 is the best representative for triage because it is open, user-facing, and broad enough to stand in for the import/image-utils subset without overcommitting to an exact duplicate relationship.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44295", "right": "issue:44749", "accept": false, - "reason": "Both mention position/processing behavior, but one is a buffer read error and the other is a tokenizer performance regression; different code paths and symptoms." + "reason": "Different failures: a position_ids buffer access error versus a performance regression after upgrading. No shared code-path bug." 
}, { "left": "issue:44242", "right": "issue:44857", "accept": false, - "reason": "Load-balancing loss gating in MOE is unrelated to a CUDA float16 crash in a detection loss; no shared underlying bug." + "reason": "One is about router/load-balancing loss handling; the other is a CUDA AMP crash in LwDetrImageLoss. Same broad ML area, not the same bug." }, { "left": "issue:44246", "right": "issue:44871", "accept": false, - "reason": "Import latency and eos_token_id mismatch are unrelated issues despite both being configuration/runtime complaints." + "reason": "Import slowdown is unrelated to the Gemma eos_token_id config mismatch." }, { "left": "issue:44877", "right": "issue:45375", "accept": false, - "reason": "Both involve config parsing/strictness, but they target different model families and different missing-field behavior." + "reason": "Both touch config strictness, but one is a loading failure for granite_speech while the other is a missing field being dropped in Qwen3_5MoeVisionConfig. Similar theme, not same bug." }, { "left": "issue:36246", "right": "issue:43596", "accept": false, - "reason": "One is a missing export/import error, the other is a DeepSpeed ZeRO3 index error in BertModel; no common fix path." + "reason": "Qwen2.5 image processor import error and a ZeRO3/BertModel index error are unrelated." }, { "left": "issue:43066", "right": "issue:44779", "accept": false, - "reason": "Both are tokenizer-related, but one is a wrong decoder-type warning and the other is incorrect Deepseek tokenization output; not the same defect." + "reason": "Both are tokenizer regressions in v5, but one is a wrong decoder-type warning and the other is incorrect Deepseek tokenization output. Different concrete issues." }, { "left": "issue:43441", "right": "issue:43540", "accept": false, - "reason": "FlashAttention failure in Ministral and video-input validation in Qwen3OmniMoe are different model paths with different failure modes." 
+ "reason": "FlashAttention failure for Ministral-3 is not the same as Qwen3OmniMoe video input processing error." }, { "left": "issue:45292", "right": "issue:45584", "accept": false, - "reason": "resize_token_embeddings behavior and Whisper empty-transcription generation after align_special_tokens are separate regressions." + "reason": "resize_token_embeddings not updating output embeddings is unrelated to Whisper empty-transcription failure after align_special_tokens." }, { - "left": "issue:44987", - "right": "issue:45459", + "left": "issue:44779", + "right": "issue:45292", "accept": false, - "reason": "Model-loading failure for a specific checkpoint is unrelated to protobuf exception handling in tokenizer decoding." + "reason": "Tokenizer output regression versus embedding resize propagation bug; no shared fix path." }, { - "left": "issue:44779", - "right": "issue:45292", + "left": "issue:44987", + "right": "issue:45459", "accept": false, - "reason": "Tokenizer incorrect results and embedding resize not propagating affect different APIs and likely different fixes." + "reason": "Loading a specific model and masking protobuf decode errors are different problems affecting different parts of the stack." }, { "left": "issue:41628", "right": "issue:41950", "accept": false, - "reason": "AutoImageProcessor import failure and video-classification pipeline misrouting are both image/video related but not the same bug." + "reason": "Importing AutoImageProcessor and a video-classification pipeline picking image processors are only loosely related by image handling, not the same bug." }, { "left": "issue:42886", "right": "issue:45229", "accept": false, - "reason": "Offline cache loading for tokenizers is unrelated to Gemma4 multi-GPU OOM." + "reason": "Offline cache loading and multi-GPU CUDA OOM are different failure classes." 
}, { "left": "issue:43065", "right": "issue:43408", "accept": false, - "reason": "A dummy Conv2d in SAM3 pixel decoder is not the same as a model-type mismatch warning for sam3_video vs sam3_tracker." + "reason": "SAM 3 pixel decoder dummy Conv2d and a model-type warning for sam3_video vs sam3_tracker are not the same underlying issue." }, { "left": "issue:44995", "right": "issue:45200", "accept": false, - "reason": "A stale cache crash on second forward pass is unrelated to mm_token_type_ids defaulting for text-only fine-tuning." + "reason": "A stale second-forward cache crash and missing mm_token_type_ids defaults are different bugs." }, { "left": "issue:43901", "right": "issue:45003", "accept": false, - "reason": "Documentation drift for return_all_scores and unsafe sys.modules access are unrelated." + "reason": "Docs mismatch for return_all_scores is unrelated to unsafe sys.modules access in modeling_utils." }, { "left": "issue:43232", "right": "issue:44336", "accept": false, - "reason": "Generation kwargs after sync_gpus and stray ANSI output in loading_report do not share a code path." + "reason": "Generation kwargs after sync_gpus and ANSI codes in loading_report are unrelated code paths." }, { "left": "issue:43257", "right": "issue:45137", "accept": false, - "reason": "Qwen3 MOE weight conversion under accelerate/deepspeed and a ZeRO3 deque underflow are separate distributed-training bugs." + "reason": "Qwen3 MOE weight conversion under accelerate+deepspeed and a ZeRO3 deque underflow are different DeepSpeed-related bugs." }, { "left": "issue:44496", "right": "issue:45003", "accept": false, - "reason": "Unrecognized model/config loading and unsafe sys.modules access are different model-loading failures." + "reason": "Unrecognized model/config loading and unsafe sys.modules access are distinct problems." 
}, { "left": "issue:43295", "right": "issue:45292", "accept": false, - "reason": "Processor.tokenizer regression is about multimodal processor API behavior, while 45292 is about token embedding resizing." + "reason": "Processor.tokenizer regression and resize_token_embeddings propagation bug are not the same issue." }, { "left": "issue:44995", "right": "issue:45588", "accept": false, - "reason": "A stale indexer cache in GLM MOE is unrelated to flash_attention.py crashing on s_aux=None." + "reason": "Stale indexer cache crash and flash_attention s_aux=None AttributeError are unrelated." }, { "left": "issue:44060", "right": "issue:44077", "accept": false, - "reason": "Tied-weights warning behavior and patchtsmixer post_init policy are not the same underlying issue." + "reason": "Both are tied to model config warnings, but one is about incorrect tied-weight warnings and the other about post_init being optional for patchtsmixer. Not the same change." }, { - "left": "issue:43653", - "right": "issue:44162", + "left": "issue:43262", + "right": "issue:44246", "accept": false, - "reason": "BigBirdTokenizer special-token registration and ESM2 breakage are different tokenizer/model regressions." + "reason": "Audio chat-template sampling-rate behavior and import slowness have no shared bug surface." }, { - "left": "issue:43262", - "right": "issue:44246", + "left": "issue:43653", + "right": "issue:44162", "accept": false, - "reason": "Audio chat-template sampling-rate defaulting and import latency are unrelated." + "reason": "BigBirdTokenizer special-token registration and ESM2 breakage are unrelated." }, { "left": "issue:43475", "right": "issue:43540", "accept": false, - "reason": "Missing fpn_position_embeddings in SAM3 video output is a different bug from Qwen3OmniMoe video input processing." + "reason": "Missing fpn_position_embeddings in SAM 3 video output and Qwen3OmniMoe video-input processing errors are different model-specific bugs." 
}, { "left": "issue:44368", "right": "issue:45563", "accept": false, - "reason": "A tied-embeddings warning in Qwen3.5 and a stale warning for num_return_sequences are different warning-regression reports." + "reason": "A tied-embeddings warning and a stale num_return_sequences warning in paged generate are unrelated warnings." }, { "left": "issue:44466", "right": "issue:45335", "accept": false, - "reason": "Both concern embedding/tied-weight behavior, but one is serialization inconsistency and the other is resize_token_embeddings not updating decoder.embed_tokens; close but not the same bug." + "reason": "Serialization of lm_head.weight depending on device and t5gemma resize_token_embeddings not affecting decoder.embed_tokens are different tied-weights/resize issues." }, { "left": "issue:44368", "right": "issue:44977", "accept": false, - "reason": "A tied-weights warning and a FlashAttention generation failure in Qwen3.5 are separate regressions." + "reason": "Tie-word-embeddings warning versus Qwen3.5 flash-attention generation failure are unrelated." }, { "left": "issue:44933", "right": "issue:45459", "accept": false, - "reason": "Missing import from image_utils and protobuf-related tokenizer exception masking are unrelated import/error-handling bugs." + "reason": "An import from image_utils missing entirely and protobuf-related tokenizer error masking are different failure modes." 
} ] }, @@ -10210,20 +10249,20 @@ "issue:45362|issue:45375", "issue:41628|issue:44351" ], - "prepared_review_unit_hash": "a5a66186dc29a33786f6f247e33ebae4fd65af52400a556c82617e046374a327", + "prepared_review_unit_hash": "0fa2ca43eaf693f982e04d6749bcdce67a4b71c500c55137401d56dc2550ef23", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 31, - "serialized_chars": 36663, + "serialized_chars": 36661, "estimated_input_tokens": 9166, "estimated_eval_tokens": 18588 }, @@ -10232,213 +10271,195 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This set is mostly a grab-bag of unrelated Transformers issues. Only one soft pair is a true duplicate: the Qwen3ForCausalLM VRAM leak reports. The rest are same-subsystem or same-version regressions at best, but not the same underlying bug.", - "confidence": 0.95, - "canonical_issue_reason": "issue:42673 is the better canonical because issue:43704 is an exact duplicate of the same Qwen3ForCausalLM VRAM leak in multiple dataloader threads, and 42673 has the fuller report and more discussion/references.", + "summary": "Mostly noisy cluster with two clear duplicate subclusters: the Qwen3ForCausalLM VRAM leak pair (42673/43704) and the Qwen3.5-Moe from_pretrained error pair (45310/45478). 
The remaining soft edges look like broader version-regression overlaps rather than the same concrete bug.", + "confidence": 0.96, + "canonical_issue_reason": "Issue 45310 is the cleanest concrete bug report to anchor the Qwen3.5-Moe from_pretrained failure, and 45478 is an exact duplicate with the same title and failure mode.", "canonical_pr_reason": null, - "best_issue_reason": "Given the cluster is heterogeneous, issue:42673 is the strongest representative only because it is backed by a true duplicate pair; none of the other issues are close enough to serve as a broader cluster canonical.", + "best_issue_reason": "45310 is the best representative issue because it is specific and reproducible, with exact duplicate 45478 confirming the same bug. The cluster also contains a separate exact duplicate subcluster at 42673/43704.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43673", "right": "issue:44734", "accept": false, - "reason": "Both mention generation/cache behavior, but one is chunked_prefill cache handling and the other is /v1/responses KV-cache continuation indexing; different code paths." + "reason": "Both involve generation/cache behavior, but they describe different failures and code paths." }, { "left": "issue:44869", "right": "issue:45356", "accept": false, - "reason": "Both involve tokenizer/codec edge cases, but they are different models and different failures; no shared underlying bug is evident." + "reason": "Whisper timestamp decoding and Kimi tokenizer regression handling are unrelated bugs." }, { "left": "issue:45137", "right": "issue:45600", "accept": false, - "reason": "DeepSpeed ZeRO3 deque failure and auto_mappings CI breakage are unrelated bugs." + "reason": "DeepSpeed ZeRO3 deque error and removed Sam3LiteText config references are different issues." 
}, { "left": "issue:43931", "right": "issue:44368", "accept": false, - "reason": "A weight-shape mismatch and a warning about tie_word_embeddings during LoRA fine-tuning are not the same issue." + "reason": "Model shape mismatch and a tie_word_embeddings warning are not the same bug." }, { "left": "issue:43232", "right": "issue:43673", "accept": false, - "reason": "Both are generation-related, but one is sync_gpus/_update_model_kwargs_for_generation and the other is chunked_prefill cache missing; not the same defect." + "reason": "One is a sync_gpus kwargs problem; the other is a missing cache issue in chunked_prefill." }, { "left": "issue:44297", "right": "issue:44623", "accept": false, - "reason": "Tokenizer save_pretrained metadata mismatch versus processor.save_pretrained missing files are distinct save-path bugs." + "reason": "Tokenizer metadata mismatch and missing processor files are distinct save_pretrained failures." }, { "left": "issue:43673", "right": "issue:45278", "accept": false, - "reason": "A specific generation cache bug is not the same as a broad set of import errors after upgrading." + "reason": "A specific generation/cache regression is not the same as broad import errors after upgrade." }, { "left": "issue:43646", "right": "issue:44162", "accept": false, - "reason": "Generic custom model initialization breakage and the ESM2-specific regression are not the same underlying problem." + "reason": "Custom model initialization breakage is broader than the ESM2-specific failure described here." }, { "left": "issue:44361", "right": "issue:44749", "accept": false, - "reason": "MLukeTokenizer task AttributeError and a performance slowdown after upgrade are unrelated." + "reason": "Tokenizer AttributeError and data filtering slowdown are unrelated." }, { "left": "issue:42673", "right": "issue:43704", "accept": true, - "reason": "Exact same Qwen3ForCausalLM VRAM leak in multiple dataloader threads; 43704 is a duplicate of 42673." 
+ "reason": "Exact same title and same Qwen3ForCausalLM multi-thread VRAM leak; this is a clear duplicate." }, { "left": "issue:28282", "right": "issue:30990", "accept": false, - "reason": "Missing PyTorch import error and a Sentence Transformers loading hang are different failure modes." + "reason": "Missing PyTorch ImportError and Sentence Transformers hanging while loading are different problems." }, { "left": "issue:44749", "right": "issue:45310", "accept": false, - "reason": "Performance regression after upgrading is unrelated to a Qwen3.5 Moe from_pretrained error." + "reason": "Performance regression in filtering data is unrelated to the Qwen3.5-Moe from_pretrained error." }, { "left": "issue:45230", "right": "issue:45397", "accept": false, - "reason": "Generic placeholder bug report and gemma-4 zero3 loading failure are not the same bug." + "reason": "Generic bug report vs a specific gemma-4 zero3 from_pretrained failure; not the same bug." }, { "left": "issue:44038", "right": "issue:44623", "accept": false, - "reason": "Qwen3-VL-Moe loading failure and processor.save_pretrained missing files touch different code paths." + "reason": "Qwen3-VL-Moe/transformers v5 and missing processor save files are different issues." }, { "left": "issue:42994", "right": "issue:43482", "accept": false, - "reason": "Quantized model saving failure is distinct from GGUF loading failure." + "reason": "Quantized model saving failure and Qwen2.5-GGUF loading failure are separate paths." }, { "left": "issue:43701", "right": "issue:44749", "accept": false, - "reason": "Checkpoint resume key mismatch and a throughput regression are unrelated." + "reason": "Checkpoint key mismatch is unrelated to the data-processing slowdown report." }, { "left": "issue:42371", "right": "issue:45092", "accept": false, - "reason": "TF32 API-setting guidance and an old remote-code checkpoint/meta-init incompatibility are different issues." 
+ "reason": "TF32 API behavior and remote-code checkpoint meta-init incompatibility are different concerns." }, { "left": "issue:43475", "right": "issue:45381", "accept": false, - "reason": "SAM 3 Video missing attribute and qwen2.5-vl video position-id bug are different model-specific regressions." + "reason": "SAM 3 Video attribute error and qwen2.5-vl wrong vision_position_ids are different model-specific bugs." }, { "left": "issue:45375", "right": "issue:45478", "accept": false, - "reason": "A missing Qwen3_5MoeVisionConfig field dropped by strict handling is not the same as a generic Qwen3.5 Moe from_pretrained error." + "reason": "Both touch Qwen3.5/Moe config handling, but one is a missing vision field and the other is a generic from_pretrained failure." }, { "left": "issue:43475", "right": "issue:43643", "accept": false, - "reason": "SAM 3 Video attribute access failure and trust_remote_code missing-fields behavior are unrelated." + "reason": "Different failure classes: SAM 3 Video attribute error vs missing fields from trust_remote_code AutoConfig." }, { "left": "issue:41628", "right": "issue:44933", "accept": false, - "reason": "Missing AutoImageProcessor import and a different nonexistent import from image_utils are not clearly the same bug." + "reason": "Both are import issues, but they concern different modules and different missing symbols." }, { "left": "issue:45081", "right": "issue:45103", "accept": false, - "reason": "Tokenizer regex patch crash and a future-annotations docstring helper crash are separate AttributeError sites." + "reason": "Tokenizer regex patch crash and auto_docstring __future__ annotations crash are unrelated." }, { "left": "issue:45310", "right": "issue:45375", "accept": false, - "reason": "Qwen3.5 Moe from_pretrained error and a missing vision-config field are different failures, even if both involve Qwen configs." + "reason": "Related only at a broad Qwen3.5/Moe family level, not the same concrete bug." 
}, { "left": "issue:42994", "right": "issue:43525", "accept": false, - "reason": "Quantized save failure and missing pad_token_id on Llama4Config are unrelated." + "reason": "Quantized model saving failure is not the same as Llama4Config missing pad_token_id." }, { "left": "issue:42222", "right": "issue:45200", "accept": false, - "reason": "VitPose model breakage and Gemma 4 text-only fine-tuning token-type defaults are different model issues." + "reason": "vitpose breakage and Gemma 4 mm_token_type_ids defaulting are unrelated regressions." }, { "left": "issue:43873", "right": "issue:44945", "accept": false, - "reason": "Quantization/offloading behavior and pipeline-parallelism output corruption are different problems." + "reason": "Quantization offloading and pipeline-parallel output corruption are different code paths." }, { "left": "issue:43475", "right": "issue:44661", "accept": false, - "reason": "SAM 3 Video attribute failure and add-new-model-like tokenizer-mapping failure are not the same code path." + "reason": "SAM 3 Video attribute error is unrelated to add-new-model-like failing for TOKENIZER_MAPPING_NAMES." }, { "left": "issue:42831", "right": "issue:44222", "accept": false, - "reason": "FineGrainedFP8 accuracy degradation and FP8 save_pretrained bugs are different FP8-related issues." - }, - { - "left": "issue:45310", - "right": "issue:45397", - "accept": false, - "reason": "Qwen3.5 Moe loading error and gemma-4 zero3 loading error are separate model regressions." - }, - { - "left": "issue:45362", - "right": "issue:45375", - "accept": false, - "reason": "Qwen3.5-35B chat crash and missing deepstack_visual_indexes in a vision config are unrelated." + "reason": "FP8 accuracy degradation and FP8 save_pretrained failure are different bugs." }, { "left": "issue:41628", "right": "issue:44351", "accept": false, - "reason": "AutoImageProcessor import failure and HybridCache import failure are different missing-symbol regressions." 
- }, - { - "left": "issue:45081", - "right": "issue:45356", - "accept": false, - "reason": "Mistral regex patch crash and Kimi-K2.5 tokenizer regression are distinct tokenizer bugs." + "reason": "Missing AutoImageProcessor and missing HybridCache are separate import regressions." } ] }, "evaluator_result": { - "accept": true, - "feedback": "The summary is grounded in the packet, and the duplicate call on issue:42673/issue:43704 is supported by the identical title and report pattern. The remaining soft-edge rejections stay conservative and do not overclaim shared root causes." + "accept": false, + "feedback": "The analysis is mostly conservative, but the summary is not fully grounded: it says the only clear duplicate pair is 42673/43704, while the packet also shows an exact duplicate pair for 45310/45478 (same title and same Qwen3.5-Moe from_pretrained failure). That omission makes the cluster summary imprecise. The issue anchor choice is otherwise reasonable, but the summary should acknowledge both clear duplicate pairs." 
}, "evaluator_used": true, - "retried": false, + "retried": true, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null @@ -10528,8 +10549,8 @@ "issue:44623|issue:45310", "issue:44188|issue:44373", "issue:44060|issue:45125", - "issue:43723|issue:44568", "issue:42898|issue:45356", + "issue:43723|issue:44568", "issue:43278|issue:45137", "issue:44734|issue:45103", "issue:42898|issue:44779", @@ -10539,226 +10560,226 @@ "issue:41628|issue:45459", "issue:44222|issue:44297" ], - "prepared_review_unit_hash": "dc37dc9fd1922b47509c9e4ff77430137fea80f305545fbfe8adb410c1514b9e", + "prepared_review_unit_hash": "349a280c5afd3e77286169a7ac8d68862baad2fb5b29f994dfca59b4bb36ed78", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 32, - "serialized_chars": 37314, - "estimated_input_tokens": 9329, - "estimated_eval_tokens": 18914 + "serialized_chars": 37317, + "estimated_input_tokens": 9330, + "estimated_eval_tokens": 18916 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is a set of mostly unrelated transformer bugs and regressions; none of the soft pairs look like the same underlying issue, so no duplicate merges are justified.", - "confidence": 0.93, - "canonical_issue_reason": "Issue 42898 is the best single representative because it is a clear, user-facing v5 tokenizer regression with a broad and well-scoped description; however, it does not subsume the other reports.", + "summary": "This cluster is mostly heterogeneous; the soft edges are largely false positives from broad v5/model-loading/tokenizer similarity rather than true duplicates. 
I would not merge any of the candidate pairs.", + "confidence": 0.82, + "canonical_issue_reason": "issue:42898 is the closest thematic anchor: it is a broad tokenizer-regression report and has multiple nearby tokenizer-related soft matches, even though the cluster as a whole is not a real duplicate set.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 42898 is the strongest anchor for this cluster if one must be chosen: it is concrete, reproducible, and broadly representative of the v5 behavior-change reports.", + "best_issue_reason": "issue:42898 is the best representative issue for this cluster because it is the most central broad regression report among the candidates, but it should still be treated as a loose topical anchor rather than a duplicate canonical.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43232", "right": "issue:44188", "accept": false, - "reason": "Both mention generation internals, but one is about sync_gpus/update kwargs and the other about torch.compile branching in attention kernels; different code paths and symptoms." + "reason": "Different failure modes: generation kwargs after sync_gpus vs torch.compile attention-kernel branching." }, { "left": "issue:44222", "right": "issue:45310", "accept": false, - "reason": "Both involve MoE/FP8 wording, but one is about save_pretrained for FP8 and the other is a from_pretrained loading error for Qwen3.5 MoE; not the same bug." + "reason": "Both mention MoE/save-pretrained/from-pretrained, but one is FP8 save serialization and the other is a Qwen3.5 load error." }, { "left": "issue:42994", "right": "issue:44164", "accept": false, - "reason": "One is about quantized model saving failure, the other about save/from_pretrained handling extra_state; related persistence area, but distinct failure modes and fixes." + "reason": "Both concern saving/loading, but one is quantized-model saving and the other is extra_state handling in save/from_pretrained." 
}, { "left": "issue:44038", "right": "issue:45310", "accept": false, - "reason": "Qwen3-VL-Moe transformers v5 compatibility is a different model-loading issue from Qwen3.5 MoE from_pretrained errors." + "reason": "Same model family, but these appear to be different Qwen MoE loading bugs in different code paths." }, { "left": "issue:43232", "right": "issue:44792", "accept": false, - "reason": "Generation cache/update logic is unrelated to a failed janus image-generation test." + "reason": "Unrelated subsystems: generation bookkeeping vs a janus image-generation test failure." }, { "left": "issue:44496", "right": "issue:45561", "accept": false, - "reason": "Model config auto-discovery and pytest-xdist file races are unrelated." + "reason": "Unrelated: model config recognition vs a pytest-xdist file race." }, { "left": "issue:43526", "right": "issue:45561", "accept": false, - "reason": "A BeitImageProcessorFast label-reduction bug is unrelated to a testing-utils race on captured_info.txt." + "reason": "Unrelated: Beit label reduction bug vs captured_info.txt race." }, { "left": "issue:43377", "right": "issue:44242", "accept": false, - "reason": "Missing padding-mask support in MIMI encoder is unrelated to MoE load-balancing loss gating." + "reason": "Different model behavior bugs: missing padding-mask support vs load-balancing loss gating." }, { "left": "issue:43504", "right": "issue:45561", "accept": false, - "reason": "A legacy-field model preset loading bug is unrelated to a test-worker file race." + "reason": "Unrelated: legacy preset loading vs test utility file race." }, { "left": "issue:36010", "right": "issue:41628", "accept": false, - "reason": "Both are import errors, but they concern different symbols and modules: GenerationMixin vs AutoImageProcessor." + "reason": "Both are import errors, but for different symbols and likely different API breakages." 
}, { "left": "issue:43673", "right": "issue:44188", "accept": false, - "reason": "Chunked-prefill cache handling and compile-time attention-kernel branching are different generation bugs." + "reason": "Different code paths: GenerationMixin cache/chunked prefill vs attention kernel divergence under torch.compile." }, { "left": "issue:30990", "right": "issue:35141", "accept": false, - "reason": "Sentence Transformers loading hangs and token-embedding resizing reinitialization are unrelated." + "reason": "Unrelated: a loading hang vs embedding reinitialization after resize." }, { "left": "issue:42898", "right": "issue:44568", "accept": false, - "reason": "Both are tokenizer v5 regressions, but one is about clean_up_tokenization_spaces and the other about add_special_tokens/BOS-EOS behavior; different semantics and code paths." + "reason": "Both tokenizer regressions in v5, but one is clean-up-space behavior and the other is special-token insertion." }, { "left": "issue:43479", "right": "issue:44079", "accept": false, - "reason": "Default multimodal config initialization and ModelOutput key assignment are unrelated." + "reason": "Unrelated: multimodal config defaults vs ModelOutput key assignment." }, { "left": "issue:43479", "right": "issue:44869", "accept": false, - "reason": "Default multimodal config initialization is unrelated to Whisper word-timestamp decoding on trailing replacement characters." + "reason": "Different areas: multimodal config initialization vs Whisper timestamp decoding crash." }, { "left": "issue:43976", "right": "issue:44933", "accept": false, - "reason": "Python version compatibility and a missing image_utils import are different issues with no shared underlying bug." + "reason": "Unrelated: Python-version incompatibility vs missing image_utils import." }, { "left": "issue:42175", "right": "issue:44933", "accept": false, - "reason": "Backend dependency installation and a nonexistent image_utils import are unrelated." 
+ "reason": "Unrelated: backend extras packaging vs missing import symbol." }, { "left": "issue:44964", "right": "issue:45137", "accept": false, - "reason": "Phi-4 multimodal loading and DeepSpeed ZeRO3 deque underflow are different failures." + "reason": "Unrelated: Phi-4-multimodal loading vs DeepSpeed ZeRO3 deque error." }, { "left": "issue:43784", "right": "issue:43824", "accept": false, - "reason": "Both are import failures, but one is a NameError during sentence-transformers import and the other is a missing Qwen2.5-VL class export." + "reason": "Both are import failures, but they involve different missing names and different packages/models." }, { "left": "issue:44623", "right": "issue:45310", "accept": false, - "reason": "Processor save_pretrained missing files is unrelated to Qwen3.5 MoE from_pretrained loading errors." + "reason": "Both involve pretrained I/O, but one is processor save-pretrained missing files and the other is a Qwen MoE load failure." }, { "left": "issue:44188", "right": "issue:44373", "accept": false, - "reason": "A generation-kernel divergence bug is unrelated to a wrong docstring for position_ids." + "reason": "Unrelated: kernel divergence bug vs docstring typo." }, { "left": "issue:44060", "right": "issue:45125", "accept": false, - "reason": "Both mention Qwen3 MoE/tensor parallelism, but one is a tied-weights warning bug and the other is a missing _tp_plan attribute; different concrete problems." + "reason": "Same model family, but tied-weights warning and missing _tp_plan are different issues." }, { - "left": "issue:43723", - "right": "issue:44568", + "left": "issue:42898", + "right": "issue:45356", "accept": false, - "reason": "Tokenizer loading in v5 and special-token addition behavior are distinct tokenizer issues." + "reason": "Both are tokenizer regressions, but they affect different models and different tokenizer internals." 
}, { - "left": "issue:42898", - "right": "issue:45356", + "left": "issue:43723", + "right": "issue:44568", "accept": false, - "reason": "Both are tokenizer regressions, but one is clean_up_tokenization_spaces and the other is Kimi-K2.5 codec handling with a misleading warning; not the same defect." + "reason": "Both are tokenizer loading/behavior issues, but not the same concrete bug." }, { "left": "issue:43278", "right": "issue:45137", "accept": false, - "reason": "Embedding dtype drift in evaluate and a ZeRO3 deque pop error are unrelated." + "reason": "Different problem domains: dtype drift in evaluate vs DeepSpeed queue underflow." }, { "left": "issue:44734", "right": "issue:45103", "accept": false, - "reason": "KV-cache continuation indexing in serving and an auto_docstring AttributeError from future annotations are unrelated." + "reason": "Unrelated: server KV-cache continuation crash vs auto_docstring annotation parsing." }, { "left": "issue:42898", "right": "issue:44779", "accept": false, - "reason": "Both are tokenizer regressions in v5, but they affect different models and different mechanisms: cleanup spaces versus DeepSeek codec output." + "reason": "Both are tokenizer regressions in v5, but they affect different tokenizers/models and different failure mechanisms." }, { "left": "issue:43874", "right": "issue:45092", "accept": false, - "reason": "A missing image-patch counting method and an old InternVL2 meta-init incompatibility are different multimodal-loading issues." + "reason": "Both are multimodal-related, but one is a missing image-patches method and the other is a meta-init compatibility issue." }, { "left": "issue:30333", "right": "issue:33290", "accept": false, - "reason": "MLflow job status reporting and Deepspeed/Adafactor OOM are unrelated training issues." + "reason": "Unrelated: MLflow reporting on failed training jobs vs AdaFactor/deepspeed OOM." 
}, { "left": "issue:42898", "right": "issue:44448", "accept": false, - "reason": "A tokenizer cleanup regression and a Pegasus output mismatch across v4/v5 are different behavior changes." + "reason": "Both mention v4-v5 behavior differences, but one is tokenizer cleanup and the other is Pegasus generation output drift." }, { "left": "issue:41628", "right": "issue:45459", "accept": false, - "reason": "AutoImageProcessor import failure and protobuf-related tokenizer error masking are different import/runtime issues." + "reason": "Unrelated: missing AutoImageProcessor import vs protobuf decode-error masking." }, { "left": "issue:44222", "right": "issue:44297", "accept": false, - "reason": "FP8 MoE save_pretrained behavior and tokenizer_class metadata mismatch are unrelated save-path bugs." + "reason": "Both involve save_pretrained, but one is MoE FP8 model saving and the other is tokenizer metadata serialization." } ] }, @@ -10851,11 +10872,11 @@ "issue:28282|issue:30333", "issue:44295|issue:44623", "issue:44514|issue:45103", - "issue:43723|issue:44871", "issue:43479|issue:44913", + "issue:43723|issue:44871", "issue:44261|issue:45092", - "issue:43723|issue:44991", "issue:44373|issue:44485", + "issue:43723|issue:44991", "issue:43723|issue:44442", "issue:45092|issue:45357", "issue:30990|issue:38175", @@ -10866,238 +10887,238 @@ "issue:42757|issue:45278", "issue:43576|issue:43784" ], - "prepared_review_unit_hash": "de99eca2efe21f3403b8e850e71dda177ea26d57ce219d326a0714d3373d2da0", + "prepared_review_unit_hash": "8b516a83ef3993a2ebe5256072682c35a2936efaa38febd18f71151eabbb0f2f", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 34, - "serialized_chars": 37052, - 
"estimated_input_tokens": 9263, - "estimated_eval_tokens": 18782 + "serialized_chars": 37055, + "estimated_input_tokens": 9264, + "estimated_eval_tokens": 18784 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is heterogeneous: most items are unrelated regressions or import/load failures in different subsystems, so the soft duplicate links are not strong enough to merge. No PRs are present.", - "confidence": 0.95, - "canonical_issue_reason": "No single canonical issue cleanly subsumes the cluster. The closest thematic anchor is issue:43723 (tokenizer loading in v5), but the surrounding items span unrelated bugs and are not duplicate-equivalent.", + "summary": "This cluster is heterogeneous: most items are separate Transformers regressions/import issues rather than true duplicates. The closest thing to an umbrella issue is 45278, but it only loosely represents the group.", + "confidence": 0.42, + "canonical_issue_reason": "issue:45278 is the broadest and most central user-facing regression report in the set (many import errors after upgrading to v5.5.0), so it is the best loose anchor if a single issue must represent the cluster.", "canonical_pr_reason": null, - "best_issue_reason": "If one issue must represent the set, issue:43723 is the closest representative because it captures the broad tokenizer-loading regression theme seen in a few nearby reports, but it still should not be treated as a duplicate hub.", + "best_issue_reason": "issue:45278 is the most general, current, and cluster-like issue; it can serve as a high-level representative, but it should not be treated as a strict duplicate of the others.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:42898", "right": "issue:43295", "accept": false, - "reason": "Both are v5 regression-style issues, but one is about `clean_up_tokenization_spaces` behavior and the other about 
`processor.tokenizer` access; different code paths and fixes." + "reason": "Both are v5 regressions around tokenization/processor behavior, but they describe different failures and different code paths." }, { "left": "issue:43479", "right": "issue:43644", "accept": false, - "reason": "Different problems: config default initialization for Phi4Multimodal vs non-persistent buffer garbage in Transformers 5.0." + "reason": "One is config initialization for Phi4MultimodalConfig; the other is non-persistent buffers in v5. Different bugs." }, { "left": "issue:44442", "right": "issue:45459", "accept": false, - "reason": "Both involve tokenizer-related failures, but one is AutoTokenizer loading a specific tokenizer class and the other is protobuf error masking; not the same bug." + "reason": "Different tokenizer-loading failures with different causes; sharing 'load tokenizer' is too broad." }, { "left": "issue:44222", "right": "issue:44623", "accept": false, - "reason": "Both mention saving, but one is FP8/MoE `save_pretrained` and the other is missing processor save files; different save paths." + "reason": "FP8 MoE save_pretrained and processor.save_pretrained missing files are unrelated save-path bugs." }, { "left": "issue:42831", "right": "issue:44623", "accept": false, - "reason": "Accuracy loss in FineGrainedFP8 is unrelated to processor save file omissions." + "reason": "Accuracy issues in FineGrainedFP8 do not match processor save_pretrained file emission failures." }, { "left": "issue:44206", "right": "issue:45103", "accept": false, - "reason": "One is a feature extractor regression with unsupported `center`; the other is an auto-docstring annotation crash." + "reason": "A feature-extractor argument regression and an auto_docstring annotation crash are unrelated." 
}, { "left": "issue:41628", "right": "issue:42757", "accept": false, - "reason": "Both are import errors, but they concern different missing symbols from different packages and likely different dependency/version issues." + "reason": "Both are import errors, but for different missing symbols and different underlying dependency problems." }, { "left": "issue:44297", "right": "issue:45310", "accept": false, - "reason": "Tokenizer config mismatch on save is not the same as Qwen3.5 MoE `from_pretrained` loading failure." + "reason": "Tokenizer config mismatch on save_pretrained and Qwen3.5 MoE from_pretrained failure are different issues." }, { "left": "issue:22355", "right": "issue:28282", "accept": false, - "reason": "Different import failures: missing `transformers.onnx` vs missing PyTorch for `AutoModel`." + "reason": "Both are import errors, but one is missing transformers.onnx and the other is missing PyTorch; not the same bug." }, { "left": "issue:44222", "right": "issue:44484", "accept": false, - "reason": "One is a concrete FP8 MoE save failure; the other is a question about `max_shard_size` defaults." + "reason": "Both mention save_pretrained, but one is FP8 MoE checkpoint saving and the other is max_shard_size behavior." }, { "left": "issue:44483", "right": "issue:45406", "accept": false, - "reason": "Chat-completions request rejection is unrelated to Gemma4Processor missing `_tokenizer` in serve." + "reason": "Chat/completions request handling and Gemma4Processor _tokenizer attribute errors are different failures." }, { "left": "issue:43723", "right": "issue:44977", "accept": false, - "reason": "Both are tokenizer/model-loading regressions in v5, but one is generic `AutoTokenizer` loading and the other is Qwen3.5 flash-attention generation; not the same code-path bug." + "reason": "AutoTokenizer loading regression and Qwen3.5 flash-attention generation issue are unrelated." 
}, { "left": "issue:44263", "right": "issue:44360", "accept": false, - "reason": "Both mention GLM DSA internals, but one is a `torch.split` return-value issue and the other is a missing ReLU in the indexer." + "reason": "torch.split return handling and missing ReLU in the DSA indexer are distinct implementation bugs." }, { "left": "issue:30333", "right": "issue:36331", "accept": false, - "reason": "MLflow reporting on failed jobs is unrelated to the trainer `compute_loss(num_items_in_batch)` signature break." + "reason": "MLFlow reporting on failed training jobs and CustomTrainer compute_loss signature mismatch are unrelated." }, { "left": "issue:44263", "right": "issue:45092", "accept": false, - "reason": "GLM indexer behavior and InternVL2 meta-initialization incompatibility are different bugs." + "reason": "DSA indexer split handling and remote-code/meta-init incompatibility are not the same code path." }, { "left": "issue:30333", "right": "issue:34689", "accept": false, - "reason": "Training-job reporting with MLflow is unrelated to Llama 3.2 Vision model-loading regression." + "reason": "MLFlow job state reporting and Llama 3.2 Vision model loading breakage are unrelated." }, { "left": "issue:30333", "right": "issue:35141", "accept": false, - "reason": "Failed-job reporting has nothing to do with output embeddings being reinitialized after resizing token embeddings." + "reason": "Training-job reporting and embedding reinitialization on resize are different bugs." }, { "left": "issue:28282", "right": "issue:30333", "accept": false, - "reason": "PyTorch-missing import error and MLflow failed-job reporting are unrelated." + "reason": "ImportError for AutoModel/PyTorch and MLFlow reporting of failed jobs are not the same issue." }, { "left": "issue:44295", "right": "issue:44623", "accept": false, - "reason": "Registering `position_ids` as a buffer and processor save-file omissions are unrelated." 
+ "reason": "Registering position_ids as a buffer and missing processor.save_pretrained files are unrelated." }, { "left": "issue:44514", "right": "issue:45103", "accept": false, - "reason": "Qwen2_5_VL batched chat-template crash and auto-docstring annotation crash are unrelated." + "reason": "Qwen2_5_VLProcessor chat template batching crash and auto_docstring annotation crash are unrelated." }, { - "left": "issue:43723", - "right": "issue:44871", + "left": "issue:43479", + "right": "issue:44913", "accept": false, - "reason": "Tokenizer loading in v5 and Gemma-3 EOS token config inconsistency are different issues." + "reason": "Multimodal config default initialization and GPTNeoX rotary_pct reload behavior affect different components." }, { - "left": "issue:43479", - "right": "issue:44913", + "left": "issue:43723", + "right": "issue:44871", "accept": false, - "reason": "Phi4Multimodal config initialization and GPTNeoX rotary_pct reloading are unrelated config bugs." + "reason": "AutoTokenizer loading in v5 and Gemma-3 eos_token_id inconsistency are different problems." }, { "left": "issue:44261", "right": "issue:45092", "accept": false, - "reason": "MLA q_a_layernorm precision error is not the same as old InternVL2 checkpoint meta-init incompatibility." + "reason": "MLA q_a_layernorm precision/config issue and InternVL2 meta-initialization incompatibility are not duplicates." }, { - "left": "issue:43723", - "right": "issue:44991", + "left": "issue:44373", + "right": "issue:44485", "accept": false, - "reason": "Both concern tokenizer loading in v5, but the evidence is too weak to say they are the same concrete bug or fixable by one PR." + "reason": "A wrong docstring and a RoPE implementation discussion are clearly different." }, { - "left": "issue:44373", - "right": "issue:44485", + "left": "issue:43723", + "right": "issue:44991", "accept": false, - "reason": "A wrong docstring for `position_ids` is not the same as the GLM-5 RoPE implementation discussion." 
+ "reason": "Generic tokenizer load failure and EMBEDDIA/est-roberta-specific tokenizer loading failure are not the same bug." }, { "left": "issue:43723", "right": "issue:44442", "accept": false, - "reason": "Generic tokenizer loading regression and FastSpeech2ConformerTokenizer import/load failure are different failures." + "reason": "Two different tokenizer-loading failures with different affected tokenizers and root causes." }, { "left": "issue:45092", "right": "issue:45357", "accept": false, - "reason": "Both involve Qwen3.5-family models, but one is an old remote-code meta-init incompatibility and the other is incorrect visual encoder keys on save." + "reason": "Remote-code/meta-init incompatibility and incorrect visual encoder keys on save_pretrained are separate regressions." }, { "left": "issue:30990", "right": "issue:38175", "accept": false, - "reason": "Sentence Transformers getting stuck loading and SigLIP2 zero-probabilities are unrelated model behaviors." + "reason": "Sentence Transformers stuck loading and SigLIP2 zero-probability outputs are unrelated." }, { "left": "issue:42886", "right": "issue:42994", "accept": false, - "reason": "Offline tokenizer cache loading and quantized model saving are different code paths." + "reason": "Offline tokenizer cache loading and quantized model saving failures are different paths." }, { "left": "issue:42831", "right": "issue:45310", "accept": false, - "reason": "FP8 accuracy issues are unrelated to Qwen3.5 MoE `from_pretrained` loading errors." + "reason": "FineGrainedFP8 accuracy concerns and Qwen3.5 MoE from_pretrained errors do not match." }, { "left": "issue:41628", "right": "issue:43824", "accept": false, - "reason": "Both are import errors from transformers, but they involve different missing exported classes and likely different fixes." + "reason": "AutoImageProcessor import failure and Qwen2_5_VLForConditionalGeneration import failure are different missing symbols." 
}, { "left": "issue:42831", "right": "issue:44038", "accept": false, - "reason": "FineGrainedFP8 accuracy and Qwen3.5-VL-Moe bug reports are unrelated." + "reason": "FineGrainedFP8 accuracy issue and Qwen3-VL-Moe bug are unrelated." }, { "left": "issue:42757", "right": "issue:45278", "accept": false, - "reason": "A specific missing `huggingface_hub.is_offline_mode` import is not the same as a broad report of many import errors after upgrading to v5.5.0." + "reason": "An is_offline_mode import error may be one symptom of broader v5 import breakage, but it is still a distinct issue from the broad 'many import errors' report." }, { "left": "issue:43576", "right": "issue:43784", "accept": false, - "reason": "Broken `transformers env` command and `sentence-transformers` import NameError are different regressions." + "reason": "Broken transformers env command and sentence-transformers import NameError are unrelated." } ] }, @@ -11182,8 +11203,8 @@ "issue:44295|issue:44297", "issue:42907|issue:44945", "issue:43976|issue:44945", - "issue:39401|issue:44488", "issue:43232|issue:44743", + "issue:39401|issue:44488", "issue:43232|issue:45245", "issue:42907|issue:43937", "issue:43408|issue:43475", @@ -11191,8 +11212,8 @@ "issue:43784|issue:45600", "issue:44625|issue:44829", "issue:44360|issue:44485", - "issue:39401|issue:44521", "issue:43116|issue:45200", + "issue:39401|issue:44521", "issue:43122|issue:44779", "issue:44038|issue:44297", "issue:29127|issue:30990", @@ -11205,238 +11226,238 @@ "issue:42907|issue:43452", "issue:43992|issue:44704" ], - "prepared_review_unit_hash": "81d4856a5ac29d80c275ab3660bd5364e9a2220e904c968b21ab9310e1afec43", + "prepared_review_unit_hash": "2caff0631d0b232fb89de3ee318cb233441203d7acb3037a5a1e94f86b33e0ed", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 
180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 34, - "serialized_chars": 36855, - "estimated_input_tokens": 9214, - "estimated_eval_tokens": 18684 + "serialized_chars": 36860, + "estimated_input_tokens": 9215, + "estimated_eval_tokens": 18686 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly false-positive similarity edges across unrelated Transformers bugs; the closest cluster is around tokenizer/regression behavior in v5, but none of the candidate pairs look like true duplicates of the same concrete bug.", - "confidence": 0.23, - "canonical_issue_reason": "issue:43122 is the broadest tokenizer-regression report here and sits near several of the tokenizer/chat-template soft links, so it is the best representative issue for triage.", + "summary": "All proposed soft pairs look like false positives: they share at most a broad subsystem label (tokenizers, loading, generation, multimodal, etc.) but describe different concrete bugs, regressions, or even docs issues.", + "confidence": 0.97, + "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "Same as canonical: it is the most general issue in the nearest tokenizer-related subset, but the overall cluster is too mixed to treat as one duplicate family.", + "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43122", "right": "issue:44521", "accept": false, - "reason": "Both involve tokenizer/template output, but one is a version-wide tokenization change and the other is zero assistant masks for multimodal inputs." + "reason": "Both mention tokenization/chat templates, but one is a tokenizer regression across versions and the other is an all-zero assistant_mask bug for multimodal inputs; different failure modes and code paths." 
}, { "left": "issue:44263", "right": "issue:44373", "accept": false, - "reason": "One is a torch.split behavior bug and the other is only a wrong docstring for position_ids; not the same defect." + "reason": "One is a torch.split bug in a GLM MoE indexer; the other is a wrong docstring for position_ids. Docs issue vs runtime bug." }, { "left": "issue:43232", "right": "issue:45593", "accept": false, - "reason": "Generation kwargs after sync_gpus and missing auxiliary losses in D-FINE are unrelated code paths." + "reason": "Generation kwargs after sync_gpus is unrelated to D-FINE auxiliary losses being disabled; different models and different subsystems." }, { "left": "issue:44117", "right": "issue:44704", "accept": false, - "reason": "Both touch from_pretrained internals, but one is TOKENIZER_MAPPING_NAMES returning None and the other is cached_file kwargs passthrough." + "reason": "Both touch from_pretrained/loading internals, but one is TOKENIZER_MAPPING_NAMES returning None and the other is AutoProcessor not forwarding kwargs to cached_file; not the same bug." }, { "left": "issue:42886", "right": "issue:43066", "accept": false, - "reason": "Offline cache loading failure and wrong tokenizer decoder type are different tokenizer bugs." + "reason": "Offline cache loading failure and wrong tokenizer decoder type are separate tokenizer issues with different causes." }, { "left": "issue:43122", "right": "issue:44514", "accept": false, - "reason": "Both are tokenizer-related, but this is a batched chat-template crash, not the same regression as version-to-version tokenization differences." + "reason": "Both involve tokenization-related APIs, but one is version-dependent tokenization drift and the other is a batched apply_chat_template crash with padding=False." }, { "left": "issue:42371", "right": "issue:44263", "accept": false, - "reason": "TF32 behavior settings and a torch.split return-value issue are unrelated." 
+ "reason": "TF32 behavior settings and a GLM MoE torch.split bug are unrelated." }, { "left": "issue:30990", "right": "issue:33357", "accept": false, - "reason": "Sentence-transformers loading hang and a MacOS bus error with a CLIP model are different failure modes." + "reason": "These are different loading failures: Sentence Transformers hanging vs a MacOS bus error with a community CLIP model." }, { "left": "issue:44704", "right": "issue:45290", "accept": false, - "reason": "AutoProcessor kwargs forwarding and chat-template crashes with tool-call messages are separate bugs." + "reason": "AutoProcessor/cached_file kwargs forwarding is unrelated to apply_chat_template crashing on tool-call assistant messages." }, { "left": "issue:44295", "right": "issue:44297", "accept": false, - "reason": "Reading a buffer-backed position_ids tensor and save_pretrained writing the wrong tokenizer_class are different issues." + "reason": "A position_ids buffer read error and a tokenizer_class mismatch on save_pretrained are distinct issues." }, { "left": "issue:42907", "right": "issue:44945", "accept": false, - "reason": "Saving dequantized Ministrals/Devstrals and incorrect output under pipeline parallelism do not share the same root cause." + "reason": "Saving dequantized Ministrals/Devstrals and incorrect pipeline-parallel LLM output are different bugs." }, { "left": "issue:43976", "right": "issue:44945", "accept": false, - "reason": "Python version compatibility and pipeline-parallel output corruption are unrelated." + "reason": "Python version compatibility for transformers 5.1.0 is unrelated to pipeline parallelism output corruption." }, { - "left": "issue:39401", - "right": "issue:44488", + "left": "issue:43232", + "right": "issue:44743", "accept": false, - "reason": "Qwen3 offset_mapping errors and failure to load a specific tokenizer/model are not the same bug." 
+ "reason": "Both concern generation-state handling, but one is sync_gpus kwargs update logic and the other is recurrent state reset in Qwen3.5 modular code; not the same defect." }, { - "left": "issue:43232", - "right": "issue:44743", + "left": "issue:39401", + "right": "issue:44488", "accept": false, - "reason": "These are both generation/cache related, but one is sync_gpus kwargs handling and the other is recurrent state reset with cache." + "reason": "Qwen3 tokenizer offset_mapping is unrelated to loading cjvt/sleng-bert." }, { "left": "issue:43232", "right": "issue:45245", "accept": false, - "reason": "Generation kwargs handling and a categories-limit runtime error are unrelated." + "reason": "Generation kwargs handling has nothing to do with the 2^24 category limit error." }, { "left": "issue:42907", "right": "issue:43937", "accept": false, - "reason": "Dequantized save failures and invalid GenerationConfig for GLM-5 are different issues." + "reason": "Dequantized model save failure and GLM-5 GenerationConfig validation are different problems." }, { "left": "issue:43408", "right": "issue:43475", "accept": false, - "reason": "A sam3_video/sam3_tracker type warning is not the same as the missing fpn_position_embeddings attribute." + "reason": "Both are SAM3-related, but one is a model-type/config mismatch warning and the other is a missing attribute on Sam3VisionEncoderOutput; different concrete bugs." }, { "left": "issue:41628", "right": "issue:45278", "accept": false, - "reason": "A specific missing AutoImageProcessor import is not enough to equate with broad import failures after upgrade." + "reason": "An AutoImageProcessor import error is not the same as a broad set of import errors after upgrading versions." }, { "left": "issue:43784", "right": "issue:45600", "accept": false, - "reason": "Sentence-transformers import NameError and Sam3LiteText auto_mappings breakage are unrelated." 
+ "reason": "Importing sentence-transformers breaking because nn is undefined is unrelated to Sam3LiteText configs breaking CI." }, { "left": "issue:44625", "right": "issue:44829", "accept": false, - "reason": "Qwen3.5 num_labels propagation and flash_attention_3 training degeneration are different problems." + "reason": "num_labels propagation in Qwen3.5 and degenerate training with flash_attention_3 are separate issues." }, { "left": "issue:44360", "right": "issue:44485", "accept": false, - "reason": "DSA indexer missing ReLU and GLM-5 RoPE implementation discussion are unrelated." + "reason": "A missing ReLU in a DSA indexer and a GLM-5 RoPE implementation issue are unrelated model-internal changes." }, { - "left": "issue:39401", - "right": "issue:44521", + "left": "issue:43116", + "right": "issue:45200", "accept": false, - "reason": "Offset_mapping correctness and all-zero assistant_masks are both tokenizer-related, but they are distinct bugs." + "reason": "Empty results in a classification example and mm_token_type_ids defaulting for Gemma 4 are different bugs and different call paths." }, { - "left": "issue:43116", - "right": "issue:45200", + "left": "issue:39401", + "right": "issue:44521", "accept": false, - "reason": "Multi-label example-script behavior and Gemma 4 mm_token_type_ids defaults are separate issues." + "reason": "Tokenizer offset mapping and all-zero assistant masks are both tokenization-adjacent, but they are distinct outputs and distinct bugs." }, { "left": "issue:43122", "right": "issue:44779", "accept": false, - "reason": "Both are tokenization regressions in v5, but they affect different models and symptoms, so they are not the same bug." + "reason": "Both are tokenizer regressions in v5, but they affect different model families and different symptoms; too broad to be the same underlying bug." 
}, { "left": "issue:44038", "right": "issue:44297", "accept": false, - "reason": "A Qwen3-VL-Moe bug and tokenizer_class mismatch on save_pretrained are unrelated." + "reason": "A Qwen3-VL-Moe bug and a tokenizer save_pretrained tokenizer_class mismatch are unrelated." }, { "left": "issue:29127", "right": "issue:30990", "accept": false, - "reason": "LayoutLMv3 error-message clarity and Sentence Transformers loading hang are unrelated." + "reason": "A LayoutLMv3 validation/error-message issue is unrelated to Sentence Transformers getting stuck while loading." }, { "left": "issue:44261", "right": "issue:44373", "accept": false, - "reason": "MLA q_a_layernorm precision/config issue and wrong position_ids docstring are not the same defect." + "reason": "A precision bug from missing rms_norm_eps and a wrong position_ids docstring are not the same issue." }, { "left": "issue:44038", "right": "issue:44295", "accept": false, - "reason": "Qwen3-VL-Moe and position_ids buffer read failure are different bugs." + "reason": "Qwen3-VL-Moe behavior and a position_ids buffer read error are separate bugs." }, { "left": "issue:42371", "right": "issue:42831", "accept": false, - "reason": "TF32 API-setting guidance and FineGrainedFP8 accuracy issues are not the same root cause." + "reason": "TF32 API settings and FineGrainedFP8 accuracy regression are different precision-related issues with different scopes." }, { "left": "issue:42907", "right": "issue:44964", "accept": false, - "reason": "Dequantized save failure and inability to load Phi-4 multimodal are unrelated." + "reason": "Saving dequantized Ministrals/Devstrals and loading Phi-4 multimodal instruct are different model I/O failures." }, { "left": "issue:44038", "right": "issue:44222", "accept": false, - "reason": "Qwen3-VL-Moe regression and FP8 save_pretrained moe are only loosely related by model family, not the same bug." 
+ "reason": "Qwen3-VL-Moe compatibility and FP8 save_pretrained for moe are separate problems; same broad area, not the same bug." }, { "left": "issue:36296", "right": "issue:43116", "accept": false, - "reason": "Tensor parallel training bug and multi-label classification example failure are unrelated." + "reason": "Tensor parallel training and an example-script multi-label classification issue are unrelated." }, { "left": "issue:42907", "right": "issue:43452", "accept": false, - "reason": "Dequantized Ministrals/Devstrals save failure and gguf_file load failure are different code paths." + "reason": "A dequantized model save failure is unrelated to gguf_file breaking from_pretrained for tokenizer/model loading." }, { "left": "issue:43992", "right": "issue:44704", "accept": false, - "reason": "Missing embed_tokens.weight in UMT5Encoder and AutoProcessor cached_file kwargs handling are unrelated." + "reason": "UMT5Encoder missing embed_tokens.weight and AutoProcessor forwarding kwargs to cached_file are different from_pretrained bugs affecting different components." 
} ] }, @@ -11474,6 +11495,7 @@ "issue:43116", "issue:43122", "issue:43278", + "issue:43421", "issue:43441", "issue:43493", "issue:43673", @@ -11490,19 +11512,18 @@ "issue:44075", "issue:44117", "issue:44222", - "issue:44291", "issue:44351", "issue:44485", "issue:44779", "issue:44936", "issue:44945", "issue:44977", - "issue:45071", "issue:45072", "issue:45083", "issue:45216", "issue:45278", "issue:45341", + "issue:45405", "issue:45446", "issue:45478", "issue:45561", @@ -11540,222 +11561,193 @@ "issue:43937|issue:44977", "issue:45341|issue:45561", "issue:42673|issue:43906", - "issue:44291|issue:45071" + "issue:45405|issue:45446", + "issue:39401|issue:43421" ], - "prepared_review_unit_hash": "7d59db835bee6fee1d619ce0b55f7178f9d391d16471c5515d84703acded473f", + "prepared_review_unit_hash": "c778ae355e11467d04182bc03dcefbf81532465cc9e4a74dc069e4693daf13cb", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 31, - "serialized_chars": 36612, - "estimated_input_tokens": 9153, - "estimated_eval_tokens": 18562 + "soft_pair_count": 32, + "serialized_chars": 36902, + "estimated_input_tokens": 9226, + "estimated_eval_tokens": 18708 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The set is highly heterogeneous: most pairs share only broad symptoms (imports, tokenization, save/load, training) or a common subsystem, but not the same concrete bug. I reject all soft edges and see no true duplicate cluster here.", - "confidence": 0.84, - "canonical_issue_reason": "No single issue cleanly represents a duplicate cluster. 
If forced, issue:45278 is the broadest umbrella, but the items are too diverse for a real canonical duplicate.", + "summary": "The set is mostly a grab-bag of unrelated issues; the soft pairs are mostly broad topical similarities (imports, tokenizers, Qwen3, save/load) rather than the same concrete bug. I would not merge any of the proposed pairs.", + "confidence": 0.23, + "canonical_issue_reason": "No single canonical duplicate stands out: the items split into several unrelated problem areas, and the soft pairs do not share a sufficiently specific root cause.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45278 is the closest thing to a cluster anchor because it describes a broad import-regression symptom, but it is still too generic to serve as a strong canonical for the whole set.", + "best_issue_reason": "issue:45278 is the broadest umbrella-style report among the issues (many import errors after upgrading), so it is the closest thing to a representative issue, though it is not a true duplicate target for the others.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:45216", "right": "issue:45478", "accept": false, - "reason": "Same model family and version range, but one is save_pretrained output corruption and the other is from_pretrained loading failure; not the same bug." + "reason": "Both involve Qwen3.5 MoE save/load behavior, but one is a save_pretrained checkpoint issue and the other is a from_pretrained loading error; different concrete failure modes." }, { "left": "issue:43824", "right": "issue:44351", "accept": false, - "reason": "Both are import errors, but for different missing symbols and likely different breakages." + "reason": "Both are import errors, but they concern different missing symbols from different modules; same symptom, not the same underlying bug." 
}, { "left": "issue:43122", "right": "issue:44977", "accept": false, - "reason": "Both concern tokenizer behavior, but one is a generic version-to-version change and the other is a DeepSeek-specific regression." + "reason": "Tokenizer regression vs flash-attention generation failure; unrelated code paths and symptoms." }, { "left": "issue:42907", "right": "issue:44222", "accept": false, - "reason": "Both mention saving/quantized models, but they are different model families and different failure modes." + "reason": "Both are save/export problems, but for different model families and different quantization formats; not the same bug." }, { "left": "issue:42757", "right": "issue:44351", "accept": false, - "reason": "Both are import failures, but they involve different packages/symbols and different root causes." + "reason": "Different missing imports from different packages; too generic to treat as one duplicate." }, { "left": "issue:43278", "right": "issue:44945", "accept": false, - "reason": "Different areas: dtype change in evaluation versus incorrect outputs under pipeline parallelism." + "reason": "Dtype drift between train/eval is unrelated to trainer.evaluate() failing after trainer.train()." }, { "left": "issue:43784", "right": "issue:45446", "accept": false, - "reason": "One is a missing internal name during import; the other is a bad version gate for a flex_attention import." + "reason": "Separate import/version-guard failures involving different symbols and libraries." }, { "left": "issue:42222", "right": "issue:43441", "accept": false, - "reason": "Different model families and different subsystems: VitPose breakage versus FlashAttention failure." + "reason": "Different model families and different failures; only the broad 'model broke under v5/flash-attention' theme overlaps." 
}, { "left": "issue:42673", "right": "issue:45072", "accept": false, - "reason": "VRAM leak in Qwen3ForCausalLM is unrelated to dtype mismatches in SwitchTransformers/TimmWrapperModel." + "reason": "VRAM leak in Qwen3ForCausalLM vs dtype mismatch in unrelated models; no shared concrete root cause." }, { "left": "issue:38617", "right": "issue:45278", "accept": false, - "reason": "Both are import-related regressions, but one is a single missing symbol while the other is a broad update report; not the same concrete bug." + "reason": "Both are import-related regressions, but one is a specific missing helper while the other is a broad upgrade report; not the same underlying issue." }, { "left": "issue:43064", "right": "issue:43673", "accept": false, - "reason": "Training optimizer-state corruption is unrelated to a missing generation cache in chunked prefill." + "reason": "Optimizer state corruption in FSDP/PEFT vs missing GenerationMixin cache during chunked prefill; different subsystems." }, { "left": "issue:43937", "right": "issue:44485", "accept": false, - "reason": "GenerationConfig validation failure is unrelated to a RoPE implementation discussion." + "reason": "Invalid GenerationConfig vs RoPE implementation discussion; distinct bugs." }, { "left": "issue:22355", "right": "issue:29127", "accept": false, - "reason": "Completely different failures: missing transformers.onnx module versus unclear LayoutLMv3 error message." + "reason": "No overlap in functionality or failure mode; unrelated issues." }, { "left": "issue:42371", "right": "issue:43874", "accept": false, - "reason": "TF32 settings advice and missing multimodal-token helper are unrelated." + "reason": "TF32 behavior/API settings vs multimodal token counting bug; unrelated." }, { "left": "issue:43116", "right": "issue:43493", "accept": false, - "reason": "Example-script classification bug versus SigLIP2 implementation discrepancy; different code paths." 
+ "reason": "Example-script multi-label output bug vs SigLIP2 implementation discrepancy; unrelated." }, { "left": "issue:44075", "right": "issue:44936", "accept": false, - "reason": "Optimizer argument handling and trainer.evaluate after train are different bugs." + "reason": "Optimizer argument handling vs trainer.evaluate() after train(); not the same bug." }, { "left": "issue:44222", "right": "issue:45478", "accept": false, - "reason": "Both involve Qwen3.5/MoE save/load flows, but one is FP8 save_pretrained and the other is from_pretrained loading; not the same defect." + "reason": "Both mention Qwen3.5/MoE and save/load, but one is FP8 save_pretrained and the other is from_pretrained loading error; too different to merge." }, { "left": "issue:44038", "right": "issue:45478", "accept": false, - "reason": "Same broad Qwen3.5 area, but different symptoms and likely different failing code paths." + "reason": "Both involve Qwen3-ish MoE models, but the model/version and failure details differ enough that they are not the same concrete bug." }, { "left": "issue:43116", "right": "issue:43825", "accept": false, - "reason": "Classification example failure is unrelated to a pipeline() error message regression." + "reason": "Example-script classification bug vs pipeline API error messaging; unrelated." }, { "left": "issue:43976", "right": "issue:45446", "accept": false, - "reason": "Python-version compatibility issue versus a flex_attention import guard bug." + "reason": "Python version compatibility problem vs a version check bug for a specific import; different issues." }, { "left": "issue:43937", "right": "issue:45563", "accept": false, - "reason": "GenerationConfig invalidity and a stale warning about num_return_sequences are different issues." + "reason": "GenerationConfig validation vs a stale warning in paged generate(); not the same failure." 
}, { "left": "issue:39401", "right": "issue:44779", "accept": false, - "reason": "Both tokenizer-related, but one is offset_mapping and the other is generic incorrect tokenization in DeepSeek." + "reason": "Both are tokenizer regressions, but in different model families and with different symptoms; too broad to call duplicate." }, { "left": "issue:33453", "right": "issue:39401", "accept": false, - "reason": "Tokenizer-loading regression and Qwen3 offset_mapping bug are not the same concrete failure." + "reason": "Tokenizer loading regression vs wrong offset mapping; same subsystem but not the same bug." }, { "left": "issue:43931", "right": "issue:44945", "accept": false, - "reason": "Model weight-shape mismatch and pipeline-parallelism incorrect output are unrelated." + "reason": "Model weight-shape mismatch on load vs pipeline-parallelism output issue; unrelated." }, { "left": "issue:45083", "right": "issue:45446", "accept": false, - "reason": "A helper-function behavior change in qwen3_omni_moe is unrelated to a PyTorch version check bug." + "reason": "Feature-extraction length helper issue in qwen3_omni_moe vs an import/version-check bug; different code paths." }, { "left": "issue:44117", "right": "issue:45588", "accept": false, - "reason": "Tokenizer mapping fallback bug and flash_attention s_aux=None crash are different subsystems and failures." - }, - { - "left": "issue:43827", - "right": "issue:45446", - "accept": false, - "reason": "Documentation mentioning pipeline() is unrelated to a runtime version-check import bug." - }, - { - "left": "issue:43937", - "right": "issue:44977", - "accept": false, - "reason": "Different generation/tokenization problems with different model contexts." - }, - { - "left": "issue:45341", - "right": "issue:45561", - "accept": false, - "reason": "A vague testing_utils bug is not enough to merge with a specific xdist race on captured_info.txt." 
- }, - { - "left": "issue:42673", - "right": "issue:43906", - "accept": false, - "reason": "VRAM leak in Qwen3ForCausalLM is unrelated to a reproduction of another issue." - }, - { - "left": "issue:44291", - "right": "issue:45071", - "accept": false, - "reason": "Unexpected _is_hf_initialized argument during init_empty_weights is unrelated to a PretrainedConfig type-checking breakage." + "reason": "Tokenizer mapping None handling vs flash_attention crash on sink-less models; unrelated." } ] }, @@ -11780,7 +11772,6 @@ "cluster_id": "cluster-15354-374", "nodes": [ "issue:38617", - "issue:39401", "issue:41628", "issue:42175", "issue:42222", @@ -11792,9 +11783,9 @@ "issue:43097", "issue:43122", "issue:43408", - "issue:43421", "issue:43441", "issue:43450", + "issue:43452", "issue:43577", "issue:43582", "issue:43673", @@ -11807,6 +11798,7 @@ "issue:44220", "issue:44261", "issue:44263", + "issue:44291", "issue:44410", "issue:44483", "issue:44484", @@ -11815,22 +11807,21 @@ "issue:44496", "issue:44829", "issue:44933", + "issue:44987", + "issue:45003", "issue:45071", "issue:45081", "issue:45216", "issue:45278", "issue:45310", "issue:45341", - "issue:45405", - "issue:45446", "issue:45459", "issue:45464", "issue:45478", "issue:45542" ], "soft_pairs": [ - "issue:45405|issue:45446", - "issue:39401|issue:43421", + "issue:44291|issue:45071", "issue:43450|issue:43986", "issue:44484|issue:45310", "issue:44829|issue:45216", @@ -11840,16 +11831,16 @@ "issue:43408|issue:43742", "issue:42673|issue:43582", "issue:42222|issue:45542", - "issue:42371|issue:44261", "issue:42673|issue:43577", + "issue:42371|issue:44261", "issue:43937|issue:44496", "issue:38617|issue:44220", "issue:43012|issue:43577", "issue:42175|issue:42222", - "issue:44483|issue:45464", "issue:44933|issue:45278", - "issue:44484|issue:45478", + "issue:44483|issue:45464", "issue:42617|issue:45341", + "issue:44484|issue:45478", "issue:43066|issue:43122", "issue:43673|issue:45341", "issue:44410|issue:45478", @@ -11862,225 
+11853,238 @@ "issue:44263|issue:44485", "issue:41628|issue:45071", "issue:45071|issue:45542", - "issue:43931|issue:44410" + "issue:43931|issue:44410", + "issue:45003|issue:45341", + "issue:44117|issue:44987", + "issue:43986|issue:45341", + "issue:43097|issue:43452" ], - "prepared_review_unit_hash": "ac0bbe99f108a1aa6ec2458e234c56532bc2602ef102b24057fc7106ad4908c8", + "prepared_review_unit_hash": "90b9381620f9b37f8e9a38ba16d45db6dd9c4eb82e3dfe3ca764511fdac44944", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 34, - "serialized_chars": 37633, - "estimated_input_tokens": 9409, - "estimated_eval_tokens": 19074 + "soft_pair_count": 37, + "serialized_chars": 38591, + "estimated_input_tokens": 9648, + "estimated_eval_tokens": 19552 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a grab bag of unrelated issues. The only clearly supported duplicate set is the Qwen3.5 MoE `from_pretrained` regression between 45310 and 45478. Issue 44484 is about `save_pretrained()` / `max_shard_size` and is not, by title alone, enough to treat it as the same bug.", - "confidence": 0.82, - "canonical_issue_reason": "issue:45478 is the clearest representative of the Qwen3.5 MoE `from_pretrained` regression; it matches issue:45310 closely and has slightly stronger discussion signal. Issue:44484 does not have enough evidence to join that duplicate set.", + "summary": "These are mostly thematically similar Transformers regression reports, but they do not appear to be true duplicates. 
The strongest representative issue is the broad import-regression report, while the rest are separate bugs affecting different code paths or model families.", + "confidence": 0.84, + "canonical_issue_reason": "issue:45278 is the broadest representative of the visible cluster theme: widespread import failures after upgrading Transformers. It is generic enough to stand in for the import-regression subset without overfitting to one specific missing symbol.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45478 is the best issue to keep as the representative of the only clearly duplicated bug in the cluster.", + "best_issue_reason": "issue:45278 is the best global issue candidate because it is the most umbrella-like and can anchor the cluster\u2019s import-breakage side, whereas the other issues are narrower model- or API-specific regressions.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:45405", - "right": "issue:45446", - "accept": false, - "reason": "Both are version/dependency-related, but one is about PEFT pinning and the other is about an import/version check for AuxRequest; different bugs." - }, - { - "left": "issue:39401", - "right": "issue:43421", + "left": "issue:44291", + "right": "issue:45071", "accept": false, - "reason": "Both concern tokenizer behavior, but one is wrong `offset_mapping` and the other is runtime special-token post-processor updates; not the same issue." + "reason": "Both relate to v5-era model-loading/type issues, but one is about an unexpected _is_hf_initialized argument and the other about PretrainedConfig type checking; they are different failures." }, { "left": "issue:43450", "right": "issue:43986", "accept": false, - "reason": "One is a video-processor batch shape bug; the other is a crash when `torchvision` is missing. Different code paths and symptoms." 
+ "reason": "Both involve video-processing paths, but one is a batched-shape bug in processors and the other is a crash when AutoProcessor is used without torchvision." }, { "left": "issue:44484", "right": "issue:45310", "accept": false, - "reason": "`44484` is about `save_pretrained()` / `max_shard_size`, while `45310` is a Qwen3.5 MoE `from_pretrained` error. The titles do not establish the same underlying bug." + "reason": "These are unrelated: max_shard_size/save_pretrained behavior versus a Qwen3.5 MoE from_pretrained load error." }, { "left": "issue:44829", "right": "issue:45216", "accept": false, - "reason": "FlashAttention-3 training degeneration and Qwen3.5 checkpoint save corruption are distinct problems." + "reason": "One is a training-degeneration problem with flash_attention_3, the other is a save_pretrained checkpoint regression for Qwen3.5; different code paths and symptoms." }, { "left": "issue:38617", "right": "issue:41628", "accept": false, - "reason": "Both are import errors, but for different symbols (`layer_type_validation` vs `AutoImageProcessor`)." + "reason": "Both are import errors, but for different missing names from different modules; no evidence they are the same underlying bug." }, { "left": "issue:44410", "right": "issue:45310", "accept": false, - "reason": "One is missing projections in qwen3next layer 0; the other is a Qwen3.5 MoE loading regression. Different bugs." + "reason": "These describe different Qwen-family model loading issues: missing layer projections versus a MoE from_pretrained error." }, { "left": "issue:44117", "right": "issue:45081", "accept": false, - "reason": "`TOKENIZER_MAPPING_NAMES` returning `None` and the Mistral regex patch crash are unrelated tokenizer internals." + "reason": "Tokenization-registry assumptions and Mistral regex patching are separate problems with different failure points." 
}, { "left": "issue:43408", "right": "issue:43742", "accept": false, - "reason": "A model-type warning for sam3_tracker is not the same as a key error when loading MobileLLM." + "reason": "A model-type warning during instantiation is not the same bug as a key error loading MobileLLM-125M." }, { "left": "issue:42673", "right": "issue:43582", "accept": false, - "reason": "A VRAM leak in Qwen3ForCausalLM multi-threaded loading is unrelated to an Apple Silicon allocator `TypeError`." + "reason": "VRAM leakage in multi-threaded Qwen3ForCausalLM use is unrelated to an Apple Silicon TypeError in caching_allocator_warmup." }, { "left": "issue:42222", "right": "issue:45542", "accept": false, - "reason": "VitPose breakage and undefined TF backend errors are different dependency/model-loading problems." + "reason": "VitPose breakage and a TensorBoard/TensorFlow backend mismatch are different installation/runtime issues." }, { - "left": "issue:42371", - "right": "issue:44261", + "left": "issue:42673", + "right": "issue:43577", "accept": false, - "reason": "TF32 behavior/settings guidance is unrelated to the MLA `rms_norm_eps` precision bug." + "reason": "A multi-thread VRAM leak and BLIP2 dtype propagation are unrelated regressions." }, { - "left": "issue:42673", - "right": "issue:43577", + "left": "issue:42371", + "right": "issue:44261", "accept": false, - "reason": "The Qwen3 VRAM leak and BLIP2 dtype staying float32 are both about memory/precision, but not the same bug." + "reason": "TF32 behavior warnings and MLA q_a_layernorm precision issues are different subsystems with different fixes." }, { "left": "issue:43937", "right": "issue:44496", "accept": false, - "reason": "GLM-5 invalid `GenerationConfig` and OLMo-Hybrid unrecognized model/config are distinct loading/configuration failures." + "reason": "GenerationConfig validation for GLM-5 is not the same as an unrecognized model_type/config.json problem." 
}, { "left": "issue:38617", "right": "issue:44220", "accept": false, - "reason": "An import error from `configuration_utils` is not the same as the `_torch_extract_fbank_features()` issue." + "reason": "These are both import/feature issues, but one is a missing symbol import and the other is an audio feature-extraction problem." }, { "left": "issue:43012", "right": "issue:43577", "accept": false, - "reason": "Both involve dtype/precision, but one is a compile-time warning and the other is incorrect loading dtypes in BLIP2." + "reason": "A PyTorch warning during bfloat16 compilation is unrelated to BLIP2 dtype not being propagated." }, { "left": "issue:42175", "right": "issue:42222", "accept": false, - "reason": "Missing TensorFlow in the `[torch]` install and broken VitPose models are unrelated." - }, - { - "left": "issue:44483", - "right": "issue:45464", - "accept": false, - "reason": "Both mention chat/completions, but one is a generic endpoint request failure and the other is a model-specific streaming inference problem." + "reason": "Missing TensorFlow in a torch-only install and broken VitPose models are separate issues." }, { "left": "issue:44933", "right": "issue:45278", "accept": false, - "reason": "A specific missing import from `image_utils` is not enough to equate it with the broader import-error report after upgrading to 5.5.0." + "reason": "The latter is a broad import-regression report, but the former is a specific missing import from image_utils; related theme, not the same concrete bug." }, { - "left": "issue:44484", - "right": "issue:45478", + "left": "issue:44483", + "right": "issue:45464", "accept": false, - "reason": "`44484` is about `save_pretrained()` / `max_shard_size`, while `45478` is a Qwen3.5 MoE `from_pretrained` error. The evidence does not support treating them as the same bug." 
+ "reason": "Both mention chat/completions, but one is a generic API acceptance failure and the other is a streaming-inference failure on a specific Qwen3.5 model." }, { "left": "issue:42617", "right": "issue:45341", "accept": false, - "reason": "`3d_parallel.py` execution failure and a `testing_utils.py` bug are unrelated." + "reason": "Running 3d_parallel.py and a bug in testing_utils.py are unrelated." + }, + { + "left": "issue:44484", + "right": "issue:45478", + "accept": false, + "reason": "These are different Qwen3.5/save_pretrained vs from_pretrained problems, not the same bug." }, { "left": "issue:43066", "right": "issue:43122", "accept": false, - "reason": "Both are tokenizer regression reports, but one targets decoder-type metadata and the other reports changed tokenization output; not clearly the same concrete bug." + "reason": "Both concern tokenizer behavior in v5, but one is about decoder type and the other about changed tokenization output; too different to merge." }, { "left": "issue:43673", "right": "issue:45341", "accept": false, - "reason": "Missing generation cache in chunked prefill and a testing utility bug do not share the same code path." + "reason": "A GenerationMixin cache regression during chunked_prefill is unrelated to a testing_utils bug." }, { "left": "issue:44410", "right": "issue:45478", "accept": false, - "reason": "qwen3next missing attention projections is unrelated to the Qwen3.5 MoE loading regression." + "reason": "Missing attention projections in qwen3next is not the same as a Qwen3.5 MoE from_pretrained error." }, { "left": "issue:43097", "right": "issue:44493", "accept": false, - "reason": "Removal of `tie_embeddings_and_encoder_decoder` and unexpected `position_id` keys are different regressions." + "reason": "Removal of tie_embeddings_and_encoder_decoder is a config/API change; unexpected key warnings in models are a separate loading issue." 
}, { "left": "issue:43874", "right": "issue:44261", "accept": false, - "reason": "A missing image-patch helper and an `rms_norm_eps` precision mismatch are unrelated bugs." + "reason": "A missing image-patch helper in GLM46V and MLA layernorm epsilon precision are unrelated multimodal/model bugs." }, { "left": "issue:41628", "right": "issue:45071", "accept": false, - "reason": "A missing top-level import and a `PreTrainedConfig` type-checking breakage are different failures." - }, - { - "left": "issue:44263", - "right": "issue:44485", - "accept": false, - "reason": "Both are GLM-related, but one concerns `torch.split` handling and the other RoPE implementation; not the same issue." + "reason": "AutoImageProcessor import failure and PretrainedConfig type-checking breakage are different regressions." }, { "left": "issue:45071", "right": "issue:45542", "accept": false, - "reason": "`PreTrainedConfig` type checking and undefined TF backend errors are unrelated." + "reason": "PretrainedConfig type checking and TensorBoard-only TF backend handling are unrelated." }, { "left": "issue:43931", "right": "issue:44410", "accept": false, - "reason": "Qwen3-VL weight-shape mismatch and qwen3next missing projections are different model-loading bugs." + "reason": "Qwen3-VL weight-shape mismatch is a loading/weights issue, while qwen3next missing projections is a model-definition issue." + }, + { + "left": "issue:45003", + "right": "issue:45341", + "accept": false, + "reason": "Unsafe sys.modules access in modeling_utils is not the same as a testing_utils bug." + }, + { + "left": "issue:44117", + "right": "issue:44987", + "accept": false, + "reason": "A None entry in TOKENIZER_MAPPING_NAMES and a specific model-loading failure for physical-intelligence/fast are different issues." + }, + { + "left": "issue:43986", + "right": "issue:45341", + "accept": false, + "reason": "Video-model loading without torchvision and a small testing_utils bug are unrelated." 
+ }, + { + "left": "issue:43097", + "right": "issue:43452", + "accept": false, + "reason": "A removed tie_embeddings_and_encoder_decoder setting is unrelated to gguf_file breaking tokenizer/model loading." } ] }, - "evaluator_result": { - "accept": false, - "feedback": "The cluster summary is too strong about the Qwen3.5 MoE duplicate set. In the packet, 45310 and 45478 clearly match on `Qwen3.5 Moe from_pretrained error`, but 44484\u2019s title is about `save_pretrained()` / `max_shard_size` and does not, by itself, ground the claim that it is the same bug. The accepted soft-edge verdicts involving 44484 therefore overstate the evidence. The rest of the rejected pairs look conservative, but I can\u2019t accept the cluster with that unsupported equivalence." - }, - "evaluator_used": true, - "retried": true, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null @@ -12101,15 +12105,16 @@ "issue:38617", "issue:42757", "issue:42886", + "issue:42907", "issue:43012", "issue:43064", "issue:43066", "issue:43097", + "issue:43208", "issue:43335", "issue:43352", "issue:43381", "issue:43408", - "issue:43452", "issue:43502", "issue:43531", "issue:43550", @@ -12137,21 +12142,16 @@ "issue:44655", "issue:44704", "issue:44829", - "issue:44987", - "issue:45003", "issue:45071", "issue:45103", "issue:45245", "issue:45341", "issue:45372", "issue:45375", + "issue:45479", "issue:45588" ], "soft_pairs": [ - "issue:45003|issue:45341", - "issue:44117|issue:44987", - "issue:43986|issue:45341", - "issue:43097|issue:43452", "issue:43931|issue:44038", "issue:43335|issue:45071", "issue:43408|issue:44368", @@ -12165,255 +12165,260 @@ "issue:44261|issue:44485", "issue:44410|issue:45375", "issue:38617|issue:44351", - "issue:44038|issue:44410", "issue:44062|issue:44117", + "issue:44038|issue:44410", "issue:43097|issue:45588", "issue:43986|issue:44117", "issue:44704|issue:45103", 
"issue:43502|issue:43992", "issue:43992|issue:44484", - "issue:43824|issue:43986", "issue:43097|issue:43873", + "issue:43824|issue:43986", "issue:43066|issue:43906", "issue:43352|issue:44380", "issue:43825|issue:44655", "issue:38617|issue:43097", "issue:43012|issue:44368", "issue:42886|issue:43381", - "issue:43012|issue:43550" + "issue:43012|issue:43550", + "issue:42907|issue:44655", + "issue:43208|issue:43550", + "issue:44380|issue:45479", + "issue:43352|issue:43976", + "issue:43064|issue:44380" ], - "prepared_review_unit_hash": "617e895a6f98e5fe2713f014fb05f78b1ef8463d1823f84152e4298749265309", + "prepared_review_unit_hash": "681c0a5e32c3908f1b4cc0e5b4b46229963f380fb240bcf3e00adb9c8fdfb528", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 33, - "serialized_chars": 37573, - "estimated_input_tokens": 9394, - "estimated_eval_tokens": 19044 + "soft_pair_count": 34, + "serialized_chars": 37911, + "estimated_input_tokens": 9478, + "estimated_eval_tokens": 19212 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is mostly a noisy mix of unrelated Transformers v5 regressions and model-specific bugs. I do not see any soft pair that is the same underlying bug/change, so all candidate edges are rejected. 
The closest representative issue is the v5 breaking-change report about `tie_embeddings_and_encoder_decoder` being removed.", - "confidence": 0.66, - "canonical_issue_reason": "Issue 43097 is the most cluster-like representative: it describes a concrete, cross-cutting v5 API/config break with inbound references, while most other issues are narrower model- or dependency-specific failures.", + "summary": "The items are mostly unrelated bug reports from the same repo, with only superficial theme overlap (imports, loading, attention, quantization, etc.). None of the soft pairs look like the same underlying fixable bug/change, so I would not merge any of them as duplicates.", + "confidence": 0.91, + "canonical_issue_reason": "No clear canonical issue emerges: the cluster is heterogeneous and the soft-pair overlaps are mostly broad symptom similarity rather than the same root cause.", "canonical_pr_reason": null, - "best_issue_reason": "43097 is the best anchor for this set because it captures a broad Transformers v5 regression that plausibly sits near the center of the cluster\u2019s general theme of breaking changes.", + "best_issue_reason": "No single issue is a strong representative for the cluster because the reports span many distinct subsystems and failure modes.", "best_pr_reason": null, "soft_edge_verdicts": [ - { - "left": "issue:45003", - "right": "issue:45341", - "accept": false, - "reason": "Different bug classes: unsafe `sys.modules` access vs a testing utility issue." - }, - { - "left": "issue:44117", - "right": "issue:44987", - "accept": false, - "reason": "Tokenizer mapping fallback bug vs a model-loading failure for `physical-intelligence/fast`." - }, - { - "left": "issue:43986", - "right": "issue:45341", - "accept": false, - "reason": "AutoProcessor/video-model crash without torchvision vs unrelated testing utility bug." 
- }, - { - "left": "issue:43097", - "right": "issue:43452", - "accept": false, - "reason": "Removed config flag warning vs `gguf_file` loading breakage; different code paths." - }, { "left": "issue:43931", "right": "issue:44038", "accept": false, - "reason": "Weight-shape mismatch for one model vs a separate Qwen3-VL-Moe loading problem." + "reason": "Both are Qwen3/VL-related model-loading problems, but one is a specific weight-shape mismatch and the other is a version/model-family issue; not the same bug." }, { "left": "issue:43335", "right": "issue:45071", "accept": false, - "reason": "SwitchTransformers layer creation bug vs PretrainedConfig type-checking regression." + "reason": "One is a SwitchTransformers config edge case; the other is a broad PretrainedConfig type-checking regression. Different failures and code paths." }, { "left": "issue:43408", "right": "issue:44368", "accept": false, - "reason": "Model-type warning mismatch vs tie_word_embeddings warning; not the same failure." + "reason": "Different warning classes in different model families; no shared root cause." }, { "left": "issue:43874", "right": "issue:44263", "accept": false, - "reason": "Missing image-patch method vs incorrect `torch.split()` return handling." + "reason": "Different GLM components and different errors: a missing image-processor method vs a torch.split return-value issue." }, { "left": "issue:43976", "right": "issue:45071", "accept": false, - "reason": "Python version compatibility issue vs config type-checking failure." + "reason": "A Python-version compatibility report is not the same bug as PretrainedConfig type checking." }, { "left": "issue:44829", "right": "issue:45245", "accept": false, - "reason": "FA3 training degeneration vs category-cardinality runtime limit; unrelated." + "reason": "Completely different domains: flash-attention training degeneration vs category cardinality limit." 
}, { "left": "issue:43986", "right": "issue:45372", "accept": false, - "reason": "Missing torchvision dependency vs `mistral_common` import failure; different models/dependencies." + "reason": "Both involve loading processors, but one is about missing torchvision and the other about a missing mistral_common symbol; different root causes." }, { "left": "issue:43064", "right": "issue:44315", "accept": false, - "reason": "FSDP2 optimizer-state corruption vs Liger Kernel not being applied." + "reason": "Training-state corruption with FSDP2/PEFT is unrelated to Liger Kernel application under model_init." }, { "left": "issue:43531", "right": "issue:45341", "accept": false, - "reason": "Qwen3-MoE `sliding_window` behavior vs a testing utility bug." + "reason": "Qwen3-MoE sliding-window behavior and a testing_utils bug are unrelated." }, { "left": "issue:38617", "right": "issue:42757", "accept": false, - "reason": "Both are import errors, but for different missing names and different modules." + "reason": "Both are import errors, but for different symbols from different packages; no shared underlying fix." }, { "left": "issue:44261", "right": "issue:44485", "accept": false, - "reason": "RMS norm epsilon precision issue vs GLM-5 RoPE implementation." + "reason": "Different model-specific numerical/configuration issues (rms_norm_eps precision vs RoPE implementation)." }, { "left": "issue:44410", "right": "issue:45375", "accept": false, - "reason": "Missing attention projections vs missing config field in a different model family." + "reason": "Different model config problems: missing attention projections vs a missing vision config field." }, { "left": "issue:38617", "right": "issue:44351", "accept": false, - "reason": "Different import failures: `layer_type_validation` vs `HybridCache`." + "reason": "Different missing exports from transformers; similar symptom, not the same code-path problem." 
}, { - "left": "issue:44038", - "right": "issue:44410", + "left": "issue:44062", + "right": "issue:44117", "accept": false, - "reason": "Qwen3-VL-Moe loading bug vs qwen3next missing projections; not the same underlying defect." + "reason": "Tokenizer construction error vs tokenizer mapping lookup assumption; not the same bug." }, { - "left": "issue:44062", - "right": "issue:44117", + "left": "issue:44038", + "right": "issue:44410", "accept": false, - "reason": "`AddedToken` argument duplication vs tokenizer mapping returning None." + "reason": "Different Qwen-related model bugs with different symptoms and fixes." }, { "left": "issue:43097", "right": "issue:45588", "accept": false, - "reason": "Config-removal warning vs flash_attention crash on `s_aux=None`; different bugs." + "reason": "A config API removal is unrelated to a flash-attention crash on sink-less models." }, { "left": "issue:43986", "right": "issue:44117", "accept": false, - "reason": "Video AutoProcessor dependency crash vs tokenizer mapping fallback bug." + "reason": "Different areas: processor loading without torchvision vs tokenizer mapping None-handling." }, { "left": "issue:44704", "right": "issue:45103", "accept": false, - "reason": "Kwargs not forwarded to `cached_file` vs docstring/annotation-related AttributeError." + "reason": "Different preprocessing/docstring-related failures and different code paths." }, { "left": "issue:43502", "right": "issue:43992", "accept": false, - "reason": "Offline/network leakage bug vs missing `embed_tokens.weight` during loading." + "reason": "Offline/network behavior is unrelated to a missing checkpoint weight during UMT5 loading." }, { "left": "issue:43992", "right": "issue:44484", "accept": false, - "reason": "Missing state-dict key vs `max_shard_size` default question." + "reason": "Model-loading weight mismatch and save_pretrained shard-size behavior are unrelated." 
}, { - "left": "issue:43824", - "right": "issue:43986", + "left": "issue:43097", + "right": "issue:43873", "accept": false, - "reason": "Missing model symbol import vs video processor crash without torchvision." + "reason": "Removed config API and quantization offloading behavior are different issues." }, { - "left": "issue:43097", - "right": "issue:43873", + "left": "issue:43824", + "right": "issue:43986", "accept": false, - "reason": "Removed config attribute warning vs quantization offloading behavior." + "reason": "Different import/load failures for different multimodal models and dependencies." }, { "left": "issue:43066", "right": "issue:43906", "accept": false, - "reason": "Wrong tokenizer decoder type vs a reproduction of a different issue." + "reason": "Wrong tokenizer decoder type and an isolated reproduction of another issue are not the same bug." }, { "left": "issue:43352", "right": "issue:44380", "accept": false, - "reason": "Flash Attention support gating vs GPT2 attention scaling being ignored." + "reason": "Flash-Attn support gating for Nemotron is unrelated to GPT2 attention scaling being ignored under SDPA/FlashAttention." }, { "left": "issue:43825", "right": "issue:44655", "accept": false, - "reason": "Pipeline translation-task messaging vs inability to save Pipeline objects." + "reason": "Error messaging for pipeline tasks and saving Pipeline objects are different concerns." }, { "left": "issue:38617", "right": "issue:43097", "accept": false, - "reason": "ImportError for a missing symbol vs a removed config field warning." - }, - { - "left": "issue:44261", - "right": "issue:44485", - "accept": false, - "reason": "Precision mismatch in MLA q_a_layernorm vs GLM-5 RoPE implementation." + "reason": "Different import/config regressions with no shared root cause." }, { "left": "issue:43012", "right": "issue:44368", "accept": false, - "reason": "bfloat16 compilation warning vs tie_word_embeddings warning." 
+ "reason": "A PyTorch precision warning on bfloat16 compilation is unrelated to a tie_word_embeddings warning." }, { "left": "issue:42886", "right": "issue:43381", "accept": false, - "reason": "Offline cache-loading failure vs gradient checkpointing being disallowed in eval mode." + "reason": "Offline cache loading and gradient-checkpointing in eval mode are unrelated." }, { "left": "issue:43012", "right": "issue:43550", "accept": false, - "reason": "PyTorch precision warning vs Bamba-9B-v2 compile/SDPA failure." + "reason": "Different runtime behaviors in different subsystems: warning on compile vs SDPA compile failure." + }, + { + "left": "issue:42907", + "right": "issue:44655", + "accept": false, + "reason": "Saving dequantized models and saving Pipeline objects share only a broad save_pretrained theme, not the same bug." + }, + { + "left": "issue:43208", + "right": "issue:43550", + "accept": false, + "reason": "xLSTM training blockers and Bamba torch.compile/SDPA failure are unrelated." + }, + { + "left": "issue:44380", + "right": "issue:45479", + "accept": false, + "reason": "Attention-backend config handling is unrelated to zero-loss classification degeneration." + }, + { + "left": "issue:43352", + "right": "issue:43976", + "accept": false, + "reason": "Flash-Attention support for a specific model is unrelated to Python-version compatibility." + }, + { + "left": "issue:43064", + "right": "issue:44380", + "accept": false, + "reason": "Distributed training optimizer-state corruption is unrelated to attention scaling config being ignored." 
} ] }, @@ -12441,8 +12446,8 @@ "issue:36683", "issue:38617", "issue:42757", - "issue:42907", "issue:42994", + "issue:43010", "issue:43012", "issue:43064", "issue:43066", @@ -12453,7 +12458,6 @@ "issue:43502", "issue:43525", "issue:43526", - "issue:43550", "issue:43582", "issue:43618", "issue:43756", @@ -12467,8 +12471,8 @@ "issue:44038", "issue:44077", "issue:44188", - "issue:44220", "issue:44230", + "issue:44242", "issue:44263", "issue:44380", "issue:44483", @@ -12479,19 +12483,15 @@ "issue:44683", "issue:44938", "issue:45216", + "issue:45278", "issue:45290", "issue:45310", "issue:45381", "issue:45440", - "issue:45479", + "issue:45478", "issue:45593" ], "soft_pairs": [ - "issue:42907|issue:44655", - "issue:43208|issue:43550", - "issue:44380|issue:45479", - "issue:43352|issue:43976", - "issue:43064|issue:44380", "issue:44483|issue:44514", "issue:43208|issue:43526", "issue:43618|issue:44380", @@ -12499,8 +12499,8 @@ "issue:43618|issue:43761", "issue:44483|issue:45290", "issue:43012|issue:44683", - "issue:42757|issue:43824", "issue:44038|issue:44230", + "issue:42757|issue:43824", "issue:44483|issue:45381", "issue:43299|issue:43867", "issue:38617|issue:43502", @@ -12509,7 +12509,7 @@ "issue:42994|issue:44655", "issue:43867|issue:45216", "issue:36683|issue:43475", - "issue:44220|issue:44655", + "issue:44655|issue:45478", "issue:43066|issue:45381", "issue:43352|issue:43937", "issue:44077|issue:44380", @@ -12522,252 +12522,243 @@ "issue:44188|issue:44380", "issue:43618|issue:45216", "issue:43352|issue:45593", - "issue:43992|issue:45310" + "issue:43992|issue:45310", + "issue:36683|issue:44242", + "issue:43010|issue:45278", + "issue:36683|issue:43352" ], - "prepared_review_unit_hash": "189834f88c865a0bd70f83c6075a362629939539de30f3be8e7505f5a68fc38c", + "prepared_review_unit_hash": "768fe13701556680904c5d2e6175e1334f16348b75634556c033b367abee8fe1", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - 
"estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 36, - "serialized_chars": 38094, - "estimated_input_tokens": 9524, - "estimated_eval_tokens": 19304 + "soft_pair_count": 34, + "serialized_chars": 37466, + "estimated_input_tokens": 9367, + "estimated_eval_tokens": 18990 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This set is mostly heterogeneous: many title-level similarities (imports, saving, Qwen/CLIP/model regressions) but the underlying bugs and code paths differ. I would not merge any soft pair as a duplicate.", + "summary": "This cluster is a grab bag of unrelated Transformers issues, mostly around v5 regressions and import/load failures. None of the soft pairs look like the same concrete bug or change, so I would not merge any of them as duplicates.", "confidence": 0.88, - "canonical_issue_reason": null, + "canonical_issue_reason": "issue:45278 is the broadest umbrella item here: it explicitly describes post-upgrade import failures after moving from 4.57.0 to 5.5.0, which is the closest shared theme among several import-regression reports.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43502 is the strongest standalone representative here: concrete, reproducible API regression, broad user impact, and more discussion/inbound references than most others.", + "best_issue_reason": "If a single representative issue is needed, issue:45278 is the most general and cluster-like anchor because it captures the widest version-regression/import-failure pattern rather than a model-specific edge case.", "best_pr_reason": null, "soft_edge_verdicts": [ - { - "left": "issue:42907", - "right": "issue:44655", - "accept": false, - "reason": "Both involve saving, but one is 
dequantized model saving and the other is Pipeline.save_pretrained; different failure paths." - }, - { - "left": "issue:43208", - "right": "issue:43550", - "accept": false, - "reason": "xLSTM training bugs vs Bamba torch.compile/SDPA regression; different models and code paths." - }, - { - "left": "issue:44380", - "right": "issue:45479", - "accept": false, - "reason": "Attention scaling under SDPA/FlashAttention is unrelated to single-label classification zero-loss behavior." - }, - { - "left": "issue:43352", - "right": "issue:43976", - "accept": false, - "reason": "Nemotron Flash Attention support and Python/version compatibility are unrelated issues." - }, - { - "left": "issue:43064", - "right": "issue:44380", - "accept": false, - "reason": "FSDP/PEFT optimizer-state bug is unrelated to GPT2 attention scaling under backend kernels." - }, { "left": "issue:44483", "right": "issue:44514", "accept": false, - "reason": "Chat-completions request rejection and batched chat template padding crash are different layers of the stack." + "reason": "Both mention chat-related behavior, but one is an API request rejection and the other is a tokenizer/template crash; different code paths and symptoms." }, { "left": "issue:43208", "right": "issue:43526", "accept": false, - "reason": "xLSTM training bugs vs BeitImageProcessorFast label reduction bug; no shared underlying defect." + "reason": "Completely different models and bugs: xLSTM training blockers versus BEIT image processor label reduction." }, { "left": "issue:43618", "right": "issue:44380", "accept": false, - "reason": "CLIP attentions assignment regression is separate from GPT2 attention scaling config being ignored." + "reason": "One is a CLIP output regression and the other is GPT2 attention scaling under SDPA/FlashAttention; not the same failure." 
}, { "left": "issue:36683", "right": "issue:43582", "accept": false, - "reason": "Gemma3 vocab_size attribute error and AppleSilicon TypeError in caching_allocator_warmup are unrelated." + "reason": "A missing Gemma3 config attribute is unrelated to an Apple Silicon allocator TypeError." }, { "left": "issue:43618", "right": "issue:43761", "accept": false, - "reason": "Both are CLIP regressions, but one drops attentions and the other drops hidden_states; not the same bug." + "reason": "Both are CLIP regressions, but one is missing attentions and the other is missing hidden_states; related area, different concrete bugs." }, { "left": "issue:44483", "right": "issue:45290", "accept": false, - "reason": "Server-side request validation issue vs apply_chat_template crash on tool-call assistant messages." + "reason": "Chat-completions request validation and apply_chat_template tool-call handling are distinct problems." }, { "left": "issue:43012", "right": "issue:44683", "accept": false, - "reason": "PyTorch warning on bfloat16 compile and flex_attention failure on torch>=2.9 are distinct compiler/backend problems." + "reason": "A bfloat16 compile warning is not the same as a torch>=2.9 compiled flex_attention failure." }, { - "left": "issue:42757", - "right": "issue:43824", + "left": "issue:44038", + "right": "issue:44230", "accept": false, - "reason": "Different missing imports from different modules; same symptom, different symbols and root causes." + "reason": "One is a Qwen3-VL-Moe bug report, the other is an fp8 support request; not the same underlying issue." }, { - "left": "issue:44038", - "right": "issue:44230", + "left": "issue:42757", + "right": "issue:43824", "accept": false, - "reason": "Qwen3-VL-Moe loading regression vs fp8 inference support request; not the same change or bug." + "reason": "Both are import errors, but for different symbols and different root causes." 
}, { "left": "issue:44483", "right": "issue:45381", "accept": false, - "reason": "Chat/completions request handling and Qwen2.5-VL video vision_position_ids are unrelated." + "reason": "A chat/completions acceptance issue is unrelated to qwen2.5-vl video vision_position_ids being wrong." }, { "left": "issue:43299", "right": "issue:43867", "accept": false, - "reason": "Qwen3VL-Moe loading regression and state_dict-sorted load error differ in model path and failure mode." + "reason": "Qwen3VL-Moe loading failure and a sorted-state_dict load error are not the same bug." }, { "left": "issue:38617", "right": "issue:43502", "accept": false, - "reason": "Missing configuration_utils import and local_files_only still making network calls are unrelated." + "reason": "An import error in configuration_utils is unrelated to local_files_only still allowing API requests." }, { "left": "issue:43756", "right": "issue:44485", "accept": false, - "reason": "Smollm3 RoPE-layer mismatch is unrelated to GLM-5 RoPE implementation discussion." + "reason": "Both mention RoPE, but one is about Smollm3 layer dropping and the other is a GLM-5 RoPE implementation discussion." }, { "left": "issue:43901", "right": "issue:44655", "accept": false, - "reason": "Documentation mismatch for return_all_scores and Pipeline save_pretrained failure are different concerns." + "reason": "Documentation mismatch for return_all_scores is unrelated to save_pretrained on Pipeline objects." }, { "left": "issue:42994", "right": "issue:44655", "accept": false, - "reason": "Quantized model save failure is not the same as saving Pipeline objects." + "reason": "Quantized model saving failure and pipeline save_pretrained failure are different save-path bugs." }, { "left": "issue:43867", "right": "issue:45216", "accept": false, - "reason": "State_dict ordering load error and Qwen3.5 checkpoint corruption after save_pretrained are different bugs." 
+ "reason": "A load error with sorted state_dict and a Qwen3.5 save_pretrained regression are not the same defect." }, { "left": "issue:36683", "right": "issue:43475", "accept": false, - "reason": "Gemma3 vocab_size attribute error and SAM3 video missing fpn_position_embeddings are unrelated model config issues." + "reason": "Missing Gemma3Config.vocab_size and missing Sam3VisionEncoderOutput.fpn_position_embeddings are unrelated model-specific attribute bugs." }, { - "left": "issue:44220", - "right": "issue:44655", + "left": "issue:44655", + "right": "issue:45478", "accept": false, - "reason": "_torch_extract_fbank_features bug and Pipeline serialization bug are different subsystems." + "reason": "Saving Pipeline objects and Qwen3.5 MoE from_pretrained errors affect different APIs and code paths." }, { "left": "issue:43066", "right": "issue:45381", "accept": false, - "reason": "Tokenizer decoder-type regression and Qwen2.5-VL video position-id bug are unrelated." + "reason": "Tokenizer decoder type selection and qwen2.5-vl video position ids are different issues." }, { "left": "issue:43352", "right": "issue:43937", "accept": false, - "reason": "FlashAttention support gating for Nemotron and GLM-5 GenerationConfig validation are different issues." + "reason": "Nemotron FlashAttention support and GLM-5 generation config validation are unrelated." }, { "left": "issue:44077", "right": "issue:44380", "accept": false, - "reason": "patchtsmixer post_init allowance and GPT2 attention scaling under SDPA/FlashAttention are unrelated." + "reason": "A PatchTSMixer post_init policy change is not the same as GPT2 attention scaling being ignored under specific backends." }, { "left": "issue:43064", "right": "issue:44492", "accept": false, - "reason": "Distributed optimizer-state bug and a cache-strategy typo are not the same underlying problem." + "reason": "Distributed optimizer-state corruption and a cache-strategy typo are clearly unrelated." 
}, { "left": "issue:44263", "right": "issue:45440", "accept": false, - "reason": "torch.split return handling in GLM-Moe indexer is unrelated to DeepseekV3MoE implementation divergence." + "reason": "Both are MoE-adjacent, but one is a torch.split indexer bug and the other is a DeepseekV3MoE implementation mismatch." }, { "left": "issue:43976", "right": "issue:44938", "accept": false, - "reason": "Both are environment-compatibility reports, but Python 3.9/3.10 and Python 3.14 failures have different causes." + "reason": "Python-version compatibility problems in 5.1.0 and 3.14 load failures are not the same bug." }, { "left": "issue:36683", "right": "issue:44077", "accept": false, - "reason": "Gemma3 config attribute error is unrelated to patchtsmixer post_init policy." + "reason": "A Gemma3 config attribute error has nothing to do with PatchTSMixer post_init handling." }, { "left": "issue:36683", "right": "issue:43525", "accept": false, - "reason": "Gemma3 vocab_size missing and Llama4 pad_token_id missing are different config attribute regressions." + "reason": "Both are missing config fields, but they affect different models and different attributes." }, { "left": "issue:36010", "right": "issue:42757", "accept": false, - "reason": "Different import failures from different modules and packages; not the same missing symbol." + "reason": "Both are import errors, but they come from different packages and symbols with no shared concrete fix." }, { "left": "issue:44188", "right": "issue:44380", "accept": false, - "reason": "Torch.compile branching divergence in attention kernels is not the same as GPT2 attention scaling config being ignored." + "reason": "A torch.compile branching divergence in attention kernels is not the same as GPT2 attention scaling being ignored." }, { "left": "issue:43618", "right": "issue:45216", "accept": false, - "reason": "CLIP attentions regression and Qwen3.5 checkpoint save corruption are unrelated." 
+ "reason": "A CLIP attentions regression and a Qwen3.5 checkpoint regression do not share the same underlying bug." }, { "left": "issue:43352", "right": "issue:45593", "accept": false, - "reason": "FlashAttention support for Nemotron and D-FINE auxiliary-loss behavior are different model bugs." + "reason": "Nemotron FA2 support and D-FINE auxiliary-loss handling are unrelated model issues." }, { "left": "issue:43992", "right": "issue:45310", "accept": false, - "reason": "Missing embed_tokens.weight in UMT5Encoder and Qwen3.5 Moe from_pretrained error are different load paths and failures." + "reason": "UMT5 missing embed_tokens.weight during loading and Qwen3.5 MoE from_pretrained failure are different loading bugs." + }, + { + "left": "issue:36683", + "right": "issue:44242", + "accept": false, + "reason": "A missing Gemma3Config attribute is unrelated to load balancing loss not being added for MoE routing." + }, + { + "left": "issue:43010", + "right": "issue:45278", + "accept": false, + "reason": "Cache update no_grad decoration and broad import failures after upgrade are different parts of the library." + }, + { + "left": "issue:36683", + "right": "issue:43352", + "accept": false, + "reason": "A Gemma3 config error and Nemotron FlashAttention support are unrelated." 
} ] }, @@ -12792,6 +12783,8 @@ "cluster_id": "cluster-15354-374", "nodes": [ "issue:28282", + "issue:30990", + "issue:31795", "issue:33453", "issue:36296", "issue:36683", @@ -12802,7 +12795,6 @@ "issue:43064", "issue:43065", "issue:43232", - "issue:43352", "issue:43502", "issue:43575", "issue:43618", @@ -12815,13 +12807,13 @@ "issue:44077", "issue:44112", "issue:44222", - "issue:44242", "issue:44246", "issue:44351", "issue:44355", "issue:44360", "issue:44387", "issue:44484", + "issue:44485", "issue:44556", "issue:44749", "issue:44857", @@ -12829,7 +12821,6 @@ "issue:44964", "issue:45071", "issue:45125", - "issue:45278", "issue:45341", "issue:45357", "issue:45375", @@ -12841,9 +12832,6 @@ "issue:45600" ], "soft_pairs": [ - "issue:36683|issue:44242", - "issue:43010|issue:45278", - "issue:36683|issue:43352", "issue:41669|issue:44246", "issue:43867|issue:45375", "issue:36683|issue:43064", @@ -12864,8 +12852,8 @@ "issue:42890|issue:45600", "issue:42890|issue:44112", "issue:43010|issue:44075", - "issue:43502|issue:44062", "issue:43723|issue:44556", + "issue:43502|issue:44062", "issue:44222|issue:45125", "issue:42890|issue:45341", "issue:44387|issue:44556", @@ -12874,250 +12862,243 @@ "issue:42548|issue:44351", "issue:45125|issue:45478", "issue:44355|issue:44749", - "issue:45125|issue:45375" + "issue:45125|issue:45375", + "issue:43502|issue:44485", + "issue:30990|issue:31795" ], - "prepared_review_unit_hash": "680a4d8cfe5f60e8baff9bffaea25f07d7d0f8dc3eb86667a4eb383ca4f1736f", + "prepared_review_unit_hash": "1db37b0ccab3b01daa062b2d1faf402c46f70bb0dfeb21ea0575bdcd96059578", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 34, - "serialized_chars": 37356, - 
"estimated_input_tokens": 9339, - "estimated_eval_tokens": 18934 + "soft_pair_count": 33, + "serialized_chars": 37033, + "estimated_input_tokens": 9259, + "estimated_eval_tokens": 18774 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly heterogeneous and centered on unrelated loading/import/model-specific regressions. Only the import-time slowdown pair looks like a true duplicate; the rest are better treated as distinct bugs.", - "confidence": 0.71, - "canonical_issue_reason": "issue:45278 is the best umbrella issue: it captures the broad post-upgrade import-breakage theme that most closely represents this mixed cluster, whereas the other items are narrower model- or test-specific regressions.", + "summary": "This cluster is heterogeneous: it mixes import/API breakages, tokenizer/model loading regressions, tensor-parallel/training bugs, and unrelated model-specific issues. The soft links are mostly keyword similarity, not the same underlying bug/change, so I reject them all.", + "confidence": 0.93, + "canonical_issue_reason": "issue:42548 is the clearest concrete user-facing import failure in the set and the best representative of the recurring import/load theme, though the cluster is too mixed for it to be a true duplicate hub.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45278 is the strongest global representative because it is broad, user-facing, and aligned with the dominant import/load regression theme across the set.", + "best_issue_reason": "issue:42548 has the strongest, most actionable signal among the issues: a specific API break (`cannot import name 'PreTrainedModel'`) with direct impact and clear repro surface.", "best_pr_reason": null, "soft_edge_verdicts": [ - { - "left": "issue:36683", - "right": "issue:44242", - "accept": false, - "reason": "Gemma3Config vocab_size missing is a model-config bug; load-balancing loss 
omission is a different MoE training issue." - }, - { - "left": "issue:43010", - "right": "issue:45278", - "accept": false, - "reason": "Cache update/no_grad behavior is unrelated to broad import errors after upgrading." - }, - { - "left": "issue:36683", - "right": "issue:43352", - "accept": false, - "reason": "Different subsystems and symptoms: Gemma3Config attribute error vs Flash Attention 2.0 support gate." - }, { "left": "issue:41669", "right": "issue:44246", - "accept": true, - "reason": "Both describe the same import-time slowdown regression; 41669 identifies the `import *` cause and 44246 reports the user-visible import slowness." + "accept": false, + "reason": "Both concern import performance, but one is about removing `import *` and the other is about slow intermittent `import transformers`; related theme, not the same bug." }, { "left": "issue:43867", "right": "issue:45375", "accept": false, - "reason": "State-dict loading failure and a missing config field are different bugs in different code paths." + "reason": "Both mention loading/config behavior, but one is a generic state_dict loading error and the other is a missing `deepstack_visual_indexes` config field for Qwen3.5 MoE." }, { "left": "issue:36683", "right": "issue:43064", "accept": false, - "reason": "Gemma3Config missing vocab_size is unrelated to the FSDP2/PEFT optimizer-state issue." + "reason": "Gemma3 `vocab_size` missing is a model-config attribute issue; FSDP2+PEFT optimizer-state corruption is a distributed training bug." }, { "left": "issue:43992", "right": "issue:45478", "accept": false, - "reason": "Both concern model loading, but they affect different models and likely different root causes." + "reason": "UMT5 encoder missing `embed_tokens.weight` during load is unrelated to Qwen3.5 MoE `from_pretrained` failure." 
}, { "left": "issue:42890", "right": "issue:45561", "accept": false, - "reason": "One is flaky test seeding, the other is an xdist file-race in testing utils; not the same bug." + "reason": "One is flaky SAM HQ tests from missing seeds; the other is an xdist race on `captured_info.txt` in testing utils." }, { "left": "issue:44938", "right": "issue:45071", "accept": false, - "reason": "Python 3.14 import failure and PretrainedConfig type-checking regression are distinct compatibility issues." + "reason": "Python 3.14 import/load failure is a runtime-compatibility issue; v5.4.0 `PretrainedConfig` type checking is a separate typing regression." }, { "left": "issue:43867", "right": "issue:45357", "accept": false, - "reason": "A state_dict sort/load issue is not the same as incorrect visual encoder keys in save_pretrained." + "reason": "Both involve loading/saving regressions, but one is generic state_dict loading and the other is incorrect visual encoder keys in Qwen3.5 `save_pretrained`." }, { "left": "issue:43010", "right": "issue:43976", "accept": false, - "reason": "Cache update decoration behavior is unrelated to Python-version support in Transformers 5.1.0." + "reason": "One is about cache/layer `update()` needing `torch.no_grad`; the other is a Python version compatibility problem in Transformers 5.1.0." }, { "left": "issue:44938", "right": "issue:45381", "accept": false, - "reason": "Python 3.14 load failure and Qwen2.5-VL vision_position_ids are unrelated." + "reason": "Python 3.14 import failure and Qwen2.5-VL video `vision_position_ids` mismatch are different symptoms and code paths." }, { "left": "issue:44360", "right": "issue:45440", "accept": false, - "reason": "DSA indexer activation bug and DeepSeekV3MoE divergence are different implementation issues." + "reason": "The DSA indexer/ReLU discussion is unrelated to the DeepseekV3MoE divergence from remote implementation." 
}, { "left": "issue:43618", "right": "issue:44077", "accept": false, - "reason": "CLIPOutput attentions regression and patchtsmixer post_init policy are unrelated." + "reason": "`CLIPOutput.attentions` disappearing is an output-structure regression; `patchtsmixer` `post_init` allowance is a separate model-API validation issue." }, { "left": "issue:36296", "right": "issue:43575", "accept": false, - "reason": "Tensor-parallel training bug and TP OOM on a specific large model are not the same concrete failure." + "reason": "Tensor-parallel training bug and Qwen2-57B-A14B-Instruct TP OOM are both TP-related but not the same concrete failure." }, { "left": "issue:43867", "right": "issue:44484", "accept": false, - "reason": "State_dict loading error is not the same as the default max_shard_size question." + "reason": "A generic load error from sorted state_dict keys is not the same as the `max_shard_size` default question in `save_pretrained`." }, { "left": "issue:43618", "right": "issue:45588", "accept": false, - "reason": "Different failures: CLIPOutput attentions missing vs flash_attention.py crashing on s_aux=None." + "reason": "`CLIPOutput.attentions` missing is unrelated to `flash_attention.py` crashing on `s_aux=None`." }, { "left": "issue:43618", "right": "issue:44857", "accept": false, - "reason": "CLIPOutput attentions regression and AMP/CUDA crash in LwDetrImageLoss are unrelated." + "reason": "Output field regression in CLIP is not the same as the float16 AMP crash in `LwDetrImageLoss`." }, { "left": "issue:28282", "right": "issue:33453", "accept": false, - "reason": "AutoModel missing PyTorch dependency and tokenizer loading regression are different import/load problems." + "reason": "Both are loading/import problems, but one is a missing PyTorch dependency error and the other is a tokenizer loading regression." 
}, { "left": "issue:42890", "right": "issue:44964", "accept": false, - "reason": "Seed-related test flakiness is unrelated to Phi-4 multimodal model loading." + "reason": "Seed-related test flakiness and inability to load `microsoft/Phi-4-multimodal-instruct` are unrelated." }, { "left": "issue:42890", "right": "issue:45600", "accept": false, - "reason": "Missing set_seed in a test is unrelated to removed Sam3LiteText config references in auto_mappings." + "reason": "A flaky integration test and `auto_mappings.py` referencing removed Sam3LiteText configs are different CI failures." }, { "left": "issue:42890", "right": "issue:44112", "accept": false, - "reason": "Flaky integration test seeding and GraniteSpeech device-override CI failure are different test bugs." + "reason": "Missing seeds in tests and a stale device-override test failure in GraniteSpeech are different test issues." }, { "left": "issue:43010", "right": "issue:44075", "accept": false, - "reason": "Cache update decoration is unrelated to SGD args not being applied." + "reason": "`torch.no_grad` on cache/layer updates is unrelated to SGD args not being used." }, { - "left": "issue:43502", - "right": "issue:44062", + "left": "issue:43723", + "right": "issue:44556", "accept": false, - "reason": "Ignoring local_files_only and AddedToken special-arg duplication are different tokenizer/file-fetch bugs." + "reason": "Tokenizer loading via `AutoTokenizer.from_pretrained` is not the same as checkpoint reload failures after upgrading from v4.57 to v5.x." }, { - "left": "issue:43723", - "right": "issue:44556", + "left": "issue:43502", + "right": "issue:44062", "accept": false, - "reason": "Tokenizer loading regression and checkpoint reload incompatibility are related only loosely; the concrete failures differ." + "reason": "`local_files_only=True` still making API requests is a download-policy bug, not the `AddedToken(..., special=...)` argument collision." 
}, { "left": "issue:44222", "right": "issue:45125", "accept": false, - "reason": "FP8 save_pretrained behavior and missing `_tp_plan` for tensor parallelism are different MoE issues." + "reason": "FP8 `save_pretrained` issues in MoE and missing `_tp_plan` for Qwen3_5Moe are distinct model-export vs tensor-parallel setup problems." }, { "left": "issue:42890", "right": "issue:45341", "accept": false, - "reason": "Missing test seeding is not the same as the generic testing_utils bug referenced here." + "reason": "General seed flakiness in tests is not the same as a specific bug in `testing_utils.py`." }, { "left": "issue:44387", "right": "issue:44556", "accept": false, - "reason": "Int4 quantization OOM and checkpoint reload failure are distinct regressions." + "reason": "Int4 quantization memory regression is different from checkpoint reload breakage after upgrading versions." }, { "left": "issue:43010", "right": "issue:43065", "accept": false, - "reason": "Cache update no_grad behavior and the Sam3PixelDecoder dummy Conv2d issue are unrelated." + "reason": "`@torch.no_grad` on cache updates and a dummy `nn.Conv2d` in `Sam3PixelDecoder` do not share the same code-path bug." }, { "left": "issue:43010", "right": "issue:43232", "accept": false, - "reason": "Cache update no_grad behavior and generation kwarg synchronization are different code paths." + "reason": "Cache/layer update decoration and `_update_model_kwargs_for_generation` after `sync_gpus` are separate generation bugs." }, { "left": "issue:42548", "right": "issue:44351", "accept": false, - "reason": "Missing PreTrainedModel export and missing HybridCache export are both import errors, but not the same missing symbol or root cause." + "reason": "Both are import-name failures, but they target different missing symbols (`PreTrainedModel` vs `HybridCache`) and likely different export paths." 
+ }, + { + "left": "issue:45125", + "right": "issue:45478", + "accept": false, + "reason": "Missing `_tp_plan` and Qwen3.5 MoE `from_pretrained` error are related to the same family, but the reported failures are not the same concrete bug." }, { "left": "issue:44355", "right": "issue:44749", "accept": false, - "reason": "Compiled Python-file errors and slow filtering after upgrade are unrelated." + "reason": "Compiled Python file loading errors and slow filtering after upgrading from 4.57.3 to 5.3.0 are unrelated." }, { "left": "issue:45125", - "right": "issue:45478", + "right": "issue:45375", "accept": false, - "reason": "Missing `_tp_plan` and Qwen3.5 Moe from_pretrained error are related only at a high level; not the same concrete bug." + "reason": "Tensor-parallel plan missing is different from `deepstack_visual_indexes` being dropped by strict config handling." }, { - "left": "issue:44222", - "right": "issue:45375", + "left": "issue:43502", + "right": "issue:44485", + "accept": false, + "reason": "Offline-download leakage and GLM-5 RoPE implementation discussion are not the same underlying bug." + }, + { + "left": "issue:30990", + "right": "issue:31795", "accept": false, - "reason": "FP8 MoE save_pretrained and missing deepstack_visual_indexes are different Qwen3.5 vision/config issues." + "reason": "Sentence Transformers loading hangs and confusing `input_ids`/`past_key_values` documentation are unrelated." } ] }, - "evaluator_result": { - "accept": true, - "feedback": "The summary is grounded in the packet: issue:45278 is a reasonable umbrella for the import/load-related regressions, and the rest of the cluster is mostly heterogeneous. The only accepted soft edge, issue:41669 \u2194 issue:44246, looks like a plausible duplicate import-slowdown pair. The other pairwise rejections are conservative and consistent with the issue titles." 
- }, - "evaluator_used": true, + "evaluator_result": null, + "evaluator_used": false, "retried": false, - "accepted_nontrivial_soft_edge": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -13135,7 +13116,6 @@ "cluster_id": "cluster-15354-374", "nodes": [ "issue:30333", - "issue:30990", "issue:31795", "issue:33453", "issue:34689", @@ -13178,14 +13158,13 @@ "issue:44961", "issue:45003", "issue:45071", + "issue:45092", "issue:45125", "issue:45230", "issue:45468", "issue:45542" ], "soft_pairs": [ - "issue:43502|issue:44485", - "issue:30990|issue:31795", "issue:44075|issue:44871", "issue:44315|issue:44360", "issue:30333|issue:33453", @@ -13208,8 +13187,8 @@ "issue:43673|issue:44492", "issue:44961|issue:45468", "issue:31795|issue:38617", - "issue:43576|issue:44351", "issue:41628|issue:42548", + "issue:43576|issue:44351", "issue:44908|issue:44961", "issue:44485|issue:44961", "issue:36296|issue:43901", @@ -13222,270 +13201,265 @@ "issue:43576|issue:44556", "issue:44038|issue:45125", "issue:44393|issue:45071", - "issue:41669|issue:43673" + "issue:41669|issue:43673", + "issue:44556|issue:45092" ], - "prepared_review_unit_hash": "99194bfbff11ba0d515efd1661cab690c54cadf7855fb5ed9774db2c75d3382a", + "prepared_review_unit_hash": "5ae75ddd3cf52fbd57b9539fbcf5b84e476e41e863f29198221f5c0ab0b25875", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 39, - "serialized_chars": 38649, - "estimated_input_tokens": 9663, - "estimated_eval_tokens": 19582 + "soft_pair_count": 38, + "serialized_chars": 38406, + "estimated_input_tokens": 9602, + "estimated_eval_tokens": 19460 }, "overflow_policy": "truncate_then_skip", "trimmed": false, 
"aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a loose set of Transformers v5 regression reports, especially import/loading failures, but the soft pairs do not look like true duplicates of the same concrete bug. I would not merge any of the proposed soft edges.", - "confidence": 0.64, - "canonical_issue_reason": "issue:43576 is the most central and broadly connected report in the set, with multiple soft similarities to other v5 breakage/import-loading issues. It best represents the cluster theme, even though it is not a duplicate of the others.", + "summary": "Most items are unrelated or only loosely similar v5/Transformers regression reports. The soft similarities are mostly broad-topic overlaps (imports, loading, docs, FP8, cache) rather than the same concrete bug, so no soft-edge merge is justified.", + "confidence": 0.69, + "canonical_issue_reason": "issue:43576 is the clearest, most actionable regression report in the set and sits near several v5-related similarity links, while the others are either narrower, more ambiguous, or about different code paths.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43576 is the best single issue to anchor this cluster because it is the most connected to the other v5-related reports and captures the shared breakage theme more broadly than the narrower model-specific issues.", + "best_issue_reason": "issue:43576 is the strongest standalone candidate because it describes a specific, user-visible breakage with a clear surface area and better triage value than the more ambiguous or peripheral reports.", "best_pr_reason": null, "soft_edge_verdicts": [ - { - "left": "issue:43502", - "right": "issue:44485", - "accept": false, - "reason": "Different problems: offline/local-files-only networking behavior vs GLM-5 RoPE implementation." 
- }, - { - "left": "issue:30990", - "right": "issue:31795", - "accept": false, - "reason": "Unrelated topics: Sentence Transformers load hang vs documentation confusion about forward arguments." - }, { "left": "issue:44075", "right": "issue:44871", "accept": false, - "reason": "Different bugs: SGD optimizer args ignored vs Gemma-3 EOS token config mismatch." + "reason": "Different problems: optimizer argument handling vs Gemma eos_token_id config mismatch." }, { "left": "issue:44315", "right": "issue:44360", "accept": false, - "reason": "No shared code-path or symptom; model_init/Liger Kernel issue is unrelated to the DSA indexer ReLU discussion." + "reason": "Liger Kernel application bug and DSA indexer ReLU issue are unrelated code paths." }, { "left": "issue:30333", "right": "issue:33453", "accept": false, - "reason": "Different failures: MLFlow reporting on training failure vs tokenizer loading regression." + "reason": "MLFlow failure reporting and tokenizer loading regression are different bugs." }, { "left": "issue:43010", "right": "issue:44368", "accept": false, - "reason": "Different areas: cache/layer no_grad decoration vs tie_word_embeddings warning during Qwen fine-tuning." + "reason": "Cache update no_grad decoration and tie_word_embeddings warning are unrelated." }, { "left": "issue:43502", "right": "issue:44222", "accept": false, - "reason": "Different issues: local_files_only network requests vs FP8 moe save_pretrained behavior." + "reason": "local_files_only network leakage and FP8 save_pretrained behavior are different issues." }, { "left": "issue:43299", "right": "issue:44556", "accept": false, - "reason": "Both are v5-related loading regressions, but they affect different models and scenarios; not the same concrete bug." + "reason": "Both concern v5 loading regressions, but one is Qwen3VL MoE-specific while the other is a broader checkpoint reload incompatibility." 
}, { "left": "issue:43502", "right": "issue:44961", "accept": false, - "reason": "No meaningful overlap; local_files_only networking bug vs unrelated 'racoon' issue." + "reason": "API requests despite local_files_only and the placeholder racoon issue are unrelated." }, { "left": "issue:44222", "right": "issue:44230", "accept": false, - "reason": "Related FP8/moe area, but one is save_pretrained and the other is inference support; not the same bug." + "reason": "save_pretrained FP8 MOE bug vs FP8 inference support; different operations and failure modes." }, { "left": "issue:44355", "right": "issue:44655", "accept": false, - "reason": "Different functionality: compiled Python file errors vs saving Pipeline objects." + "reason": "Compiled-file import errors and Pipeline.save_pretrained are unrelated." }, { "left": "issue:30333", "right": "issue:36296", "accept": false, - "reason": "Different subsystems and symptoms: MLFlow reporting vs tensor parallel training." + "reason": "MLFlow reporting failure and tensor parallel training bug are different problems." }, { "left": "issue:44355", "right": "issue:45071", "accept": false, - "reason": "Different issues: compiled file execution error vs PretrainedConfig type-checking regression." + "reason": "Runtime import failure and PretrainedConfig type-checking regression are different code paths." }, { "left": "issue:42831", "right": "issue:43874", "accept": false, - "reason": "Different model-specific bugs: FineGrainedFP8 accuracy vs GLM46V image processor token counting." + "reason": "FineGrainedFP8 accuracy issue and missing image patch method are unrelated." }, { "left": "issue:33453", "right": "issue:34689", "accept": false, - "reason": "Both are loading regressions, but for different models and likely different root causes." + "reason": "Both are loading regressions, but they affect different models and failure modes." 
}, { "left": "issue:42757", "right": "issue:43576", "accept": false, - "reason": "Both are v5 compatibility complaints, but one is a missing import and the other is the env command being broken; not the same bug." + "reason": "Missing hub symbol import and v5 env CLI breakage are distinct surfaces." }, { "left": "issue:33453", "right": "issue:36296", "accept": false, - "reason": "Unrelated: tokenizer loading regression vs tensor parallel training bug." + "reason": "Tokenizer loading regression and tensor parallel training bug are unrelated." }, { "left": "issue:43827", "right": "issue:44961", "accept": false, - "reason": "Docs reference to pipeline() is unrelated to the 'racoon' issue." + "reason": "Docs about pipeline removals and the placeholder issue are unrelated." }, { "left": "issue:43576", "right": "issue:45071", "accept": false, - "reason": "Both involve v5 breakage, but one is the env command and the other is PretrainedConfig typing; different concrete failures." + "reason": "CLI env regression and config type checking are different issues." }, { "left": "issue:43576", "right": "issue:43824", "accept": false, - "reason": "Both are import/loading regressions, but the missing symbol and the env command bug are not the same change or code-path." + "reason": "CLI env regression and Qwen2.5-VL import error are unrelated." }, { "left": "issue:38617", "right": "issue:43576", "accept": false, - "reason": "Different specific failures: missing layer_type_validation import vs broken env command." + "reason": "Missing exported symbol import error and CLI env breakage are different bugs." }, { "left": "issue:43673", "right": "issue:44492", "accept": false, - "reason": "Cache-missing during chunked prefill is not the same as a typo in cache strategies." + "reason": "Chunked_prefill cache bug and cache-strategy typo are not the same underlying problem." 
}, { "left": "issue:44961", "right": "issue:45468", "accept": false, - "reason": "No overlap: 'racoon' vs Gemma-4 audio positional encoding." + "reason": "Placeholder title and Gemma-4 audio positional encoding bug are unrelated." }, { "left": "issue:31795", "right": "issue:38617", "accept": false, - "reason": "Documentation confusion vs import error; not duplicate bugs." + "reason": "Documentation confusion and missing import symbol are unrelated." }, { - "left": "issue:43576", - "right": "issue:44351", + "left": "issue:41628", + "right": "issue:42548", "accept": false, - "reason": "Both are v5 import-related, but they concern different missing symbols and likely different fixes." + "reason": "Both are import/export failures, but they involve different symbols and likely different root causes." }, { - "left": "issue:41628", - "right": "issue:42548", + "left": "issue:43576", + "right": "issue:44351", "accept": false, - "reason": "Both are import errors, but for different classes and missing symbols; not enough evidence of the same underlying bug." + "reason": "CLI env regression and HybridCache import error are different failure modes." }, { "left": "issue:44908", "right": "issue:44961", "accept": false, - "reason": "Scheduler kwargs bug is unrelated to the 'racoon' issue." + "reason": "Scheduler kwargs bug and placeholder issue are unrelated." }, { "left": "issue:44485", "right": "issue:44961", "accept": false, - "reason": "RoPE implementation discussion is unrelated to 'racoon'." + "reason": "GLM-5 RoPE implementation issue and placeholder issue are unrelated." }, { "left": "issue:36296", "right": "issue:43901", "accept": false, - "reason": "Tensor parallel training bug is unrelated to a docs mismatch in TextClassificationPipeline." + "reason": "Tensor parallel training bug and TextClassificationPipeline docs mismatch are unrelated." 
}, { "left": "issue:36296", "right": "issue:43526", "accept": false, - "reason": "Different model/training bug vs BeitImageProcessorFast reduce_labels issue." + "reason": "Tensor parallel training bug and BeitImageProcessorFast label bug are unrelated." }, { "left": "issue:41628", "right": "issue:44355", "accept": false, - "reason": "Import error for AutoImageProcessor is unrelated to compiled Python file execution errors." + "reason": "ImportError for a public API and compiled-file errors are unrelated." }, { "left": "issue:44273", "right": "issue:45003", "accept": false, - "reason": "Both mention loading behavior, but lazy loading problems and unsafe sys.modules access are not clearly the same bug." + "reason": "Lazy loading malfunction and unsafe sys.modules access are related only broadly, not the same concrete bug." }, { "left": "issue:44261", "right": "issue:44360", "accept": false, - "reason": "MLA q_a_layernorm precision/config issue is unrelated to the DSA indexer ReLU topic." + "reason": "MLA layernorm epsilon precision issue and DSA indexer ReLU issue are unrelated." }, { "left": "issue:44623", "right": "issue:45230", "accept": false, - "reason": "Generic 'Bug report' is too vague to treat as the same issue as missing processor.save_pretrained files." + "reason": "processor.save_pretrained missing files and a generic bug report are not the same issue." }, { "left": "issue:44273", "right": "issue:45542", "accept": false, - "reason": "Lazy loading behavior is unrelated to the TensorFlow backend error when only tensorboard is installed." + "reason": "Lazy loading bug and TensorFlow backend installation issue are unrelated." }, { "left": "issue:43576", "right": "issue:44556", "accept": false, - "reason": "Both are v5 breakages, but one is the env command and the other is checkpoint reload compatibility; not the same concrete bug." 
+ "reason": "v5 env CLI regression and checkpoint reload incompatibility after upgrade are both version-related, but not the same concrete bug." }, { "left": "issue:44038", "right": "issue:45125", "accept": false, - "reason": "Different Qwen/moe and tensor-parallel problems; not the same underlying change." + "reason": "Qwen3-VL/Qwen3.5-Moe bug and missing tensor-parallel plan are different issues." }, { "left": "issue:44393", "right": "issue:45071", "accept": false, - "reason": "Qwen3-VL bounding-box output issue is unrelated to PretrainedConfig type checking." + "reason": "Qwen3-VL 2D bbox output problem and PretrainedConfig type checking regression are unrelated." }, { "left": "issue:41669", "right": "issue:43673", "accept": false, - "reason": "Import-star cleanup/perf issue is unrelated to a missing cache during chunked prefill." + "reason": "Removing import * usage and chunked_prefill cache missing are unrelated." + }, + { + "left": "issue:44556", + "right": "issue:45092", + "accept": false, + "reason": "Both are old-checkpoint/v5 compatibility complaints, but the model families and failure contexts differ enough that they do not look like the same bug." 
} ] }, @@ -13541,7 +13515,6 @@ "issue:44393", "issue:44485", "issue:44492", - "issue:44556", "issue:44623", "issue:44655", "issue:44704", @@ -13553,13 +13526,13 @@ "issue:45071", "issue:45092", "issue:45230", + "issue:45290", "issue:45310", "issue:45341", "issue:45468", "issue:45478" ], "soft_pairs": [ - "issue:44556|issue:45092", "issue:42371|issue:44492", "issue:42890|issue:44908", "issue:44704|issue:44908", @@ -13602,314 +13575,329 @@ "issue:44263|issue:45468", "issue:44261|issue:45468", "issue:43606|issue:45468", - "issue:42890|issue:43704", "issue:42913|issue:43066", - "issue:44908|issue:44998" + "issue:42890|issue:43704", + "issue:44908|issue:44998", + "issue:44861|issue:45290", + "issue:44998|issue:45341", + "issue:44492|issue:44998" ], - "prepared_review_unit_hash": "49fde7466f69de5fd53f79114bc3d20afb46bc84605228a528fecfbe42ffe323", + "prepared_review_unit_hash": "910ca75e55aea22a478fbdaee55c4db7ad829a233b2769b1bd08019b7be98c52", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 46, - "serialized_chars": 40783, - "estimated_input_tokens": 10196, - "estimated_eval_tokens": 20648 + "soft_pair_count": 48, + "serialized_chars": 41396, + "estimated_input_tokens": 10349, + "estimated_eval_tokens": 20954 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This is a mixed set of mostly unrelated Transformers issues, with several loose clusters around tokenizer/model loading in v5, but no soft pair is strong enough to treat as the same underlying bug under strict duplicate triage.", - "confidence": 0.76, - "canonical_issue_reason": "issue:43723 is the clearest broad anchor in 
the set because it directly describes an AutoTokenizer.from_pretrained loading regression in v5, which matches the repeated tokenizer-loading theme better than the narrower or more model-specific reports.", + "summary": "This set is mostly heterogeneous: tokenizer/loading, model init, scheduler, docs, tests, and various model-specific bugs. I did not find a strong duplicate cluster; the closest pair is the AutoProcessor/cached_file kwargs vs local_files_only network-request issue, but evidence is still insufficient to merge them as the same bug.", + "confidence": 0.61, + "canonical_issue_reason": "Issue 44704 is the most concrete, actionable bug in the set: it names a specific code path, a likely root cause, and a clear failure mode around kwargs propagation in from_pretrained.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43723 is also the best overall issue to keep as the representative reference for this cluster because it is the most central and reusable description of the shared v5 tokenizer-loading problem area.", + "best_issue_reason": "Issue 44704 is a strong representative issue because it describes a specific loading-path bug with an identifiable fix surface and good triage value.", "best_pr_reason": null, "soft_edge_verdicts": [ - { - "left": "issue:44556", - "right": "issue:45092", - "accept": false, - "reason": "Both are v5 checkpoint-loading complaints, but one is a generic reload failure and the other is a specific InternVL2 remote-code/meta-init incompatibility; not the same concrete bug." - }, { "left": "issue:42371", "right": "issue:44492", "accept": false, - "reason": "TF32 behavior settings and a cache-strategy typo are unrelated problems." + "reason": "TF32 configuration behavior is unrelated to a cache-strategy typo." }, { "left": "issue:42890", "right": "issue:44908", "accept": false, - "reason": "A flaky integration test missing set_seed is unrelated to the inverse_sqrt scheduler ignoring kwargs." 
+ "reason": "Test seed coverage and inverse_sqrt scheduler kwargs are different failure domains." }, { "left": "issue:44704", "right": "issue:44908", "accept": false, - "reason": "AutoProcessor kwargs forwarding and scheduler kwargs handling are different code paths and different bugs." + "reason": "One is about cached_file kwargs propagation during loading; the other is scheduler kwarg handling. Different code paths." }, { "left": "issue:44246", "right": "issue:44273", "accept": false, - "reason": "Import-time slowness and lazy-loading correctness are related only at a high level, not the same bug." + "reason": "Import-time slowness and lazy loading not working are related broadly, but not the same concrete bug." }, { "left": "issue:43408", "right": "issue:44315", "accept": false, - "reason": "A model-type warning for sam3_video/sam3_tracker is unrelated to Liger Kernel application with model_init." + "reason": "Model-type warning in SAM3 is unrelated to Liger Kernel application with model_init." }, { "left": "issue:44297", "right": "issue:45230", "accept": false, - "reason": "A tokenizer_class mismatch in save_pretrained is not the same as an unspecified generic bug report." + "reason": "Tokenizer-class save_pretrained mismatch is too specific to equate with an unnamed generic bug report." }, { "left": "issue:43352", "right": "issue:44393", "accept": false, - "reason": "Flash Attention 2 support for Nemotron and Qwen3-VL bounding-box output issues are unrelated." + "reason": "Flash Attention support and Qwen3-VL bounding-box output errors are distinct model issues." }, { "left": "issue:43576", "right": "issue:44908", "accept": false, - "reason": "A broken transformers env command is unrelated to scheduler kwargs handling." + "reason": "Broken env command and scheduler kwargs are unrelated." }, { "left": "issue:43976", "right": "issue:44393", "accept": false, - "reason": "Python version compatibility and Qwen3-VL output errors are unrelated." 
+ "reason": "Python version compatibility is unrelated to Qwen3-VL output correctness." }, { "left": "issue:44908", "right": "issue:45071", "accept": false, - "reason": "Scheduler kwargs handling and PretrainedConfig type checking are different regressions." + "reason": "Scheduler kwargs and PretrainedConfig type checking are different bug classes." }, { "left": "issue:44295", "right": "issue:45230", "accept": false, - "reason": "A position_ids buffer access error is not the same as a generic bug report." + "reason": "position_ids buffer access and a generic bug report do not show the same underlying defect." }, { "left": "issue:41669", "right": "issue:44351", "accept": false, - "reason": "Removing import * for performance is not the same as failing to import HybridCache." + "reason": "Import-* cleanup causing import cost is not the same as HybridCache import failure." }, { "left": "issue:41669", "right": "issue:45341", "accept": false, - "reason": "Import cleanup/performance and a testing_utils bug are unrelated." + "reason": "Import performance refactor and a testing_utils bug are unrelated." }, { "left": "issue:44492", "right": "issue:44945", "accept": false, - "reason": "A cache-strategy typo does not match an incorrect pipeline-parallelism output bug." + "reason": "Cache-strategy typo and pipeline parallelism output error are different issues." }, { "left": "issue:41669", "right": "issue:44492", "accept": false, - "reason": "Import cleanup and cache-strategy wording are unrelated." + "reason": "Import-system cleanup and cache-strategy typo are not the same code-path bug." }, { "left": "issue:44315", "right": "issue:44829", "accept": false, - "reason": "Liger Kernel not being applied with model_init and flash_attention_3 causing degenerate training are different issues." + "reason": "Liger Kernel not applied with model_init is unrelated to degenerate training under flash_attention_3." 
}, { "left": "issue:44393", "right": "issue:45478", "accept": false, - "reason": "Qwen3-VL 2D bounding-box hallucinations and Qwen3.5 MoE from_pretrained loading failures are unrelated." + "reason": "Qwen3-VL 2D bbox hallucination and Qwen3.5 MoE from_pretrained error are different model problems." }, { "left": "issue:31795", "right": "issue:33453", "accept": false, - "reason": "Documentation confusion about forward arguments is not the same as a tokenizer-loading regression." + "reason": "Forward doc confusion is not the same as a tokenizer loading regression." }, { "left": "issue:43827", "right": "issue:44908", "accept": false, - "reason": "Docs still referencing pipeline() after removals is unrelated to inverse_sqrt scheduler kwargs." + "reason": "Pipeline docs after v5 removals and scheduler kwargs are unrelated." }, { "left": "issue:44077", "right": "issue:44861", "accept": false, - "reason": "An optional post_init restriction and a tied-weights AttributeError are different bugs." + "reason": "Optional post_init policy and _tied_weight_keys list crash are different implementation bugs." }, { "left": "issue:41669", "right": "issue:44908", "accept": false, - "reason": "Import-time cleanup and scheduler behavior are unrelated." + "reason": "Import-time cleanup and scheduler kwarg handling are unrelated." }, { "left": "issue:43723", "right": "issue:44393", "accept": false, - "reason": "Tokenizer loading in v5 and Qwen3-VL bounding-box output errors are unrelated." + "reason": "Tokenizer loading in v5 is unrelated to Qwen3-VL bounding-box output errors." }, { "left": "issue:44655", "right": "issue:44861", "accept": false, - "reason": "Saving pipeline objects and _get_tied_weight_keys crashing on a list are different failure modes." + "reason": "Saving Pipeline objects and tied-weight key handling do not share the same bug." 
}, { "left": "issue:43576", "right": "issue:44273", "accept": false, - "reason": "The env command issue is unrelated to lazy loading not working properly." + "reason": "Broken env command and lazy loading behavior are different surfaces." }, { "left": "issue:44360", "right": "issue:45468", "accept": false, - "reason": "A missing ReLU in the DSA indexer and Gemma-4 audio positional encoding are unrelated." + "reason": "DSA indexer ReLU discussion is unrelated to Gemma-4 audio positional encoding." }, { "left": "issue:45230", "right": "issue:45310", "accept": false, - "reason": "A generic bug report is not the same as a specific Qwen3.5 MoE from_pretrained error." + "reason": "A generic bug report is not enough to conclude it duplicates a specific Qwen3.5 MoE from_pretrained error." }, { "left": "issue:44273", "right": "issue:44908", "accept": false, - "reason": "Lazy loading failures and scheduler kwargs handling are unrelated." + "reason": "Lazy loading and scheduler kwargs are unrelated." }, { "left": "issue:41669", "right": "issue:43704", "accept": false, - "reason": "Import * cleanup/performance and VRAM leakage in dataloader threads are different issues." + "reason": "Import-side performance cleanup and VRAM leakage in multi-threaded dataloading are different problems." }, { "left": "issue:43519", "right": "issue:44485", "accept": false, - "reason": "Qwen3VL timestamp calculation and GLM-5 RoPE implementation are separate model bugs." + "reason": "Timestamp calculation in Qwen3VL is unrelated to GLM-5 RoPE implementation." }, { "left": "issue:43519", "right": "issue:44908", "accept": false, - "reason": "Processor timestamp calculation and scheduler kwargs handling are unrelated." + "reason": "Timestamp calculation and scheduler kwargs are unrelated." }, { "left": "issue:31795", "right": "issue:34689", "accept": false, - "reason": "Documentation confusion and a Llama 3.2 model-loading regression are not the same bug." 
+ "reason": "Forward documentation confusion does not match a model-loading breakage regression." }, { "left": "issue:31515", "right": "issue:31795", "accept": false, - "reason": "Slow checkpoint loading and forward-argument documentation confusion are unrelated." + "reason": "Slow checkpoint loading and forward-API documentation confusion are different issues." }, { "left": "issue:44315", "right": "issue:45092", "accept": false, - "reason": "Liger Kernel application with model_init and old checkpoint/meta-init incompatibility are different problems." + "reason": "Model_init not applying Liger Kernel and meta-init incompatibility for old InternVL2 are not the same bug." }, { "left": "issue:43704", "right": "issue:44485", "accept": false, - "reason": "VRAM leakage across dataloader threads is unrelated to GLM-5 RoPE implementation." + "reason": "VRAM leak under dataloader threads and RoPE implementation concerns are unrelated." }, { "left": "issue:44485", "right": "issue:45468", "accept": false, - "reason": "GLM-5 RoPE and Gemma-4 audio positional encoding are unrelated model-specific bugs." + "reason": "GLM-5 RoPE implementation and Gemma-4 audio positional encoding are different model-specific concerns." }, { "left": "issue:42371", "right": "issue:43704", "accept": false, - "reason": "TF32 settings and VRAM leakage in dataloader threads are unrelated." + "reason": "TF32 settings and VRAM leakage are unrelated." }, { "left": "issue:43502", "right": "issue:43519", "accept": false, - "reason": "Unexpected API requests with local_files_only and Qwen3VL timestamp calculation are unrelated." + "reason": "local_files_only network requests and Qwen3VL timestamp math are different bugs." }, { "left": "issue:43519", "right": "issue:44623", "accept": false, - "reason": "Processor timestamp math and missing files in processor.save_pretrained are different bugs." + "reason": "Timestamp math in a processor and missing files on processor save are unrelated." 
}, { "left": "issue:43519", "right": "issue:43704", "accept": false, - "reason": "Timestamp calculation and VRAM leakage across threads are unrelated." + "reason": "Processor timestamp math and multi-threaded VRAM leak are unrelated." }, { "left": "issue:44263", "right": "issue:45468", "accept": false, - "reason": "A torch.split return-value issue and Gemma-4 audio positional encoding are different bugs." + "reason": "torch.split return values in an indexer are unrelated to Gemma-4 audio positional encoding." }, { "left": "issue:44261", "right": "issue:45468", "accept": false, - "reason": "An MLA q_a_layernorm precision issue is unrelated to Gemma-4 audio positional encoding." + "reason": "MLA q_a_layernorm epsilon precision and Gemma-4 audio positional encoding are different issues." }, { "left": "issue:43606", "right": "issue:45468", "accept": false, - "reason": "CPU offload device mismatch and Gemma-4 audio positional encoding are unrelated." + "reason": "CPU offload device mismatch and audio positional encoding are unrelated." + }, + { + "left": "issue:42913", + "right": "issue:43066", + "accept": false, + "reason": "Generic tokenizer behavior change and wrong tokenizer decoder type in v5 are related in theme but not clearly the same concrete bug." + }, + { + "left": "issue:42890", + "right": "issue:43704", + "accept": false, + "reason": "Test seeding instability and VRAM leakage are unrelated." + }, + { + "left": "issue:44908", + "right": "issue:44998", + "accept": false, + "reason": "Scheduler kwargs and an irrelevant placeholder issue are unrelated." }, { - "left": "issue:42890", - "right": "issue:43704", + "left": "issue:44861", + "right": "issue:45290", "accept": false, - "reason": "A flaky integration test and VRAM leakage in dataloader threads are unrelated." + "reason": "Tied-weight key handling and apply_chat_template crashes have different failure surfaces." 
}, { - "left": "issue:42913", - "right": "issue:43066", + "left": "issue:44998", + "right": "issue:45341", "accept": false, - "reason": "Both mention tokenizer changes in v5, but one is a broad behavior-difference report and the other is a specific decoder-type mismatch; not enough to call them the same bug." + "reason": "Unemployment is not a technical duplicate of a testing_utils bug." }, { - "left": "issue:44908", + "left": "issue:44492", "right": "issue:44998", "accept": false, - "reason": "A scheduler kwargs bug and an unrelated placeholder issue are not the same thing." + "reason": "Cache-strategy typo and unrelated placeholder issue are not duplicates." } ] }, @@ -13943,19 +13931,13 @@ "issue:43572", "issue:43618", "issue:44230", - "issue:44492", "issue:44556", "issue:44623", "issue:44861", "issue:44998", - "issue:45290", - "issue:45310", - "issue:45341" + "issue:45310" ], "soft_pairs": [ - "issue:44861|issue:45290", - "issue:44998|issue:45341", - "issue:44492|issue:44998", "issue:43618|issue:44998", "issue:44556|issue:44998", "issue:43525|issue:44861", @@ -13968,118 +13950,100 @@ "issue:31515|issue:33453", "issue:43298|issue:44230" ], - "prepared_review_unit_hash": "04281dd5a6716af263c30c1d3922adecaa5126f2841630e3c8d4107beac9bffc", + "prepared_review_unit_hash": "46450ed417b801420991bf8a4d5b421e4e6ce97b172dd4319aad7740d3d8e31f", "component_budget": { "node_count": 374, "item_count": 374, "soft_pair_count": 1395, - "serialized_chars": 720157, - "estimated_input_tokens": 180040, - "estimated_eval_tokens": 360336 + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 }, "budget": { - "node_count": 18, - "item_count": 18, - "soft_pair_count": 14, - "serialized_chars": 14255, - "estimated_input_tokens": 3564, - "estimated_eval_tokens": 7384 + "node_count": 15, + "item_count": 15, + "soft_pair_count": 11, + "serialized_chars": 11707, + "estimated_input_tokens": 2927, + "estimated_eval_tokens": 6110 }, 
"overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is highly mixed: the issues share superficial wording around crashes, loading, or config names, but they do not describe the same underlying bug. None of the soft pairs look safe to merge as duplicates.", - "confidence": 0.95, - "canonical_issue_reason": "No single canonical issue stands out as a true duplicate hub; the items span unrelated failures across loading, configs, templates, training, and misc. bugs.", + "summary": "This cluster is not a duplicate set; it contains several unrelated Transformers bugs that only loosely overlap around model loading/configuration. None of the soft-similarity pairs look like the same underlying bug or a mergeable single PR.", + "confidence": 0.96, + "canonical_issue_reason": "No single canonical issue fits this cluster: the items span distinct failure modes (checkpoint loading speed, tokenizer regression, config attribute errors, processor save gaps, FP8 support, tensor-parallel bugs, and an MLflow reporting issue).", "canonical_pr_reason": null, - "best_issue_reason": "issue:31515 is the closest thing to a representative because it is a concrete from_pretrained/loading report and has the most superficial overlap with other load-related items, but it still is not a true duplicate center.", + "best_issue_reason": "If a representative issue must be chosen, #44556 is the broadest checkpoint-reload regression, but it still does not cover the other issues in the cluster.", "best_pr_reason": null, "soft_edge_verdicts": [ - { - "left": "issue:44861", - "right": "issue:45290", - "accept": false, - "reason": "Both are crashes, but one is about tied weights internals and the other about chat template tool-call handling; different code paths and failure modes." 
- }, - { - "left": "issue:44998", - "right": "issue:45341", - "accept": false, - "reason": "'Unemployment' is unrelated to a small testing_utils bug; no shared underlying behavior." - }, - { - "left": "issue:44492", - "right": "issue:44998", - "accept": false, - "reason": "A cache-strategy typo and an unrelated issue title do not describe the same bug or change." - }, { "left": "issue:43618", "right": "issue:44998", "accept": false, - "reason": "CLIPOutput attentions assignment is a specific model-output regression, unrelated to the other item." + "reason": "Completely unrelated: CLIPOutput attentions regression vs an off-topic unemployment issue." }, { "left": "issue:44556", "right": "issue:44998", "accept": false, - "reason": "Checkpoint reload breakage after version upgrade is unrelated to the other issue; no concrete overlap." + "reason": "Checkpoint reload regression has no substantive relation to the unrelated unemployment issue." }, { "left": "issue:43525", "right": "issue:44861", "accept": false, - "reason": "Missing pad_token_id on Llama4Config and tied-weight-key AttributeError are different config/model bugs." + "reason": "Different bugs: missing pad_token_id on Llama4Config vs AttributeError in _get_tied_weight_keys for list-valued _tied_weights_keys." }, { "left": "issue:43572", "right": "issue:44861", "accept": false, - "reason": "StableLmConfig pad_token_idx regression is unrelated to the tied-weight-key list crash." + "reason": "StableLmConfig pad_token_idx regression and tied-weight key handling are separate code paths and different symptoms." }, { "left": "issue:31515", "right": "issue:38617", "accept": false, - "reason": "Slow checkpoint loading and an ImportError for layer_type_validation are distinct problems in different code paths." + "reason": "Slow from_pretrained checkpoint loading is unrelated to an ImportError for layer_type_validation." 
}, { "left": "issue:44230", "right": "issue:44623", "accept": false, - "reason": "FP8 support for a model family and missing processor.save_pretrained files are not the same bug." + "reason": "FP8 inference support for Qwen models is a different problem from processor.save_pretrained missing files." }, { "left": "issue:31515", "right": "issue:36296", "accept": false, - "reason": "from_pretrained loading slowness is unrelated to a tensor-parallel training bug." + "reason": "Checkpoint load slowness is not the same as a tensor-parallel training bug." }, { "left": "issue:30333", "right": "issue:31515", "accept": false, - "reason": "MLflow reporting on failed training jobs and checkpoint loading speed are unrelated concerns." + "reason": "MLflow job-state reporting failure is unrelated to from_pretrained checkpoint loading performance." }, { "left": "issue:44230", "right": "issue:45310", "accept": false, - "reason": "FP8 inference support for Qwen3-VL/MoE and a Qwen3.5 MoE from_pretrained error are different concrete issues." + "reason": "Both mention Qwen3.5, but one is FP8 inference support and the other is a from_pretrained error; different concrete bugs." }, { "left": "issue:31515", "right": "issue:33453", "accept": false, - "reason": "Tokenizer loading regression and checkpoint loading slowness are different failure modes, not the same duplicate." + "reason": "Tokenizer loading regression and slow checkpoint loading are different failure modes with different likely fixes." }, { "left": "issue:43298", "right": "issue:44230", "accept": false, - "reason": "Continuous batching support for audio models and FP8 support for Qwen models are separate feature/bug reports." + "reason": "Continuous batching audio-model support is a different feature gap than Qwen FP8 inference support." 
} ] }, @@ -14109,12 +14073,12 @@ "soft_pairs": [ "issue:26350|issue:33260" ], - "prepared_review_unit_hash": "e0ac32febd7dbe0a2803a61f6aec3c590807364ff4bccf125285591426951abd", + "prepared_review_unit_hash": "a9835bd4aabdbc5ad31a2d13ea6404649c591098bafe1fff971374f4cd5c8009", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 1590, + "serialized_chars": 1589, "estimated_input_tokens": 398, "estimated_eval_tokens": 1052 }, @@ -14122,7 +14086,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 1590, + "serialized_chars": 1589, "estimated_input_tokens": 398, "estimated_eval_tokens": 1052 }, @@ -14131,8 +14095,8 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are two distinct community-contribution enhancement issues: one for Flash Attention 2 support on more architectures, and one for GGUF support on more architectures. They share only a broad theme and are not duplicates.", - "confidence": 0.98, + "summary": "These are two distinct feature requests that only share a broad \"support for more architectures\" theme: one is about Flash Attention 2, the other about GGUF. They do not describe the same underlying bug or change.", + "confidence": 0.97, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, @@ -14142,7 +14106,7 @@ "left": "issue:26350", "right": "issue:33260", "accept": false, - "reason": "Different feature requests for different backends/code paths (Flash Attention 2 vs GGUF); same broad \u2018more architectures\u2019 framing is too generic to treat as the same issue." + "reason": "Different feature areas and likely different implementation paths: Flash Attention 2 support versus GGUF support. The similarity is only generic wording about architectures, not a shared concrete issue." 
} ] }, @@ -14179,12 +14143,12 @@ "issue:43834|issue:44365", "issue:44365|issue:44370" ], - "prepared_review_unit_hash": "ebff596b7c9a49a465b1291325e51513294e09c9226da383fa7f0bc2a055c984", + "prepared_review_unit_hash": "3b2f1d1d41167c32fdd1eb99a3f9b136134f7a497ce260d1cd85d3c95cbf1f53", "component_budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 4455, + "serialized_chars": 4454, "estimated_input_tokens": 1114, "estimated_eval_tokens": 2484 }, @@ -14192,7 +14156,7 @@ "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 4455, + "serialized_chars": 4454, "estimated_input_tokens": 1114, "estimated_eval_tokens": 2484 }, @@ -14201,55 +14165,58 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "All items are about translating docs, but the placeholder-titled issues likely refer to different languages rather than the same request. The tightest near-duplicate group is the generic template issues (43834/44365/44370), while 27088 is a concrete Turkish-specific outlier.", + "summary": "The three 2026 templated i18n translation issues are near-verbatim duplicates of one another. 
The 2023 Turkish-specific request is similar in theme but distinct enough to keep separate.", "confidence": 0.78, - "canonical_issue_reason": "43834 is the earliest of the near-identical template-style issues and the best representative of the generic i18n translation-request pattern.", + "canonical_issue_reason": "issue:43834 is the earliest of the near-identical templated translation issues and is the best representative of that duplicate set.", "canonical_pr_reason": null, - "best_issue_reason": "43834 is the strongest cluster representative because it sits in the tightest similarity group and is the oldest item among the template-matching issues.", + "best_issue_reason": "issue:43834 is the cleanest canonical representative among the duplicate placeholder issues; the Turkish issue is a different concrete request.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:27088", "right": "issue:44370", "accept": false, - "reason": "Turkish-specific request vs. a generic placeholder language request; same topic area, but not enough evidence they are the same underlying issue." + "reason": "Turkish-specific request versus a generic placeholder-language translation request; not clearly the same concrete issue." }, { "left": "issue:27088", "right": "issue:44365", "accept": false, - "reason": "Different target language context; these look like separate translation requests, not duplicates." + "reason": "Turkish-specific request versus a generic placeholder-language translation request; not clearly the same concrete issue." }, { "left": "issue:27088", "right": "issue:43834", "accept": false, - "reason": "The Turkish issue is concrete, while 43834 is a generic template-style request; they do not appear to be the same issue." + "reason": "Turkish-specific request versus a generic placeholder-language translation request; not clearly the same concrete issue." 
}, { "left": "issue:43834", "right": "issue:44370", - "accept": false, - "reason": "Very similar template text, but likely different language targets, so they are not clearly the same underlying request." + "accept": true, + "reason": "Near-verbatim templated titles/bodies for the same translation-docs request; these look like duplicates." }, { "left": "issue:43834", "right": "issue:44365", - "accept": false, - "reason": "High textual similarity alone is not enough here; the placeholder language fields suggest separate translation requests." + "accept": true, + "reason": "Near-verbatim templated titles/bodies for the same translation-docs request; these look like duplicates." }, { "left": "issue:44365", "right": "issue:44370", - "accept": false, - "reason": "These may share the same template, but there is no evidence they target the same language or same request." + "accept": true, + "reason": "Near-verbatim templated titles/bodies for the same translation-docs request; these look like duplicates." } ] }, - "evaluator_result": null, - "evaluator_used": false, + "evaluator_result": { + "accept": true, + "feedback": "The summary is well grounded in the packet. The three 2026 issues share the same templated title/body pattern, and choosing 43834 as the canonical representative is supported by its earliest creation date. The decision to keep issue:27088 separate is also conservative and consistent with its Turkish-specific title. The soft-edge verdicts do not overstate the evidence." 
+ }, + "evaluator_used": true, "retried": false, - "accepted_nontrivial_soft_edge": false, + "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, @@ -14311,12 +14278,12 @@ "issue:43837|issue:45027", "issue:43837|issue:45177" ], - "prepared_review_unit_hash": "6bfbcd9cc0fffd721ccbbf98669bbd8dddcc1f3c86c5c206ed0d3c05d2935e36", + "prepared_review_unit_hash": "388ad34e057a0e6f01fbceea9f76130a726873f9171310f8ff23a236062db4cb", "component_budget": { "node_count": 14, "item_count": 14, "soft_pair_count": 28, - "serialized_chars": 17646, + "serialized_chars": 17647, "estimated_input_tokens": 4412, "estimated_eval_tokens": 9080 }, @@ -14324,7 +14291,7 @@ "node_count": 14, "item_count": 14, "soft_pair_count": 28, - "serialized_chars": 17646, + "serialized_chars": 17647, "estimated_input_tokens": 4412, "estimated_eval_tokens": 9080 }, @@ -14333,180 +14300,180 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "This cluster is mostly a set of separate model-addition requests, not duplicates. The similarity signal is driven by generic wording like \u201cadd model\u201d rather than the same underlying bug/change.", - "confidence": 0.95, - "canonical_issue_reason": "No single canonical issue: each item requests a different model or modality (BERT variants, audio/TTS/ASR, OCR, vision/text), so they are not duplicates.", + "summary": "All items are separate feature requests for different models or modalities (BERT variants, audio ASR/TTS, OCR, sparse transformer, etc.), so there is no true duplicate cluster here.", + "confidence": 0.96, + "canonical_issue_reason": "No single issue is a true canonical duplicate; the requests target different models and code paths.", "canonical_pr_reason": null, - "best_issue_reason": "No strong duplicate candidate exists. 
If a representative issue were needed, issue 43671 is one of the more specific and active requests, but it is still not a duplicate of the others.", + "best_issue_reason": "issue:37015 is the broadest/most generic model-add request in the set, so it works best as a loose representative, but not as a duplicate canonical.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:36817", "right": "issue:41211", "accept": false, - "reason": "Different model families (EuroBert vs DEIMv2); generic \u201cadd model\u201d similarity only." + "reason": "Different model families: EuroBert vs DEIMv2; separate integration requests." }, { "left": "issue:36817", "right": "issue:43671", "accept": false, - "reason": "EuroBert request is unrelated to Qwen3-TTS support; different model and task." + "reason": "EuroBert config request vs Qwen3-TTS audio support; unrelated model/task." }, { "left": "issue:37015", "right": "issue:43837", "accept": false, - "reason": "NeoBERT and Qwen3-ASR are different models with different modalities/tasks." + "reason": "NeoBERT vs Qwen3-ASR; different model and modality." }, { "left": "issue:37015", "right": "issue:44121", "accept": false, - "reason": "NeoBERT request is unrelated to OpenAI weight-sparse transformer support." + "reason": "NeoBERT vs OpenAI weight-sparse transformer; distinct architectures." }, { "left": "issue:37015", "right": "issue:43909", "accept": false, - "reason": "NeoBERT and LFM2.5 Audio are different model requests, not the same change." + "reason": "NeoBERT vs LFM2.5 Audio; unrelated model targets." }, { "left": "issue:37015", "right": "issue:42503", "accept": false, - "reason": "NeoBERT vs ModernVBERT: different model families; no shared concrete code path." + "reason": "NeoBERT vs ModernVBERT; separate model additions." }, { "left": "issue:37015", "right": "issue:45027", "accept": false, - "reason": "NeoBERT and Voxtral-4B-TTS are separate model additions, not duplicates." 
+ "reason": "NeoBERT vs Voxtral TTS; different model/task and code path." }, { "left": "issue:41115", "right": "issue:43837", "accept": false, - "reason": "MiniCPM3 architecture support and Qwen3-ASR support are different models/tasks." + "reason": "MiniCPM3 vs Qwen3-ASR; different model families and support work." }, { "left": "issue:41115", "right": "issue:44121", "accept": false, - "reason": "MiniCPM3 and circuit-sparsity/circuitgpt are unrelated model requests." + "reason": "MiniCPM3 integration vs weight-sparse transformer request; not the same change." }, { "left": "issue:41115", "right": "issue:42503", "accept": false, - "reason": "MiniCPM3 vs ModernVBERT: different concrete model additions." + "reason": "MiniCPM3 vs ModernVBERT; separate model support requests." }, { "left": "issue:41115", "right": "issue:45027", "accept": false, - "reason": "MiniCPM3 and Voxtral-TTS are distinct model families and modalities." + "reason": "MiniCPM3 vs Voxtral TTS; distinct models and modalities." }, { "left": "issue:41211", "right": "issue:43671", "accept": false, - "reason": "DEIMv2 and Qwen3-TTS are different models with different target behaviors." + "reason": "DEIMv2 vs Qwen3-TTS; different model families and tasks." }, { "left": "issue:42503", "right": "issue:43837", "accept": false, - "reason": "ModernVBERT models and Qwen3-ASR support are unrelated model additions." + "reason": "ModernVBERT vs Qwen3-ASR; different model and modality." }, { "left": "issue:42503", "right": "issue:43671", "accept": false, - "reason": "ModernVBERT and Qwen3-TTS are different model families and tasks." + "reason": "ModernVBERT vs Qwen3-TTS; separate support requests." }, { "left": "issue:42503", "right": "issue:42738", "accept": false, - "reason": "ModernVBERT and generic BERT-like RoPE support are not the same underlying change." + "reason": "ModernVBERT vs BERT-like RoPE models; related theme but not the same concrete model addition." 
}, { "left": "issue:42738", "right": "issue:45027", "accept": false, - "reason": "BERT-like RoPE support and Voxtral TTS are different problem domains." + "reason": "Generic BERT-like RoPE request vs Voxtral TTS; unrelated changes." }, { "left": "issue:43671", "right": "issue:43837", "accept": false, - "reason": "Qwen3-TTS and Qwen3-ASR are related by name but are distinct models and separate support requests." + "reason": "Qwen3-TTS vs Qwen3-ASR are related vendor-wise, but they are different concrete models and likely different implementations." }, { "left": "issue:43671", "right": "issue:45027", "accept": false, - "reason": "Both are audio-related, but they target different models and integrations." + "reason": "Qwen3-TTS vs Voxtral TTS; both audio, but different model support work." }, { "left": "issue:43671", "right": "issue:45177", "accept": false, - "reason": "Qwen3-TTS support is unrelated to DeepSeek-OCR2." + "reason": "Qwen3-TTS vs DeepSeek-OCR2; different modalities and model code paths." }, { "left": "issue:43671", "right": "issue:44121", "accept": false, - "reason": "Qwen3-TTS and weight-sparse transformer support are different model changes." + "reason": "Audio TTS support vs weight-sparse transformer architecture; not the same underlying change." }, { "left": "issue:43671", "right": "issue:43909", "accept": false, - "reason": "Qwen3-TTS and LFM2.5 Audio 1.5B are separate audio-model requests." + "reason": "Qwen3-TTS vs LFM2.5 Audio; separate audio models and integrations." }, { "left": "issue:43837", "right": "issue:43909", "accept": false, - "reason": "Qwen3-ASR and LFM2.5 Audio are distinct audio-model support requests." + "reason": "Qwen3-ASR vs LFM2.5 Audio; different audio models and support requirements." }, { "left": "issue:43837", "right": "issue:45322", "accept": false, - "reason": "Qwen3-ASR support and EUPE are different models with no shared concrete bug/change." + "reason": "ASR support vs EUPE model request; unrelated model types." 
}, { "left": "issue:43837", "right": "issue:44121", "accept": false, - "reason": "ASR model support is unrelated to the weight-sparse transformer request." + "reason": "Qwen3-ASR vs sparse transformer; different architectures and tasks." }, { "left": "issue:43909", "right": "issue:44205", "accept": false, - "reason": "LFM2.5 Audio 1.5B and SAM3-LiteText are different model additions." + "reason": "LFM2.5 Audio vs SAM3-LiteText; audio model vs multimodal segmentation-related request." }, { "left": "issue:44121", "right": "issue:45322", "accept": false, - "reason": "Weight-sparse transformer/circuitgpt and EUPE are unrelated model requests." + "reason": "Weight-sparse transformer vs EUPE; distinct architectures and model targets." }, { "left": "issue:43837", "right": "issue:45027", "accept": false, - "reason": "Qwen3-ASR and Voxtral-4B-TTS are different speech-model support requests." + "reason": "Qwen3-ASR vs Voxtral TTS; both audio, but not the same model or fix." }, { "left": "issue:43837", "right": "issue:45177", "accept": false, - "reason": "Qwen3-ASR support is unrelated to DeepSeek-OCR2." + "reason": "Qwen3-ASR vs DeepSeek-OCR2; different modality and implementation." 
} ] }, @@ -14547,82 +14514,82 @@ "issue:43668|issue:44031", "issue:43668|issue:43813" ], - "prepared_review_unit_hash": "5b1f6386955b5ba32a0377b1a492ffb28f6b886be3f8b15dd0673e606ef09c45", + "prepared_review_unit_hash": "639f6eb9a2e9138d5296676d59ffad3cc1a0e4e0764a3d2c0b048980388849d5", "component_budget": { "node_count": 6, "item_count": 6, "soft_pair_count": 8, - "serialized_chars": 6288, - "estimated_input_tokens": 1572, - "estimated_eval_tokens": 3400 + "serialized_chars": 6290, + "estimated_input_tokens": 1573, + "estimated_eval_tokens": 3402 }, "budget": { "node_count": 6, "item_count": 6, "soft_pair_count": 8, - "serialized_chars": 6288, - "estimated_input_tokens": 1572, - "estimated_eval_tokens": 3400 + "serialized_chars": 6290, + "estimated_input_tokens": 1573, + "estimated_eval_tokens": 3402 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These items are a heterogeneous set of unrelated issues (model norm behavior, Mask2Former loss args, config typing, a typo, and tokenizer regex warnings). The soft-similarity signals are too weak to treat any pair as duplicates.", - "confidence": 0.98, - "canonical_issue_reason": null, + "summary": "The cluster is heterogeneous: these issues touch different models/components and describe different bugs or cleanup tasks. 
The soft similarities look superficial, so none of the candidate pairs should be merged as duplicates.", + "confidence": 0.96, + "canonical_issue_reason": "No true canonical issue: the reports span unrelated problem areas (normalization semantics, Mask2Former loss handling, config typing, typo, tokenizer warning).", "canonical_pr_reason": null, - "best_issue_reason": null, + "best_issue_reason": "If one issue must anchor the set, 44031 is the strongest standalone report because it describes a broad regression with active discussion, but it is not a duplicate of the others.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:37934", "right": "issue:43668", "accept": false, - "reason": "Different concerns: Llama4TextL2Norm implementation semantics vs ModernBERTConfig type-hint correctness." + "reason": "Different topics: Llama4TextL2Norm/RMS norm semantics vs ModernBERTConfig type-hint typing." }, { "left": "issue:42936", "right": "issue:43240", "accept": false, - "reason": "Mask2Former ignore_value handling is a model-specific bug; kwargs loss forwarding is a broader API wiring issue." + "reason": "Different bugs in the same broad area; one is Mask2Former ignore_value usage, the other is loss kwargs not being forwarded." }, { "left": "issue:42936", "right": "issue:44031", "accept": false, - "reason": "Unrelated subsystems: Mask2Former loss parameter usage vs tokenizer regex warning regression." + "reason": "Mask2Former loss/ignore_value bug is unrelated to tokenizer regex warning regression." }, { "left": "issue:42936", "right": "issue:43813", "accept": false, - "reason": "Different bug class and area: model loss config usage vs a typo in a quantization field name." + "reason": "Different components: Mask2Former model loss handling vs a typo in a quantization attribute string." 
}, { "left": "issue:42936", "right": "issue:43668", "accept": false, - "reason": "No shared underlying defect; one is runtime behavior in a model, the other is a config annotation issue." + "reason": "Unrelated issues: Mask2Former ignore_value behavior vs a ModernBERT config annotation problem." }, { "left": "issue:43240", "right": "issue:43668", "accept": false, - "reason": "Loss kwargs forwarding and incorrect type hints are separate issues with no common code-path." + "reason": "Loss kwargs forwarding and config type-hint correctness are separate concerns with no shared code path." }, { "left": "issue:43668", "right": "issue:44031", "accept": false, - "reason": "ModernBERT config typing is unrelated to tokenizer regex warnings." + "reason": "ModernBERTConfig typing issue is not the same bug as the tokenizer regex warning regression." }, { "left": "issue:43668", "right": "issue:43813", "accept": false, - "reason": "A type-hint mismatch and a typo in a field path are distinct, non-duplicate issues." + "reason": "Different fix types: incorrect type hint vs a typo in a quantization operation name." } ] }, @@ -14652,7 +14619,7 @@ "soft_pairs": [ "issue:41084|issue:43037" ], - "prepared_review_unit_hash": "ee99f254c5eef4357c55fdf3b8a5b18278fe1d7880966fd926d5d5b65a7f5df4", + "prepared_review_unit_hash": "ae58c05ba2b129c175b77656184d7a1ee0d77a7240ac6cbeb648be4b5c08b685", "component_budget": { "node_count": 2, "item_count": 2, @@ -14674,18 +14641,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "The two issues are unrelated: one requests block decoding behavior, the other requests DeepSeek v3.2 model support. They should not be clustered as duplicates.", - "confidence": 0.98, - "canonical_issue_reason": null, + "summary": "These two issues are not duplicates: one is a concrete block decoding change/bug, the other is a request for DeepSeek v3.2 model support. 
They touch different underlying problems with no shared implementation target.", + "confidence": 0.97, + "canonical_issue_reason": "Issue #41084 is the better canonical representative because it is a specific, actionable code-path problem rather than a broad model-support request.", "canonical_pr_reason": null, - "best_issue_reason": null, + "best_issue_reason": "Issue #41084 is the best issue in this cluster to keep as the representative item; #43037 is a separate feature request and not a duplicate.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:41084", "right": "issue:43037", "accept": false, - "reason": "Different topics and likely different workstreams: block decoding vs DeepSeek v3.2 support; no evidence they describe the same underlying bug or feature." + "reason": "Reject: the titles and implied scopes are unrelated. #41084 concerns block decoding, while #43037 requests DeepSeek v3.2 support; there is no evidence they describe the same underlying bug or change." } ] }, @@ -14715,7 +14682,7 @@ "soft_pairs": [ "issue:43598|issue:43600" ], - "prepared_review_unit_hash": "4fc6b68ff6bdc51c8e2bf39fd4be5dacd45bd1756e920b0db6f2790f88bf47b1", + "prepared_review_unit_hash": "a6d6e202281dc164c14edb34667db2ac30d53f88d963e75eb5ab53da9bac2866", "component_budget": { "node_count": 2, "item_count": 2, @@ -14737,18 +14704,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "The two closed issues are about different Trainer-related customization problems: one concerns `remove_unused_column`, the other concerns FLOPS estimation for custom trainers. They share a broad area but not the same underlying bug or fix.", - "confidence": 0.95, - "canonical_issue_reason": null, + "summary": "The two issues are about different Trainer concerns: one is about `remove_unused_column` customizability, the other about FLOPS estimation for custom trainers. 
They do not describe the same underlying bug or change.", + "confidence": 0.96, + "canonical_issue_reason": "No canonical issue: the issues address separate Trainer topics and are not duplicates.", "canonical_pr_reason": null, - "best_issue_reason": "Neither issue is a duplicate of the other. If one must be picked as the representative, issue #43598 is the more concrete Trainer customization problem, but the cluster does not justify a single canonical issue.", + "best_issue_reason": "If forced to pick one representative, #43598 is the more concrete Trainer behavior request, but neither issue represents the other.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43598", "right": "issue:43600", "accept": false, - "reason": "Both mention Trainer customization, but #43598 is about `remove_unused_column` behavior while #43600 is about estimating FLOPS for custom trainers. They describe different code paths and different fixes." + "reason": "Different problems: customizability of `remove_unused_column` vs FLOPS accounting for custom trainers. Same broad area, but not the same underlying issue." 
} ] }, @@ -14778,12 +14745,12 @@ "soft_pairs": [ "issue:43684|issue:44405" ], - "prepared_review_unit_hash": "581542f42486834982c1808003961f2e5b9891a7119ddd0536d4a8a1afa2cc0b", + "prepared_review_unit_hash": "67eb70049922307d8875099ed63951f0d4a1dadf3294482ef48ce485a5474e58", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 1611, + "serialized_chars": 1612, "estimated_input_tokens": 403, "estimated_eval_tokens": 1062 }, @@ -14791,7 +14758,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 1611, + "serialized_chars": 1612, "estimated_input_tokens": 403, "estimated_eval_tokens": 1062 }, @@ -14800,18 +14767,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are two separate model-support requests: one for Qwen3-Omni auto-class registration, and one for Qwen3.5 sequence-classification support. They share the general theme of adding AutoModel support, but not the same concrete bug/change.", - "confidence": 0.97, - "canonical_issue_reason": null, + "summary": "Not duplicates: one asks for Qwen3-Omni registration in AutoModel/AutoModelForConditionalGeneration, the other asks for Qwen3.5 AutoModelForSequenceClassification support. 
Different model families and different auto-class targets.", + "confidence": 0.95, + "canonical_issue_reason": "No canonical issue: these are different feature requests aimed at different model/config families and different auto classes.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 43684 is the broader auto-registration request and is the clearer representative of the Qwen3-Omni gap; issue 44405 is a distinct Qwen3.5 sequence-classification support request, not a duplicate.", + "best_issue_reason": "If forced to pick a representative, 43684 is the broader model-registration request, but it does not cover 44405\u2019s separate sequence-classification support request.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43684", "right": "issue:44405", "accept": false, - "reason": "Different model families and different auto classes: Qwen3-Omni registration for AutoModel/AutoModelForConditionalGeneration vs Qwen3.5 sequence-classification support. They are related only at a high level, not the same underlying change." + "reason": "Different underlying changes: Qwen3-Omni auto-registration vs. Qwen3.5 sequence-classification support. They are not the same bug or code-path fix." 
} ] }, @@ -14848,70 +14815,70 @@ "issue:43845|issue:43859", "issue:43846|issue:43859" ], - "prepared_review_unit_hash": "4aceadcf55ba70db28212dd61d0de279a6ca17591306cd06ab01a0a2f3431209", + "prepared_review_unit_hash": "4198f11950bedec72a52d32cbac3a2e0b82ed5fbd0d35cbc6621ed93cea36305", "component_budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 4265, - "estimated_input_tokens": 1067, - "estimated_eval_tokens": 2390 + "serialized_chars": 4264, + "estimated_input_tokens": 1066, + "estimated_eval_tokens": 2388 }, "budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 4265, - "estimated_input_tokens": 1067, - "estimated_eval_tokens": 2390 + "serialized_chars": 4264, + "estimated_input_tokens": 1066, + "estimated_eval_tokens": 2388 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "All four issues are extremely low-signal and mostly consist of vague or near-empty titles. Three share the same title ('huggingface '), while 'Fett' looks unrelated. There is not enough concrete content to confidently establish true duplicates, but 43845 is the best placeholder representative among the repeated-title issues.", - "confidence": 0.41, - "canonical_issue_reason": "Issue 43845 is the earliest of the repeated 'huggingface ' reports and is the cleanest placeholder representative for that cluster, though the bodies are too sparse to prove a real underlying bug.", + "summary": "All four issues are extremely low-information. The two identical \"huggingface \" issues look like the closest pair, but there isn\u2019t enough description-level evidence to confirm a true duplicate. 
I\u2019m rejecting all soft edges.", + "confidence": 0.61, + "canonical_issue_reason": "Issue 43845 is the earliest of the repeated \"huggingface \" entries, so it is the best representative placeholder among the near-identical issue titles.", "canonical_pr_reason": null, - "best_issue_reason": "43845 is the most suitable global issue representative because it is earliest among the near-identical 'huggingface ' issues and has no evidence of being different from 43846 or 43859.", + "best_issue_reason": "43845 is the most representative issue in this cluster because it is the earliest of the duplicate-looking \"huggingface \" reports; however, the evidence is weak.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43835", "right": "issue:43845", "accept": false, - "reason": "Titles are generic and unrelated ('Fett' vs 'huggingface '); no shared body details or targets to support a duplicate determination." + "reason": "Only weak title similarity; no body detail, targets, or other evidence that these are the same underlying issue." }, { "left": "issue:43835", "right": "issue:43846", "accept": false, - "reason": "Same as above: no concrete evidence that 'Fett' and 'huggingface ' describe the same underlying issue." + "reason": "Only weak title similarity; the reports are too underspecified to confirm a true duplicate." }, { "left": "issue:43835", "right": "issue:43859", "accept": false, - "reason": "No meaningful overlap beyond both being vague; insufficient evidence of the same bug." + "reason": "No concrete evidence beyond vague similarity; insufficient to treat as the same issue." }, { "left": "issue:43845", "right": "issue:43846", "accept": false, - "reason": "The titles match, but the issue bodies are too thin to verify they are the same underlying problem; identical generic titles alone are not enough." 
+ "reason": "Identical short titles and close timestamps suggest a possible duplicate, but the bodies provide no confirmatory detail, so this is not safe to merge." }, { "left": "issue:43845", "right": "issue:43859", "accept": false, - "reason": "All we have is the same generic title; there are no concrete symptoms, targets, or code references to confirm duplication." + "reason": "Shared generic title is not enough to establish the same bug report." }, { "left": "issue:43846", "right": "issue:43859", "accept": false, - "reason": "Identical vague titles are suggestive, but the lack of substantive description makes a strict duplicate match unjustified." + "reason": "Shared generic title is not enough to establish the same bug report." } ] }, @@ -14952,7 +14919,7 @@ "issue:44572|issue:44937", "issue:44573|issue:44937" ], - "prepared_review_unit_hash": "4d32738a40266427712efb1b54036146fd7e4aa3c6838bca7f06614f037eb622", + "prepared_review_unit_hash": "790e3534d5202454802f099bc72f4f2c88261e2ba66f570823d83ec2f586a536", "component_budget": { "node_count": 5, "item_count": 5, @@ -14974,73 +14941,76 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "All items look like unrelated low-signal/spam issues rather than duplicates of a real bug report. No soft edge should be merged.", - "confidence": 0.88, - "canonical_issue_reason": "issue:44371 is the simplest generic spam placeholder and the earliest item, so it is the least misleading representative for the cluster.", + "summary": "This cluster is mostly unrelated low-information spam/generic issues. 
The three exact \"\" reports look like the same duplicate placeholder; the documentation suggestion and \"Google LLC\" issue do not match that same underlying content.", + "confidence": 0.82, + "canonical_issue_reason": "issue:44371 is the earliest of the exact \"\" duplicates, so it is the best canonical representative for that small duplicate subgroup.", "canonical_pr_reason": null, - "best_issue_reason": "issue:44371 is the best available issue representative because it is generic, minimal, and there is no substantive technical content to anchor a more specific choice.", + "best_issue_reason": "issue:44371 is the strongest issue representative because it is an exact title/body match with the other spam placeholders and was created first.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44371", "right": "issue:44572", - "accept": false, - "reason": "Both are spam-like placeholders, but there is no evidence they describe the same underlying report or change." + "accept": true, + "reason": "Exact same placeholder title/body pattern; these are indistinguishable spam duplicates." }, { "left": "issue:44371", "right": "issue:44573", - "accept": false, - "reason": "Same generic spam style, but no shared bug, feature, or reproduction details to justify duplication." + "accept": true, + "reason": "Exact same placeholder title/body pattern; these are indistinguishable spam duplicates." }, { "left": "issue:44371", "right": "issue:44418", "accept": false, - "reason": "One is a generic spam title and the other is a documentation suggestion; too little evidence they are the same issue." + "reason": "One is a spam placeholder, the other is a generic documentation suggestion; no evidence of the same underlying issue." }, { "left": "issue:44371", "right": "issue:44937", "accept": false, - "reason": "Completely different titles and no technical details tying them to the same underlying problem." 
+ "reason": "Different titles and apparent intent; \"Google LLC\" is not the same placeholder spam report." }, { "left": "issue:44418", "right": "issue:44572", "accept": false, - "reason": "Documentation suggestion versus generic spam; not the same underlying issue." + "reason": "Generic documentation suggestion versus placeholder spam; not the same issue." }, { "left": "issue:44418", "right": "issue:44573", "accept": false, - "reason": "Different apparent intent and no shared concrete content beyond weak similarity signals." + "reason": "Generic documentation suggestion versus placeholder spam; not the same issue." }, { "left": "issue:44572", "right": "issue:44573", - "accept": false, - "reason": "Both are spam placeholders, but that is not enough to conclude they are duplicate reports of the same thing." + "accept": true, + "reason": "Exact same placeholder title/body pattern; these are indistinguishable spam duplicates." }, { "left": "issue:44572", "right": "issue:44937", "accept": false, - "reason": "Generic spam title versus a specific quoted name; no evidence of the same underlying issue." + "reason": "Different titles and likely different spam themes; no concrete shared issue." }, { "left": "issue:44573", "right": "issue:44937", "accept": false, - "reason": "No concrete overlap in problem statement, targets, or artifacts; reject as duplicate." + "reason": "Different titles and likely different spam themes; no concrete shared issue." } ] }, - "evaluator_result": null, - "evaluator_used": false, + "evaluator_result": { + "accept": true, + "feedback": "The clustering summary is grounded in the packet. The three issues are the only clearly near-identical items, and selecting the earliest as canonical is conservative and supported by creation time. The non- issues are correctly treated as separate, with the reject decisions staying cautious given only soft similarity evidence." 
+ }, + "evaluator_used": true, "retried": false, - "accepted_nontrivial_soft_edge": false, + "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, @@ -15063,7 +15033,7 @@ "soft_pairs": [ "issue:45145|issue:45151" ], - "prepared_review_unit_hash": "350f17c51c7a1e5c256a646be320d3d7a061b73b25c701bc88548180a0457d1f", + "prepared_review_unit_hash": "4a6442540601583dcddf3e6fb5b4d8779d79f272d9c87b7d81eced31c009ae7e", "component_budget": { "node_count": 2, "item_count": 2, @@ -15085,27 +15055,27 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "The two closed issues appear to be likely duplicates or near-duplicate reports: they have the exact same title, similar body sizes, and a soft-similarity signal, with no distinguishing links, files, or targets shown. #45145 is the better representative because it was created earlier and has an inbound reference.", - "confidence": 0.88, - "canonical_issue_reason": "#45145 is the better canonical pick because it was created earlier and has an inbound reference, while #45151 appears to be a near-duplicate report with the same title.", + "summary": "These two issues appear to be duplicates of the same energy-optimization writeup, with identical titles and nearly identical body sizes/content signals. 
The earlier issue is the better canonical representative.", + "confidence": 0.99, + "canonical_issue_reason": "Issue #45145 was created earlier and already has an inbound reference, making it the better canonical duplicate target for this same report.", "canonical_pr_reason": null, - "best_issue_reason": "#45145 is the best issue to keep as the representative item because it is earlier and already has an inbound reference, making it the more established anchor for this duplicate pair.", + "best_issue_reason": "Issue #45145 is the clearest representative of the cluster because it is earlier and slightly more established in discussion metadata.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:45145", "right": "issue:45151", "accept": true, - "reason": "Exact same title plus similar body length and a soft-similarity signal make these look like duplicate reports of the same underlying issue." + "reason": "Same title, same apparent topic, and highly similar body footprint; these look like duplicate reports of the same underlying issue." } ] }, "evaluator_result": { - "accept": false, - "feedback": "The cluster call is a bit too strong for the evidence shown. The packet supports that these issues have the same title, similar sizes, and a soft-similarity signal, but it does not expose the bodies, so statements like \"highly similar content\" / \"substantially similar body content\" are not directly grounded. Also, the best-issue rationale overstates linkage/activity a bit: #45145 has the inbound reference, but it does not have more discussion activity than #45151. A more conservative summary would say they appear likely duplicate or near-duplicate reports, with #45145 as the better representative because it is earlier and has an inbound reference." 
+ "accept": true, + "feedback": "The conclusion is grounded in the packet: the issues have identical titles, very similar body lengths, and a soft-similarity link, while issue 45145 is earlier and has an inbound reference. The canonical choice is conservative and the soft-edge verdict does not overreach beyond the available evidence." }, "evaluator_used": true, - "retried": true, + "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null @@ -15167,7 +15137,7 @@ { "label": "pull_request", "component_index": 1, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -15211,148 +15181,148 @@ "pull_request:43823|pull_request:45213", "pull_request:43251|pull_request:43254" ], - "prepared_review_unit_hash": "4a04e67c3f56402c39cfd7388722cbeb74f085903a1eaee1b16a634240919561", + "prepared_review_unit_hash": "466f1881ca618d9feb0f24683691a5f77c8dab46ddd1154641f611d35685933d", "component_budget": { "node_count": 13, "item_count": 13, "soft_pair_count": 18, - "serialized_chars": 35176, - "estimated_input_tokens": 8794, - "estimated_eval_tokens": 17844 + "serialized_chars": 35181, + "estimated_input_tokens": 8796, + "estimated_eval_tokens": 17848 }, "budget": { "node_count": 13, "item_count": 13, "soft_pair_count": 18, - "serialized_chars": 35176, - "estimated_input_tokens": 8794, - "estimated_eval_tokens": 17844 + "serialized_chars": 35181, + "estimated_input_tokens": 8796, + "estimated_eval_tokens": 17848 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Only one pair looks like a true duplicate: the two near-identical loss-utils PRs (43251/43254). The rest are unrelated same-subsystem changes, broad model additions, or docs/test-only updates and should not be merged as duplicates.", - "confidence": 0.93, + "summary": "This cluster is mostly heterogeneous. 
The only true duplicate-style pair is PR #43251 and PR #43254, both implementing the same `fixed_cross_entropy` kwargs passthrough change. All other soft links are topical overlap only and should be rejected.", + "confidence": 0.96, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 43251 is the best canonical representative of the duplicate loss-utils fix: it has the explicit issue link, directly patches fixed_cross_entropy, and is the clearer of the two near-identical implementations.", + "canonical_pr_reason": "PR #43251 is the best canonical PR for the real duplicate pair: it has the explicit issue target (#43240) and a complete, readable description of the same `fixed_cross_entropy` fix.", "best_issue_reason": null, - "best_pr_reason": "PR 43251 is the strongest overall PR in the cluster for duplicate-triage purposes because it most cleanly captures the shared bug in fixed_cross_entropy and has the most concrete issue alignment.", + "best_pr_reason": "PR #43251 is the best global PR candidate because it anchors the only genuine duplicate in the cluster and is the more explicit issue-linked implementation.", "soft_edge_verdicts": [ { "left": "pull_request:39895", "right": "pull_request:43424", "accept": false, - "reason": "Videoprism model addition vs executorch dynamic-shape test; different features and code paths." + "reason": "Unrelated PRs: VideoPrism model/docs addition vs ExecuTorch dynamic-shape test. Similarity is only superficial." }, { "left": "pull_request:41855", "right": "pull_request:43823", "accept": false, - "reason": "Mistral tokenizer methods vs adding MobileLLM; unrelated changes with only superficial tokenizer/model overlap." + "reason": "Different model work: Mistral tokenizer methods vs adding MobileLLM. No shared concrete bug or change." }, { "left": "pull_request:41855", "right": "pull_request:43424", "accept": false, - "reason": "Tokenizer API additions vs executorch export test; no shared bug or implementation change." 
+ "reason": "Tokenizer API fix vs ExecuTorch export test; these are unrelated changes." }, { "left": "pull_request:42668", "right": "pull_request:43823", "accept": false, - "reason": "Processor auto-loading robustness vs adding a new model; same broad area of model support, but not the same fix." + "reason": "Processor auto-loading robustness vs adding a new model. Same broad area, but not the same underlying change." }, { "left": "pull_request:43251", "right": "pull_request:43823", "accept": false, - "reason": "Loss-function kwargs forwarding vs new MobileLLM model files; completely different code paths." + "reason": "Loss-function kwargs passthrough vs MobileLLM model addition are unrelated." }, { "left": "pull_request:43251", "right": "pull_request:44827", "accept": false, - "reason": "Cross-entropy kwargs fix vs Mistral4 test/model refactor; unrelated bugs and changes." + "reason": "Loss utility kwargs fix and Mistral4 test/model changes are different code paths and different problems." }, { "left": "pull_request:43424", "right": "pull_request:44827", "accept": false, - "reason": "Executorch dynamic-shape test vs Mistral4 test/model refactor; different functionality and no common fix." + "reason": "ExecuTorch dynamic-shape testing and Mistral4 test/model changes are unrelated." }, { "left": "pull_request:43424", "right": "pull_request:43823", "accept": false, - "reason": "Executorch export testing vs MobileLLM model addition; unrelated changes." + "reason": "Dynamic export test vs MobileLLM model addition; no same bug or merged patch." }, { "left": "pull_request:43823", "right": "pull_request:43995", "accept": false, - "reason": "New MobileLLM model implementation vs Falcon output-collection refactor; different model families and problems." + "reason": "New MobileLLM model vs Falcon output-interface refactor; only broad model-related overlap." 
}, { "left": "pull_request:43823", "right": "pull_request:45622", "accept": false, - "reason": "MobileLLM model addition vs PEFT constructor fix; unrelated code paths." + "reason": "MobileLLM addition and PEFT constructor fix are unrelated." }, { "left": "pull_request:43995", "right": "pull_request:45213", "accept": false, - "reason": "Falcon refactor vs model-creation skill workflow; not the same bug or change." + "reason": "Falcon refactor vs model-creation skill/docs tooling; different artifact types and goals." }, { "left": "pull_request:43995", "right": "pull_request:44827", "accept": false, - "reason": "Falcon output interface refactor vs Mistral4 tests/compatibility fix; different models and goals." + "reason": "Falcon interface refactor and Mistral4 test fixes are different model-specific changes." }, { "left": "pull_request:44827", "right": "pull_request:45622", "accept": false, - "reason": "Mistral4 test/model fixes vs PEFT constructor fix; unrelated changes despite both touching framework internals." + "reason": "Mistral4 test/model updates vs PEFT constructor fix are not the same concrete code-path problem." }, { "left": "pull_request:44827", "right": "pull_request:45587", "accept": false, - "reason": "Mistral4-related fix vs continuous batching docs; one is code/tests, the other is documentation." + "reason": "Mistral4-related code changes vs continuous batching docs; only subsystem-level similarity." }, { "left": "pull_request:44827", "right": "pull_request:45170", "accept": false, - "reason": "Mistral4 test/model work vs CLIP-like weight-renaming fix; different subsystems and concrete issues." + "reason": "Mistral4 tests vs layrnorm->layernorm renaming across CLIP-like models are unrelated." }, { "left": "pull_request:44827", "right": "pull_request:45213", "accept": false, - "reason": "Mistral4 fix vs a draft model-creation skill PR; not the same underlying change." 
+ "reason": "Mistral4 fixes and model-creation skill repo changes do not address the same bug/change." }, { "left": "pull_request:43823", "right": "pull_request:45213", "accept": false, - "reason": "MobileLLM model addition vs model-creation skill scaffolding; one creates a model, the other adds workflow tooling." + "reason": "Model implementation PR vs skill/tooling PR; no shared concrete fix." }, { "left": "pull_request:43251", "right": "pull_request:43254", "accept": true, - "reason": "Both PRs patch the same fixed_cross_entropy code path to forward supported cross_entropy kwargs; the diffs are near-identical and could be merged into one fix." + "reason": "Both PRs fix the same `fixed_cross_entropy` wrapper by adding/forwarding supported kwargs to `nn.functional.cross_entropy`; they are mergeable into one patch." } ] }, "evaluator_result": { "accept": true, - "feedback": "Grounded and conservative. The only accepted duplicate pair, 43251/43254, is supported by the shared loss_utils file, the shared issue target, and near-identical fixed_cross_entropy patches. The other pairs are reasonably rejected as different bugfixes, features, or docs/tests rather than mergeable duplicates." + "feedback": "The analysis is grounded in the packet: only PR #43251 and #43254 share the same issue target and the same `fixed_cross_entropy` kwargs passthrough fix, so treating them as mergeable duplicates is conservative. The other pairwise rejections stay within the evidence and do not overclaim duplication from topical similarity alone." 
}, "evaluator_used": true, "retried": false, @@ -15363,7 +15333,7 @@ { "label": "pull_request", "component_index": 2, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -15379,12 +15349,12 @@ "soft_pairs": [ "pull_request:40546|pull_request:42781" ], - "prepared_review_unit_hash": "1584b7ac07cc91c523316348d74a8bea0ef16322c7d9b77f3ae17705ba132ace", + "prepared_review_unit_hash": "480dee09185110a432e6ef2f8c41027168791e026ee0105dfa98a332123a032f", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 7225, + "serialized_chars": 7226, "estimated_input_tokens": 1807, "estimated_eval_tokens": 3870 }, @@ -15392,7 +15362,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 7225, + "serialized_chars": 7226, "estimated_input_tokens": 1807, "estimated_eval_tokens": 3870 }, @@ -15401,18 +15371,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are related VibeVoice PRs, but they are not duplicates: one is the core VibeVoice implementation, the other is a draft for VibeVoice Realtime that adds extra realtime-specific files and changes. 
The shared filenames mostly reflect the common model family, not the same exact change.", - "confidence": 0.93, + "summary": "These are related VibeVoice PRs but not duplicates: one is the base VibeVoice integration, the other adds VibeVoice Realtime plus an acoustic tokenizer and generation changes.", + "confidence": 0.91, "canonical_issue_reason": null, - "canonical_pr_reason": "PR #40546 is the better canonical representative because it is the fuller, reviewed core VibeVoice implementation with broad model and docs integration.", + "canonical_pr_reason": "PR 40546 is the better canonical PR because it is the earlier, non-draft foundational VibeVoice integration and is more self-contained than the later realtime extension.", "best_issue_reason": null, - "best_pr_reason": "PR #40546 is the best overall representative of the cluster: it is the main implementation PR, while #42781 is a draft follow-up for a more specific realtime variant.", + "best_pr_reason": "PR 40546 is the best representative of the cluster since it establishes the core VibeVoice implementation; PR 42781 is a separate follow-on feature area.", "soft_edge_verdicts": [ { "left": "pull_request:40546", "right": "pull_request:42781", "accept": false, - "reason": "Reject: they share the VibeVoice codebase, but #42781 is a separate draft for VibeVoice Realtime with additional realtime-specific files and changes, not the same concrete code-path fix/change as #40546." + "reason": "Not the same underlying change: 40546 adds core VibeVoice support/docs, while 42781 adds VibeVoice Realtime with additional tokenizer and generation work. They overlap in model names and shared files but are distinct feature sets and would not plausibly be merged into one PR." 
} ] }, @@ -15426,7 +15396,7 @@ { "label": "pull_request", "component_index": 3, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -15442,7 +15412,7 @@ "soft_pairs": [ "pull_request:40755|pull_request:41224" ], - "prepared_review_unit_hash": "c78a4e744c187bc6677f105e311280c04222ff551f2e406baf665f4f8b2f3f0e", + "prepared_review_unit_hash": "fc3b593469cccf8507d1d8cabdbfb385185f7be3fa86f433268455301b08dc56", "component_budget": { "node_count": 2, "item_count": 2, @@ -15464,18 +15434,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "The pair is not a duplicate cluster: one PR adds TimesFM covariate forecasting, the other adds DINOv3 image classification support. They target different models and different code paths.", + "summary": "These are unrelated feature PRs: one adds covariate forecasting to TimesFM, the other adds DINOv3 image classification support. They share only a loose \u201cmodel capability expansion\u201d theme, not a concrete code path or fix.", "confidence": 0.98, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 41224 is the better canonical representative because it is a focused, self-contained model feature with substantial review activity and a clear code/test surface.", + "canonical_pr_reason": "Neither PR is a clear duplicate canonical candidate for the other; they target different models, files, APIs, and use cases.", "best_issue_reason": null, - "best_pr_reason": "PR 41224 is the strongest standalone PR in the cluster: it is narrowly scoped, implementation-backed, and has more review evidence than the broader TimesFM feature PR.", + "best_pr_reason": "No issue is present; as PRs, neither should be treated as the canonical representative of the other because they address distinct changes in separate model families.", "soft_edge_verdicts": [ { "left": "pull_request:40755", "right": "pull_request:41224", "accept": false, - 
"reason": "Different features for different models (TimesFM covariates vs DINOv3 image classification); no shared bug or change to merge into one PR." + "reason": "Reject: TimesFM covariate forecasting and DINOv3 image classification are unrelated features with different code paths, documentation, and tests; they could not plausibly be merged into one PR as the same underlying change." } ] }, @@ -15489,7 +15459,7 @@ { "label": "pull_request", "component_index": 4, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -15555,7 +15525,7 @@ { "label": "pull_request", "component_index": 5, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -15571,12 +15541,12 @@ "soft_pairs": [ "pull_request:41797|pull_request:45186" ], - "prepared_review_unit_hash": "61c25d0acdfc8da32f5afbba697ca7ba09864588c591c5a3f93f2131a9c1dbff", + "prepared_review_unit_hash": "bdee097d586db57f55fa4957acd1a88c750427ef412d2af16e8b97a1ba5b8b30", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 6722, + "serialized_chars": 6723, "estimated_input_tokens": 1681, "estimated_eval_tokens": 3618 }, @@ -15584,7 +15554,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 6722, + "serialized_chars": 6723, "estimated_input_tokens": 1681, "estimated_eval_tokens": 3618 }, @@ -15593,18 +15563,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are two distinct model-addition pull requests: one for DeepSeek OCR and one for Isaac. They share only common Transformers integration boilerplate, not the same underlying code change.", + "summary": "These are two separate model-integration PRs that share Transformers boilerplate, but they add different models (DeepSeek OCR vs Isaac) with different code paths, configs, and tests. 
They are not duplicates and should not be clustered together as the same change.", "confidence": 0.98, "canonical_issue_reason": null, - "canonical_pr_reason": null, + "canonical_pr_reason": "No canonical PR: the PRs implement different model additions and do not fix or introduce the same underlying change.", "best_issue_reason": null, - "best_pr_reason": "PR 41797 is the stronger standalone representative of the cluster because it has broader implementation scope and more review activity, but it is not a duplicate of PR 45186.", + "best_pr_reason": "Neither PR is a better duplicate representative; both are independent additions for different models, so there is no single best PR for this cluster.", "soft_edge_verdicts": [ { "left": "pull_request:41797", "right": "pull_request:45186", "accept": false, - "reason": "Different models and different codepaths: DeepSeek OCR vs Isaac. Shared files are generic registry/docs plumbing, not evidence of the same fix or mergeable change." + "reason": "Reject: both PRs add new model support, but for different models and distinct implementation files. The overlap is generic framework scaffolding, not the same concrete bug or change." 
} ] }, @@ -15618,7 +15588,7 @@ { "label": "pull_request", "component_index": 6, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -15634,12 +15604,12 @@ "soft_pairs": [ "pull_request:42166|pull_request:45426" ], - "prepared_review_unit_hash": "42e6a3ab10d2a44c0bd004cfe31c9983aa8b6a61bfd6c988d5a1f98e4a44d7f5", + "prepared_review_unit_hash": "0354163fc213dcc129350051495f5dfc250fd9ef9398e5de095b3aa5499414be", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 5407, + "serialized_chars": 5408, "estimated_input_tokens": 1352, "estimated_eval_tokens": 2960 }, @@ -15647,7 +15617,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 5407, + "serialized_chars": 5408, "estimated_input_tokens": 1352, "estimated_eval_tokens": 2960 }, @@ -15656,18 +15626,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "The two PRs are separate model-addition workstreams (InternVLFlash vs AXK1). They overlap only in shared registry/boilerplate files, not in the underlying change or code path.", + "summary": "These are two unrelated feature PRs that both touch Transformers auto-registration boilerplate, but they add different model families (InternVLFlash vs AXK1) and do not appear to fix the same underlying problem.", "confidence": 0.98, "canonical_issue_reason": null, - "canonical_pr_reason": "No canonical PR: these are different model integrations with different implementation files and different goals, so they are not duplicates.", + "canonical_pr_reason": "No canonical PR: the changes are for different model additions with different code paths, so they should not be deduplicated.", "best_issue_reason": null, - "best_pr_reason": "No single best PR for deduplication. 
If forced to pick a representative, 42166 is more mature because it includes docs and tests, but it is still unrelated to 45426.", + "best_pr_reason": "Neither PR is a good global representative for the cluster because the cluster is not actually a duplicate set; the overlap is just shared framework registration files.", "soft_edge_verdicts": [ { "left": "pull_request:42166", "right": "pull_request:45426", "accept": false, - "reason": "Shared auto-registry and package plumbing only; one adds InternVLFlash and the other adds AXK1, so they do not fix or implement the same underlying code-path change." + "reason": "Reject: these are distinct model-introduction PRs for different architectures and model packages, not the same bug or change. Shared auto files are expected boilerplate." } ] }, @@ -15681,7 +15651,7 @@ { "label": "pull_request", "component_index": 7, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -15704,70 +15674,70 @@ "pull_request:43291|pull_request:44815", "pull_request:43665|pull_request:44815" ], - "prepared_review_unit_hash": "5f5636f86a017ef44cd3ec8295f538556a3761fb687246323de9490bc94a8e1a", + "prepared_review_unit_hash": "9a913c35724b9a68dfe4e6464493dfdd738b6ab5b8c52cb7dc494088a1841379", "component_budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 10169, - "estimated_input_tokens": 2543, - "estimated_eval_tokens": 5342 + "serialized_chars": 10164, + "estimated_input_tokens": 2541, + "estimated_eval_tokens": 5338 }, "budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 10169, - "estimated_input_tokens": 2543, - "estimated_eval_tokens": 5342 + "serialized_chars": 10164, + "estimated_input_tokens": 2541, + "estimated_eval_tokens": 5338 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These PRs do not look like 
duplicates of the same underlying change. Two are Whisper-related, one is generic test noise, and one is an unrelated FP8/dequantization fix. Only the Whisper return-language PR appears to be a real bug fix; the others are too different in code path and intent.", - "confidence": 0.93, + "summary": "These are four unrelated pull requests with one weakly related Whisper pair. The strongest standalone candidate is the Whisper return-language fix; the others are test-only edits, a bogus test PR, and an unrelated FP8/model-loading change.", + "confidence": 0.92, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 42227 is the clearest substantive fix: it targets a specific Whisper ASR bug, has an explicit issue link, and changes the actual pipeline code path rather than only tests or unrelated subsystems.", + "canonical_pr_reason": "PR #42227 is the clearest real bug fix: it directly addresses Whisper return-language propagation, has an explicit linked issue, and includes a focused code-path change in the ASR pipeline.", "best_issue_reason": null, - "best_pr_reason": "PR 42227 is the best representative artifact in this cluster because it is a concrete production fix with issue linkage and clear behavior change; the others are either test-only/junk-like or unrelated to the same bug.", + "best_pr_reason": "PR #42227 is the best representative of the cluster because it is the only substantive, targeted fix with a clear user-facing bug and explicit issue linkage. The others are either test scaffolding or unrelated changes.", "soft_edge_verdicts": [ { "left": "pull_request:42227", "right": "pull_request:43291", "accept": false, - "reason": "Both are Whisper-related, but they fix different code paths: 42227 propagates return_language through the ASR pipeline, while 43291 changes tokenizer decode/timestamp handling and tests. They do not look mergeable as one duplicate PR." 
+ "reason": "Both are Whisper-related, but they fix different concrete problems: return-language plumbing vs. tokenizer decode/test behavior. Too broad to treat as the same underlying change." }, { "left": "pull_request:42227", "right": "pull_request:43665", "accept": false, - "reason": "42227 is a real Whisper pipeline bug fix; 43665 is just placeholder test edits across CLIP/ViT and does not address the same underlying change." + "reason": "No shared bug or code path. #43665 is just artificial test edits across CLIP and ViT, unrelated to Whisper return-language handling." }, { "left": "pull_request:42227", "right": "pull_request:44815", "accept": false, - "reason": "These are unrelated subsystems and problems: Whisper ASR parameter propagation versus FP8/dequant/model-loading changes. No shared concrete bug." + "reason": "Completely different subsystems and goals: Whisper ASR pipeline vs. FP8 dequant/model loading changes." }, { "left": "pull_request:43291", "right": "pull_request:43665", "accept": false, - "reason": "43291 modifies Whisper tokenizer behavior; 43665 is test noise in vision model tests. Different intent and no shared underlying bug." + "reason": "#43291 is a Whisper tokenizer fix/test PR; #43665 is unrelated test noise in vision model tests. Not the same underlying issue." }, { "left": "pull_request:43291", "right": "pull_request:44815", "accept": false, - "reason": "Whisper tokenizer decode/timestamp logic is unrelated to FP8 dequantization/model loading. They do not fix the same issue." + "reason": "Different domains and fixes. Whisper decoding/tests do not match FP8 quantization/dequantization changes." }, { "left": "pull_request:43665", "right": "pull_request:44815", "accept": false, - "reason": "One is placeholder test edits, the other is a substantive quantization/dequant fix. Not the same bug or change." + "reason": "No substantive overlap: fabricated model tests versus a real quantization/loading fix." 
} ] }, @@ -15781,7 +15751,7 @@ { "label": "pull_request", "component_index": 8, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -15797,12 +15767,12 @@ "soft_pairs": [ "pull_request:42975|pull_request:42976" ], - "prepared_review_unit_hash": "92ef2a134638e3629d7417b08d4d00d2953b55d7b40a09cfb4cf2a4f1ad25b20", + "prepared_review_unit_hash": "29384b8a8304f31ef7d82932c8ad8088b7f76a4eb0446634842820456ea1de44", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 5935, + "serialized_chars": 5934, "estimated_input_tokens": 1484, "estimated_eval_tokens": 3224 }, @@ -15810,7 +15780,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 5935, + "serialized_chars": 5934, "estimated_input_tokens": 1484, "estimated_eval_tokens": 3224 }, @@ -15819,18 +15789,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are related workflow-maintenance PRs, but they change different GitHub Actions dependencies and solve different upgrade concerns. They are similar in theme, not the same underlying change.", - "confidence": 0.89, + "summary": "Two unrelated workflow-maintenance PRs: one upgrades several JS-based GitHub Actions for Node 24 compatibility, the other bumps docker/build-push-action and a few other workflow actions to newer versions. They overlap in some workflow files but do not fix the same underlying problem.", + "confidence": 0.94, "canonical_issue_reason": null, - "canonical_pr_reason": "PR #42975 is the broader, more specific workflow-runtime upgrade: it updates several core actions for Node 24 compatibility and touches more workflow entry points. 
It is a better umbrella representative of this maintenance cluster than the narrower Docker-action bump in #42976.", + "canonical_pr_reason": "PR 42975 is the better cluster anchor because its title and diff clearly describe a concrete compatibility upgrade across many workflows, making the intent more specific and representative.", "best_issue_reason": null, - "best_pr_reason": "PR #42975 is the best single representative because its title and diff clearly describe the main compatibility-driven Actions upgrade, while #42976 is a smaller, separate dependency refresh focused on docker/build-push-action.", + "best_pr_reason": "PR 42975 is the best representative PR for this cluster because it is the more clearly scoped Actions-runtime compatibility update and touches the broadest set of affected workflows.", "soft_edge_verdicts": [ { "left": "pull_request:42975", "right": "pull_request:42976", "accept": false, - "reason": "Both are GitHub Actions version bumps, but they update different actions in different workflows (core checkout/cache/upload-artifact vs docker/build-push-action). They are related maintenance, not the same concrete bug or change, so I would not merge them as duplicates." + "reason": "Both are general GitHub Actions version bumps, but they change different action families and address different concerns (Node 24 compatibility vs docker/build-push-action updates). Shared workflow filenames alone are not enough to treat them as the same change." 
} ] }, @@ -15844,7 +15814,7 @@ { "label": "pull_request", "component_index": 9, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -15878,7 +15848,7 @@ "pull_request:45490|pull_request:45630", "pull_request:45626|pull_request:45630" ], - "prepared_review_unit_hash": "f1e612125d6dfcc47d82456fe0eeb0b1ba89e20ee1de622adcdaabf7fb26103a", + "prepared_review_unit_hash": "02a4edf8b96b1885e6b0966fa32e36e16aea8de3b8b6a695ac5a8529461e7f63", "component_budget": { "node_count": 9, "item_count": 9, @@ -15900,84 +15870,84 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These PRs are not duplicates: they cover different model additions and one distributed-loading feature, with only shared boilerplate in docs/auto registration. All soft edges should be rejected.", + "summary": "The soft matches are superficial: they mostly reflect boilerplate for new model integrations or unrelated infrastructure work. 
No pair appears to be the same underlying bug/change, so none should be merged as duplicates.", "confidence": 0.97, "canonical_issue_reason": null, - "canonical_pr_reason": "No single canonical PR fits this cluster; the items are distinct feature/model additions rather than the same underlying change.", + "canonical_pr_reason": "No canonical PR emerges; the items split into unrelated PRs (different model integrations, PP-loading infra, GGUF support), so none is a duplicate of the others.", "best_issue_reason": null, - "best_pr_reason": "No PR is a good global representative because the similarities are superficial (docs, auto-mapping, tests) and the concrete code paths differ.", + "best_pr_reason": "45626 is the broadest single PR by scope, but it is still a distinct model addition and not a duplicate of the others.", "soft_edge_verdicts": [ { "left": "pull_request:42978", "right": "pull_request:43448", "accept": false, - "reason": "Different models (ViT NEPA vs Molmo); overlap is only generic model-registration/docs boilerplate." + "reason": "Different model additions (ViT NEPA vs Molmo); shared auto/docs files are standard scaffolding, not the same change." }, { "left": "pull_request:42978", "right": "pull_request:43451", "accept": false, - "reason": "Different models (ViT NEPA vs Molmo2) with distinct files and code paths." + "reason": "Different model additions (ViT NEPA vs Molmo2); overlap is only in generic registration/docs files." }, { "left": "pull_request:42978", "right": "pull_request:45490", "accept": false, - "reason": "ViT NEPA model addition vs CTSM model addition; not the same bug/change." + "reason": "ViT NEPA and CTSM are distinct model integrations with different files and behavior." }, { "left": "pull_request:43448", "right": "pull_request:43451", "accept": false, - "reason": "Molmo and Molmo2 are separate model support PRs, not one fix split across two changes." 
+ "reason": "Molmo and Molmo2 are separate model families; similar file patterns do not make them duplicates." }, { "left": "pull_request:43448", "right": "pull_request:45490", "accept": false, - "reason": "Molmo vs CTSM are unrelated model additions; shared auto/model-doc edits are boilerplate." + "reason": "Molmo and CTSM are unrelated model additions." }, { "left": "pull_request:43451", "right": "pull_request:45490", "accept": false, - "reason": "Molmo2 and CTSM are different models with different implementation files and targets." + "reason": "Molmo2 and CTSM are different model integrations, not one concrete fix." }, { "left": "pull_request:43973", "right": "pull_request:45283", "accept": false, - "reason": "Lfm2Audio model support and Qwen3.5 GGUF loading are different feature areas and code paths." + "reason": "Lfm2 audio support and Qwen3.5 GGUF loading support are unrelated code paths." }, { "left": "pull_request:44601", "right": "pull_request:45626", "accept": false, - "reason": "Pipeline-parallel infrastructure work is unrelated to PP-FormulaNet model support." + "reason": "Native pipeline-parallel loading support is infrastructure work; PP-FormulaNet is a model integration, not the same change." }, { "left": "pull_request:45283", "right": "pull_request:45626", "accept": false, - "reason": "GGUF loading support for Qwen3.5 and PP-FormulaNet model integration are distinct changes." + "reason": "GGUF loading support and PP-FormulaNet model support address different subsystems and problems." }, { "left": "pull_request:45490", "right": "pull_request:45626", "accept": false, - "reason": "CTSM model support and PP-FormulaNet model support are separate model additions." + "reason": "CTSM model support and PP-FormulaNet model support are distinct model additions." }, { "left": "pull_request:45490", "right": "pull_request:45630", "accept": false, - "reason": "CTSM and Kimi2-6 are different models; similarity is limited to standard auto/docs scaffolding." 
+ "reason": "CTSM and Kimi2-6 are separate model additions with different architectures and files." }, { "left": "pull_request:45626", "right": "pull_request:45630", "accept": false, - "reason": "PP-FormulaNet and Kimi2-6 are unrelated model support PRs with different implementation files." + "reason": "PP-FormulaNet and Kimi2-6 are different model integrations; shared auto-registration is boilerplate." } ] }, @@ -15991,7 +15961,7 @@ { "label": "pull_request", "component_index": 10, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -16010,7 +15980,7 @@ "pull_request:43488|pull_request:45476", "pull_request:43532|pull_request:45476" ], - "prepared_review_unit_hash": "2d6920781c3cc68e5504d58e25f0d34fe74baf8a6f33939e3ddfa6159af59186", + "prepared_review_unit_hash": "80aefc7aff3baf6b2f2da1b615ae283146cac32da69af74feb870edf468f7fa7", "component_budget": { "node_count": 3, "item_count": 3, @@ -16032,30 +16002,30 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "All three are unrelated synthetic \"don't merge\" PRs that only overlap on broad test/workflow areas and a shared vit test file; none look like the same underlying change or bug fix.", - "confidence": 0.96, + "summary": "Three open synthetic \"[don't merge]\" PRs with unrelated changes: one about setup/version/dependency metadata, one about comment/CI workflow automation, and one about CI workflow calling plus placeholder edits. 
The shared vit test file is incidental and does not indicate the same underlying fix.", + "confidence": 0.92, "canonical_issue_reason": null, - "canonical_pr_reason": "No canonical PR: these are separate, non-duplicative bot/demo changes with different purposes (setup/versioning, workflow display tweaks, and CI/workflow invocation plus unrelated code noise).", + "canonical_pr_reason": "PR #43488 is the clearest standalone bot-check style PR: it is small, explicitly labeled as a repo-bot format test, and has the most coherent single-purpose change set.", "best_issue_reason": null, - "best_pr_reason": "No single PR is a good cluster representative for deduplication because the overlap is superficial and the concrete code paths differ.", + "best_pr_reason": "PR #43488 is the best representative of the cluster because it is the most self-contained and clearly intentional test/dummy change, whereas the others mix broader workflow or CI edits with unrelated placeholder modifications.", "soft_edge_verdicts": [ { "left": "pull_request:43488", "right": "pull_request:43532", "accept": false, - "reason": "Shared vit test filename is not enough: 43488 is a setup/dependency/version-formatting PR, while 43532 changes workflow automation and testing utilities. Different underlying changes." + "reason": "Different change intents: #43488 tweaks setup/version/dependency metadata, while #43532 rewrites CI/comment workflow automation and testing utilities. Shared vit test edits look incidental." }, { "left": "pull_request:43488", "right": "pull_request:45476", "accept": false, - "reason": "43488 only tweaks setup metadata and dependency listing; 45476 adds a workflow caller and unrelated tokenization/test noise. They do not fix the same concrete problem." + "reason": "#43488 is a setup/version/dependency bot-check PR; #45476 adds a PR-CI caller workflow and unrelated tokenization/test placeholder edits. They do not fix the same concrete problem." 
}, { "left": "pull_request:43532", "right": "pull_request:45476", "accept": false, - "reason": "Both are synthetic PRs with some overlapping test/workflow files, but they touch different code paths and goals; no evidence they could be merged into one fix." + "reason": "Both touch workflows/tests, but the concrete changes are unrelated: #43532 changes self-comment/self-scheduled/slack-report logic, while #45476 adds a CI caller workflow plus placeholder code/test edits. Not mergeable as one PR." } ] }, @@ -16069,7 +16039,7 @@ { "label": "pull_request", "component_index": 11, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -16085,7 +16055,7 @@ "soft_pairs": [ "pull_request:43612|pull_request:43613" ], - "prepared_review_unit_hash": "04e50e9bc2e5702c735c83fcbae1eb6f68ec7ee18f74340263f330d25072ba0d", + "prepared_review_unit_hash": "142996ed3743430afe4b1d6ea878cfd8f92c76f051eb71905a5acd3910aee3a4", "component_budget": { "node_count": 2, "item_count": 2, @@ -16107,18 +16077,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are two closely named but distinct pipeline additions: one for Promptable Concept Segmentation and one for Promptable Visual Segmentation. They share documentation and registry plumbing, but the actual pipeline implementations, model integrations, and tests target different tasks, so they should not be deduplicated.", - "confidence": 0.94, + "summary": "Two separate PRs add different segmentation pipelines: one for promptable concept segmentation and one for promptable visual segmentation. 
They share scaffolding/docs patterns but target different tasks, models, and tests, so they are not duplicates.", + "confidence": 0.97, "canonical_issue_reason": null, - "canonical_pr_reason": "No true duplicate PR pair here; if selecting a single representative, PR 43613 is the stronger/broader one because it adds the visual-segmentation pipeline with multiple model integrations and dedicated tests/docs.", + "canonical_pr_reason": "No canonical PR: the two pull requests implement distinct pipeline additions with different task scopes and model integrations.", "best_issue_reason": null, - "best_pr_reason": "PR 43613 is the best representative of the cluster because it is the more complete feature branch and has clearer task scope with broader supporting changes.", + "best_pr_reason": "PR #43613 is the broader representative of the cluster because it integrates more model/test coverage and the more general visual segmentation pipeline, but it is still not the same change as #43612.", "soft_edge_verdicts": [ { "left": "pull_request:43612", "right": "pull_request:43613", "accept": false, - "reason": "Different underlying features and code paths: concept segmentation vs visual segmentation. Shared docs/registry updates and similar naming are not enough to treat them as the same change, and they would not plausibly be merged as one PR." + "reason": "Different underlying features: concept segmentation vs visual segmentation. They share documentation and pipeline-registration boilerplate, but the changed filenames, model integrations, and tests show separate code paths and not a single mergeable PR." 
} ] }, @@ -16132,7 +16102,7 @@ { "label": "pull_request", "component_index": 12, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -16148,40 +16118,40 @@ "soft_pairs": [ "pull_request:43651|pull_request:43663" ], - "prepared_review_unit_hash": "e3384a568fbb81b6b0f7327a98b63eb89495861b768e696e2a60b7b6abd621a2", + "prepared_review_unit_hash": "b5f1211e5d86db489e9e70df2946784fef578760d5245ba7018183e9b38dd805", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 4293, - "estimated_input_tokens": 1074, - "estimated_eval_tokens": 2404 + "serialized_chars": 4292, + "estimated_input_tokens": 1073, + "estimated_eval_tokens": 2402 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 4293, - "estimated_input_tokens": 1074, - "estimated_eval_tokens": 2404 + "serialized_chars": 4292, + "estimated_input_tokens": 1073, + "estimated_eval_tokens": 2402 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Two open PRs in the same trainer file, but they address unrelated Trainer extensibility points: gradient-accumulation loss scaling vs. signature-column filtering. The soft similarity is file-level only, not a shared underlying bug/change.", - "confidence": 0.98, + "summary": "Both PRs touch `trainer.py` and add extensibility hooks for custom trainers, but they change different code paths: one addresses gradient-accumulation loss scaling, the other column filtering based on model signatures. 
They are not duplicates.", + "confidence": 0.95, "canonical_issue_reason": null, - "canonical_pr_reason": null, + "canonical_pr_reason": "PR #43663 is the cleaner canonical representative: it introduces a focused helper for signature-column filtering, exposing a clear override point in `Trainer`.", "best_issue_reason": null, - "best_pr_reason": "PR #43651 is the more concrete change: it fixes a specific training-step loss-scaling behavior and introduces a narrowly scoped hook. PR #43663 is also valid, but it targets a different Trainer concern.", + "best_pr_reason": "PR #43663 is the best representative of the cluster\u2019s theme of trainer extensibility, with a narrowly scoped API change in a single code path.", "soft_edge_verdicts": [ { "left": "pull_request:43651", "right": "pull_request:43663", "accept": false, - "reason": "Different code paths and behaviors: #43651 changes gradient accumulation loss normalization, while #43663 changes how Trainer derives signature columns for input filtering. They are not the same bug or mergeable into one fix." + "reason": "Different underlying changes: #43651 alters gradient-accumulation loss normalization, while #43663 refactors signature-based column filtering. They do not fix the same bug or implement the same feature." 
} ] }, @@ -16195,7 +16165,7 @@ { "label": "pull_request", "component_index": 13, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -16213,63 +16183,63 @@ "pull_request:43656|pull_request:43842", "pull_request:43836|pull_request:43842" ], - "prepared_review_unit_hash": "466e6bdca13e43bf071fdca69e92afa07d3be700cbbd2cdb5a0544abdf7fc0d3", + "prepared_review_unit_hash": "400b49e4a193c37c3db695614eab29ec57c14a6c89896a34fc69088b25a4af63", "component_budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 2, - "serialized_chars": 5993, - "estimated_input_tokens": 1499, - "estimated_eval_tokens": 3254 + "serialized_chars": 5992, + "estimated_input_tokens": 1498, + "estimated_eval_tokens": 3252 }, "budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 2, - "serialized_chars": 5993, - "estimated_input_tokens": 1499, - "estimated_eval_tokens": 3254 + "serialized_chars": 5992, + "estimated_input_tokens": 1498, + "estimated_eval_tokens": 3252 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "PR #43842 is the strongest canonical choice: it makes the most targeted fix for the `TypeAdapter` NameError in `serve.py`. 
PRs #43656 and #43836 are related in the same area, but the packet does not conservatively establish that they are the same underlying fix, so they should be treated as possibly overlapping rather than definitively duplicate.", - "confidence": 0.87, + "summary": "All three PRs address the same CLI TypeAdapter NameError / annotation-evaluation problem in `src/transformers/cli/serve.py`, with 43842 being the cleanest minimal fix.", + "confidence": 0.82, "canonical_issue_reason": null, - "canonical_pr_reason": "PR #43842 is the cleanest minimal fix: it directly addresses the `TypeAdapter` runtime/import issue with a small, explicit change in `src/transformers/cli/serve.py` and avoids the broader refactoring present in the other PRs.", + "canonical_pr_reason": "PR 43842 is the most focused and explicit fix for the runtime `TypeAdapter` NameError when pydantic is absent, with a minimal change set and a clear title.", "best_issue_reason": null, - "best_pr_reason": "PR #43842 is the best representative of the cluster because it most narrowly and directly fixes the concrete `TypeAdapter` NameError case.", + "best_pr_reason": "PR 43842 best represents the cluster because it directly targets the concrete failure mode with the smallest, most surgical workaround.", "soft_edge_verdicts": [ { "left": "pull_request:43656", "right": "pull_request:43842", - "accept": false, - "reason": "The packet shows only shared filename and broad thematic similarity; the different explicit issue target and limited diff evidence do not establish the same concrete failure mode." + "accept": true, + "reason": "Both PRs are fixing the same underlying runtime annotation/name-resolution bug in the CLI serve module, and the changes are plausibly mergeable into one implementation." 
}, { "left": "pull_request:43836", "right": "pull_request:43842", - "accept": false, - "reason": "Although both target the same issue, #43836 appears to be a much broader `serve.py` rewrite, so the evidence is insufficient to say it is the same specific bugfix or that the PRs could cleanly merge as one change." + "accept": true, + "reason": "These appear to be alternative workarounds for the same `TypeAdapter` NameError in the same file and issue scope, so they represent the same underlying bug/change." } ] }, "evaluator_result": { - "accept": false, - "feedback": "The main conclusion that PR #43842 is the cleanest/minimal fix is grounded. However, the soft-edge verdicts are too strong. For 43656 vs 43842, the packet only shows soft filename similarity plus a different explicit issue target; it does not establish that both PRs fix the same TypeAdapter failure mode. For 43836 vs 43842, they do share the same issue target, but the preview shows 43836 is a much broader serve.py rewrite, so calling it simply another variant of the same bugfix is not sufficiently conservative. The summary should frame these as related or possibly overlapping, not definitively the same underlying fix." + "accept": true, + "feedback": "The cluster summary is grounded in the packet: all three PRs touch `src/transformers/cli/serve.py` and are clearly annotation/name-resolution fixes around `TypeAdapter`, with 43842 being the most surgical change. The soft-edge accepts are conservative enough, and the paired PRs do look mergeable as alternative fixes for the same bug family." 
}, "evaluator_used": true, - "retried": true, - "accepted_nontrivial_soft_edge": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 14, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -16335,7 +16305,7 @@ { "label": "pull_request", "component_index": 15, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 2, "status": "reviewed", @@ -16448,352 +16418,334 @@ "pull_request:44044|pull_request:44154", "pull_request:44024|pull_request:44076" ], - "prepared_review_unit_hash": "709cb79e419cbe2e4b361692350a70867de96495b094ce811b93d9ea4b8ea596", + "prepared_review_unit_hash": "7620acdd8b47a3867613ef1ee4b0cbd05747e0bdb34e685eebca5b9558d167d3", "component_budget": { "node_count": 28, "item_count": 28, "soft_pair_count": 77, - "serialized_chars": 84502, - "estimated_input_tokens": 21126, - "estimated_eval_tokens": 42508 + "serialized_chars": 84505, + "estimated_input_tokens": 21127, + "estimated_eval_tokens": 42510 }, "budget": { "node_count": 28, "item_count": 28, "soft_pair_count": 72, - "serialized_chars": 76936, - "estimated_input_tokens": 19234, - "estimated_eval_tokens": 38724 + "serialized_chars": 76938, + "estimated_input_tokens": 19235, + "estimated_eval_tokens": 38726 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a set of model-specific output-tracing refactors toward `capture_outputs`/`can_return_tuple`, not one shared bug. 
The only clear duplicate PR pairs are the GPT-J/CodeGen refactor set and the GPT-Neo refactor set; the rest are separate ports to different model codepaths.", - "confidence": 0.95, + "summary": "This cluster is dominated by model-specific refactors to the new standardized output tracing/capture_outputs interface. Most PRs are only thematically related, not true duplicates; the clearest duplicates are within the GPT-J/CodeGen line and the GPT-Neo line.", + "confidence": 0.84, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 44066 is the best anchor: it is explicitly issue-linked, touches the core GPT-J/CodeGen output-tracing refactor, and is the clearest representative of the shared change that later GPT-J PRs repeat.", + "canonical_pr_reason": "PR #44722 is the strongest representative of the cluster\u2019s core theme: it explicitly targets the shared tracking issue, touches the same GPT-J/CodeGen output-tracing code path, and appears to be the most complete/latest version of that specific refactor.", "best_issue_reason": null, - "best_pr_reason": "PR 44066 is the most representative and broadly scoped standardized-output-tracing PR in the cluster; it captures the main concrete codepath change better than the more fragmented model-by-model follow-ups.", + "best_pr_reason": "PR #44722 best represents the underlying change family because it is explicit about the shared issue, spans the central GPT-J/CodeGen refactor, and is the closest match to the duplicated GPT-J-related PRs in the cluster.", "soft_edge_verdicts": [ { "left": "pull_request:43996", "right": "pull_request:44085", "accept": false, - "reason": "Different model families and codepaths (CVT/FNet vs GPT-J); same umbrella refactor theme is not enough." + "reason": "Different model families and different code paths; only the broad output-tracing theme is shared." 
}, { "left": "pull_request:43996", "right": "pull_request:44044", "accept": false, - "reason": "Different models and implementations (CVT/FNet vs DeBERTa); not the same underlying change." + "reason": "Both are output-tracing refactors, but they target different architectures and concrete implementations." }, { "left": "pull_request:44066", "right": "pull_request:44085", "accept": true, - "reason": "Both target GPT-J output tracing on the same concrete block codepath; 44066 is the broader GPT-J/CodeGen refactor and 44085 is the overlapping GPT-J-only variant." + "reason": "Both are GPT-J output-tracing refactors on the same file/code path and look like overlapping implementations of the same change." }, { "left": "pull_request:44007", "right": "pull_request:44072", "accept": false, - "reason": "Both are output-tracing refactors, but they touch unrelated model stacks (RegNet/ResNet/RT-DETR vs EfficientNet)." + "reason": "Same broad standardized-output refactor theme, but different models and no shared concrete bug/change path." }, { "left": "pull_request:44072", "right": "pull_request:44722", "accept": false, - "reason": "EfficientNet and GPT-J/CodeGen are unrelated codepaths; only the refactor pattern is similar." + "reason": "EfficientNet output tracing and GPT-J/CodeGen output tracing are unrelated concrete changes." }, { "left": "pull_request:44066", "right": "pull_request:44072", "accept": false, - "reason": "Different model families and concrete implementations; no shared bug/change." + "reason": "Different model implementations; similarity is only at the refactor pattern level." }, { "left": "pull_request:44013", "right": "pull_request:44044", "accept": false, - "reason": "MobileNetV2 and DeBERTa are unrelated codepaths; same tracking issue does not make them duplicates." + "reason": "MobileNetV2 and DeBERTa are separate model-specific refactors with no shared code path." 
}, { "left": "pull_request:44066", "right": "pull_request:44086", "accept": false, - "reason": "Different models (GPT-J/CodeGen vs MGP-STR); only the standardized-output-tracing theme is shared." + "reason": "GPT-J/CodeGen versus MGP-STR is not the same underlying bug or change." }, { "left": "pull_request:44066", "right": "pull_request:44071", "accept": false, - "reason": "Different codepaths and model families (GPT-J/CodeGen vs MPT)." + "reason": "Same refactor style, but MPT and GPT-J/CodeGen are distinct implementations." }, { "left": "pull_request:44018", "right": "pull_request:44068", "accept": true, - "reason": "Same GPT-Neo refactor, same files, and same output-tracing change; these are duplicate implementations of the same concrete fix." + "reason": "Both are GPT-Neo output-tracing refactors on the same model/code path, so they plausibly duplicate the same change." }, { "left": "pull_request:44066", "right": "pull_request:44068", "accept": false, - "reason": "GPT-J/CodeGen and GPT-Neo are separate model implementations; not the same PR change." + "reason": "GPT-J/CodeGen and GPT-Neo are different models; shared issue target alone is not enough." }, { "left": "pull_request:44018", "right": "pull_request:44066", "accept": false, - "reason": "GPT-Neo vs GPT-J/CodeGen are distinct refactors on different codepaths." + "reason": "Different model families and separate concrete implementations." }, { "left": "pull_request:44019", "right": "pull_request:44722", "accept": false, - "reason": "ResNet and GPT-J/CodeGen are unrelated model codepaths." + "reason": "ResNet output tracing and GPT-J/CodeGen output tracing are unrelated changes." }, { "left": "pull_request:44086", "right": "pull_request:44722", "accept": false, - "reason": "MGP-STR and GPT-J/CodeGen are different concrete implementations." + "reason": "MGP-STR and GPT-J/CodeGen are different code paths with only thematic overlap." 
}, { "left": "pull_request:44019", "right": "pull_request:44085", "accept": false, - "reason": "ResNet and GPT-J are unrelated; same output-tracing pattern is not enough." + "reason": "ResNet and GPT-J are distinct models; not the same underlying fix." }, { "left": "pull_request:44018", "right": "pull_request:44019", "accept": false, - "reason": "GPT-Neo and ResNet are different model stacks with different fixes." + "reason": "Both are refactors, but they affect different models and cannot be merged as one concrete fix." }, { "left": "pull_request:44019", "right": "pull_request:44071", "accept": false, - "reason": "ResNet and MPT are unrelated codepaths." + "reason": "ResNet and MPT are unrelated model-specific changes." }, { "left": "pull_request:44068", "right": "pull_request:44116", "accept": false, - "reason": "GPT-Neo and Flaubert are different models; these are separate standardized-output-tracing ports." + "reason": "GPT-Neo and Flaubert are separate code paths; no shared concrete bug." }, { "left": "pull_request:44068", "right": "pull_request:44722", "accept": false, - "reason": "GPT-Neo and GPT-J/CodeGen are separate model refactors." + "reason": "GPT-Neo is a different model from GPT-J/CodeGen, despite similar decorator refactoring." }, { "left": "pull_request:44071", "right": "pull_request:44722", "accept": false, - "reason": "MPT and GPT-J/CodeGen are unrelated concrete codepaths." + "reason": "MPT and GPT-J/CodeGen are different implementations, not one duplicate fix." }, { - "left": "pull_request:44059", - "right": "pull_request:44086", + "left": "pull_request:44018", + "right": "pull_request:44116", "accept": false, - "reason": "GPT-2 and MGP-STR are unrelated model implementations." + "reason": "GPT-Neo and Flaubert do not share the same underlying change." 
}, { - "left": "pull_request:44085", + "left": "pull_request:44018", "right": "pull_request:44086", "accept": false, - "reason": "GPT-J and MGP-STR are different codepaths; only the refactor style matches." + "reason": "GPT-Neo and MGP-STR are unrelated concrete refactors." }, { - "left": "pull_request:44059", - "right": "pull_request:44722", + "left": "pull_request:44018", + "right": "pull_request:44085", "accept": false, - "reason": "GPT-2 and GPT-J/CodeGen are separate output-tracing refactors." + "reason": "GPT-Neo versus GPT-J is only a loose theme match." }, { - "left": "pull_request:44098", - "right": "pull_request:44101", + "left": "pull_request:44068", + "right": "pull_request:44161", "accept": false, - "reason": "ViLT and Flaubert are unrelated model codepaths." + "reason": "GPT-Neo and LongT5 are separate model-specific implementations." }, { "left": "pull_request:44018", "right": "pull_request:44071", "accept": false, - "reason": "GPT-Neo and MPT are different models with separate changes." - }, - { - "left": "pull_request:44017", - "right": "pull_request:44722", - "accept": false, - "reason": "Segformer and GPT-J/CodeGen are unrelated concrete implementations." - }, - { - "left": "pull_request:44017", - "right": "pull_request:44161", - "accept": false, - "reason": "Segformer and LongT5 are different model families; not a duplicate change." + "reason": "Different models and no shared concrete code-path problem." }, { - "left": "pull_request:44017", - "right": "pull_request:44073", + "left": "pull_request:44059", + "right": "pull_request:44086", "accept": false, - "reason": "Segformer and VisualBert are separate model codepaths." + "reason": "GPT2 and MGP-STR are unrelated beyond using output-tracing helpers." }, { - "left": "pull_request:44068", - "right": "pull_request:44071", + "left": "pull_request:44085", + "right": "pull_request:44086", "accept": false, - "reason": "GPT-Neo and MPT are unrelated implementations." 
+ "reason": "GPT-J and MGP-STR are different model refactors." }, { - "left": "pull_request:44071", - "right": "pull_request:44086", + "left": "pull_request:44059", + "right": "pull_request:44722", "accept": false, - "reason": "MPT and MGP-STR are different codepaths." + "reason": "GPT2 and GPT-J/CodeGen are distinct concrete changes." }, { - "left": "pull_request:43997", - "right": "pull_request:44073", + "left": "pull_request:44098", + "right": "pull_request:44101", "accept": false, - "reason": "RegNet and VisualBert are unrelated model families." + "reason": "ViLT and Flaubert/XLM are not the same underlying bug or change." }, { - "left": "pull_request:43997", - "right": "pull_request:44017", + "left": "pull_request:44073", + "right": "pull_request:44161", "accept": false, - "reason": "RegNet and Segformer are separate model implementations." + "reason": "VisualBert and LongT5 are separate model-specific output refactors." }, { - "left": "pull_request:44010", - "right": "pull_request:44071", + "left": "pull_request:44017", + "right": "pull_request:44018", "accept": false, - "reason": "SqueezeBert and MPT are unrelated codepaths." + "reason": "SegFormer and GPT-Neo are different models; shared issue target is not enough." }, { - "left": "pull_request:44010", - "right": "pull_request:44073", + "left": "pull_request:44017", + "right": "pull_request:44056", "accept": false, - "reason": "SqueezeBert and VisualBert are different model families." + "reason": "SegFormer and MPNet are unrelated concrete changes." }, { - "left": "pull_request:44018", - "right": "pull_request:44129", + "left": "pull_request:44071", + "right": "pull_request:44076", "accept": false, - "reason": "GPT-Neo and SpeechT5 are separate concrete refactors." + "reason": "MPT and ImageGPT are distinct model paths." }, { "left": "pull_request:44074", "right": "pull_request:44101", "accept": false, - "reason": "TextNet and Flaubert are unrelated implementations." 
+ "reason": "TextNet and Flaubert/XLM are unrelated model-specific refactors." }, { "left": "pull_request:44056", "right": "pull_request:44161", "accept": false, - "reason": "MPNet and LongT5 are different codepaths." + "reason": "MPNet and LongT5 do not appear to be the same bug or change." }, { "left": "pull_request:43997", "right": "pull_request:44018", "accept": false, - "reason": "RegNet and GPT-Neo are unrelated model refactors." + "reason": "RegNet and GPT-Neo are different model implementations." }, { "left": "pull_request:43997", "right": "pull_request:44161", "accept": false, - "reason": "RegNet and LongT5 are distinct model implementations." + "reason": "RegNet and LongT5 are unrelated concrete changes." }, { "left": "pull_request:44071", - "right": "pull_request:44129", - "accept": false, - "reason": "MPT and SpeechT5 are unrelated concrete codepaths." - }, - { - "left": "pull_request:43996", - "right": "pull_request:44161", + "right": "pull_request:44085", "accept": false, - "reason": "CVT/FNet and LongT5 are different models; not the same change." + "reason": "MPT and GPT-J are different code paths." }, { "left": "pull_request:43996", "right": "pull_request:44017", "accept": false, - "reason": "CVT/FNet and Segformer are unrelated implementations." + "reason": "CVT/FNet and SegFormer are unrelated model-specific refactors." }, { "left": "pull_request:44010", - "right": "pull_request:44044", + "right": "pull_request:44071", "accept": false, - "reason": "SqueezeBert and DeBERTa are separate model codepaths." + "reason": "SqueezeBert and MPT are different concrete implementations." }, { - "left": "pull_request:44044", - "right": "pull_request:44074", + "left": "pull_request:44010", + "right": "pull_request:44073", "accept": false, - "reason": "DeBERTa and TextNet are unrelated models." + "reason": "SqueezeBert and VisualBert are separate model changes." 
}, { "left": "pull_request:44066", "right": "pull_request:44722", "accept": true, - "reason": "Same GPT-J/CodeGen output-tracing refactor; 44722 is a near-duplicate follow-up on the same concrete change." + "reason": "Same GPT-J/CodeGen output-tracing refactor on the same code path; these are plausible duplicates." }, { "left": "pull_request:44085", "right": "pull_request:44722", "accept": true, - "reason": "Both are GPT-J output-tracing refactors on the same underlying codepath and could plausibly be merged into one PR." + "reason": "Both touch GPT-J output tracing in the same area and are close variants of the same refactor." }, { "left": "pull_request:44085", "right": "pull_request:44129", "accept": false, - "reason": "GPT-J and SpeechT5 are unrelated model implementations." + "reason": "GPT-J and SpeechT5 are unrelated model changes." }, { "left": "pull_request:43996", "right": "pull_request:44007", "accept": false, - "reason": "CVT/FNet and RegNet/ResNet/RT-DETR are different model families." + "reason": "CVT/FNet and RegNet/ResNet/RT-DETR are separate model-specific refactors." }, { "left": "pull_request:43996", "right": "pull_request:44129", "accept": false, - "reason": "CVT/FNet and SpeechT5 are unrelated codepaths." + "reason": "CVT/FNet and SpeechT5 do not share the same concrete change." }, { "left": "pull_request:44044", "right": "pull_request:44129", "accept": false, - "reason": "DeBERTa and SpeechT5 are separate model implementations." + "reason": "DeBERTa and SpeechT5 are unrelated model implementations." }, { "left": "pull_request:44044", "right": "pull_request:44154", "accept": false, - "reason": "DeBERTa and ViT-S are unrelated model codepaths." + "reason": "DeBERTa and ViT-S are separate model-specific refactors." }, { "left": "pull_request:44024", "right": "pull_request:44076", "accept": false, - "reason": "FocalNet and ImageGPT are different model families; same output-tracing theme does not imply duplication." 
+ "reason": "FocalNet and ImageGPT are different model code paths; only the decorator pattern overlaps." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is well grounded in the packet and stays conservative about the soft edges. The accepted pairs are all within the same concrete model/codepath families (GPT-Neo and GPT-J/CodeGen), and the rejections appropriately avoid treating shared output-tracing style or shared issue-targets as duplicates across unrelated models. No clear overstatement of evidence." + "feedback": "Grounded and conservative overall. The summary matches the packet: most PRs are thematic refactors around standardized output tracing, while the only clearly overlapping clusters are GPT-J/CodeGen and GPT-Neo. The selected representative PR #44722 is well supported by the shared issue target and overlapping code path. The true soft-edge verdicts are limited to pairs that look mergeable as the same bugfix family." }, "evaluator_used": true, "retried": false, @@ -16804,7 +16756,7 @@ { "label": "pull_request", "component_index": 15, - "component_count": 23, + "component_count": 24, "review_unit_index": 2, "review_unit_count": 2, "status": "reviewed", @@ -16828,20 +16780,20 @@ "pull_request:43997|pull_request:44010", "pull_request:44010|pull_request:44129" ], - "prepared_review_unit_hash": "126c7d405f80fdc7403b5cdc7097d4c584642317aed1ef568699a1d5cc227e4e", + "prepared_review_unit_hash": "5fec5a49e0f7ca5d9e596e94b63853f9de7732bfcbea958f544a6394cf882d20", "component_budget": { "node_count": 28, "item_count": 28, "soft_pair_count": 77, - "serialized_chars": 84502, - "estimated_input_tokens": 21126, - "estimated_eval_tokens": 42508 + "serialized_chars": 84505, + "estimated_input_tokens": 21127, + "estimated_eval_tokens": 42510 }, "budget": { "node_count": 6, "item_count": 6, "soft_pair_count": 5, - "serialized_chars": 13066, + "serialized_chars": 13067, "estimated_input_tokens": 3267, "estimated_eval_tokens": 6790 }, @@ -16850,42 
+16802,42 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "These PRs are all model-specific refactors for standardized output tracing/capture, but they touch different architectures and code paths, so they should not be deduplicated as the same change.", - "confidence": 0.92, + "summary": "These PRs all implement the same broad pattern\u2014migrating different model files to standardized output capture\u2014but they are separate model-specific refactors, not duplicates of the same concrete change.", + "confidence": 0.91, "canonical_issue_reason": null, - "canonical_pr_reason": "PR #44129 is the strongest representative: it has an explicit issue target and a concrete, self-contained SpeechT5 tracing refactor. That said, it is not a duplicate of the others.", + "canonical_pr_reason": "PR 44129 is the strongest representative: it has an explicit issue target, a larger and more self-contained output-tracing refactor, and the diff shows a concrete implementation change rather than just a small follow-up tweak.", "best_issue_reason": null, - "best_pr_reason": "PR #44129 is the best single PR to keep as the representative example of this refactor pattern because it is well-scoped and linked to an issue. The others are similarly scoped but model-specific.", + "best_pr_reason": "44129 is the best single PR to anchor the cluster because it is issue-linked and clearly expresses the standardized output-capture migration in SpeechT5; the other PRs are similar but model-specific variants of the same refactor pattern.", "soft_edge_verdicts": [ { "left": "pull_request:44073", "right": "pull_request:44074", "accept": false, - "reason": "Both are output-tracing refactors, but one is for VisualBert and the other for TextNet. Different models, different files, and different implementations; not the same underlying change." 
+ "reason": "Both are standardized output-tracing refactors, but they touch different models (VisualBert vs TextNet) and distinct code paths, so they are not the same change and would not reasonably merge into one PR." }, { "left": "pull_request:44056", "right": "pull_request:44073", "accept": false, - "reason": "MPNet vs VisualBert. Same broad refactor theme, but they modify separate model code paths and are independent changes." + "reason": "Same refactor theme, but MPNet and VisualBert are separate implementations with different model internals and filenames; this is a pattern match, not a duplicate underlying fix." }, { "left": "pull_request:44056", "right": "pull_request:44074", "accept": false, - "reason": "MPNet and TextNet are distinct model implementations with unrelated refactors despite similar decorator/capture naming." + "reason": "These are parallel refactors in different model codebases, not the same bug or code-path fix. The shared capture_outputs migration is too broad to treat as a duplicate." }, { "left": "pull_request:43997", "right": "pull_request:44010", "accept": false, - "reason": "RegNet and SqueezeBert are unrelated model-specific output tracing updates; they do not appear mergeable into one PR for the same bug or change." + "reason": "RegNet and SqueezeBert are unrelated model implementations. Even though both adopt the same output-capture approach, they do not target the same underlying issue." }, { "left": "pull_request:44010", "right": "pull_request:44129", "accept": false, - "reason": "SqueezeBert vs SpeechT5, different model internals and different output-tracing logic. Similar theme only at a high level, not the same concrete fix." + "reason": "Both migrate output handling, but SqueezeBert and SpeechT5 are different models with different forward paths and no evidence of a shared concrete defect." 
} ] }, @@ -16899,7 +16851,7 @@ { "label": "pull_request", "component_index": 16, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -16965,268 +16917,268 @@ "pull_request:44028|pull_request:44030", "pull_request:44029|pull_request:44030" ], - "prepared_review_unit_hash": "636272954d30916491a475487422f1a8c03c8b5eaf656b8dca64a8cd3dbbe155", + "prepared_review_unit_hash": "83b95bb0cdb797375bfff98af281829e0ac4f9109bbe603be3d4ea0287cd294b", "component_budget": { "node_count": 13, "item_count": 13, "soft_pair_count": 40, - "serialized_chars": 40173, - "estimated_input_tokens": 10044, - "estimated_eval_tokens": 20344 + "serialized_chars": 40169, + "estimated_input_tokens": 10043, + "estimated_eval_tokens": 20342 }, "budget": { "node_count": 13, "item_count": 13, "soft_pair_count": 40, - "serialized_chars": 40173, - "estimated_input_tokens": 10044, - "estimated_eval_tokens": 20344 + "serialized_chars": 40169, + "estimated_input_tokens": 10043, + "estimated_eval_tokens": 20342 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "This cluster is a set of separate, model-specific output-tracing refactors. They are thematically similar, but they do not describe the same concrete bug or change, so they should not be merged as duplicates.", - "confidence": 0.95, + "summary": "These PRs are thematically similar output-tracing refactors, but they target different model implementations and code paths. They are not duplicates, so the cluster should not be collapsed.", + "confidence": 0.97, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 44025 is the best representative of the theme because it is a clean, self-contained output-tracing refactor and spans a closely related pair of files. 
That said, the cluster is not duplicate-equivalent overall; the PRs target different model implementations.", + "canonical_pr_reason": "No canonical PR: each pull request changes a different model or wrapper, so they are related by theme only, not by the same underlying change.", "best_issue_reason": null, - "best_pr_reason": "PR 44025 is the most representative hub for the cluster, but only as a thematic example rather than a duplicate canonical. It is not evidence that the other PRs are the same change.", + "best_pr_reason": "No single best PR for deduping; the set is a collection of separate model-specific refactors rather than one shared fix.", "soft_edge_verdicts": [ { "left": "pull_request:43998", "right": "pull_request:44002", "accept": false, - "reason": "Same refactor pattern, but timm_backbone and upernet are different model code paths." + "reason": "Different model files and code paths: timm_backbone vs upernet. Same refactor theme, not the same change." }, { "left": "pull_request:43998", "right": "pull_request:44000", "accept": false, - "reason": "Both are output-tracing refactors, but they touch unrelated model implementations." + "reason": "Different architectures and wrappers: timm_backbone vs vision_text_dual_encoder. Not a duplicate bug/change." }, { "left": "pull_request:43998", "right": "pull_request:44001", "accept": false, - "reason": "Different architectures and forward paths; not the same underlying change." + "reason": "Different model implementations: timm_backbone and univnet. Similar output-tracing cleanup only." }, { "left": "pull_request:43998", "right": "pull_request:44003", "accept": false, - "reason": "timm_backbone is unrelated to mamba/falcon_mamba; same theme, not same code path." + "reason": "Different subsystems and files: timm_backbone vs falcon_mamba/mamba. Not the same concrete fix." 
}, { "left": "pull_request:43998", "right": "pull_request:44029", "accept": false, - "reason": "Unrelated model families; these are not mergeable as one concrete fix." + "reason": "Backbone refactor vs RWKV recurrent model refactor. Shared wording, separate code paths." }, { "left": "pull_request:43999", "right": "pull_request:44001", "accept": false, - "reason": "Different model implementations; only the refactor pattern matches." + "reason": "MobileNetV1 vs UnivNet are unrelated model families; both refactor tracing, but not the same change." }, { "left": "pull_request:43999", "right": "pull_request:44002", "accept": false, - "reason": "MobileNetV1 and UperNet are separate model-specific changes." + "reason": "MobileNetV1 and UperNet touch different model stacks and outputs. Not duplicates." }, { "left": "pull_request:43999", "right": "pull_request:44025", "accept": false, - "reason": "Different model families and forward logic; not a duplicate." + "reason": "MobileNetV1 vs Depth Anything are different models with different output wrappers; only the refactor theme matches." }, { "left": "pull_request:43999", "right": "pull_request:44026", "accept": false, - "reason": "MobileNetV1 vs vision-encoder-decoder wrappers are distinct code paths." + "reason": "MobileNetV1 and VisionEncoderDecoder are unrelated implementations; no shared underlying bug." }, { "left": "pull_request:43999", "right": "pull_request:44027", "accept": false, - "reason": "Separate model wrappers; same refactor style is not enough to merge." + "reason": "MobileNetV1 vs SpeechEncoderDecoder are distinct code paths and model families." }, { "left": "pull_request:44000", "right": "pull_request:44001", "accept": false, - "reason": "Vision-text dual encoder and UnivNet are unrelated implementations." + "reason": "Vision-text dual encoder and UnivNet are unrelated; same refactor style only." 
}, { "left": "pull_request:44000", "right": "pull_request:44002", "accept": false, - "reason": "Different model subsystems; not the same bug or change." + "reason": "Vision-text dual encoder vs UperNet are different model wrappers and outputs." }, { "left": "pull_request:44000", "right": "pull_request:44003", "accept": false, - "reason": "Vision-text dual encoder and mamba are unrelated model paths." + "reason": "Vision-text dual encoder is unrelated to Mamba/Falcon-Mamba; not one shared fix." }, { "left": "pull_request:44000", "right": "pull_request:44025", "accept": false, - "reason": "These are separate model-specific refactors, not one concrete fix." + "reason": "Different model domains and files: vision-text dual encoder vs Depth Anything." }, { "left": "pull_request:44001", "right": "pull_request:44002", "accept": false, - "reason": "UnivNet and UperNet are different models with distinct forward paths." + "reason": "UnivNet vs UperNet are different model families; not the same bug or change." }, { "left": "pull_request:44001", "right": "pull_request:44003", "accept": false, - "reason": "Different architectures; the similarity is only at the refactor level." + "reason": "UnivNet and Mamba/Falcon-Mamba touch separate code paths, despite similar refactor language." }, { "left": "pull_request:44001", "right": "pull_request:44025", "accept": false, - "reason": "Separate model families; not a duplicate change." + "reason": "UnivNet vs Depth Anything are unrelated model implementations." }, { "left": "pull_request:44002", - "right": "pull_request:44003", + "right": "pull_request:44029", "accept": false, - "reason": "UperNet and mamba/falcon_mamba are unrelated code paths." + "reason": "UperNet and RWKV are unrelated architectures; no shared concrete change." }, { "left": "pull_request:44002", - "right": "pull_request:44029", + "right": "pull_request:44003", "accept": false, - "reason": "Different models; same output-tracing theme does not imply duplication." 
+ "reason": "UperNet vs Mamba/Falcon-Mamba are distinct model stacks with different output behavior." }, { "left": "pull_request:44003", "right": "pull_request:44028", "accept": false, - "reason": "Mamba/falcon_mamba and SuperPoint are unrelated implementations." + "reason": "Mamba/Falcon-Mamba vs SuperPoint are different domains and files; not mergeable as one PR." }, { "left": "pull_request:44003", "right": "pull_request:44004", "accept": false, - "reason": "Mamba/falcon_mamba changes are not the same change as CodeGen output tracing." + "reason": "Mamba/Falcon-Mamba refactor is unrelated to CodeGen output handling." }, { "left": "pull_request:43998", "right": "pull_request:44004", "accept": false, - "reason": "timm_backbone and CodeGen are different model code paths." + "reason": "timm_backbone and CodeGen are unrelated model code paths; same theme only." }, { "left": "pull_request:44001", "right": "pull_request:44004", "accept": false, - "reason": "UnivNet and CodeGen are unrelated; no shared concrete bug." + "reason": "UnivNet and CodeGen are different models with different output semantics." }, { "left": "pull_request:44002", "right": "pull_request:44004", "accept": false, - "reason": "UperNet and CodeGen are distinct implementations with different fixes." + "reason": "UperNet and CodeGen are separate implementations; no common underlying bug." }, { "left": "pull_request:44025", "right": "pull_request:44026", "accept": false, - "reason": "DepthAnything and vision-encoder-decoder are different model paths; same refactor pattern only." + "reason": "Depth Anything and VisionEncoderDecoder are different model wrappers, not the same change." }, { "left": "pull_request:44025", "right": "pull_request:44027", "accept": false, - "reason": "DepthAnything and SpeechEncoderDecoder are unrelated concrete changes." + "reason": "Depth Anything vs SpeechEncoderDecoder are unrelated refactors across different code paths." 
}, { "left": "pull_request:44025", "right": "pull_request:44028", "accept": false, - "reason": "DepthAnything and SuperPoint do not fix the same underlying issue." + "reason": "Depth Anything and SuperPoint are distinct vision models; no shared bug fix." }, { "left": "pull_request:44025", "right": "pull_request:44029", "accept": false, - "reason": "DepthAnything and RWKV are unrelated code paths." + "reason": "Depth Anything and RWKV are unrelated model families." }, { "left": "pull_request:44025", "right": "pull_request:44030", "accept": false, - "reason": "DepthAnything and DPR are different model implementations." + "reason": "Depth Anything vs DPR are different model stacks and output contracts." }, { "left": "pull_request:44026", - "right": "pull_request:44029", + "right": "pull_request:44027", "accept": false, - "reason": "Vision-encoder-decoder and RWKV are unrelated; not the same fix." + "reason": "VisionEncoderDecoder and SpeechEncoderDecoder are parallel wrappers, but they are separate implementations and PRs." }, { "left": "pull_request:44026", - "right": "pull_request:44027", + "right": "pull_request:44028", "accept": false, - "reason": "Both are encoder-decoder wrappers, but they are separate model-specific forward methods and not one concrete bug." + "reason": "VisionEncoderDecoder vs SuperPoint are unrelated models; not the same underlying fix." }, { "left": "pull_request:44026", - "right": "pull_request:44028", + "right": "pull_request:44029", "accept": false, - "reason": "Vision-encoder-decoder and SuperPoint are different code paths." + "reason": "VisionEncoderDecoder and RWKV change different code paths and output handling." }, { "left": "pull_request:44026", "right": "pull_request:44030", "accept": false, - "reason": "Encoder-decoder wrapper refactor vs DPR wrapper refactor are not the same change." + "reason": "VisionEncoderDecoder and DPR are separate model families and refactors." 
}, { "left": "pull_request:44027", "right": "pull_request:44028", "accept": false, - "reason": "SpeechEncoderDecoder and SuperPoint are unrelated implementations." + "reason": "SpeechEncoderDecoder and SuperPoint are unrelated model implementations." }, { "left": "pull_request:44027", "right": "pull_request:44029", "accept": false, - "reason": "SpeechEncoderDecoder and RWKV are unrelated model code paths." + "reason": "SpeechEncoderDecoder vs RWKV are different architectures; no duplicate change." }, { "left": "pull_request:44027", "right": "pull_request:44030", "accept": false, - "reason": "SpeechEncoderDecoder and DPR do not share the same concrete fix." + "reason": "SpeechEncoderDecoder and DPR touch different model code and output behavior." }, { "left": "pull_request:44028", "right": "pull_request:44029", "accept": false, - "reason": "SuperPoint and RWKV are distinct model-specific changes." + "reason": "SuperPoint and RWKV are unrelated subsystems and cannot be merged as one PR." }, { "left": "pull_request:44028", "right": "pull_request:44030", "accept": false, - "reason": "SuperPoint and DPR are unrelated implementations." + "reason": "SuperPoint vs DPR are separate model implementations with different output paths." }, { "left": "pull_request:44029", "right": "pull_request:44030", "accept": false, - "reason": "RWKV and DPR are different models with different forward logic." + "reason": "RWKV and DPR are distinct models; similar refactor theme does not make them duplicates." 
} ] }, @@ -17240,7 +17192,7 @@ { "label": "pull_request", "component_index": 17, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -17259,52 +17211,52 @@ "pull_request:44662|pull_request:45075", "pull_request:45075|pull_request:45401" ], - "prepared_review_unit_hash": "29283d69ec866496e4c2137c385e87156608f0b8d667abd94b444c4d1d5f92c3", + "prepared_review_unit_hash": "c617a66fbd9f69e1d3001ac8592df288a9d483bcc7bb3cf7516a13169b2ab085", "component_budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 3, - "serialized_chars": 11073, - "estimated_input_tokens": 2769, - "estimated_eval_tokens": 5794 + "serialized_chars": 10844, + "estimated_input_tokens": 2711, + "estimated_eval_tokens": 5678 }, "budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 3, - "serialized_chars": 11073, - "estimated_input_tokens": 2769, - "estimated_eval_tokens": 5794 + "serialized_chars": 10844, + "estimated_input_tokens": 2711, + "estimated_eval_tokens": 5678 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are three separate model-addition PRs for different models (PenguinVL, DeepSeek-OCR-2, and Voxtral TTS). The overlap is mostly shared Transformers boilerplate for new model integration, not the same underlying change.", - "confidence": 0.98, + "summary": "All three PRs are separate model integrations (PenguinVL, DeepSeek-OCR-2, and Voxtral TTS). 
They share only generic Transformers plumbing like auto-mapping and docs, not the same underlying code change or bug fix, so none should be clustered as duplicates.", + "confidence": 0.99, "canonical_issue_reason": null, - "canonical_pr_reason": "No canonical PR: although the PRs share new-model scaffolding, they implement different models and are not duplicates.", + "canonical_pr_reason": null, "best_issue_reason": null, - "best_pr_reason": "No single PR is a best duplicate representative here; each PR targets a different model and could not be merged into one change.", + "best_pr_reason": "PR 45075 is the strongest standalone representative in this set: it is a complete model addition with substantial implementation/test coverage and high review activity, but it is still unrelated to the other two PRs.", "soft_edge_verdicts": [ { "left": "pull_request:44662", "right": "pull_request:45401", "accept": false, - "reason": "Different models and codepaths: PenguinVL vs Voxtral TTS. Shared auto-registration/docs files are generic new-model plumbing, not the same fix." + "reason": "Different model additions for PenguinVL vs Voxtral TTS; overlap is limited to shared framework registration/docs files, not the same concrete model codepath or fix." }, { "left": "pull_request:44662", "right": "pull_request:45075", "accept": false, - "reason": "Different model implementations: PenguinVL and DeepSeek-OCR-2. The overlap is only shared framework registration and docs, so they are not duplicates." + "reason": "PenguinVL and DeepSeek-OCR-2 are distinct model implementations with different architectures and files; shared auto-registration and docs plumbing is too generic to treat as a duplicate." }, { "left": "pull_request:45075", "right": "pull_request:45401", "accept": false, - "reason": "Different model additions for DeepSeek-OCR-2 and Voxtral TTS. Shared Transformers scaffolding does not indicate the same underlying bug or change." 
+ "reason": "DeepSeek-OCR-2 and Voxtral TTS address different models and functionality; they only coincide in generic Transformers integration points, so they are not the same change." } ] }, @@ -17318,7 +17270,7 @@ { "label": "pull_request", "component_index": 18, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -17334,7 +17286,7 @@ "soft_pairs": [ "pull_request:44979|pull_request:45363" ], - "prepared_review_unit_hash": "5c60281b491d0b5f0ac2a5454cc92f029ee68617c43e1f78eb08002e3a18f362", + "prepared_review_unit_hash": "caf349f15c8fc438109ff978706c49231c840c6091f556ab2bf075a853469268", "component_budget": { "node_count": 2, "item_count": 2, @@ -17356,18 +17308,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are related fusion-feature PRs, but they target different layers and are not duplicates: one adds a standalone Module Fusion API, while the other wires n-to-1 kernel fusion through KernelConfig and model loading internals.", - "confidence": 0.87, + "summary": "These PRs are related in the fusion/kernels area, but they are not the same change: one adds a generic module-fusion API scaffold, while the other wires `KernelConfig`-driven n-to-1 fusion into model loading and runtime usage.", + "confidence": 0.93, "canonical_issue_reason": null, - "canonical_pr_reason": null, + "canonical_pr_reason": "PR 45363 is the more concrete, end-user-facing change: it integrates `KernelConfig` fusion into `modeling_utils`/integrations and includes a usage example, while PR 44979 is the lower-level API foundation.", "best_issue_reason": null, - "best_pr_reason": "PR 45363 is the broader, more end-to-end change for kernel fusion, touching configuration, integrations, and model loading; it is the better representative of the cluster's main theme.", + "best_pr_reason": "PR 45363 is the best representative for the cluster because it covers the actual functional 
behavior and integration path, not just the API scaffolding.", "soft_edge_verdicts": [ { "left": "pull_request:44979", "right": "pull_request:45363", "accept": false, - "reason": "Different concrete changes and code paths: 44979 introduces a new Module Fusion API module and tests, while 45363 adds KernelConfig-based n-to-1 kernel fusion plumbing, integration hooks, and an example. Related topic, but not the same mergeable change." + "reason": "Different layers of the feature: 44979 introduces a generic `Module Fusion API` file and tests, while 45363 implements `KernelConfig`-based n-to-1 kernel fusion and runtime wiring. They are related, but not the same concrete fix and would not be merged as one PR." } ] }, @@ -17381,7 +17333,7 @@ { "label": "pull_request", "component_index": 19, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -17397,7 +17349,7 @@ "soft_pairs": [ "pull_request:45086|pull_request:45317" ], - "prepared_review_unit_hash": "220462e86df87d807814f14bebe628ba16f9a3c29fb55dc25ad5cb568d840aa5", + "prepared_review_unit_hash": "b1960996ce8915af7052847dd1bacfe742a8c1f1f004d781769b53c64306558d", "component_budget": { "node_count": 2, "item_count": 2, @@ -17419,18 +17371,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Both PRs fix the same Mistral regex patching AttributeError in the same file and target the same issue; 45317 looks like the fuller follow-up with tests.", + "summary": "Both PRs fix the same Mistral regex patching AttributeError in the same function and file; 45317 looks like the fuller follow-up with tests, while 45086 is the smaller earlier variant.", "confidence": 0.98, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 45317 is the better canonical PR because it covers the same fix as 45086 but is more complete and includes test coverage.", + "canonical_pr_reason": "PR 45317 is the better canonical PR because it 
addresses the same bug as 45086 but includes tests and appears to be the more complete implementation of the fix.", "best_issue_reason": null, - "best_pr_reason": "PR 45317 is the best representative of the cluster: same concrete bug fix, clearer title, and additional tests make it the strongest standalone artifact.", + "best_pr_reason": "PR 45317 is the best representative because it contains the same code-path fix plus test coverage, making it the most complete and reviewable version.", "soft_edge_verdicts": [ { "left": "pull_request:45086", "right": "pull_request:45317", "accept": true, - "reason": "Same underlying AttributeError fix in _patch_mistral_regex, same target issue, and the diff shows the same code-path correction; 45317 is essentially the more complete version of 45086." + "reason": "Same underlying AttributeError in _patch_mistral_regex, same file and code path, with nearly identical fix; these could plausibly be merged into one PR." } ] }, @@ -17444,7 +17396,7 @@ { "label": "pull_request", "component_index": 20, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -17460,7 +17412,7 @@ "soft_pairs": [ "pull_request:45115|pull_request:45569" ], - "prepared_review_unit_hash": "150f3d80335a91aecbd7cabd6fe0d8c8114de376ef811205a0309fa3248fd650", + "prepared_review_unit_hash": "11d4f8f78672ff43078b60613d4b6bdc3c1101c1bedf6357332df1bc8e2a9c45", "component_budget": { "node_count": 2, "item_count": 2, @@ -17482,18 +17434,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Two open PRs both touch Nemotron H internals, but they target different changes: one refactors NemotronH to inherit GraniteMoeHybrid and add conversion mapping, while the other splits Nemotron H into dense/sparse model variants and updates docs/tests. 
They are related but not the same fix/change.", + "summary": "These two PRs are both about NemotronH, but they tackle different changes: PR 45115 is a refactor/compatibility and conversion-mapping update for the existing NemotronH architecture, while PR 45569 introduces separate NemotronH Dense/Sparse model variants, docs, and auto/model wiring. They are related by subsystem, not the same underlying change.", "confidence": 0.93, "canonical_issue_reason": null, - "canonical_pr_reason": null, + "canonical_pr_reason": "PR 45569 is the better representative of the cluster because it is the broader, more user-visible NemotronH family change and includes the new dense/sparse model split plus docs and integration updates.", "best_issue_reason": null, - "best_pr_reason": "PR #45569 is the better cluster representative because it covers the larger, end-to-end Nemotron H architecture change, including model split, auto-mapping, docs, and tests. PR #45115 is a narrower refactor focused on inheritance and weight conversion.", + "best_pr_reason": "PR 45569 is the strongest standalone candidate in this cluster: it covers the larger end-to-end model-family addition, not just an internal refactor/conversion mapping adjustment.", "soft_edge_verdicts": [ { "left": "pull_request:45115", "right": "pull_request:45569", "accept": false, - "reason": "Shared Nemotron H files alone are not enough: #45115 is a structural refactor/conversion-mapping PR, while #45569 introduces separate dense/sparse model classes plus docs/tests. They do not look like the same concrete change and would not plausibly be merged as one PR." + "reason": "They share NemotronH code paths, but 45115 is a refactor/conversion-mapping change and 45569 is a separate feature adding Dense/Sparse model variants and docs; they are not the same concrete fix or mergeable into one PR." 
} ] }, @@ -17507,7 +17459,7 @@ { "label": "pull_request", "component_index": 21, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -17523,12 +17475,12 @@ "soft_pairs": [ "pull_request:45332|pull_request:45333" ], - "prepared_review_unit_hash": "a924295be58fe005afd8cdcea24072d977f42e21d22780879e9c84a8c45df93f", + "prepared_review_unit_hash": "913328da88e1fbcd0f191981b78ae4cf4ea3c169e065253c3acee43d28eb662c", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 5398, + "serialized_chars": 5397, "estimated_input_tokens": 1350, "estimated_eval_tokens": 2956 }, @@ -17536,7 +17488,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 5398, + "serialized_chars": 5397, "estimated_input_tokens": 1350, "estimated_eval_tokens": 2956 }, @@ -17545,18 +17497,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are related but distinct PRs: one adds per-layer heterogeneous config plumbing, while the other adds broader heterogeneous model/cache support built on top of that config work. They look complementary rather than duplicate.", - "confidence": 0.82, + "summary": "These are related heterogeneous-support PRs, but they are not duplicates: one introduces per-layer config plumbing, the other wires that config into cache/modeling behavior. 
The pair should stay separate.", + "confidence": 0.88, "canonical_issue_reason": null, - "canonical_pr_reason": "PR #45332 is the broader, more central change: it implements heterogeneous model support across cache/modeling code and includes the config hooks it needs.", + "canonical_pr_reason": "PR 45332 is the broader end-to-end feature implementation, covering model/cache/masking integration on top of heterogeneous config support.", "best_issue_reason": null, - "best_pr_reason": "PR #45332 is the best representative of the cluster because it spans the full code-path impact and is the more complete end-to-end implementation.", + "best_pr_reason": "PR 45332 is the best representative of the overall feature cluster because it is the most complete user-facing implementation; 45333 is a narrower foundation PR.", "soft_edge_verdicts": [ { "left": "pull_request:45332", "right": "pull_request:45333", "accept": false, - "reason": "They share heterogeneity plumbing, but #45333 is config-only while #45332 extends model/cache behavior; they are related groundwork and follow-on work, not the same concrete change that should be merged as one PR." + "reason": "Related but distinct: 45333 adds the per-layer config API, while 45332 applies heterogeneous behavior across model/cache code paths. They share infrastructure, but they are not the same underlying change and would not naturally collapse into one PR." 
} ] }, @@ -17570,7 +17522,7 @@ { "label": "pull_request", "component_index": 22, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -17595,12 +17547,12 @@ "pull_request:45592|pull_request:45615", "pull_request:45596|pull_request:45615" ], - "prepared_review_unit_hash": "40f8da80ebff371273d5c3387a02dc195c9aa0368513048dff465def359654eb", + "prepared_review_unit_hash": "67b18b0b30292a5c02b9c8c41d899a9ab90f81b9fd36847271240838d8055795", "component_budget": { "node_count": 5, "item_count": 5, "soft_pair_count": 7, - "serialized_chars": 13274, + "serialized_chars": 13273, "estimated_input_tokens": 3319, "estimated_eval_tokens": 6894 }, @@ -17608,7 +17560,7 @@ "node_count": 5, "item_count": 5, "soft_pair_count": 7, - "serialized_chars": 13274, + "serialized_chars": 13273, "estimated_input_tokens": 3319, "estimated_eval_tokens": 6894 }, @@ -17617,54 +17569,54 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These PRs are not true duplicates overall: one substantive model-parallel/code-path fix (45487), one XCLIP test/model-parallel fix (45394), one FastVLM test-padding fix (45592), and two separate XPU expectation updates for unrelated models (45596, 45615).", + "summary": "This cluster is mostly a bundle of unrelated model-specific fixes and test expectation updates. 
The only substantive code fix is the AltCLIP/ChineseCLIP model-parallel patch, but the other PRs target different models and different failure modes, so they should not be merged together.", "confidence": 0.92, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 45487 is the strongest representative fix: it changes actual model-parallel behavior in AltCLIP/ChineseCLIP (device placement for buffered token type IDs and split-module lists), whereas the others are narrower model-specific test updates or unrelated fixes.", + "canonical_pr_reason": null, "best_issue_reason": null, - "best_pr_reason": "PR 45487 is the best standalone PR in the set because it addresses a concrete runtime bug in shared code paths and is more substantive than the test-only expectation changes.", + "best_pr_reason": "PR #45487 is the strongest canonical candidate because it makes an actual code-path fix for model-parallel behavior, whereas the others are narrower test-only expectation updates or model-specific patches.", "soft_edge_verdicts": [ { "left": "pull_request:45394", "right": "pull_request:45596", "accept": false, - "reason": "Different models and different problems: XCLIP split-module/test failures vs BLT XPU expectation updates." + "reason": "Different models and different changes: XCLIP no_split_modules fix vs BLT XPU test expectation update. Same general theme, but not the same bug or mergeable PR." }, { "left": "pull_request:45487", "right": "pull_request:45592", "accept": false, - "reason": "Both mention device/test failures, but 45487 is an AltCLIP/ChineseCLIP model-parallel code fix while 45592 is only a FastVLM test padding-side adjustment." + "reason": "AltCLIP/ChineseCLIP model-parallel code fix is not the same as FastVLM test padding-side adjustment. Different code paths and failure modes." 
}, { "left": "pull_request:45487", "right": "pull_request:45596", "accept": false, - "reason": "Different model families and change types; 45487 fixes AltCLIP/ChineseCLIP runtime behavior, 45596 only updates BLT XPU test expectations." + "reason": "Model-parallel fixes in altclip/chinese_clip are unrelated to BLT XPU expected-output updates. Test-only expectation changes do not match the same underlying bug." }, { "left": "pull_request:45487", "right": "pull_request:45615", "accept": false, - "reason": "Not the same bug: 45487 is a model code fix, while 45615 only adds XPU expectations for Qianfan OCR tests." + "reason": "AltCLIP/ChineseCLIP model code fix and Qianfan OCR XPU expectation additions are different artifacts with different behavior and targets." }, { "left": "pull_request:45592", "right": "pull_request:45596", "accept": false, - "reason": "Both are test-related, but they target different models and distinct failures; no shared concrete code-path." + "reason": "Both are test-related, but they address different models and different issues: FastVLM padding-side handling vs BLT XPU generation expectations." }, { "left": "pull_request:45592", "right": "pull_request:45615", "accept": false, - "reason": "Different test suites and outputs; these are separate device-specific expectations, not one underlying change." + "reason": "FastVLM test padding-side fix and Qianfan OCR XPU expectations are unrelated model-specific test changes." }, { "left": "pull_request:45596", "right": "pull_request:45615", "accept": false, - "reason": "Both add XPU expectations, but for unrelated models and outputs, so they are not the same underlying bug or change." + "reason": "Both add XPU expectations, but for different models and outputs; they are separate test updates, not one underlying bug or change." 
} ] }, @@ -17678,7 +17630,7 @@ { "label": "pull_request", "component_index": 23, - "component_count": 23, + "component_count": 24, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", @@ -17694,7 +17646,7 @@ "soft_pairs": [ "pull_request:45415|pull_request:45425" ], - "prepared_review_unit_hash": "8b4294fa11631aa382e969d55cd3cce9d4c25c2c9eecc76d5582adbf0c0f7cd4", + "prepared_review_unit_hash": "0c2b974bcafabe9e8847de5f9f175733aa8506fb487246b23fc7c6406e0e70f3", "component_budget": { "node_count": 2, "item_count": 2, @@ -17716,18 +17668,81 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Both PRs are typing-related, but they address different scopes: one is a broad type-checking sweep across many Transformers modules, while the other adds typing helper protocols and a narrower modeling_utils-related cleanup. They share some files, but not the same concrete change.", - "confidence": 0.88, + "summary": "Two open PRs both touch typing/type-checking, but they address different changes: one is a broad repo-wide typing pass, the other adds specific typing aliases/protocols for modeling_utils and related modules. 
They are not the same underlying fix.", + "confidence": 0.93, "canonical_issue_reason": null, - "canonical_pr_reason": "PR #45425 is the better canonical representative because it is a focused typing infrastructure change with a coherent purpose, whereas PR #45415 is a much broader multi-file type-checking sweep with mixed, unrelated edits.", + "canonical_pr_reason": "PR 45425 is the more focused, self-contained typing change; PR 45415 is a broad sweep across many files and looks like a separate umbrella cleanup.", "best_issue_reason": null, - "best_pr_reason": "PR #45425 is the best overall fit for the cluster because it has the clearest single change theme and the least heterogeneous scope.", + "best_pr_reason": "PR 45425 is the best representative of the cluster because it has a clearer concrete typing objective and narrower scope than the generalized type-checking pass in PR 45415.", "soft_edge_verdicts": [ { "left": "pull_request:45415", "right": "pull_request:45425", "accept": false, - "reason": "Reject: the overlap is only in typing-related files and shared cleanup areas, but the PRs do not appear to fix the same concrete code-path problem or be safely mergeable into one PR." + "reason": "Both are about typing, but 45415 is a broad type-checking sweep across many files while 45425 adds specific typing protocols/aliases for modeling_utils and related code paths. They do not look like the same concrete bug/change and would not plausibly be merged as one PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 24, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45639-2", + "nodes": [ + "pull_request:45639", + "pull_request:45645" + ], + "soft_pairs": [ + "pull_request:45639|pull_request:45645" + ], + "prepared_review_unit_hash": "857f1f468105e11437eef78c31ec91e8db5fd1b95cc91b6a8f3ce560954095a9", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4589, + "estimated_input_tokens": 1148, + "estimated_eval_tokens": 2552 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4589, + "estimated_input_tokens": 1148, + "estimated_eval_tokens": 2552 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs fix the same xdist collision around captured_info artifacts; PR 45645 is the broader superset and includes the core testing_utils change from 45639 plus CI log-handling updates.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45645 is the better canonical PR because it includes the core xdist-safe captured_info fix from 45639 and also updates the CI workflow to handle multiple per-worker log files, making it the more complete change.", + "best_issue_reason": null, + "best_pr_reason": "PR 45645 best represents the cluster since it subsumes the underlying debug-log collision fix and extends it into the artifact collection path; 45639 is a narrower subset of the same fix.", + "soft_edge_verdicts": [ + { + "left": 
"pull_request:45639", + "right": "pull_request:45645", + "accept": true, + "reason": "Accepted: 45645 includes the same concrete xdist-safe captured_info fix in testing_utils and tests, and expands it with workflow changes; this is plausibly one combined PR for the same bug." } ] }, diff --git a/analysis/current/manifest.json b/analysis/current/manifest.json index 24be8725030cb7ae653728a41e35cba71c71931f..8e00f167989d49c061a313d118f0eef0138b7c8c 100644 --- a/analysis/current/manifest.json +++ b/analysis/current/manifest.json @@ -1,8 +1,8 @@ { - "analysis_id": "hybrid-model-20260425t060019z", + "analysis_id": "hybrid-model-20260425t120019z", "archived_artifacts": { - "hybrid": "snapshots/20260425T060019Z/analysis-runs/hybrid-model-20260425t060019z/analysis-report-hybrid.json", - "hybrid_reviews": "snapshots/20260425T060019Z/analysis-runs/hybrid-model-20260425t060019z/analysis-report-hybrid.llm-reviews.json" + "hybrid": "snapshots/20260425T120019Z/analysis-runs/hybrid-model-20260425t120019z/analysis-report-hybrid.json", + "hybrid_reviews": "snapshots/20260425T120019Z/analysis-runs/hybrid-model-20260425t120019z/analysis-report-hybrid.llm-reviews.json" }, "artifacts": { "hybrid": "analysis/current/analysis-report-hybrid.json", @@ -10,9 +10,9 @@ }, "channel": "canonical", "model": null, - "published_at": "2026-04-25T06:10:12Z", + "published_at": "2026-04-25T12:06:36Z", "repo": "huggingface/transformers", "schema_version": 1, - "snapshot_id": "20260425T060019Z", + "snapshot_id": "20260425T120019Z", "variant": "hybrid" } diff --git a/snapshots/20260425T120019Z/analysis-runs/hybrid-model-20260425t120019z/analysis-report-hybrid.json b/snapshots/20260425T120019Z/analysis-runs/hybrid-model-20260425t120019z/analysis-report-hybrid.json new file mode 100644 index 0000000000000000000000000000000000000000..74b79bb2efa0dd7bf8324c02f96c294342ff4a9d --- /dev/null +++ b/snapshots/20260425T120019Z/analysis-runs/hybrid-model-20260425t120019z/analysis-report-hybrid.json @@ -0,0 +1,1964 
@@ +{ + "schema_version": "1.0", + "repo": "huggingface/transformers", + "snapshot_id": "20260425T120019Z", + "generated_at": "2026-04-25T12:06:34Z", + "evidence_quality": "full", + "llm_enrichment": true, + "meta_bugs": [ + { + "cluster_id": "cluster-43979-11", + "summary": "Cluster of 1 issues and 10 PRs centered on issue #43979.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 43979, + "canonical_pr_number": 44007, + "issue_numbers": [ + 43979 + ], + "pr_numbers": [ + 43996, + 44007, + 44013, + 44044, + 44066, + 44072, + 44085, + 44129, + 44154, + 44722 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 43996, + "right_pr_number": 44007, + "code_similarity": 0.179, + "size_similarity": 0.576, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.429, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44013, + "code_similarity": 0.122, + "size_similarity": 0.318, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.392, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44044, + "code_similarity": 0.245, + "size_similarity": 0.864, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.479, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44066, + "code_similarity": 0.225, + "size_similarity": 0.818, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.408, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44072, + "code_similarity": 0.14, + "size_similarity": 0.303, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.528, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44085, 
+ "code_similarity": 0.216, + "size_similarity": 0.783, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.398, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44129, + "code_similarity": 0.163, + "size_similarity": 0.643, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.229, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44154, + "code_similarity": 0.153, + "size_similarity": 0.535, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.31, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44722, + "code_similarity": 0.225, + "size_similarity": 0.848, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.368, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44013, + "code_similarity": 0.19, + "size_similarity": 0.553, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.531, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44044, + "code_similarity": 0.186, + "size_similarity": 0.667, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.354, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44066, + "code_similarity": 0.188, + "size_similarity": 0.704, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.315, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44072, + "code_similarity": 0.212, + "size_similarity": 0.526, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.708, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44085, + "code_similarity": 
0.195, + "size_similarity": 0.735, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.318, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44129, + "code_similarity": 0.103, + "size_similarity": 0.37, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.191, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44154, + "code_similarity": 0.238, + "size_similarity": 0.93, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.344, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44722, + "code_similarity": 0.178, + "size_similarity": 0.679, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.28, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44044, + "code_similarity": 0.126, + "size_similarity": 0.368, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.351, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44066, + "code_similarity": 0.127, + "size_similarity": 0.389, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.325, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44072, + "code_similarity": 0.29, + "size_similarity": 0.952, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.667, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44085, + "code_similarity": 0.131, + "size_similarity": 0.406, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.329, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44129, + "code_similarity": 0.07, + "size_similarity": 
0.205, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.192, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44154, + "code_similarity": 0.177, + "size_similarity": 0.594, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.389, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44722, + "code_similarity": 0.118, + "size_similarity": 0.375, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.287, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44066, + "code_similarity": 0.25, + "size_similarity": 0.947, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.404, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44072, + "code_similarity": 0.136, + "size_similarity": 0.351, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.442, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44085, + "code_similarity": 0.24, + "size_similarity": 0.906, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.394, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44129, + "code_similarity": 0.147, + "size_similarity": 0.555, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.243, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44154, + "code_similarity": 0.17, + "size_similarity": 0.62, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.306, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44722, + "code_similarity": 0.257, + "size_similarity": 0.982, + "file_overlap": 
0.0, + "area_overlap": 0.0, + "patch_similarity": 0.402, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44072, + "code_similarity": 0.133, + "size_similarity": 0.37, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.393, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44085, + "code_similarity": 0.763, + "size_similarity": 0.957, + "file_overlap": 0.5, + "area_overlap": 0.825, + "patch_similarity": 0.887, + "shared_filenames": [ + "src/transformers/models/gptj/modeling_gptj.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/gptj/modeling_gptj.py", + "left_ranges": [ + [ + 33, + 41 + ], + [ + 174, + 181 + ], + [ + 250, + 257 + ], + [ + 398, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 466 + ], + [ + 471, + 488 + ], + [ + 518, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 606 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ], + "right_ranges": [ + [ + 33, + 40 + ], + [ + 173, + 180 + ], + [ + 249, + 256 + ], + [ + 397, + 405 + ], + [ + 408, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 465 + ], + [ + 470, + 489 + ], + [ + 517, + 539 + ], + [ + 553, + 559 + ], + [ + 565, + 574 + ], + [ + 579, + 597 + ], + [ + 600, + 611 + ], + [ + 633, + 639 + ], + [ + 645, + 652 + ], + [ + 657, + 673 + ], + [ + 716, + 728 + ], + [ + 737, + 743 + ], + [ + 748, + 771 + ], + [ + 789, + 794 + ] + ] + } + ] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44129, + "code_similarity": 0.145, + "size_similarity": 0.526, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.263, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44154, + "code_similarity": 0.174, + "size_similarity": 0.654, + 
"file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.286, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44722, + "code_similarity": 0.907, + "size_similarity": 0.964, + "file_overlap": 1.0, + "area_overlap": 0.808, + "patch_similarity": 0.874, + "shared_filenames": [ + "src/transformers/models/codegen/modeling_codegen.py", + "src/transformers/models/gptj/modeling_gptj.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/codegen/modeling_codegen.py", + "left_ranges": [ + [ + 245, + 266 + ] + ], + "right_ranges": [ + [ + 228, + 234 + ] + ] + }, + { + "filename": "src/transformers/models/gptj/modeling_gptj.py", + "left_ranges": [ + [ + 33, + 41 + ], + [ + 174, + 181 + ], + [ + 250, + 257 + ], + [ + 398, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 466 + ], + [ + 471, + 488 + ], + [ + 518, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 606 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ], + "right_ranges": [ + [ + 33, + 42 + ], + [ + 175, + 182 + ], + [ + 251, + 258 + ], + [ + 399, + 420 + ], + [ + 426, + 435 + ], + [ + 460, + 467 + ], + [ + 472, + 489 + ], + [ + 519, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 612 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 663 + ], + [ + 666, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ] + } + ] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44085, + "code_similarity": 0.137, + "size_similarity": 0.387, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.398, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44129, + "code_similarity": 0.074, + "size_similarity": 0.195, + 
"file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.231, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44154, + "code_similarity": 0.175, + "size_similarity": 0.566, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.414, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44722, + "code_similarity": 0.124, + "size_similarity": 0.357, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.347, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44085, + "right_pr_number": 44129, + "code_similarity": 0.141, + "size_similarity": 0.503, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.272, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44085, + "right_pr_number": 44154, + "code_similarity": 0.18, + "size_similarity": 0.684, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.289, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44085, + "right_pr_number": 44722, + "code_similarity": 0.728, + "size_similarity": 0.923, + "file_overlap": 0.5, + "area_overlap": 0.791, + "patch_similarity": 0.78, + "shared_filenames": [ + "src/transformers/models/gptj/modeling_gptj.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/gptj/modeling_gptj.py", + "left_ranges": [ + [ + 33, + 40 + ], + [ + 173, + 180 + ], + [ + 249, + 256 + ], + [ + 397, + 405 + ], + [ + 408, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 465 + ], + [ + 470, + 489 + ], + [ + 517, + 539 + ], + [ + 553, + 559 + ], + [ + 565, + 574 + ], + [ + 579, + 597 + ], + [ + 600, + 611 + ], + [ + 633, + 639 + ], + [ + 645, + 652 + ], + [ + 657, + 673 + ], + [ + 716, + 728 + ], + [ + 737, + 743 + ], + [ + 748, + 771 + ], + [ + 789, + 794 + ] + ], + "right_ranges": [ + [ + 33, + 42 + ], + [ + 175, + 182 + 
], + [ + 251, + 258 + ], + [ + 399, + 420 + ], + [ + 426, + 435 + ], + [ + 460, + 467 + ], + [ + 472, + 489 + ], + [ + 519, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 612 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 663 + ], + [ + 666, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ] + } + ] + }, + { + "left_pr_number": 44129, + "right_pr_number": 44154, + "code_similarity": 0.099, + "size_similarity": 0.344, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.199, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44129, + "right_pr_number": 44722, + "code_similarity": 0.146, + "size_similarity": 0.545, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.247, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44154, + "right_pr_number": 44722, + "code_similarity": 0.164, + "size_similarity": 0.631, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.255, + "shared_filenames": [], + "shared_file_areas": [] + } + ] + }, + { + "cluster_id": "cluster-41211-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #41211.", + "status": "open", + "confidence": 0.75, + "canonical_issue_number": 41211, + "canonical_pr_number": 44339, + "issue_numbers": [ + 41211 + ], + "pr_numbers": [ + 41356, + 44339 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target" + ], + "pr_comparisons": [ + { + "left_pr_number": 41356, + "right_pr_number": 44339, + "code_similarity": 0.155, + "size_similarity": 0.094, + "file_overlap": 0.273, + "area_overlap": 0.078, + "patch_similarity": 0.183, + "shared_filenames": [ + "docs/source/en/model_doc/deimv2.md", + "src/transformers/models/deimv2/__init__.py", + "src/transformers/models/deimv2/configuration_deimv2.py", + "src/transformers/models/deimv2/modeling_deimv2.py", + 
"tests/models/deimv2/__init__.py", + "tests/models/deimv2/test_modeling_deimv2.py" + ], + "shared_file_areas": [ + { + "filename": "docs/source/en/model_doc/deimv2.md", + "left_ranges": [ + [ + 1, + 132 + ] + ], + "right_ranges": [ + [ + 1, + 65 + ] + ] + }, + { + "filename": "src/transformers/models/deimv2/__init__.py", + "left_ranges": [ + [ + 1, + 15 + ] + ], + "right_ranges": [ + [ + 1, + 29 + ] + ] + }, + { + "filename": "src/transformers/models/deimv2/configuration_deimv2.py", + "left_ranges": [ + [ + 1, + 74 + ] + ], + "right_ranges": [ + [ + 1, + 266 + ] + ] + }, + { + "filename": "tests/models/deimv2/test_modeling_deimv2.py", + "left_ranges": [ + [ + 1, + 15 + ] + ], + "right_ranges": [ + [ + 1, + 1734 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-43656-4", + "summary": "Cluster of 1 issues and 3 PRs centered on issue #43824.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 43824, + "canonical_pr_number": 43656, + "issue_numbers": [ + 43824 + ], + "pr_numbers": [ + 43656, + 43836, + 43842 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 43656, + "right_pr_number": 43836, + "code_similarity": 0.36, + "size_similarity": 0.176, + "file_overlap": 1.0, + "area_overlap": 0.051, + "patch_similarity": 0.048, + "shared_filenames": [ + "src/transformers/cli/serve.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/cli/serve.py", + "left_ranges": [ + [ + 11, + 18 + ], + [ + 30, + 36 + ], + [ + 315, + 323 + ], + [ + 665, + 671 + ], + [ + 931, + 937 + ], + [ + 1843, + 1849 + ], + [ + 1868, + 1874 + ] + ], + "right_ranges": [ + [ + 11, + 18 + ], + [ + 359, + 429 + ], + [ + 584, + 590 + ], + [ + 1892, + 1910 + ], + [ + 1917, + 1923 + ] + ] + } + ] + }, + { + "left_pr_number": 43656, + "right_pr_number": 43842, + "code_similarity": 0.405, + "size_similarity": 0.5, + "file_overlap": 1.0, + "area_overlap": 0.0, + 
"patch_similarity": 0.036, + "shared_filenames": [ + "src/transformers/cli/serve.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/cli/serve.py", + "left_ranges": [ + [ + 11, + 18 + ], + [ + 30, + 36 + ], + [ + 315, + 323 + ], + [ + 665, + 671 + ], + [ + 931, + 937 + ], + [ + 1843, + 1849 + ], + [ + 1868, + 1874 + ] + ], + "right_ranges": [ + [ + 54, + 61 + ], + [ + 587, + 593 + ] + ] + } + ] + }, + { + "left_pr_number": 43836, + "right_pr_number": 43842, + "code_similarity": 0.332, + "size_similarity": 0.088, + "file_overlap": 1.0, + "area_overlap": 0.033, + "patch_similarity": 0.017, + "shared_filenames": [ + "src/transformers/cli/serve.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/cli/serve.py", + "left_ranges": [ + [ + 11, + 18 + ], + [ + 359, + 429 + ], + [ + 584, + 590 + ], + [ + 1892, + 1910 + ], + [ + 1917, + 1923 + ] + ], + "right_ranges": [ + [ + 54, + 61 + ], + [ + 587, + 593 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-43240-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #43240.", + "status": "open", + "confidence": 0.75, + "canonical_issue_number": 43240, + "canonical_pr_number": 43251, + "issue_numbers": [ + 43240 + ], + "pr_numbers": [ + 43251, + 43254 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target" + ], + "pr_comparisons": [ + { + "left_pr_number": 43251, + "right_pr_number": 43254, + "code_similarity": 0.794, + "size_similarity": 0.64, + "file_overlap": 1.0, + "area_overlap": 0.667, + "patch_similarity": 0.882, + "shared_filenames": [ + "src/transformers/loss/loss_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/loss/loss_utils.py", + "left_ranges": [ + [ + 30, + 50 + ] + ], + "right_ranges": [ + [ + 30, + 43 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-45081-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #45081.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 45081, 
+ "canonical_pr_number": 45317, + "issue_numbers": [ + 45081 + ], + "pr_numbers": [ + 45086, + 45317 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 45086, + "right_pr_number": 45317, + "code_similarity": 0.589, + "size_similarity": 0.34, + "file_overlap": 0.5, + "area_overlap": 1.0, + "patch_similarity": 0.136, + "shared_filenames": [ + "src/transformers/tokenization_utils_tokenizers.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/tokenization_utils_tokenizers.py", + "left_ranges": [ + [ + 1360, + 1370 + ], + [ + 1374, + 1380 + ] + ], + "right_ranges": [ + [ + 1360, + 1370 + ], + [ + 1374, + 1380 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-43698-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #43698.", + "status": "open", + "confidence": 0.75, + "canonical_issue_number": 43698, + "canonical_pr_number": 43779, + "issue_numbers": [ + 43698 + ], + "pr_numbers": [ + 43779, + 43816 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target" + ], + "pr_comparisons": [ + { + "left_pr_number": 43779, + "right_pr_number": 43816, + "code_similarity": 0.418, + "size_similarity": 0.538, + "file_overlap": 1.0, + "area_overlap": 0.02, + "patch_similarity": 0.02, + "shared_filenames": [ + "src/transformers/integrations/integration_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/integrations/integration_utils.py", + "left_ranges": [ + [ + 2227, + 2241 + ], + [ + 2303, + 2309 + ] + ], + "right_ranges": [ + [ + 2278, + 2291 + ], + [ + 2309, + 2322 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-45561-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #45561.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 45561, + "canonical_pr_number": 45639, + "issue_numbers": [ + 45561 + ], + "pr_numbers": [ + 45639, + 45645 + ], + "evidence_types": [ + 
"closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 45639, + "right_pr_number": 45645, + "code_similarity": 0.532, + "size_similarity": 0.619, + "file_overlap": 0.5, + "area_overlap": 0.543, + "patch_similarity": 0.451, + "shared_filenames": [ + "src/transformers/testing_utils.py", + "tests/utils/test_testing_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/testing_utils.py", + "left_ranges": [ + [ + 3529, + 3536 + ], + [ + 3753, + 3787 + ] + ], + "right_ranges": [ + [ + 3525, + 3558 + ], + [ + 3782, + 3788 + ] + ] + }, + { + "filename": "tests/utils/test_testing_utils.py", + "left_ranges": [ + [ + 1, + 86 + ] + ], + "right_ranges": [ + [ + 1, + 114 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-44018-2", + "summary": "Cluster of 2 related pull requests linked by soft_similarity.", + "status": "open", + "confidence": 0.5, + "canonical_issue_number": null, + "canonical_pr_number": 44068, + "issue_numbers": [], + "pr_numbers": [ + 44018, + 44068 + ], + "evidence_types": [ + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 44018, + "right_pr_number": 44068, + "code_similarity": 0.766, + "size_similarity": 0.939, + "file_overlap": 1.0, + "area_overlap": 0.425, + "patch_similarity": 0.866, + "shared_filenames": [ + "src/transformers/models/gpt_neo/modeling_gpt_neo.py", + "tests/models/gpt_neo/test_modeling_gpt_neo.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/gpt_neo/modeling_gpt_neo.py", + "left_ranges": [ + [ + 26, + 46 + ], + [ + 138, + 143 + ], + [ + 182, + 187 + ], + [ + 283, + 295 + ], + [ + 327, + 341 + ], + [ + 348, + 354 + ], + [ + 360, + 369 + ], + [ + 399, + 406 + ], + [ + 411, + 419 + ], + [ + 428, + 433 + ], + [ + 472, + 492 + ], + [ + 507, + 513 + ], + [ + 519, + 528 + ], + [ + 541, + 559 + ], + [ + 562, + 567 + ], + [ + 595, + 601 + ], + [ + 607, + 614 + ], + [ + 627, + 643 + ], + [ + 685, + 690 + ], + [ 
+ 708, + 714 + ], + [ + 720, + 727 + ], + [ + 740, + 757 + ], + [ + 761, + 766 + ], + [ + 780, + 786 + ], + [ + 791, + 798 + ], + [ + 807, + 822 + ], + [ + 840, + 845 + ] + ], + "right_ranges": [ + [ + 26, + 31 + ], + [ + 34, + 44 + ], + [ + 136, + 141 + ], + [ + 180, + 185 + ], + [ + 281, + 293 + ], + [ + 325, + 339 + ], + [ + 346, + 352 + ], + [ + 358, + 367 + ], + [ + 397, + 404 + ], + [ + 409, + 417 + ], + [ + 426, + 434 + ], + [ + 463, + 483 + ], + [ + 498, + 504 + ], + [ + 510, + 519 + ], + [ + 532, + 550 + ], + [ + 553, + 564 + ], + [ + 586, + 592 + ], + [ + 598, + 605 + ], + [ + 618, + 634 + ], + [ + 676, + 688 + ], + [ + 699, + 705 + ], + [ + 711, + 718 + ], + [ + 731, + 748 + ], + [ + 752, + 762 + ], + [ + 771, + 777 + ], + [ + 782, + 789 + ], + [ + 798, + 813 + ], + [ + 831, + 836 + ] + ] + }, + { + "filename": "tests/models/gpt_neo/test_modeling_gpt_neo.py", + "left_ranges": [ + [ + 458, + 464 + ] + ], + "right_ranges": [ + [ + 458, + 464 + ] + ] + } + ] + } + ] + } + ], + "duplicate_issues": [], + "duplicate_prs": [ + { + "cluster_id": "cluster-41211-3", + "canonical_pr_number": 44339, + "duplicate_pr_numbers": [ + 41356 + ], + "target_issue_number": 41211, + "reason": "PRs in cluster-41211-3 are treated as duplicates because they converge on issue #41211 with closing_reference, shared_issue_target evidence." + }, + { + "cluster_id": "cluster-43240-3", + "canonical_pr_number": 43251, + "duplicate_pr_numbers": [ + 43254 + ], + "target_issue_number": 43240, + "reason": "PRs in cluster-43240-3 are treated as duplicates because they converge on issue #43240 with closing_reference, shared_issue_target evidence." + }, + { + "cluster_id": "cluster-43656-4", + "canonical_pr_number": 43656, + "duplicate_pr_numbers": [ + 43836, + 43842 + ], + "target_issue_number": 43824, + "reason": "PRs in cluster-43656-4 are treated as duplicates because they converge on issue #43824 with closing_reference, shared_issue_target, soft_similarity evidence." 
+ }, + { + "cluster_id": "cluster-43698-3", + "canonical_pr_number": 43779, + "duplicate_pr_numbers": [ + 43816 + ], + "target_issue_number": 43698, + "reason": "PRs in cluster-43698-3 are treated as duplicates because they converge on issue #43698 with closing_reference, shared_issue_target evidence." + }, + { + "cluster_id": "cluster-43979-11", + "canonical_pr_number": 44007, + "duplicate_pr_numbers": [ + 43996, + 44013, + 44044, + 44066, + 44072, + 44085, + 44129, + 44154, + 44722 + ], + "target_issue_number": 43979, + "reason": "PRs in cluster-43979-11 are treated as duplicates because they converge on issue #43979 with closing_reference, shared_issue_target, soft_similarity evidence." + }, + { + "cluster_id": "cluster-44018-2", + "canonical_pr_number": 44068, + "duplicate_pr_numbers": [ + 44018 + ], + "target_issue_number": null, + "reason": "PRs in cluster-44018-2 are treated as duplicates because they share soft_similarity evidence." + }, + { + "cluster_id": "cluster-45081-3", + "canonical_pr_number": 45317, + "duplicate_pr_numbers": [ + 45086 + ], + "target_issue_number": 45081, + "reason": "PRs in cluster-45081-3 are treated as duplicates because they converge on issue #45081 with closing_reference, shared_issue_target, soft_similarity evidence." + }, + { + "cluster_id": "cluster-45561-3", + "canonical_pr_number": 45639, + "duplicate_pr_numbers": [ + 45645 + ], + "target_issue_number": 45561, + "reason": "PRs in cluster-45561-3 are treated as duplicates because they converge on issue #45561 with closing_reference, shared_issue_target, soft_similarity evidence." 
+ } + ], + "best_issue": { + "cluster_id": "cluster-41211-3", + "issue_number": 41211, + "reason": "Issue #41211 is the strongest global issue candidate because it is open, belongs to a cluster with 3 artifacts, and carries 6 discussion comments plus 6 inbound references.", + "score": 148.0 + }, + "best_pr": { + "cluster_id": "cluster-43979-11", + "pr_number": 44007, + "reason": "PR #44007 is the strongest global PR candidate because it is open, belongs to a cluster with 11 artifacts, links to 1 issues, and carries 2 review/discussion events.", + "score": 238.886 + } +} diff --git a/snapshots/20260425T120019Z/analysis-runs/hybrid-model-20260425t120019z/analysis-report-hybrid.llm-reviews.json b/snapshots/20260425T120019Z/analysis-runs/hybrid-model-20260425t120019z/analysis-report-hybrid.llm-reviews.json new file mode 100644 index 0000000000000000000000000000000000000000..70a202f8787d9d6c96a588ac653317f1aaf799fd --- /dev/null +++ b/snapshots/20260425T120019Z/analysis-runs/hybrid-model-20260425t120019z/analysis-report-hybrid.llm-reviews.json @@ -0,0 +1,17757 @@ +{ + "schema_version": "1.0", + "repo": "huggingface/transformers", + "snapshot_id": "20260425T120019Z", + "generated_at": "2026-04-25T12:06:34Z", + "model": "gpt-5.4-mini?service_tier=flex", + "reviews": [ + { + "label": "issue", + "component_index": 1, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-13244-3", + "nodes": [ + "issue:13244", + "issue:15980", + "issue:16618" + ], + "soft_pairs": [ + "issue:13244|issue:16618", + "issue:15980|issue:16618" + ], + "prepared_review_unit_hash": "5261026724ccac7c81c613ac10f734b77b78697cd1065111eb9eb96e35682f9a", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 2479, + "estimated_input_tokens": 620, + "estimated_eval_tokens": 1496 + 
}, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 2479, + "estimated_input_tokens": 620, + "estimated_eval_tokens": 1496 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These issues are not a cohesive duplicate set: one is about Tapas tokenization, one about a private-model auth/download error, and one about a generic model-loading failure. The only somewhat related pair is the two loading-related issues, but they still appear to describe different failure modes.", + "confidence": 0.35, + "canonical_issue_reason": "Issue #16618 is the best representative only because it is the broadest and most central to model-loading behavior, with the strongest discussion signal among the three.", + "canonical_pr_reason": null, + "best_issue_reason": "#16618 is the clearest cluster anchor: it is the most general model-loading report and is more representative than the highly specific tokenization issue or the private-download auth error.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:13244", + "right": "issue:16618", + "accept": false, + "reason": "Tapas tokenization mismatch is a different problem from a failure to load bert-base-uncased; they do not look like the same underlying bug." + }, + { + "left": "issue:15980", + "right": "issue:16618", + "accept": false, + "reason": "Both involve model loading/download, but one is specifically about unauthenticated access to private models and the other is a generic 'can't load the model' report; too different to treat as the same bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:24643", + "issue:30277", + "issue:34634", + "issue:34928", + "issue:35545", + "issue:36010", + "issue:36331", + "issue:39290", + "issue:41093", + "issue:41720", + "issue:41762", + "issue:42915", + "issue:43039", + "issue:43296", + "issue:43366", + "issue:43404", + "issue:43425", + "issue:43531", + "issue:43541", + "issue:43596", + "issue:43638", + "issue:43716", + "issue:43792", + "issue:43827", + "issue:43828", + "issue:43854", + "issue:43866", + "issue:43901", + "issue:43975", + "issue:44292", + "issue:44322", + "issue:44457", + "issue:44509", + "issue:44512", + "issue:44560", + "issue:44661", + "issue:44805", + "issue:44841", + "issue:44863", + "issue:44918", + "issue:45070", + "issue:45081", + "issue:45084", + "issue:45161", + "issue:45237", + "issue:45362", + "issue:45464", + "issue:45507" + ], + "soft_pairs": [ + "issue:44322|issue:45464", + "issue:43716|issue:45237", + "issue:43828|issue:45237", + "issue:45237|issue:45507", + "issue:44292|issue:45237", + "issue:30277|issue:35545", + "issue:35545|issue:42915", + "issue:35545|issue:41720", + "issue:44509|issue:44512", + "issue:35545|issue:36010", + "issue:43638|issue:44805", + "issue:43596|issue:44805", + "issue:44805|issue:45161", + "issue:44805|issue:44918", + "issue:44661|issue:44805", + "issue:30277|issue:43638", + "issue:34634|issue:35545", + "issue:43596|issue:43638", + "issue:43975|issue:44457", + "issue:43039|issue:45161", + "issue:44292|issue:44457", + "issue:34928|issue:44805", + "issue:24643|issue:30277", + 
"issue:43975|issue:45237", + "issue:43638|issue:45161", + "issue:34928|issue:36331", + "issue:43596|issue:45161", + "issue:43716|issue:45507", + "issue:39290|issue:45081", + "issue:43828|issue:45161", + "issue:43425|issue:44292", + "issue:44918|issue:45161", + "issue:34928|issue:41720", + "issue:43541|issue:43716", + "issue:43828|issue:44292", + "issue:43716|issue:44292", + "issue:44292|issue:44918", + "issue:43039|issue:44918", + "issue:41762|issue:43638", + "issue:43827|issue:44512", + "issue:43716|issue:44560", + "issue:43716|issue:44918", + "issue:43039|issue:43828", + "issue:30277|issue:36331", + "issue:43039|issue:44805", + "issue:34928|issue:36010", + "issue:41762|issue:43596", + "issue:43901|issue:44512", + "issue:39290|issue:41720", + "issue:43039|issue:44560", + "issue:43866|issue:44863", + "issue:43854|issue:43866", + "issue:43425|issue:45070", + "issue:41093|issue:43638", + "issue:39290|issue:43531", + "issue:43404|issue:43866", + "issue:45362|issue:45464", + "issue:43792|issue:45070", + "issue:43541|issue:43828", + "issue:36010|issue:39290", + "issue:44841|issue:45084", + "issue:41093|issue:41762", + "issue:43296|issue:43366" + ], + "prepared_review_unit_hash": "be3ae08a2bf4057898a5d5072f602548ad6ce94141ebb4d78562a0c18088b8bb", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 63, + "serialized_chars": 46481, + "estimated_input_tokens": 11621, + "estimated_eval_tokens": 23498 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous: many unrelated model/config/docs regressions were grouped by loose textual similarity. 
The only clear duplicate pair is the DeepSpeed ZeRO-3 BERT index-out-of-bounds report (43596/43638).", + "confidence": 0.91, + "canonical_issue_reason": "issue:43596 is the best canonical issue because it is the clearest representative of the only true duplicate pair in the set, with issue:43638 describing the same DeepSpeed ZeRO-3 BERT index-0-out-of-bounds failure.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43596 is the most suitable global issue representative: it is concrete, reproducible, and its duplicate (43638) confirms the same underlying bug. The rest of the cluster is too mixed to serve as a single canonical topic.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44322", + "right": "issue:45464", + "accept": false, + "reason": "Both are Qwen3.5-related, but one is a missing config field and the other is a streaming chat/completions failure; different bugs." + }, + { + "left": "issue:43716", + "right": "issue:45237", + "accept": false, + "reason": "Different models and different failure contexts; both mention model/GPU issues but not the same underlying bug." + }, + { + "left": "issue:43828", + "right": "issue:45237", + "accept": false, + "reason": "Both are runtime dtype/GPU-adjacent errors, but on different model families and code paths." + }, + { + "left": "issue:45237", + "right": "issue:45507", + "accept": false, + "reason": "Different problems: AMD GPU execution failure vs invalid method call in GraniteMoEHybrid." + }, + { + "left": "issue:44292", + "right": "issue:45237", + "accept": false, + "reason": "Qwen-3-8B-NVFP4 error and GPT-OSS AMD GPU failure are unrelated." + }, + { + "left": "issue:30277", + "right": "issue:35545", + "accept": false, + "reason": "DeepSpeed collective mismatch vs ONNX export error; no shared concrete bug." 
+ }, + { + "left": "issue:35545", + "right": "issue:42915", + "accept": false, + "reason": "Different model/export failure modes: ONNX export vs FineGrainedFP8Config load failure." + }, + { + "left": "issue:35545", + "right": "issue:41720", + "accept": false, + "reason": "ONNX export error and auto device mapping cudaErrorAssert are unrelated." + }, + { + "left": "issue:44509", + "right": "issue:44512", + "accept": false, + "reason": "Both are docs issues for v5 removals, but they reference different removed features and not the same concrete change." + }, + { + "left": "issue:35545", + "right": "issue:36010", + "accept": false, + "reason": "ONNX export and GenerationMixin import error are separate regressions." + }, + { + "left": "issue:43638", + "right": "issue:44805", + "accept": false, + "reason": "Both are index/shape errors, but one is DeepSpeed ZeRO-3 BERT loading and the other is a mask-shape mismatch; too different to merge." + }, + { + "left": "issue:43596", + "right": "issue:44805", + "accept": false, + "reason": "Different error signatures and code paths; only the DeepSpeed BERT pair is a true duplicate." + }, + { + "left": "issue:44805", + "right": "issue:45161", + "accept": false, + "reason": "Mask/tensor shape mismatch vs GPT-OSS tensor-parallel failure are not the same bug." + }, + { + "left": "issue:44805", + "right": "issue:44918", + "accept": false, + "reason": "Both involve tensor-shape-like failures, but the Qwen3.5 SFT issue is distinct from the mask index error." + }, + { + "left": "issue:44661", + "right": "issue:44805", + "accept": false, + "reason": "Tokenizer mapping/name-generation bug vs mask shape mismatch are unrelated." + }, + { + "left": "issue:30277", + "right": "issue:43638", + "accept": false, + "reason": "Different DeepSpeed-related failures with different models and symptoms." 
+ }, + { + "left": "issue:34634", + "right": "issue:35545", + "accept": false, + "reason": "BarkProcessor voice preset issue is unrelated to ONNX export." + }, + { + "left": "issue:43596", + "right": "issue:43638", + "accept": true, + "reason": "Same concrete DeepSpeed ZeRO-3 bug: index 0 out of bounds during BERT loading/non-pretrained BERT initialization." + }, + { + "left": "issue:43975", + "right": "issue:44457", + "accept": false, + "reason": "Both mention detokenization / LoRA, but they are distinct language/model-weight issues." + }, + { + "left": "issue:43039", + "right": "issue:45161", + "accept": false, + "reason": "Liger Kernel cross_entropy routing and GPT-OSS tensor-parallel support are unrelated." + }, + { + "left": "issue:44292", + "right": "issue:44457", + "accept": false, + "reason": "Qwen-3-8B-NVFP4 runtime failure and LoRA merge/save mismatch are different bugs." + }, + { + "left": "issue:34928", + "right": "issue:44805", + "accept": false, + "reason": "Activation-checkpointing/FSDP recompute mismatch is not the same as a mask-shape indexing error." + }, + { + "left": "issue:24643", + "right": "issue:30277", + "accept": false, + "reason": "Both involve DeepSpeed, but the failures are different: weight dimensionality vs collective mismatch." + }, + { + "left": "issue:43975", + "right": "issue:45237", + "accept": false, + "reason": "Unrelated problems: detokenization bug vs AMD GPU model loading failure." + }, + { + "left": "issue:43638", + "right": "issue:45161", + "accept": false, + "reason": "DeepSpeed BERT index error and GPT-OSS tensor-parallel issue are unrelated." + }, + { + "left": "issue:34928", + "right": "issue:36331", + "accept": false, + "reason": "FSDP activation checkpointing tensor mismatch vs CustomTrainer API signature change are unrelated." + }, + { + "left": "issue:43596", + "right": "issue:45161", + "accept": false, + "reason": "Different models and different failure modes." 
+ }, + { + "left": "issue:43716", + "right": "issue:45507", + "accept": false, + "reason": "Both are model-related, but one is dtype mismatch and the other is an invalid method call." + }, + { + "left": "issue:39290", + "right": "issue:45081", + "accept": false, + "reason": "Qwen3/Gemma sliding-window config problems and tokenizer regex patch crash are different bugs." + }, + { + "left": "issue:43828", + "right": "issue:45161", + "accept": false, + "reason": "Autocast dtype mismatch in Phi-tiny-MoE is unrelated to GPT-OSS tensor-parallel failure." + }, + { + "left": "issue:44918", + "right": "issue:45161", + "accept": false, + "reason": "TRL SFT embedding unpacking failure and GPT-OSS TP issue are different code paths." + }, + { + "left": "issue:34928", + "right": "issue:41720", + "accept": false, + "reason": "FSDP/activation-checkpointing recompute mismatch is unrelated to device-mapping cudaErrorAssert." + }, + { + "left": "issue:43827", + "right": "issue:44512", + "accept": false, + "reason": "Both are docs regressions from v5 removals, but they concern different removed items and should stay separate." + }, + { + "left": "issue:43716", + "right": "issue:44560", + "accept": false, + "reason": "Different models and different symptoms: image-preprocessor dtype mismatch vs video StopIteration." + }, + { + "left": "issue:43716", + "right": "issue:44918", + "accept": false, + "reason": "Dtype mismatch and embedding unpacking failure are not the same underlying bug." + }, + { + "left": "issue:43039", + "right": "issue:43828", + "accept": false, + "reason": "Liger Kernel cross-entropy issue and autocast dtype mismatch are separate regressions." + }, + { + "left": "issue:30277", + "right": "issue:36331", + "accept": false, + "reason": "DeepSpeed collective mismatch and trainer API change are unrelated." + }, + { + "left": "issue:43039", + "right": "issue:44805", + "accept": false, + "reason": "Liger Kernel cross_entropy call and mask-shape indexing error are unrelated." 
+ }, + { + "left": "issue:34928", + "right": "issue:36010", + "accept": false, + "reason": "FSDP checkpointing mismatch vs GenerationMixin import failure are unrelated." + }, + { + "left": "issue:41762", + "right": "issue:43596", + "accept": false, + "reason": "Gemma3 DeepSpeed ZeRO-3 load failure and BERT ZeRO-3 index error are different model-specific bugs." + }, + { + "left": "issue:43901", + "right": "issue:44512", + "accept": false, + "reason": "Docs about return_all_scores and docs about the removed run command are different documentation issues." + }, + { + "left": "issue:39290", + "right": "issue:41720", + "accept": false, + "reason": "Different model families and different runtime failures." + }, + { + "left": "issue:43866", + "right": "issue:44863", + "accept": false, + "reason": "Corrupted checkpoint vs model implementation inability to load checkpoints are not the same issue." + }, + { + "left": "issue:43854", + "right": "issue:43866", + "accept": false, + "reason": "Unit-test load failure for GLM-4.7-Flash is unrelated to Ovis2 checkpoint corruption." + }, + { + "left": "issue:43425", + "right": "issue:44292", + "accept": false, + "reason": "Torch version compatibility and Qwen NVFP4 runtime failure are unrelated." + }, + { + "left": "issue:41093", + "right": "issue:43638", + "accept": false, + "reason": "Both have index/shape errors, but the masked-tensor mismatch and the DeepSpeed BERT zero-size index error are distinct." + }, + { + "left": "issue:39290", + "right": "issue:43531", + "accept": false, + "reason": "Different sliding-window-related config bugs on different model families; not the same concrete defect." + }, + { + "left": "issue:43404", + "right": "issue:43866", + "accept": false, + "reason": "Untied lm_head weights and checkpoint corruption are unrelated." 
+ }, + { + "left": "issue:45362", + "right": "issue:45464", + "accept": false, + "reason": "Both are Qwen3.5 issues, but one is chat-template crash and the other is streaming chat/completions failure." + }, + { + "left": "issue:43792", + "right": "issue:45070", + "accept": false, + "reason": "Whisper model runtime failure and pydantic PretrainedConfig field breakage are unrelated." + }, + { + "left": "issue:43541", + "right": "issue:43828", + "accept": false, + "reason": "Grouped_mm tracing failure in Mixtral and autocast dtype mismatch in Phi-tiny-MoE are different code paths." + }, + { + "left": "issue:36010", + "right": "issue:39290", + "accept": false, + "reason": "GenerationMixin import regression and Gemma3 sliding-window attribute error are unrelated." + }, + { + "left": "issue:44841", + "right": "issue:45084", + "accept": false, + "reason": "Processor failure for Voxtral and template-node compilation error are different bugs." + }, + { + "left": "issue:41093", + "right": "issue:41762", + "accept": false, + "reason": "Similar IndexError wording, but different models and likely different tensor shapes/causes." + }, + { + "left": "issue:43296", + "right": "issue:43366", + "accept": false, + "reason": "PaddleOCR-VL load failure in vLLM and GGUF gpt-oss support are distinct issues." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is well grounded in the packet and stays conservative. The only accepted soft edge (43596/43638) is clearly supported by matching DeepSpeed ZeRO-3 index-out-of-bounds reports on BERT, and the other pairings are appropriately rejected without overclaiming. The heterogeneity assessment is also consistent with the titles and available evidence." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 2, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:36331", + "issue:39186", + "issue:39290", + "issue:39692", + "issue:40990", + "issue:41093", + "issue:41553", + "issue:41762", + "issue:42915", + "issue:43054", + "issue:43366", + "issue:43404", + "issue:43531", + "issue:43541", + "issue:43572", + "issue:43596", + "issue:43644", + "issue:43645", + "issue:43742", + "issue:43828", + "issue:43866", + "issue:43873", + "issue:43883", + "issue:43950", + "issue:43975", + "issue:43994", + "issue:44291", + "issue:44292", + "issue:44360", + "issue:44387", + "issue:44403", + "issue:44492", + "issue:44512", + "issue:44530", + "issue:44534", + "issue:44560", + "issue:44589", + "issue:44661", + "issue:44863", + "issue:44918", + "issue:44928", + "issue:44960", + "issue:45005", + "issue:45070", + "issue:45161", + "issue:45399", + "issue:45464", + "issue:45507" + ], + "soft_pairs": [ + "issue:43572|issue:45070", + "issue:44387|issue:45005", + "issue:43541|issue:45161", + "issue:43828|issue:45464", + "issue:44291|issue:45070", + "issue:43366|issue:45464", + "issue:43531|issue:45070", + "issue:44661|issue:45464", + "issue:39290|issue:43742", + "issue:43054|issue:43994", + "issue:43541|issue:44589", + "issue:41093|issue:43596", + "issue:42915|issue:43541", + "issue:40990|issue:44960", + "issue:39186|issue:44918", + "issue:43404|issue:45005", + "issue:43645|issue:45399", + "issue:44530|issue:45005", + "issue:44863|issue:45005", + "issue:43873|issue:45005", + "issue:44360|issue:44512", + "issue:42915|issue:45399", + "issue:44918|issue:45507", + "issue:44492|issue:44512", + 
"issue:43975|issue:44292", + "issue:43950|issue:44534", + "issue:43644|issue:43950", + "issue:44292|issue:45507", + "issue:39692|issue:43054", + "issue:43828|issue:44928", + "issue:41553|issue:43883", + "issue:43866|issue:44403", + "issue:44560|issue:45507", + "issue:36331|issue:41762" + ], + "prepared_review_unit_hash": "d9988ee5495cddd227294a48716725dc65d6bb305c7964d95ea3544d053aa32b", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 34, + "serialized_chars": 37571, + "estimated_input_tokens": 9393, + "estimated_eval_tokens": 19042 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of unrelated issue reports. The only clear duplicate group is the Transformers 5.x non-persistent-buffer corruption regression: #43644, #43950, and #44534 are the same underlying bug. The rest are only loosely similar by model family, API area, or v5 regression theme and should stay separate.", + "confidence": 0.91, + "canonical_issue_reason": "#43950 is the clearest representative of the shared non-persistent-buffer regression: it names the failure mode directly and matches the duplicate chain with #43644 and #44534.", + "canonical_pr_reason": null, + "best_issue_reason": "#43950 is the best overall issue to anchor this cluster because it most precisely describes the shared bug and is the most explicit of the duplicate reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43572", + "right": "issue:45070", + "accept": false, + "reason": "Both are v5/config regressions, but one is StableLm pad_token_idx and the other is a generic PretrainedConfig/pydantic field break; different bugs." 
+ }, + { + "left": "issue:44387", + "right": "issue:45005", + "accept": false, + "reason": "Different failures: int4 quantization/OOM versus tied-weights behavior on translation models." + }, + { + "left": "issue:43541", + "right": "issue:45161", + "accept": false, + "reason": "Both mention MoE/GPT-OSS-style models, but one is a grouped_mm tracing error and the other is a tensor-parallel setup issue." + }, + { + "left": "issue:43828", + "right": "issue:45464", + "accept": false, + "reason": "Unrelated symptoms and code paths: autocast dtype mismatch versus streaming chat/completions failure." + }, + { + "left": "issue:44291", + "right": "issue:45070", + "accept": false, + "reason": "Both touch v5 initialization/config internals, but one is init_empty_weights/_is_hf_initialized and the other is a pydantic PretrainedConfig field regression." + }, + { + "left": "issue:43366", + "right": "issue:45464", + "accept": false, + "reason": "Different architectures and failures; one is GGUF gpt-oss support, the other is Qwen3.5 streaming inference." + }, + { + "left": "issue:43531", + "right": "issue:45070", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior is a different bug from the PretrainedConfig/pydantic regression." + }, + { + "left": "issue:44661", + "right": "issue:45464", + "accept": false, + "reason": "Tokenizer mapping/add-new-model-like issue is unrelated to the Qwen3.5 streaming API failure." + }, + { + "left": "issue:39290", + "right": "issue:43742", + "accept": false, + "reason": "Different models and stack traces; Gemma3 vLLM attribute error is not the same as MobileLLM key error." + }, + { + "left": "issue:43054", + "right": "issue:43994", + "accept": false, + "reason": "Both are SigLIP2-related, but one reports degraded embeddings and the other reports nonsensical AutoModel/pipeline outputs; too little evidence of the same root cause." 
+ }, + { + "left": "issue:43541", + "right": "issue:44589", + "accept": false, + "reason": "Different MoE/quantization/runtime failures; no shared concrete code path." + }, + { + "left": "issue:41093", + "right": "issue:43596", + "accept": false, + "reason": "Both are IndexErrors, but they occur in different model-loading paths and don't indicate the same bug." + }, + { + "left": "issue:42915", + "right": "issue:43541", + "accept": false, + "reason": "Different MoE issues: FineGrainedFP8Config versus torch dynamo grouped_mm tracing failure." + }, + { + "left": "issue:40990", + "right": "issue:44960", + "accept": false, + "reason": "Unrelated model/perplexity report versus a generic GLM5 issue title." + }, + { + "left": "issue:39186", + "right": "issue:44918", + "accept": false, + "reason": "FSDP weight-shape error is a different training-time failure from TRL unpacking embeddings." + }, + { + "left": "issue:43404", + "right": "issue:45005", + "accept": false, + "reason": "Both involve tied weights, but the affected models and failure modes are different; not clearly the same bug." + }, + { + "left": "issue:43645", + "right": "issue:45399", + "accept": false, + "reason": "Notebook custom-model initialization and flash-attn fallback blocking are unrelated." + }, + { + "left": "issue:44530", + "right": "issue:45005", + "accept": false, + "reason": "PagedAttentionCache linear_attention crash is unrelated to tied-weight regressions." + }, + { + "left": "issue:44863", + "right": "issue:45005", + "accept": false, + "reason": "NemotronH checkpoint loading and tied-weights translation regression are different issues." + }, + { + "left": "issue:43873", + "right": "issue:45005", + "accept": false, + "reason": "Quantization/offloading behavior is not the same bug as tied weights." + }, + { + "left": "issue:44360", + "right": "issue:44512", + "accept": false, + "reason": "A DSA indexer bug and a docs typo about the removed run command are unrelated." 
+ }, + { + "left": "issue:42915", + "right": "issue:45399", + "accept": false, + "reason": "FineGrainedFP8Config on Qwen3-MoE is a different problem from flash-attn2 fallback logic." + }, + { + "left": "issue:44918", + "right": "issue:45507", + "accept": false, + "reason": "Different model families and failures: Qwen3.5 TRL unpacking versus GraniteMoEHybrid invalid method call." + }, + { + "left": "issue:44492", + "right": "issue:44512", + "accept": false, + "reason": "Both are documentation-ish, but they target different content and do not represent the same change." + }, + { + "left": "issue:43975", + "right": "issue:44292", + "accept": false, + "reason": "Different models and different symptoms; detokenization bug versus NVFP4 runtime error." + }, + { + "left": "issue:43950", + "right": "issue:44534", + "accept": true, + "reason": "Same Transformers 5.x non-persistent-buffer corruption regression, just described with different wording." + }, + { + "left": "issue:43644", + "right": "issue:43950", + "accept": true, + "reason": "Same underlying bug: Transformers 5.x corrupts non-persistent buffers during load/save behavior." + }, + { + "left": "issue:44292", + "right": "issue:45507", + "accept": false, + "reason": "Qwen-3-8B-NVFP4 runtime error and GraniteMoEHybrid invalid-method call are unrelated." + }, + { + "left": "issue:39692", + "right": "issue:43054", + "accept": false, + "reason": "Both mention SigLIP2, but one is a broken documentation example and the other is a model quality complaint." + }, + { + "left": "issue:43828", + "right": "issue:44928", + "accept": false, + "reason": "Different models and different failures; dtype mismatch under autocast versus Qwen3.5 RLHF gradient explosion." + }, + { + "left": "issue:41553", + "right": "issue:43883", + "accept": false, + "reason": "Bad AutoTokenizer error messaging for Voxtral is unrelated to Molmo tied-weights attribute failure." 
+ }, + { + "left": "issue:43866", + "right": "issue:44403", + "accept": false, + "reason": "Checkpoint corruption and noisy loading output are not the same bug." + }, + { + "left": "issue:44560", + "right": "issue:45507", + "accept": false, + "reason": "Qwen3-vl video StopIteration and GraniteMoEHybrid invalid method call are different code paths." + }, + { + "left": "issue:36331", + "right": "issue:41762", + "accept": false, + "reason": "CustomTrainer unexpected-argument TypeError and Gemma3 ZeRO-3 IndexError are unrelated." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The cluster summary is conservative and grounded in the packet. The only clearly supported duplicate set is the Transformers 5.x non-persistent-buffer regression around #43644, #43950, and #44534, and the other pairwise rejections stay on the safe side of the evidence. No overclaiming stands out." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 3, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:38175", + "issue:41553", + "issue:41762", + "issue:42491", + "issue:42915", + "issue:42947", + "issue:43054", + "issue:43257", + "issue:43278", + "issue:43404", + "issue:43493", + "issue:43643", + "issue:43742", + "issue:43782", + "issue:43792", + "issue:43824", + "issue:43827", + "issue:43854", + "issue:43856", + "issue:43866", + "issue:43872", + "issue:43881", + "issue:43883", + "issue:43940", + "issue:43950", + "issue:43975", + "issue:44220", + "issue:44291", + "issue:44368", + "issue:44387", + "issue:44403", + "issue:44451", + "issue:44488", + "issue:44509", + "issue:44589", + "issue:44661", + "issue:44863", + "issue:44928", + 
"issue:44960", + "issue:44991", + "issue:45020", + "issue:45081", + "issue:45305", + "issue:45313", + "issue:45356", + "issue:45399", + "issue:45520", + "issue:45588" + ], + "soft_pairs": [ + "issue:41553|issue:43881", + "issue:43872|issue:44589", + "issue:38175|issue:43054", + "issue:42947|issue:44387", + "issue:43054|issue:43493", + "issue:43881|issue:43883", + "issue:44403|issue:44451", + "issue:43975|issue:45520", + "issue:43643|issue:45020", + "issue:42947|issue:43856", + "issue:43782|issue:44960", + "issue:44403|issue:44488", + "issue:42491|issue:44368", + "issue:41762|issue:42915", + "issue:43975|issue:44451", + "issue:45020|issue:45356", + "issue:44661|issue:45020", + "issue:43054|issue:43278", + "issue:43824|issue:43872", + "issue:44991|issue:45399", + "issue:43827|issue:44509", + "issue:43792|issue:44220", + "issue:45081|issue:45399", + "issue:43404|issue:43950", + "issue:43257|issue:43866", + "issue:42915|issue:45020", + "issue:43742|issue:45399", + "issue:41553|issue:44991", + "issue:43742|issue:45020", + "issue:43940|issue:45313", + "issue:43872|issue:44291", + "issue:43854|issue:44863", + "issue:44928|issue:45588", + "issue:41553|issue:44488", + "issue:42915|issue:43872", + "issue:43257|issue:45305" + ], + "prepared_review_unit_hash": "429445cb02b3bf45d9726756c2d41c585d01862f6dd72e4c4bd0dc4ac17d42b3", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 36, + "serialized_chars": 38599, + "estimated_input_tokens": 9650, + "estimated_eval_tokens": 19556 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a loose set of Transformers 5.x regressions around model/tokenizer loading and a few documentation/reporting 
issues. The only clear duplicate subgroups are the SigLIP2 discrepancy reports and the pipeline-docs regression; most other pairs are only superficially similar and should stay separate.", + "confidence": 0.46, + "canonical_issue_reason": "issue:45020 is the broadest and most representative open issue in the set: it captures the recurring theme of recent-version regressions affecting remote_code/model loading.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45020 best represents the cluster\u2019s main user-facing failure mode and is the most useful umbrella issue to anchor related reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41553", + "right": "issue:43881", + "accept": false, + "reason": "Both are loading failures, but they involve different models and different failure modes; not the same bug." + }, + { + "left": "issue:43872", + "right": "issue:44589", + "accept": false, + "reason": "Different root causes: bitsandbytes init kwargs vs missing torch storage class." + }, + { + "left": "issue:38175", + "right": "issue:43054", + "accept": true, + "reason": "Both report SigLIP2 output/quality regressions and point to the same underlying implementation discrepancy." + }, + { + "left": "issue:42947", + "right": "issue:44387", + "accept": false, + "reason": "Both are memory-related, but one is about LoRA/gradient checkpointing behavior and the other about int4 quantization OOM." + }, + { + "left": "issue:43054", + "right": "issue:43493", + "accept": true, + "reason": "Both concern SigLIP2 behaving differently from the expected/original implementation, so they look like the same underlying bug." + }, + { + "left": "issue:43881", + "right": "issue:43883", + "accept": false, + "reason": "Different model families and different crashes; no shared concrete bug." 
+ }, + { + "left": "issue:44403", + "right": "issue:44451", + "accept": false, + "reason": "Generic loading noise vs a specific model load failure are not the same issue." + }, + { + "left": "issue:43975", + "right": "issue:45520", + "accept": false, + "reason": "Tokenizer detokenization regression and a Python 3.13 import_utils KeyError are unrelated." + }, + { + "left": "issue:43643", + "right": "issue:45020", + "accept": false, + "reason": "43643 is a specific AutoConfig trust_remote_code field-loss bug; 45020 is a broad umbrella for remote_code breakages, not the same concrete defect." + }, + { + "left": "issue:42947", + "right": "issue:43856", + "accept": false, + "reason": "Different Qwen3-related training/memory problems with different symptoms and likely different code paths." + }, + { + "left": "issue:43782", + "right": "issue:44960", + "accept": false, + "reason": "Different model families and different loading failures; no concrete overlap." + }, + { + "left": "issue:44403", + "right": "issue:44488", + "accept": false, + "reason": "One is a generic log-noise complaint, the other is a specific model loading failure." + }, + { + "left": "issue:42491", + "right": "issue:44368", + "accept": false, + "reason": "The LoRA compatibility regression and the tie_word_embeddings warning are related to Qwen3 fine-tuning, but not the same bug." + }, + { + "left": "issue:41762", + "right": "issue:42915", + "accept": false, + "reason": "Different models and different DeepSpeed/FP8 failure modes." + }, + { + "left": "issue:43975", + "right": "issue:44451", + "accept": false, + "reason": "Different user-visible regressions: detokenization vs model loading." + }, + { + "left": "issue:45020", + "right": "issue:45356", + "accept": false, + "reason": "Broad remote_code regressions vs a specific tokenizer codec/fix_mistral_regex regression are not the same defect." 
+ }, + { + "left": "issue:44661", + "right": "issue:45020", + "accept": false, + "reason": "add-new-model-like / TOKENIZER_MAPPING_NAMES failure is a separate registry bug, not the same as remote_code loading breakage." + }, + { + "left": "issue:43054", + "right": "issue:43278", + "accept": false, + "reason": "SigLIP2 embedding quality and BF16->FP32 dtype drift are different symptoms with different likely causes." + }, + { + "left": "issue:43824", + "right": "issue:43872", + "accept": false, + "reason": "Importing Qwen2.5-VL and bitsandbytes Int8Params initialization are unrelated." + }, + { + "left": "issue:44991", + "right": "issue:45399", + "accept": false, + "reason": "Tokenizer loading for est-roberta and flash-attn fallback gating are unrelated." + }, + { + "left": "issue:43827", + "right": "issue:44509", + "accept": true, + "reason": "Both are the same documentation regression: stale pipeline task references after v5 removals." + }, + { + "left": "issue:43792", + "right": "issue:44220", + "accept": false, + "reason": "Whisper failing to run and a low-level fbank feature helper issue are not clearly the same bug from the available evidence." + }, + { + "left": "issue:45081", + "right": "issue:45399", + "accept": false, + "reason": "Mistral regex patch crash and flash-attn fallback gating are different code paths." + }, + { + "left": "issue:43404", + "right": "issue:43950", + "accept": false, + "reason": "Tied lm_head weights and non-persistent buffer corruption are distinct model state issues." + }, + { + "left": "issue:43257", + "right": "issue:43866", + "accept": false, + "reason": "Qwen3 MoE weight conversion under accelerate/deepspeed and an Ovis2 checkpoint corruption report are different problems." + }, + { + "left": "issue:42915", + "right": "issue:45020", + "accept": false, + "reason": "FineGrainedFP8Config failure is a specific Qwen3 MoE training issue, not the same as broad remote_code regressions." 
+ }, + { + "left": "issue:43742", + "right": "issue:45399", + "accept": false, + "reason": "MobileLLM config/key loading and flash-attn fallback gating are unrelated." + }, + { + "left": "issue:41553", + "right": "issue:44991", + "accept": false, + "reason": "Tokenizer error messaging and est-roberta tokenizer loading failure are different bugs." + }, + { + "left": "issue:43742", + "right": "issue:45020", + "accept": false, + "reason": "Different failure classes: config/key error vs broad remote_code breakage." + }, + { + "left": "issue:43940", + "right": "issue:45313", + "accept": false, + "reason": "Both are Qwen3 ZeRO-3 loading failures, but they affect different model variants and the available evidence is not enough to treat them as the same concrete bug." + }, + { + "left": "issue:43872", + "right": "issue:44291", + "accept": false, + "reason": "Both mention _is_hf_initialized / init_empty_weights style loading, but the reported failure modes differ enough to keep them separate." + }, + { + "left": "issue:43854", + "right": "issue:44863", + "accept": false, + "reason": "GLM-4.7 test loading and NemotronH checkpoint loading are distinct model-specific failures." + }, + { + "left": "issue:44928", + "right": "issue:45588", + "accept": false, + "reason": "RLHF gradient explosion from SDPA/BF16 behavior and a flash_attention.py s_aux crash are unrelated." + }, + { + "left": "issue:41553", + "right": "issue:44488", + "accept": false, + "reason": "Different models, different tokenizer/loading failures." + }, + { + "left": "issue:42915", + "right": "issue:43872", + "accept": false, + "reason": "DeepSpeed/FP8 Qwen3 training failure and bitsandbytes Int8Params construction error are unrelated." + }, + { + "left": "issue:43257", + "right": "issue:45305", + "accept": false, + "reason": "Both involve Qwen3 and distributed training, but one is MoE weight loading and the other is gradient averaging with GAS." 
+ } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet, and the soft-edge calls are generally conservative. The accepted duplicate groupings (SigLIP2 reports and the pipeline-docs regression) are reasonably supported, while the rejections avoid over-claiming shared root cause. No PR-mergeability issue applies here since these are issue pairs." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 4, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:24643", + "issue:30064", + "issue:36010", + "issue:36331", + "issue:38175", + "issue:39692", + "issue:41093", + "issue:42491", + "issue:42617", + "issue:42915", + "issue:43317", + "issue:43366", + "issue:43531", + "issue:43550", + "issue:43596", + "issue:43638", + "issue:43644", + "issue:43645", + "issue:43646", + "issue:43720", + "issue:43824", + "issue:43828", + "issue:43872", + "issue:43950", + "issue:43957", + "issue:43994", + "issue:44155", + "issue:44479", + "issue:44530", + "issue:44534", + "issue:44560", + "issue:44589", + "issue:44661", + "issue:44756", + "issue:44805", + "issue:44863", + "issue:44877", + "issue:44910", + "issue:44918", + "issue:44960", + "issue:44977", + "issue:45030", + "issue:45313", + "issue:45325", + "issue:45357", + "issue:45538", + "issue:45584" + ], + "soft_pairs": [ + "issue:36331|issue:41093", + "issue:41093|issue:44805", + "issue:43872|issue:45538", + "issue:44479|issue:44560", + "issue:44530|issue:44863", + "issue:43366|issue:43828", + "issue:24643|issue:30064", + "issue:44155|issue:45584", + "issue:43366|issue:43531", + "issue:43550|issue:44910", + "issue:43645|issue:43646", + 
"issue:43950|issue:45325", + "issue:44560|issue:44918", + "issue:43366|issue:44589", + "issue:43824|issue:43957", + "issue:42915|issue:43957", + "issue:42617|issue:43366", + "issue:38175|issue:43994", + "issue:43646|issue:43950", + "issue:44560|issue:44805", + "issue:36010|issue:42915", + "issue:43638|issue:44661", + "issue:44877|issue:45030", + "issue:43596|issue:44661", + "issue:43317|issue:44756", + "issue:43644|issue:44534", + "issue:45313|issue:45357", + "issue:38175|issue:39692", + "issue:42491|issue:43720", + "issue:44960|issue:44977" + ], + "prepared_review_unit_hash": "bef9b2e3f24c8f0879e639a11fadb4042e980950897c97ce9c0dea0fa2042104", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 30, + "serialized_chars": 36154, + "estimated_input_tokens": 9039, + "estimated_eval_tokens": 18334 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly unrelated issues with two real duplicate candidates: the Transformers 5.0 custom model initialization regression, and the non-persistent buffer corruption regression. 
The rest are only broadly similar by subsystem or error shape and should stay separate.", + "confidence": 0.88, + "canonical_issue_reason": "Issue 43644 is the cleanest canonical issue in this set because it captures a concrete regression directly duplicated by 44534, with a clear symptom and the strongest duplicate signal.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43644 is the best representative issue here: it is the most exact, self-contained bug report among the true duplicates and can anchor triage better than the more specific or loosely related reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:36331", + "right": "issue:41093", + "accept": false, + "reason": "Different failures: custom trainer loss signature mismatch vs. a mask/tensor shape IndexError. Same training area, but not the same bug." + }, + { + "left": "issue:41093", + "right": "issue:44805", + "accept": false, + "reason": "Both mention a generic mask-shape mismatch, but the reports are too underspecified to confirm the same underlying code path or model/repro." + }, + { + "left": "issue:43872", + "right": "issue:45538", + "accept": false, + "reason": "Completely different problems: bitsandbytes Int8Params initialization vs. CLIPTokenizer max_length handling." + }, + { + "left": "issue:44479", + "right": "issue:44560", + "accept": false, + "reason": "Both are Qwen video-related regressions, but they affect different model families and have different failure modes (video input regression vs StopIteration)." + }, + { + "left": "issue:44530", + "right": "issue:44863", + "accept": false, + "reason": "Unrelated: PagedAttentionCache linear_attention crash vs. NemotronH checkpoint loading failure." + }, + { + "left": "issue:43366", + "right": "issue:43828", + "accept": false, + "reason": "Different areas and symptoms: GGUF/gpt-oss support vs. autocast dtype mismatch in Phi-tiny-MoE." 
+ }, + { + "left": "issue:24643", + "right": "issue:30064", + "accept": false, + "reason": "Unrelated: DeepSpeed weight dimensionality error vs. image processor void segmentation map handling." + }, + { + "left": "issue:44155", + "right": "issue:45584", + "accept": false, + "reason": "Different models and code paths: AudioFlamingo3 batch leakage vs. Whisper empty-transcription generation after token alignment." + }, + { + "left": "issue:43366", + "right": "issue:43531", + "accept": false, + "reason": "Both touch Qwen/large-model support, but one is GGUF gpt-oss support and the other is a sliding_window bug in Qwen3-MoE." + }, + { + "left": "issue:43550", + "right": "issue:44910", + "accept": false, + "reason": "Different concrete bugs: torch.compile+SDPA on Bamba vs. Flash Attention illegal memory access from 3D position_ids on Qwen3.5." + }, + { + "left": "issue:43645", + "right": "issue:43646", + "accept": true, + "reason": "Same underlying regression: custom model initialization breaks in Transformers 5.0.0; the notebook report is just a specific repro of the broader custom-model init failure." + }, + { + "left": "issue:43950", + "right": "issue:45325", + "accept": false, + "reason": "Different bugs: non-persistent buffer corruption in from_pretrained vs. Qwen2.5-VL rope index scaling for still images." + }, + { + "left": "issue:44560", + "right": "issue:44918", + "accept": false, + "reason": "Both involve Qwen3.5/video or embeddings, but one is a video StopIteration regression and the other is a TRL SFT embedding-unpacking failure." + }, + { + "left": "issue:43366", + "right": "issue:44589", + "accept": false, + "reason": "Unrelated: GGUF gpt-oss support vs. missing Float8 storage class error." + }, + { + "left": "issue:43824", + "right": "issue:43957", + "accept": false, + "reason": "Both are loading/import failures, but one is a missing Qwen2.5-VL symbol and the other is a meta-device loading regression for some models." 
+ }, + { + "left": "issue:42915", + "right": "issue:43957", + "accept": false, + "reason": "Different problems: Qwen3Moe FineGrainedFP8Config failure vs. meta-device loading regression." + }, + { + "left": "issue:42617", + "right": "issue:43366", + "accept": false, + "reason": "Not the same bug: 3D parallel script execution failure vs. GGUF gpt-oss support." + }, + { + "left": "issue:38175", + "right": "issue:43994", + "accept": false, + "reason": "Same model family, but the reports are not specific enough to prove the same underlying issue; one is zero probabilities, the other is nonsensical outputs with AutoModel/pipeline." + }, + { + "left": "issue:43646", + "right": "issue:43950", + "accept": false, + "reason": "Both are Transformers 5 regressions, but one is custom model initialization and the other is non-persistent buffer corruption; different code paths." + }, + { + "left": "issue:44560", + "right": "issue:44805", + "accept": false, + "reason": "Different bugs despite similar-looking error text: Qwen3-vl-embedding video StopIteration vs. a generic mask-shape IndexError report." + }, + { + "left": "issue:36010", + "right": "issue:42915", + "accept": false, + "reason": "Unrelated: GenerationMixin import failure vs. Qwen3Moe FP8 config failure." + }, + { + "left": "issue:43638", + "right": "issue:44661", + "accept": false, + "reason": "Different loading/config issues: deepspeed zero3 Bert index error vs. add-new-model-like failing with TOKENIZER_MAPPING_NAMES." + }, + { + "left": "issue:44877", + "right": "issue:45030", + "accept": false, + "reason": "Both involve config validation, but they affect different model families and different failure conditions." + }, + { + "left": "issue:43596", + "right": "issue:44661", + "accept": false, + "reason": "Unrelated: zero3 Bert index-out-of-bounds vs. tokenizer mapping/add-new-model-like logic." 
+ }, + { + "left": "issue:43317", + "right": "issue:44756", + "accept": false, + "reason": "Different symptoms and likely root causes: gpu/cpu offload loading failure vs. Strix Halo mmap OOM avoidance." + }, + { + "left": "issue:43644", + "right": "issue:44534", + "accept": true, + "reason": "Same underlying regression with effectively identical symptom: Transformers v5 fills non-persistent buffers with junk; 44534 is a duplicate phrasing of the same bug." + }, + { + "left": "issue:45313", + "right": "issue:45357", + "accept": false, + "reason": "Both are Qwen3.5 issues, but one is ZeRO-3 weight loading and the other is save_pretrained writing wrong visual encoder keys." + }, + { + "left": "issue:38175", + "right": "issue:39692", + "accept": false, + "reason": "Related SigLIP2 area, but not the same bug: zero probabilities vs. a documentation example with multiple errors including model/processor mismatch and quantization failure." + }, + { + "left": "issue:42491", + "right": "issue:43720", + "accept": false, + "reason": "Different bugs: LoRA checkpoint compatibility across branches vs. packed-weight handling during accelerate loading." + }, + { + "left": "issue:44960", + "right": "issue:44977", + "accept": false, + "reason": "Unrelated model families and failures: GLM5 vs. Qwen3.5 flash-attention generation issues." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The conclusions are conservative and supported by the packet. The two accepted pairs are strongly title-aligned, especially 43644\u219444534, and the rejections avoid overclaiming where the evidence is only thematic or underspecified. The canonical/best-issue choice is also grounded in the clearest duplicate signal." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 5, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:36010", + "issue:39404", + "issue:39692", + "issue:41720", + "issue:42491", + "issue:42915", + "issue:42947", + "issue:43257", + "issue:43381", + "issue:43404", + "issue:43425", + "issue:43454", + "issue:43644", + "issue:43645", + "issue:43646", + "issue:43708", + "issue:43854", + "issue:43856", + "issue:43873", + "issue:43931", + "issue:43957", + "issue:43994", + "issue:44075", + "issue:44155", + "issue:44164", + "issue:44291", + "issue:44292", + "issue:44387", + "issue:44402", + "issue:44451", + "issue:44488", + "issue:44661", + "issue:44683", + "issue:44743", + "issue:44756", + "issue:44910", + "issue:44912", + "issue:44928", + "issue:44960", + "issue:44991", + "issue:45216", + "issue:45276", + "issue:45292", + "issue:45335", + "issue:45362", + "issue:45406", + "issue:45588" + ], + "soft_pairs": [ + "issue:44451|issue:44488", + "issue:43708|issue:44075", + "issue:43854|issue:43931", + "issue:43856|issue:43873", + "issue:42491|issue:45276", + "issue:41720|issue:42915", + "issue:43257|issue:43854", + "issue:39692|issue:43994", + "issue:44488|issue:44991", + "issue:45362|issue:45406", + "issue:43856|issue:44387", + "issue:43644|issue:43645", + "issue:42915|issue:44756", + "issue:44661|issue:44683", + "issue:42491|issue:43931", + "issue:44910|issue:45588", + "issue:43404|issue:43454", + "issue:42491|issue:43856", + "issue:45276|issue:45335", + "issue:36010|issue:39404", + "issue:44164|issue:44291", + "issue:44402|issue:44488", + "issue:41720|issue:44155", + "issue:43873|issue:44387", + "issue:39692|issue:44960", + 
"issue:43425|issue:43957", + "issue:44387|issue:44756", + "issue:43854|issue:45216", + "issue:42947|issue:43381", + "issue:44292|issue:44912", + "issue:44291|issue:44756", + "issue:44743|issue:44960", + "issue:45276|issue:45292", + "issue:43381|issue:44928", + "issue:43957|issue:44756", + "issue:44164|issue:44756", + "issue:43646|issue:43708", + "issue:45292|issue:45335" + ], + "prepared_review_unit_hash": "555da00e5d82820ebe83c154f72b0d6b407336d7d68e61822877c6e3c9384e56", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 38, + "serialized_chars": 38401, + "estimated_input_tokens": 9601, + "estimated_eval_tokens": 19458 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a collection of unrelated Transformers issues. 
The only clear duplicate subclusters are the resize_token_embeddings/output_embeddings reports and the missing lm_head weight-tying reports.", + "confidence": 0.63, + "canonical_issue_reason": "issue:45292 is the best representative because it states the underlying resize_token_embeddings/output_embeddings bug in the most general way and is directly supported by two very similar follow-on reports.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45292 is the strongest global issue candidate: it is general, centrally positioned in the only obvious duplicate subcluster, and its title matches the shared failure mode more broadly than the model-specific variants.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44451", + "right": "issue:44488", + "accept": true, + "reason": "Both report the same model-loading regression: current/latest Transformers versions fail to load specific BERT models." + }, + { + "left": "issue:43708", + "right": "issue:44075", + "accept": false, + "reason": "Completely different bugs: checkpoint resumption/max_steps vs SGD optimizer arguments." + }, + { + "left": "issue:43854", + "right": "issue:43931", + "accept": false, + "reason": "Both are model-loading failures, but for different models and different failure modes; no clear same underlying bug." + }, + { + "left": "issue:43856", + "right": "issue:43873", + "accept": false, + "reason": "One is MoE training memory usage, the other is quantization/offloading behavior; too broad to be the same bug." + }, + { + "left": "issue:42491", + "right": "issue:45276", + "accept": false, + "reason": "Different problems: a qwen3_moe LoRA compatibility issue vs resize_token_embeddings not updating outputs." + }, + { + "left": "issue:41720", + "right": "issue:42915", + "accept": false, + "reason": "Both mention Qwen3, but the failures are different: device mapping CUDA assert vs FineGrainedFP8 loading." 
+ }, + { + "left": "issue:43257", + "right": "issue:43854", + "accept": false, + "reason": "Unrelated model families and failure modes." + }, + { + "left": "issue:39692", + "right": "issue:43994", + "accept": false, + "reason": "Related model family, but one is a docs example with multiple issues and the other is a pipeline/AutoModel inference regression." + }, + { + "left": "issue:44488", + "right": "issue:44991", + "accept": false, + "reason": "Both are tokenizer/model loading complaints, but for different models and likely different root causes." + }, + { + "left": "issue:45362", + "right": "issue:45406", + "accept": false, + "reason": "Different subsystems and symptoms: chat crash vs transformers serve processor attribute error." + }, + { + "left": "issue:43856", + "right": "issue:44387", + "accept": false, + "reason": "Both involve memory, but one is training inefficiency and the other is int4 quantization OOM." + }, + { + "left": "issue:43644", + "right": "issue:43645", + "accept": false, + "reason": "Both are Transformers 5.0 regressions, but the specific bugs are distinct." + }, + { + "left": "issue:42915", + "right": "issue:44756", + "accept": false, + "reason": "Different issues: Qwen3Moe FP8 loading vs Strix Halo mmap OOM." + }, + { + "left": "issue:44661", + "right": "issue:44683", + "accept": false, + "reason": "No shared concrete bug; one is tokenizer mapping validation, the other is compiled flex_attention on new torch." + }, + { + "left": "issue:42491", + "right": "issue:43931", + "accept": false, + "reason": "Different model-specific load/compatibility problems." + }, + { + "left": "issue:44910", + "right": "issue:45588", + "accept": false, + "reason": "Both hit flash_attention, but one is a Qwen3.5 packed-sequence misparse and the other is an s_aux=None crash for sink-less models." 
+ }, + { + "left": "issue:43404", + "right": "issue:43454", + "accept": true, + "reason": "Both describe the same underlying issue class: lm_head weights are not tied, causing bad generation in multimodal models." + }, + { + "left": "issue:42491", + "right": "issue:43856", + "accept": false, + "reason": "LoRA compatibility on a model-family/version boundary is unrelated to Qwen3 MoE memory usage." + }, + { + "left": "issue:45276", + "right": "issue:45335", + "accept": true, + "reason": "Same resize_token_embeddings bug, just reported for different model variants and output paths." + }, + { + "left": "issue:36010", + "right": "issue:39404", + "accept": false, + "reason": "ImportError for GenerationMixin is unrelated to Whisper return_language pipeline behavior." + }, + { + "left": "issue:44164", + "right": "issue:44291", + "accept": false, + "reason": "Both touch loading/saving, but one is extra_state handling and the other is init_empty_weights argument compatibility." + }, + { + "left": "issue:44402", + "right": "issue:44488", + "accept": false, + "reason": "Tokenizer vocab-size mismatch vs a model-loading failure; not the same bug." + }, + { + "left": "issue:41720", + "right": "issue:44155", + "accept": false, + "reason": "Different model paths and symptoms: Qwen3 device mapping vs AudioFlamingo3 batched inference leakage." + }, + { + "left": "issue:43873", + "right": "issue:44387", + "accept": false, + "reason": "Related to quantization and memory, but offloading failure is not the same as increased reserved memory/OOM." + }, + { + "left": "issue:39692", + "right": "issue:44960", + "accept": false, + "reason": "Different model families and unrelated symptoms." + }, + { + "left": "issue:43425", + "right": "issue:43957", + "accept": false, + "reason": "Torch version incompatibility is not the same as meta-device loading breakage." 
+ }, + { + "left": "issue:44387", + "right": "issue:44756", + "accept": false, + "reason": "Different performance/memory regressions with no shared concrete code path." + }, + { + "left": "issue:43854", + "right": "issue:45216", + "accept": false, + "reason": "Different bugs: test-time model loading failure vs incorrect checkpoint saving in Qwen3.5." + }, + { + "left": "issue:42947", + "right": "issue:43381", + "accept": false, + "reason": "Both mention gradient checkpointing, but one is ineffectiveness with PEFT LoRA and the other is an eval-mode restriction." + }, + { + "left": "issue:44292", + "right": "issue:44912", + "accept": false, + "reason": "Different quantization formats and different models; not the same underlying defect." + }, + { + "left": "issue:44291", + "right": "issue:44756", + "accept": false, + "reason": "init_empty_weights TypeError and mmap OOM are unrelated." + }, + { + "left": "issue:44743", + "right": "issue:44960", + "accept": false, + "reason": "Different model implementations and failure modes." + }, + { + "left": "issue:45276", + "right": "issue:45292", + "accept": true, + "reason": "Same resize_token_embeddings/output_embeddings bug; 45292 is the generic formulation." + }, + { + "left": "issue:43381", + "right": "issue:44928", + "accept": false, + "reason": "Both involve training behavior, but gradient checkpointing in eval mode is unrelated to the Qwen3.5 SDPA/BF16 crash." + }, + { + "left": "issue:43957", + "right": "issue:44756", + "accept": false, + "reason": "Meta-device loading breakage vs mmap OOM are unrelated." + }, + { + "left": "issue:44164", + "right": "issue:44756", + "accept": false, + "reason": "save/from_pretrained extra_state handling is unrelated to Strix Halo mmap behavior." + }, + { + "left": "issue:43646", + "right": "issue:43708", + "accept": false, + "reason": "Custom-model init regression vs trainer resume/max_steps bug are not the same issue." 
+ }, + { + "left": "issue:45292", + "right": "issue:45335", + "accept": true, + "reason": "Both are the same resize_token_embeddings bug manifested in different model components." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays conservative: the only clearly supported duplicate subclusters are the resize_token_embeddings/output_embeddings reports and the lm_head weight-tying reports. The accepted soft-edge pairs are plausible, and the rejected pairs avoid overstating shared root causes." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 6, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:32090", + "issue:36032", + "issue:39404", + "issue:40990", + "issue:42947", + "issue:43257", + "issue:43299", + "issue:43421", + "issue:43425", + "issue:43452", + "issue:43582", + "issue:43596", + "issue:43611", + "issue:43632", + "issue:43644", + "issue:43645", + "issue:43716", + "issue:43828", + "issue:43856", + "issue:43883", + "issue:43901", + "issue:43906", + "issue:43931", + "issue:43957", + "issue:43994", + "issue:44079", + "issue:44164", + "issue:44265", + "issue:44402", + "issue:44451", + "issue:44479", + "issue:44488", + "issue:44509", + "issue:44530", + "issue:44589", + "issue:44671", + "issue:44683", + "issue:44756", + "issue:44805", + "issue:44863", + "issue:44877", + "issue:44898", + "issue:44912", + "issue:44918", + "issue:44991", + "issue:45081", + "issue:45084", + "issue:45161" + ], + "soft_pairs": [ + "issue:44479|issue:44877", + "issue:44402|issue:44451", + "issue:43645|issue:44164", + "issue:43257|issue:44863", + "issue:43425|issue:43828", + 
"issue:43828|issue:43957", + "issue:44589|issue:44756", + "issue:43931|issue:44863", + "issue:43906|issue:44530", + "issue:43611|issue:43645", + "issue:39404|issue:44912", + "issue:43425|issue:43716", + "issue:44530|issue:45084", + "issue:43901|issue:44509", + "issue:43299|issue:43931", + "issue:44265|issue:44898", + "issue:44479|issue:44805", + "issue:43994|issue:44079", + "issue:44671|issue:44991", + "issue:43856|issue:44756", + "issue:43425|issue:44918", + "issue:39404|issue:44671", + "issue:40990|issue:42947", + "issue:43596|issue:44683", + "issue:44589|issue:45084", + "issue:36032|issue:43452", + "issue:44991|issue:45081", + "issue:43582|issue:43632", + "issue:32090|issue:39404", + "issue:43957|issue:44164", + "issue:40990|issue:43421", + "issue:43883|issue:44488", + "issue:44164|issue:44479", + "issue:44488|issue:45081", + "issue:43425|issue:45161", + "issue:43611|issue:43644" + ], + "prepared_review_unit_hash": "682631a4c02b9fd01fbb98ddbef4d9956219e81f8ca02cefef009821ccfb29e5", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 36, + "serialized_chars": 38485, + "estimated_input_tokens": 9622, + "estimated_eval_tokens": 19500 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly loose similarity around Transformers v5 regressions, tokenizer/model-loading failures, and unrelated doc/runtime issues. 
I found no soft-edge pair that is clearly the same underlying bug/change under a strict duplicate standard.", + "confidence": 0.86, + "canonical_issue_reason": "issue:43611 is the clearest broad, concrete v5 regression in core model loading and is more central/actionable than the more model-specific or documentation-only reports.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43611 is the best representative issue for the cluster: it describes a specific loading regression with wide potential impact and a clear code-path compared with the other, narrower reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44479", + "right": "issue:44877", + "accept": false, + "reason": "Different models and failures: Qwen video-input regression vs granite_speech config strictness." + }, + { + "left": "issue:44402", + "right": "issue:44451", + "accept": false, + "reason": "Both are loading-related, but they describe different model-specific tokenizer/config problems." + }, + { + "left": "issue:43645", + "right": "issue:44164", + "accept": false, + "reason": "Custom model init in notebooks and extra_state save/load handling are different bugs." + }, + { + "left": "issue:43257", + "right": "issue:44863", + "accept": false, + "reason": "Different architectures and failure modes: Qwen3 MoE conversion vs NemotronH checkpoint loading." + }, + { + "left": "issue:43425", + "right": "issue:43828", + "accept": false, + "reason": "General Torch 2.10 incompatibility is not the same as Phi-tiny-MoE autocast dtype mismatch." + }, + { + "left": "issue:43828", + "right": "issue:43957", + "accept": false, + "reason": "Autocast dtype mismatch and meta-device loading are distinct code paths." + }, + { + "left": "issue:44589", + "right": "issue:44756", + "accept": false, + "reason": "Float8 storage lookup error is unrelated to Strix Halo mmap OOM mitigation." 
+ }, + { + "left": "issue:43931", + "right": "issue:44863", + "accept": false, + "reason": "Different model families and problems: Qwen3-VL shape mismatch vs NemotronH checkpoint loading." + }, + { + "left": "issue:43906", + "right": "issue:44530", + "accept": false, + "reason": "No clear same bug; isolated reproduction of another issue vs PagedAttentionCache linear_attention crash." + }, + { + "left": "issue:43611", + "right": "issue:43645", + "accept": false, + "reason": "Both involve loading/init behavior, but one is base_model_prefix handling and the other is notebook-defined custom models." + }, + { + "left": "issue:39404", + "right": "issue:44912", + "accept": false, + "reason": "Whisper pipeline language return regression and GPT-OSS MXFP4 quantization fallback are unrelated." + }, + { + "left": "issue:43425", + "right": "issue:43716", + "accept": false, + "reason": "Torch version compatibility and Mistral-3 image dtype mismatch are different issues." + }, + { + "left": "issue:44530", + "right": "issue:45084", + "accept": false, + "reason": "PagedAttentionCache Qwen3.5 crash and template-node compilation error are unrelated." + }, + { + "left": "issue:43901", + "right": "issue:44509", + "accept": false, + "reason": "Both are docs complaints, but they cover different removed/changed pipeline behaviors." + }, + { + "left": "issue:43299", + "right": "issue:43931", + "accept": false, + "reason": "Same broad Qwen3-VL family, but not the same concrete failure: MoE loading vs weight-shape mismatch." + }, + { + "left": "issue:44265", + "right": "issue:44898", + "accept": false, + "reason": "Different model classes and failures: torch.export/torch_compilable_check vs Perceiver interpolation." + }, + { + "left": "issue:44479", + "right": "issue:44805", + "accept": false, + "reason": "Video-input regression and mask shape mismatch are not the same bug." 
+ }, + { + "left": "issue:43994", + "right": "issue:44079", + "accept": false, + "reason": "Incorrect Siglip2 outputs and ModelOutput None-key assignment are unrelated." + }, + { + "left": "issue:44671", + "right": "issue:44991", + "accept": false, + "reason": "CamemBERT prediction regression and tokenizer load failure for est-roberta are separate issues." + }, + { + "left": "issue:43856", + "right": "issue:44756", + "accept": false, + "reason": "Training memory usage and AppleSilicon mmap OOM are different performance/runtime problems." + }, + { + "left": "issue:43425", + "right": "issue:44918", + "accept": false, + "reason": "Torch compatibility and Qwen3.5 embedding unpacking in TRL are unrelated." + }, + { + "left": "issue:39404", + "right": "issue:44671", + "accept": false, + "reason": "Whisper pipeline behavior and CamemBERT masked LM predictions do not match." + }, + { + "left": "issue:40990", + "right": "issue:42947", + "accept": false, + "reason": "Perplexity regression and gradient checkpointing/LoRA effectiveness are different bugs." + }, + { + "left": "issue:43596", + "right": "issue:44683", + "accept": false, + "reason": "Zero3 init indexing error and compiled flex_attention torch-version failure are unrelated." + }, + { + "left": "issue:44589", + "right": "issue:45084", + "accept": false, + "reason": "Float8 storage lookup and template compilation errors are different failures." + }, + { + "left": "issue:36032", + "right": "issue:43452", + "accept": false, + "reason": "Tokenizer method name conflict and gguf_file from_pretrained breakage are unrelated." + }, + { + "left": "issue:44991", + "right": "issue:45081", + "accept": false, + "reason": "Tokenizer loading failure for est-roberta and Mistral regex patch crash are different code paths." + }, + { + "left": "issue:43582", + "right": "issue:43632", + "accept": false, + "reason": "AppleSilicon warmup TypeError and _is_hf_initialized flag regression are unrelated." 
+ }, + { + "left": "issue:32090", + "right": "issue:39404", + "accept": false, + "reason": "Trainer GPU broadcast TypeError and Whisper return_language regression are different bugs." + }, + { + "left": "issue:43957", + "right": "issue:44164", + "accept": false, + "reason": "Meta-device loading breakage and extra_state save/from_pretrained handling are separate issues." + }, + { + "left": "issue:40990", + "right": "issue:43421", + "accept": false, + "reason": "Perplexity regression and runtime special-token post-processor update are unrelated." + }, + { + "left": "issue:43883", + "right": "issue:44488", + "accept": false, + "reason": "Missing all_tied_weights_keys and sleng-bert load failure are different model-loading problems." + }, + { + "left": "issue:44479", + "right": "issue:45081", + "accept": false, + "reason": "Qwen video-input regression and Mistral regex patch crash are unrelated." + }, + { + "left": "issue:44991", + "right": "issue:45081", + "accept": false, + "reason": "Tokenizer load failure and backend_tokenizer attribute crash are different failures." + }, + { + "left": "issue:43611", + "right": "issue:43644", + "accept": false, + "reason": "Base_model_prefix loading breakage and non-persistent buffer junk are different regressions." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 7, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:34567", + "issue:36032", + "issue:39404", + "issue:40444", + "issue:41553", + "issue:42915", + "issue:42947", + "issue:43262", + "issue:43317", + "issue:43322", + "issue:43377", + "issue:43404", + "issue:43452", + "issue:43482", + "issue:43540", + "issue:43541", + "issue:43582", + "issue:43643", + "issue:43644", + "issue:43645", + "issue:43646", + "issue:43716", + "issue:43819", + "issue:43825", + "issue:43883", + "issue:43994", + "issue:44206", + "issue:44220", + "issue:44403", + "issue:44451", + "issue:44466", + "issue:44488", + "issue:44530", + "issue:44534", + "issue:44610", + "issue:44683", + "issue:44756", + "issue:44811", + "issue:44849", + "issue:44987", + "issue:44991", + "issue:45081", + "issue:45084", + "issue:45216", + "issue:45325", + "issue:45357", + "issue:45507", + "issue:45584" + ], + "soft_pairs": [ + "issue:44811|issue:45584", + "issue:44206|issue:44220", + "issue:44488|issue:44987", + "issue:43716|issue:45084", + "issue:34567|issue:40444", + "issue:43883|issue:44991", + "issue:42947|issue:44756", + "issue:43644|issue:43646", + "issue:43643|issue:44530", + "issue:44451|issue:44987", + "issue:43452|issue:43482", + "issue:43317|issue:43482", + "issue:43541|issue:45084", + "issue:36032|issue:42915", + "issue:39404|issue:43825", + "issue:42915|issue:44530", + "issue:44849|issue:45325", + "issue:43404|issue:44466", + "issue:43262|issue:43377", + "issue:36032|issue:43645", + "issue:44987|issue:44991", + "issue:45084|issue:45507", + "issue:43646|issue:44534", + 
"issue:43994|issue:44610", + "issue:43645|issue:44534", + "issue:45216|issue:45357", + "issue:43322|issue:43540", + "issue:43377|issue:43819", + "issue:44403|issue:45081", + "issue:44451|issue:45081", + "issue:41553|issue:43582", + "issue:44220|issue:44683" + ], + "prepared_review_unit_hash": "340d289e745eb0d1d6a0b500eb6cbaf8e0f0147f18325fb46bb007f2630876be", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 32, + "serialized_chars": 37207, + "estimated_input_tokens": 9302, + "estimated_eval_tokens": 18860 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous noise: many items are only loosely related by subsystem/model family, not by the same underlying defect. The only strong duplicate-like pair is the Qwen3.5 `save_pretrained` regression pair (45216/45357), which appears to describe the same checkpoint-serialization bug with different symptom detail.", + "confidence": 0.82, + "canonical_issue_reason": "No single canonical issue for the whole cluster; the items span many unrelated bugs and regressions across different models and APIs.", + "canonical_pr_reason": null, + "best_issue_reason": "If a representative is needed, issue 45216 is the broader regression report; 45357 reads like a narrower follow-up about the same Qwen3.5 `save_pretrained` serialization problem.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44811", + "right": "issue:45584", + "accept": false, + "reason": "Both involve Whisper, but one is about `batch_decode(skip_special_tokens)` and the other about empty-generation behavior after `align_special_tokens`; different code paths and symptoms." 
+ }, + { + "left": "issue:44206", + "right": "issue:44220", + "accept": false, + "reason": "Both are audio-feature related, but the titles point to different failures (`center` arg regression vs. a generic `_torch_extract_fbank_features()` problem) without enough evidence they are the same bug." + }, + { + "left": "issue:44488", + "right": "issue:44987", + "accept": false, + "reason": "Both are loading failures, but they concern different models and likely different tokenizer/model compatibility issues; too broad to treat as one duplicate." + }, + { + "left": "issue:43716", + "right": "issue:45084", + "accept": false, + "reason": "Completely different problems: Mistral-3 image dtype mismatch vs. a template compilation error." + }, + { + "left": "issue:34567", + "right": "issue:40444", + "accept": false, + "reason": "Trainer token accounting and Qwen2.5-VL multi-image IterableDataset failure are unrelated." + }, + { + "left": "issue:43883", + "right": "issue:44991", + "accept": false, + "reason": "Different model-loading/tokenizer issues for different models; same broad area, but not the same concrete bug." + }, + { + "left": "issue:42947", + "right": "issue:44756", + "accept": false, + "reason": "LoRA gradient-checkpointing behavior and Strix Halo mmap OOM are unrelated." + }, + { + "left": "issue:43644", + "right": "issue:43646", + "accept": false, + "reason": "Both touch Transformers v5 initialization behavior, but one is about non-persistent buffers and the other about custom model initialization; not the same underlying defect." + }, + { + "left": "issue:43643", + "right": "issue:44530", + "accept": false, + "reason": "Missing fields from `AutoConfig.from_pretrained(trust_remote_code=True)` is unrelated to Qwen3.5 PagedAttentionCache group-type crashes." + }, + { + "left": "issue:39404", + "right": "issue:43825", + "accept": false, + "reason": "Whisper `return_language` pipeline behavior and a bad pipeline error message are different issues." 
+ }, + { + "left": "issue:43452", + "right": "issue:43482", + "accept": false, + "reason": "Both involve GGUF loading, but one is a generic `gguf_file` regression and the other is a specific Qwen2.5-GGUF failure; not enough evidence of one bug." + }, + { + "left": "issue:43317", + "right": "issue:43482", + "accept": false, + "reason": "Different loading paths and failures: dequantized device-map offload vs. Qwen2.5-GGUF/v5 loading." + }, + { + "left": "issue:43541", + "right": "issue:45084", + "accept": false, + "reason": "Torch dynamo/grouped_mm tracing failure is unrelated to template-node compilation errors." + }, + { + "left": "issue:36032", + "right": "issue:42915", + "accept": false, + "reason": "T5 tokenizer method-name conflict and Qwen3MoE FP8 failure are unrelated." + }, + { + "left": "issue:39404", + "right": "issue:43825", + "accept": false, + "reason": "Same as above: different Whisper/pipeline bug versus error-message wording issue." + }, + { + "left": "issue:42915", + "right": "issue:44530", + "accept": false, + "reason": "Fine-grained FP8 config failure and Qwen3.5 linear_attention cache crash are unrelated." + }, + { + "left": "issue:44849", + "right": "issue:45325", + "accept": false, + "reason": "Qwen3.5 hidden-states output bug and Qwen2.5-VL rope-index scaling bug affect different models and code paths." + }, + { + "left": "issue:43404", + "right": "issue:44466", + "accept": false, + "reason": "Both mention tied weights/lm_head, but one is a missing tie in a model class and the other is inconsistent serialization behavior; not the same bug." + }, + { + "left": "issue:43262", + "right": "issue:43377", + "accept": false, + "reason": "Audio chat-template sampling-rate defaulting and MIMI padding-mask batch discrepancy are different issues." + }, + { + "left": "issue:36032", + "right": "issue:43645", + "accept": false, + "reason": "Tokenizer method conflict and notebook custom-model initialization are unrelated." 
+ }, + { + "left": "issue:44987", + "right": "issue:44991", + "accept": false, + "reason": "Both are model-loading regressions, but for different models and likely different failure causes." + }, + { + "left": "issue:45084", + "right": "issue:45507", + "accept": false, + "reason": "Template compilation error and GraniteMoEHybrid invalid-method call are unrelated." + }, + { + "left": "issue:43646", + "right": "issue:44534", + "accept": false, + "reason": "Both concern Transformers v5 custom model behavior, but one is about initialization and the other about non-persistent buffer values; insufficient evidence of one concrete bug." + }, + { + "left": "issue:43994", + "right": "issue:44610", + "accept": false, + "reason": "SigLIP2 bad outputs and OmDet-Turbo image-size mismatch are different model/preprocessor problems." + }, + { + "left": "issue:43645", + "right": "issue:44534", + "accept": false, + "reason": "Notebook custom-model initialization and junk-filled buffers are separate v5 regressions." + }, + { + "left": "issue:45216", + "right": "issue:45357", + "accept": true, + "reason": "Same model family and same `save_pretrained` checkpoint-serialization regression; the later issue just narrows the symptom to incorrect visual encoder keys." + }, + { + "left": "issue:43322", + "right": "issue:43540", + "accept": false, + "reason": "Segfault on Llava Next model loading and Qwen3OmniMoe video-processing ValueError are unrelated multimodal failures." + }, + { + "left": "issue:43377", + "right": "issue:43819", + "accept": false, + "reason": "Both are codec/model inference bugs, but MIMI padding-mask batching and DAC latent/STE mismatch are different code paths." + }, + { + "left": "issue:44403", + "right": "issue:45081", + "accept": false, + "reason": "Unnecessary loading noise and Mistral regex-patching crash are unrelated." 
+ }, + { + "left": "issue:44451", + "right": "issue:45081", + "accept": false, + "reason": "Different tokenizer-loading failures for different models; not the same defect." + }, + { + "left": "issue:41553", + "right": "issue:43582", + "accept": false, + "reason": "Bad AutoTokenizer error message for Voxtral and Apple Silicon allocator TypeError are unrelated." + }, + { + "left": "issue:44220", + "right": "issue:44683", + "accept": false, + "reason": "Audio fbank-feature extraction and compiled flex_attention on torch 2.9 are unrelated." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays conservative about weak links. The only accepted soft-edge pair, 45216/45357, is plausibly the same Qwen3.5 save_pretrained serialization regression, while the rest of the pairwise rejections are appropriately cautious. No obvious overstatement or unsupported canonicalization." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 8, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:33666", + "issue:35707", + "issue:36032", + "issue:40444", + "issue:40990", + "issue:41720", + "issue:42175", + "issue:42915", + "issue:43296", + "issue:43317", + "issue:43404", + "issue:43421", + "issue:43425", + "issue:43482", + "issue:43531", + "issue:43611", + "issue:43632", + "issue:43742", + "issue:43792", + "issue:43818", + "issue:43856", + "issue:44062", + "issue:44079", + "issue:44265", + "issue:44291", + "issue:44403", + "issue:44479", + "issue:44534", + "issue:44568", + "issue:44589", + "issue:44610", + "issue:44683", + "issue:44756", + "issue:44811", + "issue:44829", + "issue:44877", 
+ "issue:44987", + "issue:44991", + "issue:45020", + "issue:45081", + "issue:45084", + "issue:45127", + "issue:45325", + "issue:45357", + "issue:45479", + "issue:45538", + "issue:45542" + ], + "soft_pairs": [ + "issue:43632|issue:44756", + "issue:44265|issue:44610", + "issue:43404|issue:45127", + "issue:42915|issue:45084", + "issue:44987|issue:45081", + "issue:43296|issue:43531", + "issue:44479|issue:45325", + "issue:44062|issue:44987", + "issue:42915|issue:44291", + "issue:40990|issue:44811", + "issue:43611|issue:44534", + "issue:43317|issue:43856", + "issue:36032|issue:43742", + "issue:42175|issue:45542", + "issue:40444|issue:45325", + "issue:43632|issue:44534", + "issue:43421|issue:44568", + "issue:43818|issue:45357", + "issue:43531|issue:44877", + "issue:42915|issue:44589", + "issue:36032|issue:41720", + "issue:43425|issue:44403", + "issue:44683|issue:45020", + "issue:43482|issue:45538", + "issue:44829|issue:45479", + "issue:33666|issue:40444", + "issue:44403|issue:44991", + "issue:43792|issue:44589", + "issue:44079|issue:44534", + "issue:35707|issue:40444" + ], + "prepared_review_unit_hash": "f822eae9c530972cd221112cf845f84ff76f09b56602ccad09de2cd5f669e661", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 30, + "serialized_chars": 36158, + "estimated_input_tokens": 9040, + "estimated_eval_tokens": 18336 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are mostly unrelated Transformer regressions across different models/subsystems; I do not see any true duplicate pairs to merge. 
If one issue must represent the cluster, #45020 is the broadest umbrella for the repeated recent-version breakage theme.", + "confidence": 0.89, + "canonical_issue_reason": "Issue #45020 is the broadest umbrella for the repeated 'recent transformers versions break models' theme, though the cluster overall is not a true duplicate set.", + "canonical_pr_reason": null, + "best_issue_reason": "#45020 is the most representative and open issue here: it captures broad version-regression breakage across models, even if it does not subsume the other reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43632", + "right": "issue:44756", + "accept": false, + "reason": "Different failures: `_is_hf_initialized` init handling vs Strix Halo mmap/OOM behavior." + }, + { + "left": "issue:44265", + "right": "issue:44610", + "accept": false, + "reason": "torch.export/torch_compilable_check is unrelated to a processor input-size mismatch in OmDet-Turbo." + }, + { + "left": "issue:43404", + "right": "issue:45127", + "accept": false, + "reason": "One is a tied lm_head bug; the other is LoRA merge collapse with extended vocab. Not the same bug." + }, + { + "left": "issue:42915", + "right": "issue:45084", + "accept": false, + "reason": "Qwen3MoE FP8 config failure and a non-template-node compile error are different code paths." + }, + { + "left": "issue:44987", + "right": "issue:45081", + "accept": false, + "reason": "Tokenizer loading failure for one model is not the same as the Mistral regex patch crash." + }, + { + "left": "issue:43296", + "right": "issue:43531", + "accept": false, + "reason": "PaddleOCR-VL loading and Qwen3-MoE sliding_window issues are unrelated." + }, + { + "left": "issue:44479", + "right": "issue:45325", + "accept": false, + "reason": "Both are Qwen VL-temporal topics, but one is a broad video-input regression and the other is a still-image RoPE scaling bug." 
+ }, + { + "left": "issue:44062", + "right": "issue:44987", + "accept": false, + "reason": "AddedToken special-arg duplication is unrelated to a specific model loading regression." + }, + { + "left": "issue:42915", + "right": "issue:44291", + "accept": false, + "reason": "FP8 config handling and unexpected `_is_hf_initialized` argument errors are distinct problems." + }, + { + "left": "issue:40990", + "right": "issue:44811", + "accept": false, + "reason": "Perplexity regression on GPT-OSS is unrelated to Whisper batch_decode special-token handling." + }, + { + "left": "issue:43611", + "right": "issue:44534", + "accept": false, + "reason": "`base_model_prefix` loading breakage and non-persistent buffer corruption are different failures." + }, + { + "left": "issue:43317", + "right": "issue:43856", + "accept": false, + "reason": "Device-map/offload loading failure is not the same as MoE training memory inefficiency." + }, + { + "left": "issue:36032", + "right": "issue:43742", + "accept": false, + "reason": "T5Tokenizer special-token method conflict and MobileLLM key errors do not share the same root cause." + }, + { + "left": "issue:42175", + "right": "issue:45542", + "accept": false, + "reason": "Packaging omission of TensorFlow is different from the undefined tf backend error caused by tensorboard-only installs." + }, + { + "left": "issue:40444", + "right": "issue:45325", + "accept": false, + "reason": "Multi-image iterable fine-tuning failure is not the same as RoPE temporal position scaling in Qwen2.5-VL." + }, + { + "left": "issue:43632", + "right": "issue:44534", + "accept": false, + "reason": "`_is_hf_initialized` flag behavior and non-persistent buffer filling are separate regressions." + }, + { + "left": "issue:43421", + "right": "issue:44568", + "accept": false, + "reason": "Runtime post-processor updates and BOS/EOS insertion on tokenization are related only broadly, not the same bug." 
+ }, + { + "left": "issue:43818", + "right": "issue:45357", + "accept": false, + "reason": "Video-LLaVA temporal-attention/weight-sharing bug and Qwen3.5 save_pretrained key bug are unrelated." + }, + { + "left": "issue:43531", + "right": "issue:44877", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior and granite_speech strict-config loading are different issues." + }, + { + "left": "issue:42915", + "right": "issue:44589", + "accept": false, + "reason": "FP8 config failure and missing Float8 storage type are distinct float8-related problems." + }, + { + "left": "issue:36032", + "right": "issue:41720", + "accept": false, + "reason": "Tokenizer special-token conflict is unrelated to Qwen3 auto device-mapping CUDA asserts." + }, + { + "left": "issue:43425", + "right": "issue:44403", + "accept": false, + "reason": "Torch version incompatibility and noisy loading output are not the same underlying bug." + }, + { + "left": "issue:44683", + "right": "issue:45020", + "accept": false, + "reason": "Compiled flex_attention on torch>=2.9 and remote_code loading regressions are separate compatibility problems." + }, + { + "left": "issue:43482", + "right": "issue:45538", + "accept": false, + "reason": "GGUF loading failure and CLIPTokenizer max-length behavior are unrelated." + }, + { + "left": "issue:44829", + "right": "issue:45479", + "accept": false, + "reason": "Flash-attention-induced degenerate training and single-label zero-loss behavior are different classification bugs." + }, + { + "left": "issue:33666", + "right": "issue:40444", + "accept": false, + "reason": "Qwen2-VL multi-GPU training and multi-image-per-prompt iterable-dataset failure are not the same bug." + }, + { + "left": "issue:44403", + "right": "issue:44991", + "accept": false, + "reason": "A general loading-noise complaint does not match a concrete tokenizer loading failure." 
+ }, + { + "left": "issue:43792", + "right": "issue:44589", + "accept": false, + "reason": "Whisper runtime failure is unrelated to the Float8 storage lookup issue." + }, + { + "left": "issue:44079", + "right": "issue:44534", + "accept": false, + "reason": "ModelOutput key assignment and non-persistent buffer junk are different subsystems." + }, + { + "left": "issue:35707", + "right": "issue:40444", + "accept": false, + "reason": "Progressive generation with inputs_embeds/past_key_values is unrelated to Qwen2.5-VL multi-image fine-tuning failure." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 9, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:34634", + "issue:35707", + "issue:40444", + "issue:40990", + "issue:41720", + "issue:42175", + "issue:42915", + "issue:43262", + "issue:43296", + "issue:43317", + "issue:43377", + "issue:43454", + "issue:43482", + "issue:43525", + "issue:43526", + "issue:43531", + "issue:43611", + "issue:43632", + "issue:43644", + "issue:43646", + "issue:43701", + "issue:43746", + "issue:43749", + "issue:43792", + "issue:43828", + "issue:43873", + "issue:43881", + "issue:43883", + "issue:43957", + "issue:43994", + "issue:44079", + "issue:44265", + "issue:44291", + "issue:44387", + "issue:44466", + "issue:44488", + "issue:44589", + "issue:44617", + "issue:44683", + "issue:44898", + "issue:44991", + "issue:45072", + "issue:45081", + "issue:45325", + "issue:45381", + "issue:45491", + "issue:45563", + "issue:45584" + ], + "soft_pairs": [ + "issue:43377|issue:45491", + "issue:43482|issue:43525", + "issue:44291|issue:44589", + "issue:43749|issue:44079", + 
"issue:44079|issue:45072", + "issue:43644|issue:44079", + "issue:40444|issue:45381", + "issue:43632|issue:45072", + "issue:44265|issue:45072", + "issue:43881|issue:44488", + "issue:45325|issue:45381", + "issue:43531|issue:44291", + "issue:43611|issue:43646", + "issue:41720|issue:45081", + "issue:42175|issue:43296", + "issue:43454|issue:44466", + "issue:43994|issue:44898", + "issue:44387|issue:44617", + "issue:43828|issue:44683", + "issue:42915|issue:43482", + "issue:43526|issue:43883", + "issue:43792|issue:43957", + "issue:43701|issue:45325", + "issue:43262|issue:43746", + "issue:43632|issue:45563", + "issue:43881|issue:44991", + "issue:40990|issue:45584", + "issue:43317|issue:43873", + "issue:34634|issue:35707" + ], + "prepared_review_unit_hash": "600b3bd80c17b76c461ab3af243a1e58c629366bd461739fdf24a77d56978737", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 29, + "serialized_chars": 36393, + "estimated_input_tokens": 9099, + "estimated_eval_tokens": 18454 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous, but it is dominated by Transformers v5 model-loading/initialization regressions. 
Only one soft pair looks like the same concrete bug family (Qwen2.5-VL position-id generation).", + "confidence": 0.72, + "canonical_issue_reason": "issue:43646 is the broadest representative of the dominant theme: Transformers 5.0.0 breaking custom model initialization and related loading paths.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43646 is the best cluster representative because it is the most generic, central v5 regression title and subsumes several nearby load/init failures.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43377", + "right": "issue:45491", + "accept": false, + "reason": "Different models and symptoms; one is MIMI batching/padding-mask behavior, the other is Gemma3 sliding-window NaNs." + }, + { + "left": "issue:43482", + "right": "issue:43525", + "accept": false, + "reason": "Unrelated failures: Qwen2.5-GGUF loading vs Llama4Config missing pad_token_id." + }, + { + "left": "issue:44291", + "right": "issue:44589", + "accept": false, + "reason": "Both are Transformers v5 regressions, but one is init_empty_weights/_is_hf_initialized handling and the other is an fp8 storage lookup error." + }, + { + "left": "issue:43749", + "right": "issue:44079", + "accept": false, + "reason": "Different subsystems: FSDP CPU RAM-efficient loading vs ModelOutput key assignment." + }, + { + "left": "issue:44079", + "right": "issue:45072", + "accept": false, + "reason": "ModelOutput key bookkeeping is unrelated to dtype mismatches in SwitchTransformers/TimmWrapperModel." + }, + { + "left": "issue:43644", + "right": "issue:44079", + "accept": false, + "reason": "Non-persistent buffer serialization junk is a different bug from ModelOutput key assignment." + }, + { + "left": "issue:40444", + "right": "issue:45381", + "accept": false, + "reason": "Both involve Qwen2.5-VL, but one is finetuning with multiple images and the other is video position ids; not the same concrete bug." 
+ }, + { + "left": "issue:43632", + "right": "issue:45072", + "accept": false, + "reason": "_is_hf_initialized flag regression is unrelated to bfloat16 dtype mismatch issues." + }, + { + "left": "issue:44265", + "right": "issue:45072", + "accept": false, + "reason": "torch.export/torch_compilable_check failure is a different code path than dtype mismatch inference bugs." + }, + { + "left": "issue:43881", + "right": "issue:44488", + "accept": false, + "reason": "Different model-loading failures affecting different models and likely different root causes." + }, + { + "left": "issue:45325", + "right": "issue:45381", + "accept": true, + "reason": "Same Qwen2.5-VL position-id / rope-index code path: one reports still-image temporal position scaling, the other video vision_position_ids being wrong." + }, + { + "left": "issue:43531", + "right": "issue:44291", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior is unrelated to init_empty_weights argument handling." + }, + { + "left": "issue:43611", + "right": "issue:43646", + "accept": false, + "reason": "Both are v5 loading regressions, but base_model_prefix handling and custom model initialization are not the same concrete bug." + }, + { + "left": "issue:41720", + "right": "issue:45081", + "accept": false, + "reason": "Qwen3 auto device mapping CUDA assert and Mistral tokenizer regex patch crash are unrelated." + }, + { + "left": "issue:42175", + "right": "issue:43296", + "accept": false, + "reason": "Packaging/backend dependency issue is unrelated to PaddleOCR-VL model loading." + }, + { + "left": "issue:43454", + "right": "issue:44466", + "accept": false, + "reason": "Missing lm_head weight tying and device-dependent serialization are different tied-weight bugs." + }, + { + "left": "issue:43994", + "right": "issue:44898", + "accept": false, + "reason": "Different vision-model inference failures with different models and different failure modes." 
+ }, + { + "left": "issue:44387", + "right": "issue:44617", + "accept": false, + "reason": "Int4 quantization CUDA memory growth is unrelated to Sam3Video OOM." + }, + { + "left": "issue:43828", + "right": "issue:44683", + "accept": false, + "reason": "Autocast dtype mismatch and compiled flex_attention on torch>=2.9 are different issues." + }, + { + "left": "issue:42915", + "right": "issue:43482", + "accept": false, + "reason": "Qwen3MoE FP8 config failure is unrelated to Qwen2.5-GGUF loading." + }, + { + "left": "issue:43526", + "right": "issue:43883", + "accept": false, + "reason": "BeitImageProcessorFast label reduction bug is unrelated to Molmo tied-weight attribute errors." + }, + { + "left": "issue:43792", + "right": "issue:43957", + "accept": false, + "reason": "Whisper runtime failure and meta-device loading regressions are different problems." + }, + { + "left": "issue:43701", + "right": "issue:45325", + "accept": false, + "reason": "Checkpoint resume key mismatch is unrelated to Qwen2.5-VL position-id generation." + }, + { + "left": "issue:43262", + "right": "issue:43746", + "accept": false, + "reason": "Audio chat-template sample-rate defaulting is unrelated to PEFT checkpoint loading." + }, + { + "left": "issue:43632", + "right": "issue:45563", + "accept": false, + "reason": "_is_hf_initialized regression is unrelated to a stale warning in paged generate()." + }, + { + "left": "issue:43881", + "right": "issue:44991", + "accept": false, + "reason": "Different tokenizer/model-loading failures for different models." + }, + { + "left": "issue:40990", + "right": "issue:45584", + "accept": false, + "reason": "High perplexity on GPT-OSS and Whisper empty-transcription generation failure are unrelated." + }, + { + "left": "issue:43317", + "right": "issue:43873", + "accept": false, + "reason": "Both mention offloading/quantization, but they are not clearly the same concrete failure mode." 
+ }, + { + "left": "issue:34634", + "right": "issue:35707", + "accept": false, + "reason": "Completely unrelated: BarkProcessor voice_preset vs progressive generation with inputs_embeds/past_key_values." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays appropriately conservative. The cluster is indeed heterogeneous, with a visible v5 loading/initialization theme, and the one accepted soft pair (issue:45325 vs issue:45381) is a plausible same-family position-id/rope-index match. The rejected soft pairs are also phrased cautiously and do not overclaim shared root causes." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 10, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:35707", + "issue:36331", + "issue:39692", + "issue:40444", + "issue:41720", + "issue:42617", + "issue:42915", + "issue:43296", + "issue:43316", + "issue:43317", + "issue:43322", + "issue:43377", + "issue:43404", + "issue:43454", + "issue:43526", + "issue:43550", + "issue:43577", + "issue:43606", + "issue:43708", + "issue:43720", + "issue:43749", + "issue:43792", + "issue:43819", + "issue:43873", + "issue:43881", + "issue:44186", + "issue:44265", + "issue:44291", + "issue:44423", + "issue:44466", + "issue:44479", + "issue:44534", + "issue:44610", + "issue:44671", + "issue:44683", + "issue:44743", + "issue:44756", + "issue:44877", + "issue:44898", + "issue:44913", + "issue:44918", + "issue:44977", + "issue:44991", + "issue:45072", + "issue:45325", + "issue:45357", + "issue:45406" + ], + "soft_pairs": [ + "issue:43606|issue:44265", + "issue:44291|issue:44877", + 
"issue:44186|issue:44265", + "issue:43606|issue:43819", + "issue:44610|issue:45325", + "issue:35707|issue:41720", + "issue:43377|issue:44898", + "issue:43454|issue:43550", + "issue:43377|issue:44610", + "issue:43454|issue:45072", + "issue:39692|issue:40444", + "issue:44466|issue:45357", + "issue:43749|issue:44534", + "issue:43296|issue:44991", + "issue:43792|issue:44683", + "issue:43296|issue:44877", + "issue:41720|issue:43322", + "issue:43454|issue:43720", + "issue:43404|issue:45357", + "issue:41720|issue:44977", + "issue:44423|issue:45406", + "issue:43526|issue:43881", + "issue:44186|issue:45072", + "issue:43873|issue:44756", + "issue:43577|issue:43708", + "issue:42617|issue:42915", + "issue:44877|issue:44991", + "issue:44610|issue:44898", + "issue:43550|issue:43720", + "issue:44479|issue:44918", + "issue:44186|issue:44610", + "issue:36331|issue:42915", + "issue:36331|issue:42617", + "issue:43316|issue:44913", + "issue:44671|issue:44743", + "issue:42915|issue:43317", + "issue:43550|issue:43606", + "issue:41720|issue:42617" + ], + "prepared_review_unit_hash": "4e2e513f44155689223314896d0f7d4953d76890b4b43ae9228e4b9da9f10695", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 38, + "serialized_chars": 38633, + "estimated_input_tokens": 9659, + "estimated_eval_tokens": 19574 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is very heterogeneous: the pairs mostly share broad themes like loading, quantization, offloading, multimodal models, or config APIs, but not the same concrete bug or code path. 
I would reject every soft edge here; none looks safe to merge as a duplicate/one-fix pair.", + "confidence": 0.94, + "canonical_issue_reason": "issue:43873 is the broadest representative of the set (offloading + quantization + loading), but it is only a weak cluster anchor because the surrounding issues are not true duplicates.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43873 is the most generic and central issue to use as a representative triage anchor, but only because the cluster lacks a real duplicate nucleus.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43606", + "right": "issue:44265", + "accept": false, + "reason": "One is a CPU offload device-mismatch failure in a specific model; the other is a torch.export/torch_compilable_check export failure. Different code paths." + }, + { + "left": "issue:44291", + "right": "issue:44877", + "accept": false, + "reason": "Unexpected `_is_hf_initialized` argument during init_empty_weights is unrelated to strict config blocking granite_speech loading." + }, + { + "left": "issue:44186", + "right": "issue:44265", + "accept": false, + "reason": "Tokenizer crashes on NER/padding are unrelated to export failures from torch_compilable_check." + }, + { + "left": "issue:43606", + "right": "issue:43819", + "accept": false, + "reason": "CPU offload device mismatch and DAC.from_latents STE mismatch are different model/runtime bugs." + }, + { + "left": "issue:44610", + "right": "issue:45325", + "accept": false, + "reason": "Processor resolution mismatch for OmDet-Turbo is unrelated to Qwen2.5-VL temporal rope indexing." + }, + { + "left": "issue:35707", + "right": "issue:41720", + "accept": false, + "reason": "Progressive generation with inputs_embeds/past_key_values is unrelated to Qwen3 auto device-map cuda asserts." 
+ }, + { + "left": "issue:43377", + "right": "issue:44898", + "accept": false, + "reason": "MIMI padding-mask batching bug and Perceiver non-default-resolution failure are different model components and symptoms." + }, + { + "left": "issue:43454", + "right": "issue:43550", + "accept": false, + "reason": "Tied lm_head weights bug and Bamba torch.compile/SDPA failure are unrelated." + }, + { + "left": "issue:43377", + "right": "issue:44610", + "accept": false, + "reason": "Both involve model inputs, but one is MIMI batching behavior and the other is OmDet processor output size; not the same bug." + }, + { + "left": "issue:43454", + "right": "issue:45072", + "accept": false, + "reason": "lm_head tying/serialization issues do not match dtype mismatches in SwitchTransformers/TimmWrapperModel." + }, + { + "left": "issue:39692", + "right": "issue:40444", + "accept": false, + "reason": "SigLIP2 documentation errors and Qwen2.5-VL iterable-dataset finetuning failure are unrelated." + }, + { + "left": "issue:44466", + "right": "issue:45357", + "accept": false, + "reason": "Inconsistent lm_head serialization by device is not the same as incorrect visual encoder keys being saved." + }, + { + "left": "issue:43749", + "right": "issue:44534", + "accept": false, + "reason": "FSDP CPU RAM efficient loading and non-persistent buffer junk are both v5/loading-adjacent, but not the same concrete bug." + }, + { + "left": "issue:43296", + "right": "issue:44991", + "accept": false, + "reason": "PaddleOCR-VL load failure in vLLM and tokenizer loading failure for est-roberta are separate model/tokenizer issues." + }, + { + "left": "issue:43792", + "right": "issue:44683", + "accept": false, + "reason": "Whisper model runtime failure and compiled flex_attention on torch>=2.9 are unrelated." + }, + { + "left": "issue:43296", + "right": "issue:44877", + "accept": false, + "reason": "vLLM/PaddleOCR-VL loading failure is unrelated to strict config rejection of granite_speech." 
+ }, + { + "left": "issue:41720", + "right": "issue:43322", + "accept": false, + "reason": "Qwen3 auto device mapping cudaErrorAssert and Llava Next segfault are both loading/runtime crashes but not the same underlying issue." + }, + { + "left": "issue:43454", + "right": "issue:43720", + "accept": false, + "reason": "lm_head tying is unrelated to BitNet packed-weight unpacking during accelerate loading." + }, + { + "left": "issue:43404", + "right": "issue:45357", + "accept": false, + "reason": "Mistral3 lm_head weight tying and Qwen3.5 visual encoder key serialization are different model families and fixes." + }, + { + "left": "issue:41720", + "right": "issue:44977", + "accept": false, + "reason": "Auto device-map cuda assert for Qwen3 and flash-attention generation regression for Qwen3.5 are distinct failures." + }, + { + "left": "issue:44423", + "right": "issue:45406", + "accept": false, + "reason": "Continuous-batching multimodal crash with 'str' object has no attribute 'to' is unrelated to Gemma4Processor missing `_tokenizer`." + }, + { + "left": "issue:43526", + "right": "issue:43881", + "accept": false, + "reason": "BEiT fast processor label reduction bug and glm-4v-9b loading failure are unrelated." + }, + { + "left": "issue:44186", + "right": "issue:45072", + "accept": false, + "reason": "Tokenizer crash on NER/padding has nothing to do with dtype mismatches in inference." + }, + { + "left": "issue:43873", + "right": "issue:44756", + "accept": false, + "reason": "Both mention memory/offload concerns, but one is quantization offloading behavior and the other is mmap OOM on Strix Halo." + }, + { + "left": "issue:43577", + "right": "issue:43708", + "accept": false, + "reason": "Blip2 dtype not propagating and Trainer resume/max_steps miscalculation are unrelated." + }, + { + "left": "issue:42617", + "right": "issue:42915", + "accept": false, + "reason": "3d_parallel.py execution failure and Qwen3Moe FineGrainedFP8Config failure are different problems." 
+ }, + { + "left": "issue:44877", + "right": "issue:44991", + "accept": false, + "reason": "Strict config blocking granite_speech and tokenizer loading for est-roberta are unrelated." + }, + { + "left": "issue:44610", + "right": "issue:44898", + "accept": false, + "reason": "Processor/model resolution mismatch for OmDet-Turbo is not the same as Perceiver interpolation failure." + }, + { + "left": "issue:43550", + "right": "issue:43720", + "accept": false, + "reason": "torch.compile with SDPA on Bamba and BitNet packed-weight loading are separate issues." + }, + { + "left": "issue:44479", + "right": "issue:44918", + "accept": false, + "reason": "Video-input regression in several Qwen models and TRL SFT embedding unpacking failure are different code paths." + }, + { + "left": "issue:44186", + "right": "issue:44610", + "accept": false, + "reason": "Tokenizer NER/padding crash and OmDet processor size mismatch are unrelated." + }, + { + "left": "issue:36331", + "right": "issue:42915", + "accept": false, + "reason": "CustomTrainer.compute_loss signature mismatch is unrelated to Qwen3Moe FP8 loading failure." + }, + { + "left": "issue:36331", + "right": "issue:42617", + "accept": false, + "reason": "Trainer compute_loss API mismatch is unrelated to 3d_parallel.py runtime failure." + }, + { + "left": "issue:43316", + "right": "issue:44913", + "accept": false, + "reason": "Gemma3TextConfig API discrepancy and GPTNeoX rotary_pct reload regression are both config issues but not the same bug." + }, + { + "left": "issue:44671", + "right": "issue:44743", + "accept": false, + "reason": "CamemBERT masked-LM prediction regression and Qwen3.5 recurrent-state reset are unrelated model behaviors." + }, + { + "left": "issue:42915", + "right": "issue:43317", + "accept": false, + "reason": "Qwen3Moe FineGrainedFP8Config failure and dequantized model offload loading failure are different quantization/load paths." 
+ }, + { + "left": "issue:43550", + "right": "issue:43606", + "accept": false, + "reason": "Bamba torch.compile/SDPA failure and CPU-offload device mismatch are unrelated." + }, + { + "left": "issue:41720", + "right": "issue:42617", + "accept": false, + "reason": "Qwen3 auto device-map assert and 3d_parallel.py failure do not describe the same underlying bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 11, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:30064", + "issue:33357", + "issue:34567", + "issue:34634", + "issue:35707", + "issue:36010", + "issue:36331", + "issue:41720", + "issue:42617", + "issue:42886", + "issue:42915", + "issue:43322", + "issue:43388", + "issue:43421", + "issue:43531", + "issue:43540", + "issue:43550", + "issue:43572", + "issue:43606", + "issue:43653", + "issue:43720", + "issue:43746", + "issue:43782", + "issue:43873", + "issue:43927", + "issue:44062", + "issue:44186", + "issue:44292", + "issue:44361", + "issue:44368", + "issue:44451", + "issue:44479", + "issue:44492", + "issue:44509", + "issue:44514", + "issue:44545", + "issue:44625", + "issue:44843", + "issue:44898", + "issue:44991", + "issue:45059", + "issue:45072", + "issue:45161", + "issue:45357", + "issue:45381", + "issue:45406", + "issue:45464", + "issue:45479" + ], + "soft_pairs": [ + "issue:43531|issue:43572", + "issue:44292|issue:44479", + "issue:41720|issue:45161", + "issue:43540|issue:43782", + "issue:43388|issue:45059", + "issue:44186|issue:44361", + "issue:41720|issue:43873", + "issue:42915|issue:43322", + "issue:36010|issue:42617", + "issue:45406|issue:45464", + 
"issue:44514|issue:44545", + "issue:36331|issue:41720", + "issue:33357|issue:35707", + "issue:41720|issue:43746", + "issue:30064|issue:34634", + "issue:43606|issue:43720", + "issue:44492|issue:44509", + "issue:43550|issue:43653", + "issue:44368|issue:45357", + "issue:44625|issue:45479", + "issue:43421|issue:43927", + "issue:43606|issue:43653", + "issue:43782|issue:45381", + "issue:34567|issue:35707", + "issue:34634|issue:41720", + "issue:44898|issue:45072", + "issue:42886|issue:44843", + "issue:43421|issue:43653", + "issue:44451|issue:44991", + "issue:44062|issue:44451" + ], + "prepared_review_unit_hash": "41c71546ee2fec9f153479152e8c3fc77f69fad772f36512eb02bf3403515694", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36742, + "estimated_input_tokens": 9186, + "estimated_eval_tokens": 18628 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is mostly heterogeneous, but it contains a couple of clear duplicate pairs around tokenizer/offline loading. 
The strongest duplicate anchors are 44514\u219444545 and 42886\u219444843.", + "confidence": 0.63, + "canonical_issue_reason": "There isn\u2019t a single coherent cluster-wide bug here; if one issue must serve as the anchor, 44545 is the clearest canonical because it is an exact duplicate of 44514 and has slightly more activity/inbound references.", + "canonical_pr_reason": null, + "best_issue_reason": "44545 is the best issue anchor in this set: it is an exact title-match duplicate target for 44514 and the most clearly representative of a concrete bug among the near-duplicate pairs.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43531", + "right": "issue:43572", + "accept": false, + "reason": "Different bugs: Qwen3-MoE sliding-window behavior vs StableLmConfig pad_token_idx regression." + }, + { + "left": "issue:44292", + "right": "issue:44479", + "accept": false, + "reason": "Both are Qwen-related, but one is NVFP4 runtime failure and the other is a video-input regression for different models." + }, + { + "left": "issue:41720", + "right": "issue:45161", + "accept": false, + "reason": "Different model/runtime failures: cudaErrorAssert on Qwen3 auto device mapping vs TP-only issue for GPT-OSS MoE." + }, + { + "left": "issue:43540", + "right": "issue:43782", + "accept": false, + "reason": "Different code paths: Qwen3OmniMoe video processing vs Qwen3VL from_pretrained weight_only loading." + }, + { + "left": "issue:43388", + "right": "issue:45059", + "accept": false, + "reason": "Unrelated symptoms: metric gathering last-batch label truncation vs SAM3 text/bbox behavior." + }, + { + "left": "issue:44186", + "right": "issue:44361", + "accept": false, + "reason": "Different tokenizers and failure modes: LayoutLMv2 batching/NER crash vs MLuke task AttributeError." + }, + { + "left": "issue:41720", + "right": "issue:43873", + "accept": false, + "reason": "Both mention model execution/offloading, but the concrete failures are unrelated." 
+ }, + { + "left": "issue:42915", + "right": "issue:43322", + "accept": false, + "reason": "Different models and failure classes: FineGrainedFP8Config vs segmentation fault in Llava Next loading." + }, + { + "left": "issue:36010", + "right": "issue:42617", + "accept": false, + "reason": "Import error for GenerationMixin is unrelated to not being able to run 3d_parallel.py." + }, + { + "left": "issue:45406", + "right": "issue:45464", + "accept": false, + "reason": "Different features and failures: Gemma4Processor serve crash vs Qwen3.5 streaming chat/completions bug." + }, + { + "left": "issue:44514", + "right": "issue:44545", + "accept": true, + "reason": "Exact same bug title and same batched-input/padding=False crash path in Qwen2_5_VLProcessor.apply_chat_template." + }, + { + "left": "issue:36331", + "right": "issue:41720", + "accept": false, + "reason": "CustomTrainer.compute_loss signature breakage is unrelated to Qwen3 auto device mapping." + }, + { + "left": "issue:33357", + "right": "issue:35707", + "accept": false, + "reason": "MacOS bus error with CLIP model is unrelated to progressive generation with inputs_embeds/past_key_values." + }, + { + "left": "issue:41720", + "right": "issue:43746", + "accept": false, + "reason": "Different problems: Qwen3 device-mapping assert vs GraniteSpeech PEFT checkpoint loading." + }, + { + "left": "issue:30064", + "right": "issue:34634", + "accept": false, + "reason": "ImageProcessor segmentation-map handling and BarkProcessor voice_preset are unrelated." + }, + { + "left": "issue:43606", + "right": "issue:43720", + "accept": false, + "reason": "CPU offload device mismatch and BitNet packed-weight unpacking are different bugs." + }, + { + "left": "issue:44492", + "right": "issue:44509", + "accept": false, + "reason": "A typo in cache strategies is unrelated to stale docs mentioning removed pipeline tasks." 
+ }, + { + "left": "issue:43550", + "right": "issue:43653", + "accept": false, + "reason": "Bamba torch.compile/SDPA failure is unrelated to BigBirdTokenizer mask-token registration." + }, + { + "left": "issue:44368", + "right": "issue:45357", + "accept": false, + "reason": "A warning about tie_word_embeddings is not the same as incorrect visual-encoder keys on save_pretrained." + }, + { + "left": "issue:44625", + "right": "issue:45479", + "accept": false, + "reason": "num_labels propagation in Qwen3.5 is not the same as zero-loss behavior for num_labels=1 across sequence-classification models." + }, + { + "left": "issue:43421", + "right": "issue:43927", + "accept": false, + "reason": "Runtime special-token post-processing update and config save/load custom token IDs are different issues." + }, + { + "left": "issue:43606", + "right": "issue:43653", + "accept": false, + "reason": "Different tokenizer/model bugs: offload device mismatch vs missing BigBird mask special-token registration." + }, + { + "left": "issue:43782", + "right": "issue:45381", + "accept": false, + "reason": "Different Qwen vision bugs: weight_only load failure vs incorrect vision_position_ids for video input." + }, + { + "left": "issue:34567", + "right": "issue:35707", + "accept": false, + "reason": "TrainerState token-count tracking is unrelated to progressive generation." + }, + { + "left": "issue:34634", + "right": "issue:41720", + "accept": false, + "reason": "Bark voice_preset and Qwen3 auto device mapping are unrelated." + }, + { + "left": "issue:44898", + "right": "issue:45072", + "accept": false, + "reason": "Perceiver interpolation failure and dtype mismatch in SwitchTransformers/TimmWrapperModel are different failures." + }, + { + "left": "issue:42886", + "right": "issue:44843", + "accept": true, + "reason": "Same offline-loading bug path: unconditional model_info() / Mistral regex patch breaks HF_HUB_OFFLINE cache loading." 
+ }, + { + "left": "issue:43421", + "right": "issue:43653", + "accept": false, + "reason": "TokenizersBackend special-token refresh and BigBirdTokenizer mask-token registration are different bugs." + }, + { + "left": "issue:44451", + "right": "issue:44991", + "accept": false, + "reason": "Both are tokenizer load failures, but they involve different models and no shared concrete code path is evident." + }, + { + "left": "issue:44062", + "right": "issue:44451", + "accept": false, + "reason": "AddedToken keyword-argument conflict is unrelated to tokenizer loading failure for ScandiBERT." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "Grounded overall: the packet is heterogeneous, the two accepted pairs are the clear same-bug matches, and the rejected pairs are conservatively separated. Minor wording nit: 44514/44545 are near-identical rather than literally exact title matches, but the duplicate judgment is still well supported." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 12, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:30064", + "issue:36010", + "issue:39692", + "issue:40444", + "issue:43335", + "issue:43381", + "issue:43421", + "issue:43525", + "issue:43531", + "issue:43540", + "issue:43550", + "issue:43596", + "issue:43618", + "issue:43653", + "issue:43697", + "issue:43716", + "issue:43720", + "issue:43746", + "issue:43756", + "issue:43819", + "issue:43844", + "issue:43873", + "issue:43994", + "issue:44008", + "issue:44062", + "issue:44077", + "issue:44186", + "issue:44220", + "issue:44488", + "issue:44589", + "issue:44610", + "issue:44625", + "issue:44661", + "issue:44671", + 
"issue:44811", + "issue:44843", + "issue:44898", + "issue:44918", + "issue:44936", + "issue:44987", + "issue:45042", + "issue:45072", + "issue:45081", + "issue:45183", + "issue:45356", + "issue:45381", + "issue:45479" + ], + "soft_pairs": [ + "issue:43381|issue:43844", + "issue:44918|issue:44936", + "issue:44077|issue:44625", + "issue:44987|issue:45042", + "issue:43531|issue:44589", + "issue:43994|issue:44671", + "issue:39692|issue:44610", + "issue:43421|issue:44186", + "issue:45081|issue:45356", + "issue:43525|issue:43531", + "issue:43596|issue:44220", + "issue:43746|issue:45381", + "issue:45183|issue:45479", + "issue:39692|issue:43873", + "issue:43540|issue:44008", + "issue:43618|issue:43697", + "issue:40444|issue:43746", + "issue:43746|issue:43873", + "issue:44220|issue:44661", + "issue:43716|issue:44008", + "issue:43335|issue:43756", + "issue:43720|issue:43819", + "issue:43653|issue:43720", + "issue:43421|issue:44811", + "issue:30064|issue:36010", + "issue:44843|issue:45081", + "issue:44062|issue:44488", + "issue:44186|issue:44898", + "issue:43550|issue:45072", + "issue:43540|issue:45381" + ], + "prepared_review_unit_hash": "184acd7bec9d8df12b5679a0c0b94ee968f19599f440bfd79ebd2161d7475d9c", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 30, + "serialized_chars": 36407, + "estimated_input_tokens": 9102, + "estimated_eval_tokens": 18460 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is largely a heterogeneous mix of unrelated bug reports; there is no strong duplicate cluster. 
The closest shared theme is tokenizer/loading regressions, but even those are mostly distinct code paths.", + "confidence": 0.21, + "canonical_issue_reason": "Issue 45081 is the most concrete, code-path-specific bug in the set and sits near the only small tokenizer-regex subcluster.", + "canonical_pr_reason": null, + "best_issue_reason": "45081 is open, reproducible, and narrowly scoped, making it the best representative issue for triage despite the overall heterogeneity.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43381", + "right": "issue:43844", + "accept": false, + "reason": "Different failures: eval-mode gradient checkpointing vs DeepSpeed ZeRO-3 gradient explosion." + }, + { + "left": "issue:44918", + "right": "issue:44936", + "accept": false, + "reason": "Both trainer-related, but one is an SFT input-embedding unpacking bug and the other is an evaluate-after-train failure." + }, + { + "left": "issue:44077", + "right": "issue:44625", + "accept": false, + "reason": "Unrelated models and code paths: patchtsmixer post_init policy vs Qwen3.5 num_labels propagation." + }, + { + "left": "issue:44987", + "right": "issue:45042", + "accept": false, + "reason": "A model-loading regression vs a PIL image processor torchvision dependency issue." + }, + { + "left": "issue:43531", + "right": "issue:44589", + "accept": false, + "reason": "Sliding-window logic in Qwen3-MoE is unrelated to a Float8 storage lookup error." + }, + { + "left": "issue:43994", + "right": "issue:44671", + "accept": false, + "reason": "Different models, different symptoms, and different fixes: SigLIP2 inference mismatch vs CamemBERT masked LM regression." + }, + { + "left": "issue:39692", + "right": "issue:44610", + "accept": false, + "reason": "Documentation/example errors do not match the OmDet-Turbo processor/model resolution mismatch." 
+ }, + { + "left": "issue:43421", + "right": "issue:44186", + "accept": false, + "reason": "TokenizersBackend post-processor updates and LayoutLMv2 NER padding crashes are separate tokenizer bugs." + }, + { + "left": "issue:45081", + "right": "issue:45356", + "accept": false, + "reason": "Both touch Mistral regex handling, but one crashes on backend_tokenizer access while the other reports codec handling and warning regressions." + }, + { + "left": "issue:43525", + "right": "issue:43531", + "accept": false, + "reason": "Llama4Config pad_token_id absence is unrelated to Qwen3-MoE sliding_window behavior." + }, + { + "left": "issue:43596", + "right": "issue:44220", + "accept": false, + "reason": "BertModel ZeRO-3 init index error and audio fbank feature extraction are unrelated failures." + }, + { + "left": "issue:43746", + "right": "issue:45381", + "accept": false, + "reason": "GraniteSpeech PEFT checkpoint loading and Qwen2.5-VL video vision_position_ids are different model-specific bugs." + }, + { + "left": "issue:45183", + "right": "issue:45479", + "accept": false, + "reason": "General input validation messaging bug vs a single-label classification zero-loss issue." + }, + { + "left": "issue:39692", + "right": "issue:43873", + "accept": false, + "reason": "A SigLIP2 docs/example problem is not the same as quantization offloading not working." + }, + { + "left": "issue:43540", + "right": "issue:44008", + "accept": false, + "reason": "Different multimodal models and causes: Qwen3OmniMoe video processing vs Gemma 3n variable-name collision." + }, + { + "left": "issue:43618", + "right": "issue:43697", + "accept": false, + "reason": "CLIPOutput attentions regression and RTDetrV2 output drift are not the same code path." + }, + { + "left": "issue:40444", + "right": "issue:43746", + "accept": false, + "reason": "Qwen2.5-VL iterable-dataset finetuning failure is unrelated to GraniteSpeech PEFT checkpoint loading." 
+ }, + { + "left": "issue:43746", + "right": "issue:43873", + "accept": false, + "reason": "Checkpoint loading with PEFT adapters is unrelated to quantization offloading behavior." + }, + { + "left": "issue:44220", + "right": "issue:44661", + "accept": false, + "reason": "Audio fbank feature extraction and tokenizer mapping registration are distinct subsystems." + }, + { + "left": "issue:43716", + "right": "issue:44008", + "accept": false, + "reason": "Mistral image-preprocessor dtype mismatch is unrelated to Gemma 3n forward variable collision." + }, + { + "left": "issue:43335", + "right": "issue:43756", + "accept": false, + "reason": "SwitchTransformers sparse-layer creation and Smollm3 RoPE-layer dropping are different configuration bugs." + }, + { + "left": "issue:43720", + "right": "issue:43819", + "accept": false, + "reason": "BitNet packed-weight loading and DAC.from_latents/STE mismatch are unrelated." + }, + { + "left": "issue:43653", + "right": "issue:43720", + "accept": false, + "reason": "BigBirdTokenizer special-token registration and BitNet weight unpacking do not share a code path." + }, + { + "left": "issue:43421", + "right": "issue:44811", + "accept": false, + "reason": "Post-processor mutation at runtime is not the same as Whisper batch_decode ignoring skip_special_tokens." + }, + { + "left": "issue:30064", + "right": "issue:36010", + "accept": false, + "reason": "Void segmentation map processing and a GenerationMixin import error are unrelated." + }, + { + "left": "issue:44843", + "right": "issue:45081", + "accept": false, + "reason": "Both involve _patch_mistral_regex, but one breaks offline mode via model_info() and the other crashes on backend_tokenizer access; distinct bugs." + }, + { + "left": "issue:44062", + "right": "issue:44488", + "accept": false, + "reason": "AddedToken special-argument duplication is unrelated to loading cjvt/sleng-bert." 
+ }, + { + "left": "issue:44186", + "right": "issue:44898", + "accept": false, + "reason": "LayoutLMv2 tokenizer crashes and Perceiver non-default-resolution failures are different issues." + }, + { + "left": "issue:43550", + "right": "issue:45072", + "accept": false, + "reason": "torch.compile+SDPA failure in Bamba is unrelated to bfloat16 dtype mismatches in SwitchTransformers/TimmWrapperModel." + }, + { + "left": "issue:43540", + "right": "issue:45381", + "accept": false, + "reason": "Qwen3OmniMoe video-input processing and Qwen2.5-VL vision_position_ids are separate multimodal bugs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 13, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:30064", + "issue:34634", + "issue:36010", + "issue:36331", + "issue:41720", + "issue:42175", + "issue:43262", + "issue:43531", + "issue:43550", + "issue:43577", + "issue:43582", + "issue:43606", + "issue:43643", + "issue:43644", + "issue:43650", + "issue:43653", + "issue:43720", + "issue:43749", + "issue:43782", + "issue:43818", + "issue:43819", + "issue:43825", + "issue:43827", + "issue:43906", + "issue:43927", + "issue:43994", + "issue:44060", + "issue:44186", + "issue:44368", + "issue:44442", + "issue:44448", + "issue:44485", + "issue:44509", + "issue:44561", + "issue:44610", + "issue:44671", + "issue:44805", + "issue:44821", + "issue:44877", + "issue:44936", + "issue:44977", + "issue:44991", + "issue:45020", + "issue:45072", + "issue:45081", + "issue:45084", + "issue:45325", + "issue:45406" + ], + "soft_pairs": [ + "issue:43577|issue:43818", + "issue:43643|issue:44877", + 
"issue:43550|issue:43927", + "issue:43782|issue:44977", + "issue:43531|issue:43643", + "issue:43577|issue:43582", + "issue:44448|issue:44671", + "issue:43644|issue:43749", + "issue:43653|issue:43819", + "issue:44561|issue:45020", + "issue:43825|issue:43994", + "issue:43550|issue:43819", + "issue:43825|issue:43827", + "issue:44186|issue:44442", + "issue:44821|issue:45084", + "issue:43650|issue:43906", + "issue:41720|issue:44991", + "issue:44805|issue:44936", + "issue:44485|issue:44509", + "issue:30064|issue:36331", + "issue:43606|issue:45072", + "issue:42175|issue:45081", + "issue:43782|issue:45325", + "issue:44610|issue:45072", + "issue:34634|issue:36010", + "issue:44060|issue:44368", + "issue:45081|issue:45406", + "issue:43720|issue:45072", + "issue:43262|issue:43819", + "issue:43653|issue:44671" + ], + "prepared_review_unit_hash": "da508ecdaf7b4fbc1a498091bf421465dedd5a03f4e6bb5b5374fc74678e4552", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36862, + "estimated_input_tokens": 9216, + "estimated_eval_tokens": 18688 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The items are a heterogeneous mix of unrelated bugs and regressions; the soft-similarity links mostly connect broad theme matches (v5 regressions, tokenizer/config loading, model-specific inference issues) rather than the same concrete underlying defect. No soft edge should be merged.", + "confidence": 0.93, + "canonical_issue_reason": "No single canonical duplicate exists because the cluster is not a true duplicate set. 
If one representative issue is needed, issue:45020 is the broadest umbrella for the remote_code/v5 regression theme.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45020 is the best umbrella-style issue in this set because it groups the broad 'recent transformers versions break models using remote_code' regression theme more generally than the other, more specific reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43577", + "right": "issue:43818", + "accept": false, + "reason": "Different bugs: BLIP2 dtype handling vs Video-LLaVA missing temporal attention/shared weights." + }, + { + "left": "issue:43643", + "right": "issue:44877", + "accept": false, + "reason": "Both involve config loading, but the failures are different: missing fields with trust_remote_code vs strict config blocking granite_speech." + }, + { + "left": "issue:43550", + "right": "issue:43927", + "accept": false, + "reason": "Unrelated code paths: torch.compile/SDPA failure in Bamba vs DiaConfig token-ID persistence bug." + }, + { + "left": "issue:43782", + "right": "issue:44977", + "accept": false, + "reason": "Different Qwen models and different failure modes: weight_only load error vs flash-attention generation issue." + }, + { + "left": "issue:43531", + "right": "issue:43643", + "accept": false, + "reason": "Sliding-window logic in Qwen3-MoE is unrelated to trust_remote_code config deserialization." + }, + { + "left": "issue:43577", + "right": "issue:43582", + "accept": false, + "reason": "BLIP2 dtype mismatch and Apple Silicon allocator warmup TypeError are unrelated." + }, + { + "left": "issue:44448", + "right": "issue:44671", + "accept": false, + "reason": "Both are v5 behavior regressions, but they affect different models and different failure surfaces (Pegasus generation output vs CamemBERT masked LM predictions)." 
+ }, + { + "left": "issue:43644", + "right": "issue:43749", + "accept": false, + "reason": "Non-persistent buffer initialization bug is not the same as broken FSDP CPU RAM efficient loading." + }, + { + "left": "issue:43653", + "right": "issue:43819", + "accept": false, + "reason": "Tokenizer special-token registration bug vs DAC latent reconstruction/STE mismatch." + }, + { + "left": "issue:44561", + "right": "issue:45020", + "accept": false, + "reason": "44561 is a specific missing-symbol regression; 45020 is a broad umbrella issue about remote_code breakage, not the same concrete bug." + }, + { + "left": "issue:43825", + "right": "issue:43994", + "accept": false, + "reason": "Pipeline error-message wording is unrelated to SigLIP2 producing bad outputs." + }, + { + "left": "issue:43550", + "right": "issue:43819", + "accept": false, + "reason": "Different models and failures: Bamba compile/SDPA vs DAC latent conversion mismatch." + }, + { + "left": "issue:43825", + "right": "issue:43827", + "accept": false, + "reason": "Related to v5 pipeline removals, but one is an error-message bug and the other is docs lag; not the same defect." + }, + { + "left": "issue:44186", + "right": "issue:44442", + "accept": false, + "reason": "LayoutLMv2 NER/padding crash and FastSpeech2Conformer tokenizer load failure are distinct tokenizer issues." + }, + { + "left": "issue:44821", + "right": "issue:45084", + "accept": false, + "reason": "AutoImageProcessor URL loading and template-node compile errors are unrelated." + }, + { + "left": "issue:43650", + "right": "issue:43906", + "accept": false, + "reason": "Placeholder-style title vs an isolated reproduction of another issue; no shared underlying bug is evident." + }, + { + "left": "issue:41720", + "right": "issue:44991", + "accept": false, + "reason": "CUDA assert with Qwen3 device mapping is unrelated to tokenizer loading failure for est-roberta." 
+ }, + { + "left": "issue:44805", + "right": "issue:44936", + "accept": false, + "reason": "Mask-shape IndexError and trainer.evaluate() failing after train() are different problems." + }, + { + "left": "issue:44485", + "right": "issue:44509", + "accept": false, + "reason": "GLM-5 RoPE implementation discussion is unrelated to docs still referencing removed pipeline tasks." + }, + { + "left": "issue:30064", + "right": "issue:36331", + "accept": false, + "reason": "ImageProcessor segmentation-map handling and CustomTrainer compute_loss signature break are unrelated." + }, + { + "left": "issue:43606", + "right": "issue:45072", + "accept": false, + "reason": "CPU offload device mismatch in Bark-small is not the same as bfloat16 dtype mismatches in SwitchTransformers/TimmWrapperModel." + }, + { + "left": "issue:42175", + "right": "issue:45081", + "accept": false, + "reason": "Packaging backend omission and Mistral regex patch crash are unrelated." + }, + { + "left": "issue:43782", + "right": "issue:45325", + "accept": false, + "reason": "Different Qwen2.5-VL rope-index bug and Qwen3VL weight_only load error are not the same issue." + }, + { + "left": "issue:44610", + "right": "issue:45072", + "accept": false, + "reason": "OmDet-Turbo input-size mismatch is unrelated to bfloat16 dtype mismatch bugs." + }, + { + "left": "issue:34634", + "right": "issue:36010", + "accept": false, + "reason": "BarkProcessor voice_preset and GenerationMixin import failure are different breakages." + }, + { + "left": "issue:44060", + "right": "issue:44368", + "accept": false, + "reason": "Both mention tied-weights warnings in Qwen-family models, but they describe different configs and symptoms; not the same concrete bug." + }, + { + "left": "issue:45081", + "right": "issue:45406", + "accept": false, + "reason": "Mistral tokenizer regex patch crash and Gemma4Processor missing _tokenizer are different processor/tokenizer failures." 
+ }, + { + "left": "issue:43720", + "right": "issue:45072", + "accept": false, + "reason": "BitNet packed-weight unpacking during accelerate loading is unrelated to dtype mismatch inference errors." + }, + { + "left": "issue:43262", + "right": "issue:43819", + "accept": false, + "reason": "Audio processor chat-template sampling-rate default bug and DAC forward/from_latents mismatch are unrelated." + }, + { + "left": "issue:43653", + "right": "issue:44671", + "accept": false, + "reason": "BigBirdTokenizer special-token registration bug and CamemBERT masked-LM regression are different issues." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 14, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:34634", + "issue:36010", + "issue:36331", + "issue:38175", + "issue:39692", + "issue:43257", + "issue:43262", + "issue:43329", + "issue:43452", + "issue:43454", + "issue:43526", + "issue:43531", + "issue:43550", + "issue:43577", + "issue:43582", + "issue:43638", + "issue:43645", + "issue:43653", + "issue:43696", + "issue:43697", + "issue:43720", + "issue:43746", + "issue:43749", + "issue:43756", + "issue:43819", + "issue:43854", + "issue:43927", + "issue:43931", + "issue:44291", + "issue:44479", + "issue:44554", + "issue:44568", + "issue:44589", + "issue:44610", + "issue:44756", + "issue:44811", + "issue:44857", + "issue:44912", + "issue:44936", + "issue:44977", + "issue:44991", + "issue:45042", + "issue:45084", + "issue:45356", + "issue:45488", + "issue:45538", + "issue:45600" + ], + "soft_pairs": [ + "issue:43749|issue:44291", + "issue:43582|issue:44857", + "issue:43526|issue:43697", + 
"issue:43526|issue:44554", + "issue:45084|issue:45538", + "issue:43329|issue:45600", + "issue:43653|issue:44912", + "issue:43454|issue:43577", + "issue:44568|issue:45488", + "issue:34634|issue:36331", + "issue:43638|issue:44936", + "issue:43452|issue:44991", + "issue:43257|issue:43329", + "issue:43746|issue:44811", + "issue:38175|issue:44610", + "issue:43854|issue:44977", + "issue:44589|issue:45042", + "issue:43257|issue:43696", + "issue:39692|issue:44912", + "issue:44568|issue:45356", + "issue:36010|issue:43645", + "issue:43819|issue:43927", + "issue:43720|issue:44610", + "issue:43262|issue:44811", + "issue:43550|issue:44912", + "issue:43749|issue:44756", + "issue:43531|issue:45042", + "issue:43329|issue:44479", + "issue:43257|issue:43931", + "issue:36010|issue:36331", + "issue:43257|issue:43756" + ], + "prepared_review_unit_hash": "8b1ee8302727314f0200bfe372a68a8ecc6a177c3ad3327bc633eadf138afa46", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 31, + "serialized_chars": 36681, + "estimated_input_tokens": 9171, + "estimated_eval_tokens": 18598 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a mix of unrelated Transformers issues, mostly centered on model loading, tokenizers, processors, and training regressions. None of the proposed soft pairs look like the same underlying bug or change, so all soft edges are rejected. 
The most plausible umbrella issue is the FSDP loading regression because it is broad and well-discussed.", + "confidence": 0.61, + "canonical_issue_reason": "issue:43749 is the broadest, most central loading-regression report in the set and has the strongest discussion/inbound signal.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43749 is a good representative issue for the cluster because it is broad, high-impact, and closest to an umbrella regression rather than a narrow model-specific bug.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43749", + "right": "issue:44291", + "accept": false, + "reason": "Both are loading-related, but one is FSDP CPU RAM efficient loading and the other is init_empty_weights/_is_hf_initialized argument handling; different code paths and failure modes." + }, + { + "left": "issue:43582", + "right": "issue:44857", + "accept": false, + "reason": "AppleSilicon caching_allocator_warmup TypeError is unrelated to LwDetrImageLoss crashing under float16 AMP/CUDA." + }, + { + "left": "issue:43526", + "right": "issue:43697", + "accept": false, + "reason": "BEiT Fast image processor label reduction is unrelated to RTDetrV2 output drift in v5." + }, + { + "left": "issue:43526", + "right": "issue:44554", + "accept": false, + "reason": "Image label reduction bug and MPS attention correctness issue are different subsystems and different symptoms." + }, + { + "left": "issue:45084", + "right": "issue:45538", + "accept": false, + "reason": "Template-node compilation error is unrelated to CLIPTokenizer model_max_length behavior." + }, + { + "left": "issue:43329", + "right": "issue:45600", + "accept": false, + "reason": "Undefined video-token helper bug is unrelated to stale auto_mappings references to removed configs." + }, + { + "left": "issue:43653", + "right": "issue:44912", + "accept": false, + "reason": "BigBird special-token decode bug is unrelated to MXFP4 quantization/load fallback in git-oss-20b." 
+ }, + { + "left": "issue:43454", + "right": "issue:43577", + "accept": false, + "reason": "Both involve model loading, but AyaVision lm_head tying and BLIP2 dtype propagation are separate model-specific bugs." + }, + { + "left": "issue:34634", + "right": "issue:36331", + "accept": false, + "reason": "BarkProcessor voice_preset failure is unrelated to CustomTrainer.compute_loss signature changes." + }, + { + "left": "issue:43638", + "right": "issue:44936", + "accept": false, + "reason": "DeepSpeed ZeRO-3 index error on a non-pretrained Bert model is not the same issue as trainer.evaluate() failing after training." + }, + { + "left": "issue:43452", + "right": "issue:44991", + "accept": false, + "reason": "GGUF loading breakage for auto classes and tokenizer loading regression for est-roberta are different failures." + }, + { + "left": "issue:43257", + "right": "issue:43329", + "accept": false, + "reason": "Qwen3 MoE weight conversion under accelerate+deepspeed is unrelated to multimodal token counting video-branch undefined names." + }, + { + "left": "issue:43746", + "right": "issue:44811", + "accept": false, + "reason": "PEFT local checkpoint loading and Whisper batch_decode skip_special_tokens are unrelated bugs." + }, + { + "left": "issue:38175", + "right": "issue:44610", + "accept": false, + "reason": "Both are vision-related, but zero probabilities in SigLIP2 and processor/model resolution mismatch in OmDet-Turbo are not the same underlying defect." + }, + { + "left": "issue:43854", + "right": "issue:44977", + "accept": false, + "reason": "GLM-4.7-Flash loading test failure is unrelated to Qwen3.5 flash-attention generation regression." + }, + { + "left": "issue:44589", + "right": "issue:45042", + "accept": false, + "reason": "Float8 storage lookup failure is unrelated to PIL image processors incorrectly requiring torchvision." 
+ }, + { + "left": "issue:43257", + "right": "issue:43696", + "accept": false, + "reason": "Qwen3 MoE weight conversion bug is unrelated to GPT-oss-20b CUDA OOM." + }, + { + "left": "issue:39692", + "right": "issue:44912", + "accept": false, + "reason": "SigLIP2 doc example errors/quantization failure are not the same as git-oss-20b MXFP4 loading fallback." + }, + { + "left": "issue:44568", + "right": "issue:45356", + "accept": false, + "reason": "Both are tokenizer regressions, but BOS/EOS addition and Kimi-K2.5 codec/warning handling are different concrete bugs." + }, + { + "left": "issue:36010", + "right": "issue:43645", + "accept": false, + "reason": "GenerationMixin import failure is unrelated to Jupyter custom-model initialization breakage." + }, + { + "left": "issue:43819", + "right": "issue:43927", + "accept": false, + "reason": "DAC latent reconstruction mismatch and DiaConfig token-ID persistence are different model families and unrelated bugs." + }, + { + "left": "issue:43720", + "right": "issue:44610", + "accept": false, + "reason": "Packed-weight unpacking during accelerate loading is unrelated to OmDet-Turbo processor resolution mismatch." + }, + { + "left": "issue:43262", + "right": "issue:44811", + "accept": false, + "reason": "Audio chat-template sampling-rate defaulting and Whisper batch_decode special-token handling are separate processor bugs." + }, + { + "left": "issue:43550", + "right": "issue:44912", + "accept": false, + "reason": "Bamba torch.compile/SDPA failure is unrelated to git-oss-20b quantization load fallback." + }, + { + "left": "issue:43749", + "right": "issue:44756", + "accept": false, + "reason": "FSDP CPU RAM efficient loading regression and mmap disablement on Strix Halo are different memory/loading issues." + }, + { + "left": "issue:43531", + "right": "issue:45042", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior is unrelated to PIL backend torchvision requirements." 
+ }, + { + "left": "issue:43329", + "right": "issue:44479", + "accept": false, + "reason": "Both touch video/multimodal paths, but one is undefined helper usage in token counting and the other is a broader v5.3.0 video-input regression; not the same concrete bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 15, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:28282", + "issue:33357", + "issue:34567", + "issue:34689", + "issue:36010", + "issue:38175", + "issue:40990", + "issue:41720", + "issue:42175", + "issue:42915", + "issue:43317", + "issue:43329", + "issue:43450", + "issue:43452", + "issue:43526", + "issue:43540", + "issue:43577", + "issue:43582", + "issue:43646", + "issue:43697", + "issue:43742", + "issue:43749", + "issue:43782", + "issue:43824", + "issue:43906", + "issue:43927", + "issue:44062", + "issue:44291", + "issue:44479", + "issue:44488", + "issue:44560", + "issue:44737", + "issue:44821", + "issue:44898", + "issue:44912", + "issue:44938", + "issue:44987", + "issue:44991", + "issue:45042", + "issue:45072", + "issue:45137", + "issue:45357", + "issue:45381", + "issue:45405", + "issue:45479", + "issue:45538", + "issue:45588" + ], + "soft_pairs": [ + "issue:34567|issue:40990", + "issue:43329|issue:43450", + "issue:43697|issue:45357", + "issue:43577|issue:45072", + "issue:41720|issue:43540", + "issue:44479|issue:45381", + "issue:43452|issue:43582", + "issue:44987|issue:45538", + "issue:42175|issue:44938", + "issue:43317|issue:45538", + "issue:43927|issue:44912", + "issue:44291|issue:45042", + "issue:33357|issue:34567", + "issue:43906|issue:45381", + 
"issue:43540|issue:44560", + "issue:43540|issue:44479", + "issue:42915|issue:43742", + "issue:42915|issue:45538", + "issue:42175|issue:43824", + "issue:42915|issue:44821", + "issue:43526|issue:45479", + "issue:44062|issue:44991", + "issue:44737|issue:45588", + "issue:42175|issue:44488", + "issue:45137|issue:45405", + "issue:38175|issue:44898", + "issue:43646|issue:43749", + "issue:43582|issue:43782", + "issue:28282|issue:34689", + "issue:36010|issue:42175" + ], + "prepared_review_unit_hash": "bf7ca8419556ccd38580200633b6e0a2b73bd09c7b1627c155c9fd2d1a2b70ff", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 30, + "serialized_chars": 36213, + "estimated_input_tokens": 9054, + "estimated_eval_tokens": 18364 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous: many unrelated model- or backend-specific issues, with one plausible duplicate-style link around Qwen video-input regressions. No PRs are present.", + "confidence": 0.41, + "canonical_issue_reason": "issue:44479 is the broadest and most central report here: it describes a v5.3.0 video-input regression across multiple Qwen VL variants, and it likely subsumes the narrower Qwen2.5-VL symptom in issue:45381.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44479 is the best representative issue because it states the regression clearly, spans multiple affected models, and matches the only plausible soft duplicate in the set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:34567", + "right": "issue:40990", + "accept": false, + "reason": "Different problems: token-count tracking vs perplexity quality on a specific model." 
+ }, + { + "left": "issue:43329", + "right": "issue:43450", + "accept": false, + "reason": "Both involve video paths, but one is a missing helper/undefined variable bug and the other is a batched-shape processor issue; not the same code-path bug." + }, + { + "left": "issue:43697", + "right": "issue:45357", + "accept": false, + "reason": "Different model families and failure modes: RTDetrV2 output drift vs Qwen3.5 save_pretrained key regression." + }, + { + "left": "issue:43577", + "right": "issue:45072", + "accept": false, + "reason": "Both mention dtype, but they affect different components and symptoms; not clearly one underlying bug." + }, + { + "left": "issue:41720", + "right": "issue:43540", + "accept": false, + "reason": "Qwen3 auto device-map CUDA assert is unrelated to Qwen3OmniMoe video-input ValueError." + }, + { + "left": "issue:44479", + "right": "issue:45381", + "accept": true, + "reason": "Both point to the same Qwen2.5-VL video-input regression; 45381 looks like a narrower symptom of the broader v5.3.0 issue in 44479." + }, + { + "left": "issue:43452", + "right": "issue:43582", + "accept": false, + "reason": "Unrelated topics: gguf_file loading path vs Apple Silicon allocator warmup TypeError." + }, + { + "left": "issue:44987", + "right": "issue:45538", + "accept": false, + "reason": "Loading a specific model regression is unrelated to CLIPTokenizer's model_max_length behavior." + }, + { + "left": "issue:42175", + "right": "issue:44938", + "accept": false, + "reason": "Missing TensorFlow in a torch extra is a packaging/backend issue, not the same as Python 3.14 load failure." + }, + { + "left": "issue:43317", + "right": "issue:45538", + "accept": false, + "reason": "Device-map/offload failure is unrelated to tokenizer max-length configuration." + }, + { + "left": "issue:43927", + "right": "issue:44912", + "accept": false, + "reason": "Config save/load token-id loss is unrelated to MXFP4 quantization fallback." 
+ }, + { + "left": "issue:44291", + "right": "issue:45042", + "accept": false, + "reason": "Different loading/initialization regressions in different subsystems." + }, + { + "left": "issue:33357", + "right": "issue:34567", + "accept": false, + "reason": "MacOS bus error on CLIP model loading is unrelated to TrainerState token counting." + }, + { + "left": "issue:43906", + "right": "issue:45381", + "accept": false, + "reason": "An isolated reproduction of a prior issue is not the same as the Qwen2.5-VL video-position-id bug." + }, + { + "left": "issue:43540", + "right": "issue:44560", + "accept": false, + "reason": "Both are video-related, but they hit different Qwen models and different error mechanisms." + }, + { + "left": "issue:43540", + "right": "issue:44479", + "accept": false, + "reason": "Same broad video area, but the failures differ: Qwen3OmniMoe video processing vs a multi-model Qwen VL regression." + }, + { + "left": "issue:42915", + "right": "issue:43742", + "accept": false, + "reason": "FineGrainedFP8Config failure is unrelated to a key error loading MobileLLM-125M." + }, + { + "left": "issue:42915", + "right": "issue:45538", + "accept": false, + "reason": "Unrelated model quantization/config failure vs tokenizer max-length behavior." + }, + { + "left": "issue:42175", + "right": "issue:43824", + "accept": false, + "reason": "Packaging/backend inclusion issue is unrelated to missing Qwen2_5_VL export." + }, + { + "left": "issue:42915", + "right": "issue:44821", + "accept": false, + "reason": "FP8 config failure is unrelated to AutoImageProcessor-from-URL loading." + }, + { + "left": "issue:43526", + "right": "issue:45479", + "accept": false, + "reason": "Different bug classes: image-processor label reduction vs sequence-classification loss degeneracy." + }, + { + "left": "issue:44062", + "right": "issue:44991", + "accept": false, + "reason": "Tokenizer constructor argument conflict is not the same as a specific tokenizer load regression." 
+ }, + { + "left": "issue:44737", + "right": "issue:45588", + "accept": false, + "reason": "XLNet CPU device placement bug is unrelated to flash_attention.py crashing on sink-less models." + }, + { + "left": "issue:42175", + "right": "issue:44488", + "accept": false, + "reason": "Both are load-related at a high level, but the titles indicate different root causes." + }, + { + "left": "issue:45137", + "right": "issue:45405", + "accept": false, + "reason": "DeepSpeed ZeRO3 deque error is unrelated to a PEFT version bump." + }, + { + "left": "issue:38175", + "right": "issue:44898", + "accept": false, + "reason": "Different image-model correctness issues with no clear shared root cause." + }, + { + "left": "issue:43646", + "right": "issue:43749", + "accept": false, + "reason": "Custom model init breakage in v5 is unrelated to FSDP CPU RAM efficient loading." + }, + { + "left": "issue:43582", + "right": "issue:43782", + "accept": false, + "reason": "Apple Silicon allocator TypeError is unrelated to Qwen3VL weight_only loading failure." + }, + { + "left": "issue:28282", + "right": "issue:34689", + "accept": false, + "reason": "Generic PyTorch ImportError is unrelated to Llama 3.2 Vision model loading regression." + }, + { + "left": "issue:36010", + "right": "issue:42175", + "accept": false, + "reason": "Missing GenerationMixin export and missing TensorFlow backend are separate problems." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays conservative overall. The one accepted soft edge (44479/45381) is well supported by the titles and scope, and the rest of the rejections avoid overclaiming. No PR-pair issue applies here since there are no PRs present." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 16, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:28282", + "issue:29127", + "issue:34567", + "issue:36331", + "issue:41762", + "issue:43452", + "issue:43493", + "issue:43531", + "issue:43577", + "issue:43643", + "issue:43653", + "issue:43688", + "issue:43697", + "issue:43720", + "issue:43742", + "issue:43749", + "issue:43756", + "issue:43819", + "issue:43994", + "issue:44062", + "issue:44291", + "issue:44484", + "issue:44488", + "issue:44514", + "issue:44561", + "issue:44568", + "issue:44589", + "issue:44625", + "issue:44704", + "issue:44792", + "issue:44811", + "issue:44821", + "issue:44843", + "issue:44877", + "issue:44898", + "issue:44936", + "issue:44964", + "issue:45042", + "issue:45081", + "issue:45216", + "issue:45290", + "issue:45325", + "issue:45375", + "issue:45381", + "issue:45405", + "issue:45440", + "issue:45600" + ], + "soft_pairs": [ + "issue:43643|issue:45375", + "issue:43756|issue:45325", + "issue:43742|issue:44291", + "issue:43493|issue:43994", + "issue:44484|issue:44704", + "issue:28282|issue:29127", + "issue:34567|issue:36331", + "issue:44062|issue:45081", + "issue:44291|issue:44821", + "issue:43452|issue:44488", + "issue:44792|issue:44964", + "issue:44514|issue:45290", + "issue:45405|issue:45600", + "issue:43643|issue:44561", + "issue:43688|issue:43749", + "issue:44514|issue:45381", + "issue:43697|issue:45216", + "issue:43452|issue:43577", + "issue:43742|issue:44589", + "issue:45042|issue:45081", + "issue:43531|issue:44821", + "issue:43688|issue:45440", + "issue:43452|issue:44843", + "issue:44625|issue:44877", + "issue:43653|issue:44568", + 
"issue:43720|issue:44898", + "issue:43819|issue:44811", + "issue:41762|issue:44936" + ], + "prepared_review_unit_hash": "c7cb16ae802ef39d553e72c2e43faf5a5abf4d3b16ca03a3b4bcb15780bfdf61", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 28, + "serialized_chars": 35851, + "estimated_input_tokens": 8963, + "estimated_eval_tokens": 18182 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These candidate pairs mostly share only broad subsystem overlap (loading, tokenizers, configs, multimodal models) and do not appear to be the same concrete bug/change. I would not merge any of the soft edges as duplicates.", + "confidence": 0.89, + "canonical_issue_reason": "Issue 44561 is the strongest representative to anchor a cluster: it describes a broad, regression-style breakage affecting trust_remote_code models and has the clearest cross-cutting impact. That said, the overall set is heterogeneous, so there is no true duplicate hub here.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44561 is the best global issue candidate because it is the most broadly impactful and clearly framed regression among the set; the rest are narrower, model-specific, or unrelated.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43643", + "right": "issue:45375", + "accept": false, + "reason": "Both involve config fields and trust_remote_code, but one is a generic AutoConfig missing-fields report while the other is a specific Qwen3.5MoeVisionConfig strict-field omission; different code paths." 
+ }, + { + "left": "issue:43756", + "right": "issue:45325", + "accept": false, + "reason": "Both mention RoPE, but they describe different models and different bugs: SmolLM3 layer dropping vs Qwen2.5-VL temporal position scaling." + }, + { + "left": "issue:43742", + "right": "issue:44291", + "accept": false, + "reason": "Both are model loading failures, but the symptoms and root causes differ: a KeyError on MobileLLM loading versus an unexpected _is_hf_initialized argument with init_empty_weights." + }, + { + "left": "issue:43493", + "right": "issue:43994", + "accept": false, + "reason": "Both are about SigLIP2, but one reports HF/JAX implementation divergence and the other reports nonsensical AutoModel/pipeline outputs; not enough evidence of the same bug." + }, + { + "left": "issue:44484", + "right": "issue:44704", + "accept": false, + "reason": "Unrelated API issues: save_pretrained max_shard_size behavior versus AutoProcessor not forwarding kwargs to cached_file." + }, + { + "left": "issue:28282", + "right": "issue:29127", + "accept": false, + "reason": "These are entirely different errors: missing PyTorch for AutoModel versus a LayoutLMv3 box-information error message." + }, + { + "left": "issue:34567", + "right": "issue:36331", + "accept": false, + "reason": "Different trainer issues: num_input_tokens_seen not updating versus compute_loss receiving an unexpected num_items_in_batch argument." + }, + { + "left": "issue:44062", + "right": "issue:45081", + "accept": false, + "reason": "Both involve tokenizers, but one is an AddedToken constructor conflict and the other is a backend_tokenizer AttributeError in _patch_mistral_regex." + }, + { + "left": "issue:44291", + "right": "issue:44821", + "accept": false, + "reason": "Different loading paths and failures: init_empty_weights/_is_hf_initialized versus loading an AutoImageProcessor from URL." 
+ }, + { + "left": "issue:43452", + "right": "issue:44488", + "accept": false, + "reason": "One is about gguf_file breaking AutoTokenizer/AutoModelForCausalLM; the other is a specific model that no longer loads. Too vague to call the same bug." + }, + { + "left": "issue:44792", + "right": "issue:44964", + "accept": false, + "reason": "Both are multimodal/model-loading problems, but they concern different models and likely different code paths." + }, + { + "left": "issue:44514", + "right": "issue:45290", + "accept": false, + "reason": "Both hit apply_chat_template, but one is a batched padding=False crash and the other is tool-call assistant messages with no content." + }, + { + "left": "issue:45405", + "right": "issue:45600", + "accept": false, + "reason": "Completely different concerns: unreleased PEFT version bump versus stale auto_mappings references breaking CI." + }, + { + "left": "issue:43643", + "right": "issue:44561", + "accept": false, + "reason": "Both are config/loading regressions, but one is missing fields under trust_remote_code while the other is a removed helper breaking trust_remote_code models." + }, + { + "left": "issue:43688", + "right": "issue:43749", + "accept": false, + "reason": "Different subsystems and failures: auxiliary-loss normalization in MoE models versus FSDP_CPU_RAM_EFFICIENT_LOADING breakage." + }, + { + "left": "issue:44514", + "right": "issue:45381", + "accept": false, + "reason": "Both involve Qwen2.5-VL vision/chat processing, but one is a batching/padding crash and the other is wrong vision_position_ids for video input." + }, + { + "left": "issue:43697", + "right": "issue:45216", + "accept": false, + "reason": "Different model families and bugs: RTDetrV2 output changes versus Qwen3.5 save_pretrained checkpoint correctness." + }, + { + "left": "issue:43452", + "right": "issue:43577", + "accept": false, + "reason": "One concerns gguf/model loading, the other dtype propagation for Blip2/Qformer; no shared concrete bug." 
+ }, + { + "left": "issue:43742", + "right": "issue:44589", + "accept": false, + "reason": "Both are load-time TypeErrors, but the failure signatures and mechanisms are unrelated." + }, + { + "left": "issue:45042", + "right": "issue:45081", + "accept": false, + "reason": "Different areas: PIL backend image processors requiring torchvision versus a Mistral tokenizer regex patch crash." + }, + { + "left": "issue:43531", + "right": "issue:44821", + "accept": false, + "reason": "Unrelated: Qwen3-MoE sliding_window behavior versus loading AutoImageProcessor from URL." + }, + { + "left": "issue:43688", + "right": "issue:45440", + "accept": false, + "reason": "Both touch MoE-like models, but one is auxiliary-loss normalization and the other is DeepSeekV3 implementation divergence." + }, + { + "left": "issue:43452", + "right": "issue:44843", + "accept": false, + "reason": "gguf_file loading breakage is unrelated to the offline-mode model_info call in _patch_mistral_regex." + }, + { + "left": "issue:44625", + "right": "issue:44877", + "accept": false, + "reason": "Both mention config handling, but one is num_labels propagation and the other is strict config blocking granite_speech loading." + }, + { + "left": "issue:43653", + "right": "issue:44568", + "accept": false, + "reason": "Both are tokenizer special-token issues, but they affect different tokenizers and different symptoms; not clearly the same defect." + }, + { + "left": "issue:43720", + "right": "issue:44898", + "accept": false, + "reason": "Different components and bug classes: BitNet packed-weight loading versus Perceiver positional-encoding interpolation." + }, + { + "left": "issue:43819", + "right": "issue:44811", + "accept": false, + "reason": "Different decoding/computation bugs: DAC.from_latents mismatch versus Whisper processor.batch_decode ignoring skip_special_tokens." 
+ }, + { + "left": "issue:41762", + "right": "issue:44936", + "accept": false, + "reason": "These are unrelated training/loading issues: ZeRO-3 Gemma3 loading IndexError versus trainer.evaluate failing after train." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 17, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:24643", + "issue:28282", + "issue:29127", + "issue:30064", + "issue:32090", + "issue:33357", + "issue:33666", + "issue:34567", + "issue:35141", + "issue:38175", + "issue:41720", + "issue:43122", + "issue:43262", + "issue:43295", + "issue:43329", + "issue:43452", + "issue:43504", + "issue:43525", + "issue:43531", + "issue:43550", + "issue:43577", + "issue:43643", + "issue:43653", + "issue:43697", + "issue:43720", + "issue:43742", + "issue:43906", + "issue:43927", + "issue:44361", + "issue:44442", + "issue:44451", + "issue:44464", + "issue:44466", + "issue:44479", + "issue:44554", + "issue:44625", + "issue:44743", + "issue:44843", + "issue:44849", + "issue:44936", + "issue:44977", + "issue:44991", + "issue:45005", + "issue:45084", + "issue:45325", + "issue:45356", + "issue:45357", + "issue:45588" + ], + "soft_pairs": [ + "issue:43295|issue:45356", + "issue:44466|issue:45005", + "issue:44464|issue:44743", + "issue:24643|issue:29127", + "issue:41720|issue:43906", + "issue:43525|issue:43643", + "issue:43577|issue:43697", + "issue:38175|issue:43295", + "issue:44361|issue:44442", + "issue:44843|issue:44991", + "issue:43653|issue:44361", + "issue:44479|issue:44936", + "issue:43122|issue:43906", + "issue:44977|issue:45356", + "issue:28282|issue:32090", + 
"issue:28282|issue:33357", + "issue:28282|issue:35141", + "issue:43577|issue:45588", + "issue:30064|issue:33666", + "issue:43577|issue:44554", + "issue:43653|issue:43927", + "issue:44464|issue:44977", + "issue:43531|issue:43742", + "issue:43329|issue:45325", + "issue:44479|issue:44625", + "issue:43550|issue:44361", + "issue:43452|issue:44451", + "issue:43504|issue:43720", + "issue:41720|issue:44464", + "issue:43504|issue:43550", + "issue:43262|issue:45084", + "issue:44849|issue:45357", + "issue:32090|issue:34567", + "issue:32090|issue:33666" + ], + "prepared_review_unit_hash": "ee07b8b04ff05b25964f34d48053205140eb56dd808c22f20b3d171632451b96", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 34, + "serialized_chars": 38183, + "estimated_input_tokens": 9546, + "estimated_eval_tokens": 19348 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the issues span unrelated tokenizer, trainer, multimodal, generation, loading, and serialization problems. None of the soft pairs look like the same concrete bug/change, so I would not merge any of them as duplicates and there is no clear canonical representative.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43295", + "right": "issue:45356", + "accept": false, + "reason": "Different regressions in different tokenizer/processor flows; same broad area, not the same bug." 
+ }, + { + "left": "issue:44466", + "right": "issue:45005", + "accept": false, + "reason": "Both mention tied weights, but one is device-dependent lm_head serialization and the other is a broader translation-model v5 issue." + }, + { + "left": "issue:44464", + "right": "issue:44743", + "accept": false, + "reason": "One is chunked generation with compiled forward; the other is recurrent state reset with cache. Different code paths." + }, + { + "left": "issue:24643", + "right": "issue:29127", + "accept": false, + "reason": "Training/DeepSpeed weight-shape error vs layoutlmv3 box validation message. Unrelated bugs." + }, + { + "left": "issue:41720", + "right": "issue:43906", + "accept": false, + "reason": "Qwen3 auto device mapping crash vs an isolated reproduction of another issue; no evidence they are the same defect." + }, + { + "left": "issue:43525", + "right": "issue:43643", + "accept": false, + "reason": "Missing pad_token_id in Llama4Config vs missing fields from trust_remote_code AutoConfig. Similar symptom, different issue." + }, + { + "left": "issue:43577", + "right": "issue:43697", + "accept": false, + "reason": "Blip2 dtype propagation bug vs RTDetrV2 output mismatch. Different model families and failure modes." + }, + { + "left": "issue:38175", + "right": "issue:43295", + "accept": false, + "reason": "Siglip2 zero probabilities vs processor.tokenizer/image-handling regression. Not the same underlying bug." + }, + { + "left": "issue:44361", + "right": "issue:44442", + "accept": false, + "reason": "Tokenizer failures, but for different tokenizers and likely different loading paths." + }, + { + "left": "issue:44843", + "right": "issue:44991", + "accept": false, + "reason": "Both are tokenizer-loading regressions, but one is offline model_info patching and the other is a specific model load failure." 
+ }, + { + "left": "issue:43653", + "right": "issue:44361", + "accept": false, + "reason": "BigBird special-token decode issue vs MLukeTokenizer task AttributeError. Unrelated." + }, + { + "left": "issue:44479", + "right": "issue:44936", + "accept": false, + "reason": "Video-input regression in Qwen VLMs vs Trainer evaluate-after-train failure. Different subsystems." + }, + { + "left": "issue:43122", + "right": "issue:43906", + "accept": false, + "reason": "Tokenization behavior change vs a reproduction of another issue. No shared concrete bug." + }, + { + "left": "issue:44977", + "right": "issue:45356", + "accept": false, + "reason": "Flash-attention generation bug vs tokenizer codec/regex regression. Different components." + }, + { + "left": "issue:28282", + "right": "issue:32090", + "accept": false, + "reason": "PyTorch import error vs Trainer GPU broadcast NoneType error. Not duplicates." + }, + { + "left": "issue:28282", + "right": "issue:33357", + "accept": false, + "reason": "ImportError vs MacOS bus error. Completely different failures." + }, + { + "left": "issue:28282", + "right": "issue:35141", + "accept": false, + "reason": "Import-time PyTorch missing error vs tied-embedding reinitialization bug. Unrelated." + }, + { + "left": "issue:43577", + "right": "issue:45588", + "accept": false, + "reason": "Blip2 dtype issue vs flash_attention AttributeError on s_aux=None. Different code paths." + }, + { + "left": "issue:30064", + "right": "issue:33666", + "accept": false, + "reason": "Void segmentation map processing vs Qwen2-VL multi-GPU training. Unrelated." + }, + { + "left": "issue:43577", + "right": "issue:44554", + "accept": false, + "reason": "Model dtype propagation vs MPS attention correctness issue. Not the same bug." + }, + { + "left": "issue:43653", + "right": "issue:43927", + "accept": false, + "reason": "BigBird mask-token decode issue vs DiaConfig custom token IDs lost on save/load. Different token/config bugs." 
+ }, + { + "left": "issue:44464", + "right": "issue:44977", + "accept": false, + "reason": "Compiled-forward generation inconsistency vs flash-attention generation failure. Similar symptom only, not same defect." + }, + { + "left": "issue:43531", + "right": "issue:43742", + "accept": false, + "reason": "Qwen3-MoE sliding_window issue vs MobileLLM loading key error. Different areas." + }, + { + "left": "issue:43329", + "right": "issue:45325", + "accept": false, + "reason": "Undefined helpers in multimodal video token counting vs rope-index scaling for still images. Different multimodal bugs." + }, + { + "left": "issue:44479", + "right": "issue:44625", + "accept": false, + "reason": "Qwen VL video regression vs num_labels propagation bug. Unrelated." + }, + { + "left": "issue:43550", + "right": "issue:44361", + "accept": false, + "reason": "torch.compile/SDPA model failure vs tokenizer AttributeError. Different code paths." + }, + { + "left": "issue:43452", + "right": "issue:44451", + "accept": false, + "reason": "gguf_file loading breakage vs a specific model-load failure. No evidence of same root cause." + }, + { + "left": "issue:43504", + "right": "issue:43720", + "accept": false, + "reason": "Legacy-field preload bug vs packed-weight unpacking bug. Different loading mechanisms." + }, + { + "left": "issue:41720", + "right": "issue:44464", + "accept": false, + "reason": "CUDA assert on auto device mapping vs chunked generation inconsistency. Unrelated." + }, + { + "left": "issue:43504", + "right": "issue:43550", + "accept": false, + "reason": "Pretrained-model legacy field issue vs compile/SDPA failure. Different subsystems." + }, + { + "left": "issue:43262", + "right": "issue:45084", + "accept": false, + "reason": "Audio chat template sampling-rate default vs template compilation TypeError. Not the same bug." 
+ }, + { + "left": "issue:44849", + "right": "issue:45357", + "accept": false, + "reason": "Qwen3.5 hidden-states bug vs incorrect visual encoder keys on save_pretrained. Different regressions." + }, + { + "left": "issue:32090", + "right": "issue:34567", + "accept": false, + "reason": "GPU broadcast NoneType error vs TrainerState token counter not updating. Different issues." + }, + { + "left": "issue:32090", + "right": "issue:33666", + "accept": false, + "reason": "Trainer broadcast error vs Qwen2-VL multi-GPU training request. Unrelated." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 18, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:33666", + "issue:36010", + "issue:36331", + "issue:39692", + "issue:43257", + "issue:43278", + "issue:43295", + "issue:43299", + "issue:43334", + "issue:43493", + "issue:43504", + "issue:43525", + "issue:43540", + "issue:43572", + "issue:43606", + "issue:43646", + "issue:43688", + "issue:43716", + "issue:43720", + "issue:43742", + "issue:43746", + "issue:43756", + "issue:43824", + "issue:43825", + "issue:43844", + "issue:43873", + "issue:43927", + "issue:44060", + "issue:44361", + "issue:44423", + "issue:44466", + "issue:44488", + "issue:44521", + "issue:44589", + "issue:44610", + "issue:44625", + "issue:44734", + "issue:44821", + "issue:44843", + "issue:44913", + "issue:44964", + "issue:44977", + "issue:45200", + "issue:45229", + "issue:45290", + "issue:45405", + "issue:45520", + "issue:45584" + ], + "soft_pairs": [ + "issue:43504|issue:44610", + "issue:43278|issue:43688", + "issue:43334|issue:43572", + "issue:43525|issue:43572", + 
"issue:43756|issue:44913", + "issue:43540|issue:43716", + "issue:43493|issue:43825", + "issue:43742|issue:44821", + "issue:45200|issue:45229", + "issue:33666|issue:36010", + "issue:43720|issue:43927", + "issue:43606|issue:43927", + "issue:43688|issue:43873", + "issue:43257|issue:43299", + "issue:43278|issue:43844", + "issue:43540|issue:44977", + "issue:44488|issue:44843", + "issue:43720|issue:44361", + "issue:39692|issue:43493", + "issue:44964|issue:45405", + "issue:43493|issue:43844", + "issue:33666|issue:36331", + "issue:43756|issue:44060", + "issue:44589|issue:45520", + "issue:44625|issue:45584", + "issue:44521|issue:45290", + "issue:43646|issue:44466", + "issue:44423|issue:44734", + "issue:43824|issue:44589", + "issue:43688|issue:43746", + "issue:39692|issue:43295" + ], + "prepared_review_unit_hash": "76411339043bb094699a04b3fa22b9fd48ef5bb9bbbe73c6dd42e3c6c61d1cfa", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 31, + "serialized_chars": 37136, + "estimated_input_tokens": 9284, + "estimated_eval_tokens": 18824 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is highly heterogeneous: most items are separate model-specific bugs, regressions, or docs issues rather than duplicates. 
The soft-similarity pairs mostly share broad themes like loading, config fields, multimodal processing, or v5 regressions, but not the same concrete code-path failure.", + "confidence": 0.19, + "canonical_issue_reason": "issue:43873 is the most broadly scoped and active item in the set, with multiple inbound references and a general offloading/quantization theme that is closest to the few related loading/runtime issues here.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43873 is the best representative issue because it is broader than the highly model-specific reports and sits near the main runtime/offload cluster, even though the overall set does not form a tight duplicate group.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43504", + "right": "issue:44610", + "accept": false, + "reason": "Different models and failures: legacy field loading vs processor/model input-size mismatch." + }, + { + "left": "issue:43278", + "right": "issue:43688", + "accept": false, + "reason": "One is a dtype regression in evaluation; the other is auxiliary-loss normalization. Same training stack, different bug." + }, + { + "left": "issue:43334", + "right": "issue:43572", + "accept": false, + "reason": "Both are missing config attributes, but on different models and different fields, so not the same bug." + }, + { + "left": "issue:43525", + "right": "issue:43572", + "accept": false, + "reason": "Same broad symptom class, but different config attributes and model families." + }, + { + "left": "issue:43756", + "right": "issue:44913", + "accept": false, + "reason": "Both involve config persistence, but one is RoPE layer drops and the other is rotary_pct reload; different code paths." + }, + { + "left": "issue:43540", + "right": "issue:43716", + "accept": false, + "reason": "Different modalities and root causes: video processing error vs image preprocessor/model dtype mismatch." 
+ }, + { + "left": "issue:43493", + "right": "issue:43825", + "accept": false, + "reason": "SigLIP2 implementation discrepancy is unrelated to a pipeline error-message regression." + }, + { + "left": "issue:43742", + "right": "issue:44821", + "accept": false, + "reason": "Both are loading-related, but one is a model key error and the other is URL image-processor loading." + }, + { + "left": "issue:45200", + "right": "issue:45229", + "accept": false, + "reason": "Same Gemma4 family, but one is a training label/mask config issue and the other is multi-GPU inference OOM." + }, + { + "left": "issue:33666", + "right": "issue:36010", + "accept": false, + "reason": "Multi-GPU training and a GenerationMixin import failure are unrelated." + }, + { + "left": "issue:43720", + "right": "issue:43927", + "accept": false, + "reason": "Packed-weight unpacking during accelerate loading is unrelated to DiaConfig token-ID persistence." + }, + { + "left": "issue:43606", + "right": "issue:43927", + "accept": false, + "reason": "CPU offload device mismatch and config token-ID loss are different failure modes." + }, + { + "left": "issue:43688", + "right": "issue:43873", + "accept": false, + "reason": "Aux-loss normalization and quantized offloading are separate bugs." + }, + { + "left": "issue:43257", + "right": "issue:43299", + "accept": false, + "reason": "Both mention Qwen3 MOE, but one is weight conversion during accelerated loading and the other is a dev-regression loading failure." + }, + { + "left": "issue:43278", + "right": "issue:43844", + "accept": false, + "reason": "Evaluation dtype change and ZeRO-3 gradient growth are different issues." + }, + { + "left": "issue:43540", + "right": "issue:44977", + "accept": false, + "reason": "Different Qwen variants and different symptoms: video input processing vs flash-attention generation behavior." 
+ }, + { + "left": "issue:44488", + "right": "issue:44843", + "accept": false, + "reason": "Model load failure and offline-network behavior in tokenizer patching are not the same bug." + }, + { + "left": "issue:43720", + "right": "issue:44361", + "accept": false, + "reason": "Packed-weight loading issue vs tokenizer AttributeError on tasks; no shared code path." + }, + { + "left": "issue:39692", + "right": "issue:43493", + "accept": false, + "reason": "Docs example errors are distinct from a backend implementation discrepancy." + }, + { + "left": "issue:44964", + "right": "issue:45405", + "accept": false, + "reason": "Model-loading regression is unrelated to the PEFT version bump issue." + }, + { + "left": "issue:43493", + "right": "issue:43844", + "accept": false, + "reason": "SigLIP2 accuracy/discrepancy bug and ZeRO-3 gradient amplification are unrelated." + }, + { + "left": "issue:33666", + "right": "issue:36331", + "accept": false, + "reason": "Multi-GPU training and CustomTrainer signature breakage are different problems." + }, + { + "left": "issue:43756", + "right": "issue:44060", + "accept": false, + "reason": "RoPE-layer drop and tied-weights warning are distinct model-specific issues." + }, + { + "left": "issue:44589", + "right": "issue:45520", + "accept": false, + "reason": "Different runtime failures: missing Float8 storage type vs flash_attn import key error." + }, + { + "left": "issue:44625", + "right": "issue:45584", + "accept": false, + "reason": "Qwen3.5 num_labels propagation and Whisper empty-transcription generation are unrelated." + }, + { + "left": "issue:44521", + "right": "issue:45290", + "accept": false, + "reason": "Both hit apply_chat_template, but one is assistant-mask generation and the other is tool-call handling; different bugs." + }, + { + "left": "issue:43646", + "right": "issue:44466", + "accept": false, + "reason": "Custom model initialization regressions and lm_head serialization inconsistency are different v5 issues." 
+ }, + { + "left": "issue:44423", + "right": "issue:44734", + "accept": false, + "reason": "Both are serving crashes, but one is multimodal tensor handling and the other is KV-cache continuation indexing." + }, + { + "left": "issue:43824", + "right": "issue:44589", + "accept": false, + "reason": "ImportError for a specific model class is unrelated to a Float8 storage lookup failure." + }, + { + "left": "issue:43688", + "right": "issue:43746", + "accept": false, + "reason": "Aux-loss normalization and PEFT adapter checkpoint loading are unrelated." + }, + { + "left": "issue:39692", + "right": "issue:43295", + "accept": false, + "reason": "Docs example mistakes do not match a regression in custom model code passing images to tokenizer." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 19, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:15354", + "issue:29942", + "issue:30333", + "issue:33357", + "issue:33666", + "issue:37428", + "issue:38175", + "issue:39401", + "issue:43232", + "issue:43278", + "issue:43295", + "issue:43334", + "issue:43504", + "issue:43525", + "issue:43540", + "issue:43572", + "issue:43575", + "issue:43582", + "issue:43606", + "issue:43653", + "issue:43701", + "issue:43844", + "issue:43927", + "issue:43957", + "issue:43994", + "issue:44112", + "issue:44291", + "issue:44315", + "issue:44361", + "issue:44464", + "issue:44493", + "issue:44554", + "issue:44568", + "issue:44610", + "issue:44792", + "issue:44849", + "issue:44964", + "issue:44977", + "issue:45072", + "issue:45084", + "issue:45198", + "issue:45200", + "issue:45325", + "issue:45356", + "issue:45405", + 
"issue:45446", + "issue:45520", + "issue:45584" + ], + "soft_pairs": [ + "issue:33357|issue:33666", + "issue:43572|issue:44291", + "issue:43994|issue:44493", + "issue:43504|issue:43606", + "issue:44464|issue:45584", + "issue:43504|issue:43653", + "issue:43927|issue:44361", + "issue:43575|issue:45405", + "issue:43278|issue:43701", + "issue:44792|issue:45405", + "issue:43606|issue:44112", + "issue:29942|issue:30333", + "issue:15354|issue:38175", + "issue:44554|issue:45446", + "issue:43540|issue:45200", + "issue:43232|issue:44849", + "issue:45084|issue:45520", + "issue:43844|issue:44964", + "issue:39401|issue:44568", + "issue:43582|issue:44977", + "issue:45072|issue:45198", + "issue:44315|issue:44849", + "issue:37428|issue:43957", + "issue:44554|issue:45356", + "issue:43334|issue:43525", + "issue:43295|issue:44610", + "issue:45200|issue:45325" + ], + "prepared_review_unit_hash": "845590747411e7465b7b63f31889a8c9bd58757bd031ebabc45832c9547d6fcd", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 27, + "serialized_chars": 35860, + "estimated_input_tokens": 8965, + "estimated_eval_tokens": 18186 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a grab bag of unrelated Transformers bugs across loading, tokenization, generation, dtype/device handling, and CI failures. 
I found one likely duplicate family around missing `pad_token_id` in config/load paths; the rest do not look mergeable as the same underlying bug.", + "confidence": 0.41, + "canonical_issue_reason": "Issue 43957 is the broadest and most representative loading-regression report in the set, centered on model instantiation with `torch.device('meta')` and a class of failures that affects multiple models rather than a single model-specific edge case.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43957 is the best overall issue candidate because it describes a general framework-level regression with wider applicability than the more model-specific reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:33357", + "right": "issue:33666", + "accept": false, + "reason": "Both are model-related failures, but one is a MacOS bus error for CLIP and the other is a Qwen2-VL multi-GPU training issue; different symptoms and code paths." + }, + { + "left": "issue:43572", + "right": "issue:44291", + "accept": false, + "reason": "Both involve load/init regressions, but one is about `pad_token_idx` in `StableLmConfig` and the other about an unexpected `_is_hf_initialized` argument in `init_empty_weights`; different underlying bugs." + }, + { + "left": "issue:43994", + "right": "issue:44493", + "accept": false, + "reason": "One is a SigLIP2 inference quality problem, the other is a position-id key warning across many models; too different to be the same bug." + }, + { + "left": "issue:43504", + "right": "issue:43606", + "accept": false, + "reason": "A legacy-field pretrained-model load failure is not the same as a CPU offload device mismatch in `bark-small`." + }, + { + "left": "issue:44464", + "right": "issue:45584", + "accept": false, + "reason": "Chunked generation with compiled forward and empty-transcription Whisper generation after token alignment are unrelated generation bugs." 
+ }, + { + "left": "issue:43504", + "right": "issue:43653", + "accept": false, + "reason": "Legacy-field loading for BEiT semantic segmentation and BigBirdTokenizer special-token registration are different failure modes." + }, + { + "left": "issue:43927", + "right": "issue:44361", + "accept": false, + "reason": "DiaConfig save/load token-id corruption causing generation errors is not the same as MLukeTokenizer attribute failures on tasks." + }, + { + "left": "issue:43575", + "right": "issue:45405", + "accept": false, + "reason": "TP OOM on a large Qwen2 model and an unreleased PEFT version bump are unrelated." + }, + { + "left": "issue:43278", + "right": "issue:43701", + "accept": false, + "reason": "BF16-to-FP32 embedding dtype drift in evaluate is unrelated to resume checkpoint key mismatches." + }, + { + "left": "issue:44792", + "right": "issue:45405", + "accept": false, + "reason": "A Janus image-generation test failure does not match a dependency-version issue." + }, + { + "left": "issue:43606", + "right": "issue:44112", + "accept": false, + "reason": "Both mention device handling, but one is a real CPU offload mismatch and the other is a stale CI test in GraniteSpeech; not the same concrete bug." + }, + { + "left": "issue:29942", + "right": "issue:30333", + "accept": false, + "reason": "Flash Attention 2 test failures and MLFlow reporting on failed training jobs are unrelated." + }, + { + "left": "issue:15354", + "right": "issue:38175", + "accept": false, + "reason": "TorchScript export support for generator expressions and SigLIP2 zero-probability outputs are unrelated model issues." + }, + { + "left": "issue:44554", + "right": "issue:45446", + "accept": false, + "reason": "An MPS attention correctness issue is not the same as a PyTorch version-check bug in flex_attention imports." 
+ }, + { + "left": "issue:43540", + "right": "issue:45200", + "accept": false, + "reason": "Both are multimodal-input problems, but one is video processing in Qwen3OmniMoe and the other is Gemma 4 text-only fine-tuning token-type defaults; different bugs." + }, + { + "left": "issue:43232", + "right": "issue:44849", + "accept": false, + "reason": "Generation kwargs syncing after `sync_gpus` and Qwen3.5 `output_hidden_states` behavior are separate generation-path issues." + }, + { + "left": "issue:45084", + "right": "issue:45520", + "accept": false, + "reason": "Template-node compilation errors and Python 3.13 `flash_attn` import mapping errors are not the same bug." + }, + { + "left": "issue:43844", + "right": "issue:44964", + "accept": false, + "reason": "ZeRO-3 gradient growth during training and inability to load Phi-4 multimodal are unrelated." + }, + { + "left": "issue:39401", + "right": "issue:44568", + "accept": false, + "reason": "Wrong offset mapping in Qwen3 tokenizer and missing BOS/EOS insertion in mDeBERTa are different tokenizer behaviors." + }, + { + "left": "issue:43582", + "right": "issue:44977", + "accept": false, + "reason": "Apple Silicon allocator warmup TypeError and Qwen3.5 flash-attention generation problems are unrelated." + }, + { + "left": "issue:45072", + "right": "issue:45198", + "accept": false, + "reason": "bfloat16 inference dtype mismatches in SwitchTransformers/TimmWrapperModel are not the same as Wav2Vec2 save/tokenization failures." + }, + { + "left": "issue:44315", + "right": "issue:44849", + "accept": false, + "reason": "Liger Kernel application during `model_init` and Qwen3.5 hidden-state output behavior are different concerns." + }, + { + "left": "issue:37428", + "right": "issue:43957", + "accept": false, + "reason": "A missing flash-attention utility import and meta-device model loading failures are both loading-related, but they are distinct code paths and failure causes." 
+ }, + { + "left": "issue:44554", + "right": "issue:45356", + "accept": false, + "reason": "MPS attention correctness and Kimi-K2.5 tokenizer codec regression are unrelated." + }, + { + "left": "issue:43334", + "right": "issue:43525", + "accept": true, + "reason": "Both report `AttributeError` for missing `pad_token_id` on model configs during loading, with the same underlying config-attribute regression pattern." + }, + { + "left": "issue:43295", + "right": "issue:44610", + "accept": false, + "reason": "Processor/tokenizer API regression and OmDet-Turbo processor output-size mismatch are different multimodal issues." + }, + { + "left": "issue:45200", + "right": "issue:45325", + "accept": false, + "reason": "Gemma 4 token-type-id defaults and Qwen2.5-VL rope-index scaling are different multimodal modeling bugs." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The cluster summary is grounded in the packet, and the only accepted soft-edge pair (issue:43334 / issue:43525) is a conservative duplicate call on the same missing `pad_token_id` config-attribute regression. The other rejections stay within the evidence and do not overstate sameness across clearly different bug classes." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 20, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:37428", + "issue:38175", + "issue:42915", + "issue:43122", + "issue:43278", + "issue:43295", + "issue:43388", + "issue:43450", + "issue:43493", + "issue:43526", + "issue:43572", + "issue:43653", + "issue:43701", + "issue:43819", + "issue:43824", + "issue:43827", + "issue:43844", + "issue:43856", + "issue:43873", + "issue:43901", + "issue:43975", + "issue:43994", + "issue:44112", + "issue:44206", + "issue:44360", + "issue:44442", + "issue:44464", + "issue:44466", + "issue:44514", + "issue:44521", + "issue:44561", + "issue:44568", + "issue:44589", + "issue:44792", + "issue:44849", + "issue:44898", + "issue:44936", + "issue:44987", + "issue:44991", + "issue:45083", + "issue:45127", + "issue:45198", + "issue:45245", + "issue:45290", + "issue:45381", + "issue:45412", + "issue:45563", + "issue:45584" + ], + "soft_pairs": [ + "issue:44466|issue:45127", + "issue:44898|issue:45412", + "issue:43493|issue:43701", + "issue:43873|issue:44464", + "issue:44792|issue:45563", + "issue:44514|issue:44521", + "issue:37428|issue:43824", + "issue:43388|issue:43526", + "issue:43572|issue:44987", + "issue:43844|issue:44936", + "issue:43572|issue:44589", + "issue:43824|issue:43975", + "issue:43295|issue:43450", + "issue:43295|issue:43873", + "issue:44442|issue:45198", + "issue:38175|issue:45412", + "issue:44360|issue:44561", + "issue:43295|issue:43994", + "issue:43122|issue:44849", + "issue:42915|issue:43824", + "issue:44206|issue:45584", + "issue:43278|issue:43856", + "issue:38175|issue:43493", + "issue:43827|issue:43901", + 
"issue:44568|issue:44991", + "issue:43653|issue:44112", + "issue:43450|issue:43493", + "issue:45290|issue:45381", + "issue:45127|issue:45245", + "issue:43819|issue:45083" + ], + "prepared_review_unit_hash": "ab5fea0c763f497cc2acd3e576eaf1bfb984d2ebd35c6de0b72f5ebcee5097cf", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36759, + "estimated_input_tokens": 9190, + "estimated_eval_tokens": 18636 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most pairs are loose semantic matches, not true duplicates. The only near-family is around multimodal/chat-template regressions, but the concrete failure modes still differ, so I would not merge any of the soft edges.", + "confidence": 0.26, + "canonical_issue_reason": "Issue 44521 is the broadest and most central open report in the multimodal `apply_chat_template` area, with multiple inbound references; it is the best single representative if one issue must be chosen.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44521 has the clearest user-facing regression, the widest scope within the closest issue family, and active references, so it is the strongest global issue to keep as the representative node.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44466", + "right": "issue:45127", + "accept": false, + "reason": "Different bugs: serialization/tied-weights handling vs LoRA merge collapse after vocab extension." + }, + { + "left": "issue:44898", + "right": "issue:45412", + "accept": false, + "reason": "Unrelated subsystems: Perceiver image resolution handling vs RT-DETR model memory release." 
+ }, + { + "left": "issue:43493", + "right": "issue:43701", + "accept": false, + "reason": "SigLIP2 implementation mismatch and a checkpoint key mismatch are distinct failures." + }, + { + "left": "issue:43873", + "right": "issue:44464", + "accept": false, + "reason": "Quantization/offloading behavior and compiled chunked generation inconsistency are different code paths." + }, + { + "left": "issue:44792", + "right": "issue:45563", + "accept": false, + "reason": "Janus image-generation test failure is not the same as a paged-generate warning regression." + }, + { + "left": "issue:44514", + "right": "issue:44521", + "accept": false, + "reason": "Both involve `apply_chat_template`, but one crashes on batched padding=False input while the other produces wrong assistant masks." + }, + { + "left": "issue:37428", + "right": "issue:43824", + "accept": false, + "reason": "Both are import errors, but they expose different missing symbols and different model entry points." + }, + { + "left": "issue:43388", + "right": "issue:43526", + "accept": false, + "reason": "Different label-handling bugs in training metrics vs image processor label reduction." + }, + { + "left": "issue:43572", + "right": "issue:44987", + "accept": false, + "reason": "A missing config field after v5 update is not the same as a model load failure for a specific repo." + }, + { + "left": "issue:43844", + "right": "issue:44936", + "accept": false, + "reason": "Training instability with ZeRO-3 and post-train evaluate failure are unrelated symptoms." + }, + { + "left": "issue:43572", + "right": "issue:44589", + "accept": false, + "reason": "Missing `pad_token_idx` config and Float8 storage lookup errors are unrelated regressions." + }, + { + "left": "issue:43824", + "right": "issue:43975", + "accept": false, + "reason": "Different breakages: missing model export vs incorrect detokenization behavior." 
+ }, + { + "left": "issue:43295", + "right": "issue:43450", + "accept": false, + "reason": "Processor/tokenizer regression and batched video shape bug are separate processor issues." + }, + { + "left": "issue:43295", + "right": "issue:43873", + "accept": false, + "reason": "Custom processor API regression is not the same as quantization offloading behavior." + }, + { + "left": "issue:44442", + "right": "issue:45198", + "accept": false, + "reason": "Tokenizer load failure and save_pretrained/tokenization failure are different concrete problems." + }, + { + "left": "issue:38175", + "right": "issue:45412", + "accept": false, + "reason": "SigLIP2 zero probabilities and RT-DETR memory retention are unrelated model bugs." + }, + { + "left": "issue:44360", + "right": "issue:44561", + "accept": false, + "reason": "A discussion about DSA indexing math is not the same as a removed utility breaking trust_remote_code models." + }, + { + "left": "issue:43295", + "right": "issue:43994", + "accept": false, + "reason": "Processor/tokenizer regression vs SigLIP2 nonsensical outputs are different bugs." + }, + { + "left": "issue:43122", + "right": "issue:44849", + "accept": false, + "reason": "Tokenization drift across versions and Qwen3.5 hidden-state bug are unrelated." + }, + { + "left": "issue:42915", + "right": "issue:43824", + "accept": false, + "reason": "Qwen3 MoE FP8 config failure is unrelated to a missing Qwen2.5-VL import export." + }, + { + "left": "issue:44206", + "right": "issue:45584", + "accept": false, + "reason": "Unsupported feature-arg crash and Whisper empty-transcription generation failure are different paths." + }, + { + "left": "issue:43278", + "right": "issue:43856", + "accept": false, + "reason": "Dtype drift between train/eval and MoE memory usage are not the same bug." 
+ }, + { + "left": "issue:38175", + "right": "issue:43493", + "accept": false, + "reason": "Both are SigLIP2-related but one is incorrect probabilities and the other is HF-vs-JAX discrepancy." + }, + { + "left": "issue:43827", + "right": "issue:43901", + "accept": false, + "reason": "Same general docs cleanup theme, but they target different pages and deprecated behaviors." + }, + { + "left": "issue:44568", + "right": "issue:44991", + "accept": false, + "reason": "Tokenizer special-token behavior and a specific tokenizer load failure are different issues." + }, + { + "left": "issue:43653", + "right": "issue:44112", + "accept": false, + "reason": "BigBird tokenizer special-token registration and a GraniteSpeech CI test are unrelated." + }, + { + "left": "issue:43450", + "right": "issue:43493", + "accept": false, + "reason": "Video batch shape handling and SigLIP2 output discrepancy are different subsystems and failures." + }, + { + "left": "issue:45290", + "right": "issue:45381", + "accept": false, + "reason": "Chat-template tool-call crash and Qwen2.5-VL vision_position_ids bug are different multimodal regressions." + }, + { + "left": "issue:45127", + "right": "issue:45245", + "accept": false, + "reason": "LoRA/tied-weights collapse is unrelated to the category-count runtime error." + }, + { + "left": "issue:43819", + "right": "issue:45083", + "accept": false, + "reason": "DAC latent reconstruction mismatch and qwen3_omni_moe feature-length helper behavior are not the same bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 21, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:29127", + "issue:29942", + "issue:32090", + "issue:37428", + "issue:38175", + "issue:39692", + "issue:43262", + "issue:43278", + "issue:43335", + "issue:43425", + "issue:43450", + "issue:43504", + "issue:43526", + "issue:43550", + "issue:43575", + "issue:43606", + "issue:43611", + "issue:43653", + "issue:43723", + "issue:43819", + "issue:43825", + "issue:43931", + "issue:44112", + "issue:44292", + "issue:44351", + "issue:44448", + "issue:44462", + "issue:44493", + "issue:44554", + "issue:44561", + "issue:44568", + "issue:44610", + "issue:44743", + "issue:44779", + "issue:44855", + "issue:44857", + "issue:44977", + "issue:45072", + "issue:45083", + "issue:45200", + "issue:45229", + "issue:45310", + "issue:45381", + "issue:45478", + "issue:45491", + "issue:45520", + "issue:45588", + "issue:45593" + ], + "soft_pairs": [ + "issue:44462|issue:44561", + "issue:44561|issue:44779", + "issue:43450|issue:43575", + "issue:45200|issue:45588", + "issue:44610|issue:44857", + "issue:37428|issue:43425", + "issue:44857|issue:45593", + "issue:43262|issue:43450", + "issue:44448|issue:44743", + "issue:43819|issue:44112", + "issue:43606|issue:45083", + "issue:45310|issue:45478", + "issue:43278|issue:43825", + "issue:29942|issue:32090", + "issue:37428|issue:44351", + "issue:39692|issue:43526", + "issue:43611|issue:43931", + "issue:43335|issue:43550", + "issue:43819|issue:44493", + "issue:45200|issue:45381", + "issue:43450|issue:43526", + "issue:39692|issue:44493", + "issue:44855|issue:45083", + 
"issue:44448|issue:44779", + "issue:29127|issue:32090", + "issue:43335|issue:43653", + "issue:38175|issue:44493", + "issue:44977|issue:45229", + "issue:44292|issue:45520", + "issue:43335|issue:45083", + "issue:43335|issue:43504", + "issue:43550|issue:44112", + "issue:44568|issue:45491", + "issue:43335|issue:45072", + "issue:43723|issue:44554" + ], + "prepared_review_unit_hash": "8e277924137ff59273149621282805002ad76bffcb7fe962568ea6d05abcbc21", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 35, + "serialized_chars": 38242, + "estimated_input_tokens": 9561, + "estimated_eval_tokens": 19378 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is mostly a grab-bag of unrelated Transformers bug reports. 
The only likely duplicate pair is the Qwen3.5 MoE from_pretrained report (45310/45478), which has the same title and clearly the same failure class; the earlier report is the better canonical item.", + "confidence": 0.92, + "canonical_issue_reason": "issue:45310 is the best canonical issue: it is the earlier Qwen3.5 MoE from_pretrained report, has inbound reference activity, and issue:45478 appears to be a near-identical later duplicate based on the matching title and failure mode.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45310 is the strongest representative because it is the earlier report of the likely duplicate pair and captures the core Qwen3.5 MoE from_pretrained regression.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44462", + "right": "issue:44561", + "accept": false, + "reason": "Tokenizer.json loading behavior and a removed helper breaking trust_remote_code are different bugs." + }, + { + "left": "issue:44561", + "right": "issue:44779", + "accept": false, + "reason": "Both are v5 regressions, but one is an API removal and the other is a tokenizer correctness issue." + }, + { + "left": "issue:43450", + "right": "issue:43575", + "accept": false, + "reason": "Batched video shape bug and Qwen2 TP load OOM are unrelated failures." + }, + { + "left": "issue:45200", + "right": "issue:45588", + "accept": false, + "reason": "Gemma4 mm_token_type_ids defaults and flash_attention s_aux handling are different code paths." + }, + { + "left": "issue:44610", + "right": "issue:44857", + "accept": false, + "reason": "OmDet-Turbo input-size mismatch is unrelated to the LwDetrImageLoss AMP/CUDA crash." + }, + { + "left": "issue:37428", + "right": "issue:43425", + "accept": false, + "reason": "Flash-attention import error and general Torch 2.10 incompatibility are too broad and distinct." 
+ }, + { + "left": "issue:44857", + "right": "issue:45593", + "accept": false, + "reason": "Different symptoms in different models: AMP/CUDA loss crash versus missing auxiliary losses." + }, + { + "left": "issue:43262", + "right": "issue:43450", + "accept": false, + "reason": "Audio chat-template sampling-rate default and video batching shape bug are separate issues." + }, + { + "left": "issue:44448", + "right": "issue:44743", + "accept": false, + "reason": "Pegasus output drift and Qwen3.5 recurrent-state reset are unrelated model behaviors." + }, + { + "left": "issue:43819", + "right": "issue:44112", + "accept": false, + "reason": "DAC latent mismatch and a GraniteSpeech CI device-override failure are not the same bug." + }, + { + "left": "issue:43606", + "right": "issue:45083", + "accept": false, + "reason": "CPU offload device mismatch and qwen3_omni_moe helper behavior are different problems." + }, + { + "left": "issue:45310", + "right": "issue:45478", + "accept": true, + "reason": "Near-identical Qwen3.5 MoE from_pretrained reports with the same title and failure mode; 45310 is earlier and the better duplicate target." + }, + { + "left": "issue:43278", + "right": "issue:43825", + "accept": false, + "reason": "Embedding dtype drift in evaluate and an inaccurate pipeline error message are different regressions." + }, + { + "left": "issue:29942", + "right": "issue:32090", + "accept": false, + "reason": "Flash Attention 2 test failures are not the same as a Trainer NoneType broadcast error." + }, + { + "left": "issue:37428", + "right": "issue:44351", + "accept": false, + "reason": "Both are import-related, but they concern different missing symbols and different regressions." + }, + { + "left": "issue:39692", + "right": "issue:43526", + "accept": false, + "reason": "SigLIP2 docs/example errors and BEiT reduce_labels behavior are unrelated." 
+ }, + { + "left": "issue:43611", + "right": "issue:43931", + "accept": false, + "reason": "Broad loading regression and a Qwen3-VL weight-shape mismatch are not the same issue." + }, + { + "left": "issue:43335", + "right": "issue:43550", + "accept": false, + "reason": "SwitchTransformers sparse-layer config bug and Bamba torch.compile SDPA failure are different bugs." + }, + { + "left": "issue:43819", + "right": "issue:44493", + "accept": false, + "reason": "DAC latent reconstruction and unexpected position-id keys are unrelated." + }, + { + "left": "issue:45200", + "right": "issue:45381", + "accept": false, + "reason": "Missing mm_token_type_ids defaults and wrong vision_position_ids for video input are separate multimodal bugs." + }, + { + "left": "issue:43450", + "right": "issue:43526", + "accept": false, + "reason": "Video batching shape bug and BEiT label reduction bug affect different processor logic." + }, + { + "left": "issue:39692", + "right": "issue:44493", + "accept": false, + "reason": "SigLIP2 docs/example issues and widespread position-id key warnings are different regression families." + }, + { + "left": "issue:44855", + "right": "issue:45083", + "accept": false, + "reason": "Python 3.13 import failure and qwen3_omni_moe feature-length behavior are unrelated." + }, + { + "left": "issue:44448", + "right": "issue:44779", + "accept": false, + "reason": "Pegasus output differences and Deepseek tokenizer regression are different model families and symptoms." + }, + { + "left": "issue:29127", + "right": "issue:32090", + "accept": false, + "reason": "LayoutLMv3 error-message clarity and Trainer GPU broadcast NoneType failure are unrelated." + }, + { + "left": "issue:43335", + "right": "issue:43653", + "accept": false, + "reason": "SwitchTransformers sparse-layer bug and BigBirdTokenizer special-token registration bug are separate issues." 
+ }, + { + "left": "issue:38175", + "right": "issue:44493", + "accept": false, + "reason": "SigLIP2 zero-probabilities and unexpected position-id keys are not the same bug." + }, + { + "left": "issue:44977", + "right": "issue:45229", + "accept": false, + "reason": "Qwen3.5 flash-attention generation bug and Gemma4 CUDA OOM are distinct failures." + }, + { + "left": "issue:44292", + "right": "issue:45520", + "accept": false, + "reason": "Qwen-3-8B-NVFP4 runtime error and flash_attn import KeyError on Python 3.13 are unrelated." + }, + { + "left": "issue:43335", + "right": "issue:45083", + "accept": false, + "reason": "SwitchTransformers config handling and qwen3_omni_moe helper behavior are different code paths." + }, + { + "left": "issue:43335", + "right": "issue:43504", + "accept": false, + "reason": "SwitchTransformers sparse-layer bug and BEiT pretrained-loading legacy-field bug are unrelated." + }, + { + "left": "issue:43550", + "right": "issue:44112", + "accept": false, + "reason": "Bamba SDPA failure and GraniteSpeech CI override failure are different problems." + }, + { + "left": "issue:44568", + "right": "issue:45491", + "accept": false, + "reason": "Tokenizer BOS/EOS handling and Gemma3 mixed-length NaN embeddings are unrelated." + }, + { + "left": "issue:43335", + "right": "issue:45072", + "accept": false, + "reason": "SwitchTransformers config edge case and bfloat16 dtype mismatch in inference are not the same issue." + }, + { + "left": "issue:43723", + "right": "issue:44554", + "accept": false, + "reason": "AutoTokenizer loading in v5 and an MPS attention correctness bug are unrelated." + } + ] + }, + "evaluator_result": { + "accept": false, + "feedback": "The cluster-level conclusion is mostly plausible, but the justification overstates the packet evidence. 
The 45310/45478 pair is supported as a likely duplicate because the titles are identical and 45310 is earlier with inbound references, but the response claims the two reports match \"exactly\" in problem description/body content, which is not visible in the packet. Please tone that down to a conservative \"near-identical / likely duplicate\" assessment. The other soft-edge rejections look appropriately conservative." + }, + "evaluator_used": true, + "retried": true, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 22, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:25251", + "issue:29942", + "issue:32090", + "issue:33357", + "issue:34689", + "issue:35141", + "issue:37428", + "issue:40444", + "issue:41720", + "issue:43065", + "issue:43066", + "issue:43278", + "issue:43329", + "issue:43335", + "issue:43412", + "issue:43526", + "issue:43550", + "issue:43606", + "issue:43653", + "issue:43701", + "issue:43720", + "issue:43873", + "issue:43976", + "issue:43994", + "issue:44112", + "issue:44448", + "issue:44451", + "issue:44462", + "issue:44466", + "issue:44493", + "issue:44743", + "issue:44857", + "issue:44977", + "issue:44987", + "issue:45072", + "issue:45083", + "issue:45229", + "issue:45245", + "issue:45372", + "issue:45406", + "issue:45412", + "issue:45440", + "issue:45459", + "issue:45520", + "issue:45563", + "issue:45588", + "issue:45593" + ], + "soft_pairs": [ + "issue:40444|issue:43526", + "issue:44448|issue:44977", + "issue:43335|issue:45593", + "issue:43720|issue:44112", + "issue:43066|issue:44462", + "issue:45372|issue:45588", + "issue:34689|issue:35141", + "issue:44977|issue:45563", + "issue:44857|issue:45072", + "issue:43606|issue:44857", + 
"issue:43412|issue:45588", + "issue:43873|issue:45229", + "issue:37428|issue:44987", + "issue:43873|issue:44493", + "issue:45372|issue:45406", + "issue:43701|issue:45083", + "issue:29942|issue:35141", + "issue:43976|issue:45440", + "issue:45245|issue:45563", + "issue:43550|issue:45083", + "issue:41720|issue:45229", + "issue:43278|issue:43329", + "issue:32090|issue:33357", + "issue:45372|issue:45459", + "issue:44448|issue:44466", + "issue:44451|issue:45520", + "issue:43065|issue:43994", + "issue:43065|issue:45412", + "issue:25251|issue:32090", + "issue:43653|issue:44743", + "issue:44857|issue:45588" + ], + "prepared_review_unit_hash": "e5c71020f8f662a85e731491b8c89e5900e3beb63b8605e0b0e60620bc6f9f50", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 31, + "serialized_chars": 36497, + "estimated_input_tokens": 9125, + "estimated_eval_tokens": 18506 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items do not form a true duplicate cluster. The soft pairs mostly share only broad subsystem similarity (tokenizers, flash-attn, offloading, vision models, etc.) 
but describe different concrete bugs or regressions.", + "confidence": 0.94, + "canonical_issue_reason": "No single canonical issue stands out: the set spans many unrelated bugs, so a representative issue would be misleading.", + "canonical_pr_reason": null, + "best_issue_reason": "No global best issue candidate; none of the issues cleanly represents the others as the same underlying defect.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:40444", + "right": "issue:43526", + "accept": false, + "reason": "Both involve image processing, but one is a multimodal IterableDataset training failure and the other is a BEiT fast processor label-reduction bug." + }, + { + "left": "issue:44448", + "right": "issue:44977", + "accept": false, + "reason": "Different models and symptoms: Pegasus v4/v5 output mismatch vs Qwen3.5 flash-attention generation failure." + }, + { + "left": "issue:43335", + "right": "issue:45593", + "accept": false, + "reason": "Unrelated model bugs: SwitchTransformers sparse-layer config creation vs D-FINE auxiliary-loss handling." + }, + { + "left": "issue:43720", + "right": "issue:44112", + "accept": false, + "reason": "Both are CI failures, but the root causes differ: packed-weight loading vs a stale device-override test." + }, + { + "left": "issue:43066", + "right": "issue:44462", + "accept": false, + "reason": "Both concern tokenizers, but one is decoder-type selection in v5 and the other is ignoring repository tokenizer.json." + }, + { + "left": "issue:45372", + "right": "issue:45588", + "accept": false, + "reason": "Different Gemma/processor import issue versus a flash_attention.py None-handling crash." + }, + { + "left": "issue:34689", + "right": "issue:35141", + "accept": false, + "reason": "Both are regressions, but one is model loading for Llama 3.2 Vision and the other is embedding reinitialization after resizing." 
+ }, + { + "left": "issue:44977", + "right": "issue:45563", + "accept": false, + "reason": "Qwen3.5 flash-attention generation bug is unrelated to the stale num_return_sequences warning in paged generate()." + }, + { + "left": "issue:44857", + "right": "issue:45072", + "accept": false, + "reason": "Different dtype-related failures in different models/code paths: LwDetr AMP crash vs SwitchTransformers/TimmWrapper bfloat16 mismatch." + }, + { + "left": "issue:43606", + "right": "issue:44857", + "accept": false, + "reason": "One is a CPU-offload device mismatch in bark-small; the other is a float16 AMP crash in LwDetrImageLoss." + }, + { + "left": "issue:43412", + "right": "issue:45588", + "accept": false, + "reason": "ExecuTorch export issues for Gemma3n are unrelated to flash_attention.py crashing on s_aux=None." + }, + { + "left": "issue:43873", + "right": "issue:45229", + "accept": false, + "reason": "Both involve memory/device constraints, but one is quantization offloading and the other is multi-GPU CUDA OOM for Gemma4." + }, + { + "left": "issue:37428", + "right": "issue:44987", + "accept": false, + "reason": "Both mention loading/import paths, but the first is a missing flash-attention helper import and the second is a broader loading failure for a model." + }, + { + "left": "issue:43873", + "right": "issue:44493", + "accept": false, + "reason": "Offloading/quantization behavior is unrelated to unexpected position-id keys during model loading." + }, + { + "left": "issue:45372", + "right": "issue:45406", + "accept": false, + "reason": "Different Gemma4 failures: missing ReasoningEffort import vs transformers serve crashing on Gemma4Processor._tokenizer." + }, + { + "left": "issue:43701", + "right": "issue:45083", + "accept": false, + "reason": "Resume-from-checkpoint key mismatch is unrelated to the qwen3_omni_moe feat-length helper bug." 
+ }, + { + "left": "issue:29942", + "right": "issue:35141", + "accept": false, + "reason": "Failing Flash Attention tests and embedding reinitialization after resizing are separate regressions." + }, + { + "left": "issue:43976", + "right": "issue:45440", + "accept": false, + "reason": "Python version compatibility problem vs DeepseekV3MoE implementation divergence; different defects." + }, + { + "left": "issue:45245", + "right": "issue:45563", + "accept": false, + "reason": "Category-count overflow and a stale generate warning are not the same bug." + }, + { + "left": "issue:43550", + "right": "issue:45083", + "accept": false, + "reason": "torch.compile/SDPA failure in Bamba is unrelated to the qwen3_omni_moe helper-function issue." + }, + { + "left": "issue:41720", + "right": "issue:45229", + "accept": false, + "reason": "Qwen3 auto-device-map cuda assert and Gemma4 multi-GPU OOM are different model/runtime problems." + }, + { + "left": "issue:43278", + "right": "issue:43329", + "accept": false, + "reason": "Embedding dtype drift in evaluate is unrelated to the multimodal token-count helper using undefined video-branch variables." + }, + { + "left": "issue:32090", + "right": "issue:33357", + "accept": false, + "reason": "Trainer broadcast NoneType error and MacOS bus error with CLIP are distinct failures." + }, + { + "left": "issue:45372", + "right": "issue:45459", + "accept": false, + "reason": "Gemma4 processor import failure is unrelated to tokenizer errors being hidden by except import_protobuf_decode_error()." + }, + { + "left": "issue:44448", + "right": "issue:44466", + "accept": false, + "reason": "Pegasus output regression and lm_head serialization inconsistency are separate v4/v5 issues." + }, + { + "left": "issue:44451", + "right": "issue:45520", + "accept": false, + "reason": "Loading a specific model and a Python 3.13 flash_attn import KeyError are unrelated." 
+ }, + { + "left": "issue:43065", + "right": "issue:43994", + "accept": false, + "reason": "Dummy Conv2d in Sam3PixelDecoder and SigLIP2 nonsensical AutoModel/pipeline output are different bugs." + }, + { + "left": "issue:43065", + "right": "issue:45412", + "accept": false, + "reason": "Sam3PixelDecoder dummy layer issue is unrelated to RT-DETR memory not being released on deletion." + }, + { + "left": "issue:25251", + "right": "issue:32090", + "accept": false, + "reason": "Pipeline top_k nesting bug and Trainer _gpu_broadcast_one NoneType error are unrelated." + }, + { + "left": "issue:43653", + "right": "issue:44743", + "accept": false, + "reason": "BigBirdTokenizer special-token registration issue and recurrent-state reset in qwen3_5 are different defects." + }, + { + "left": "issue:44857", + "right": "issue:45588", + "accept": false, + "reason": "Float16 AMP crash in LwDetrImageLoss and a flash_attention None-aux crash do not share the same code path." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 23, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:29127", + "issue:29942", + "issue:30064", + "issue:32090", + "issue:33290", + "issue:33357", + "issue:34567", + "issue:35141", + "issue:36010", + "issue:39692", + "issue:42915", + "issue:42994", + "issue:43257", + "issue:43262", + "issue:43278", + "issue:43334", + "issue:43381", + "issue:43421", + "issue:43526", + "issue:43531", + "issue:43653", + "issue:43701", + "issue:43756", + "issue:43761", + "issue:43824", + "issue:43994", + "issue:44079", + "issue:44186", + "issue:44206", + "issue:44265", + "issue:44279", + 
"issue:44361", + "issue:44442", + "issue:44464", + "issue:44479", + "issue:44610", + "issue:44871", + "issue:44977", + "issue:45072", + "issue:45103", + "issue:45198", + "issue:45229", + "issue:45245", + "issue:45372", + "issue:45412", + "issue:45440", + "issue:45491", + "issue:45593" + ], + "soft_pairs": [ + "issue:44186|issue:45198", + "issue:43421|issue:45491", + "issue:43278|issue:43381", + "issue:44265|issue:45198", + "issue:43994|issue:45412", + "issue:44361|issue:45198", + "issue:42915|issue:42994", + "issue:44206|issue:44479", + "issue:44871|issue:44977", + "issue:43257|issue:43278", + "issue:43756|issue:45440", + "issue:44610|issue:45412", + "issue:29942|issue:30064", + "issue:45103|issue:45372", + "issue:43824|issue:44279", + "issue:33357|issue:39692", + "issue:45440|issue:45593", + "issue:29127|issue:34567", + "issue:32090|issue:33290", + "issue:45072|issue:45229", + "issue:43653|issue:44442", + "issue:43381|issue:43701", + "issue:43526|issue:44186", + "issue:43334|issue:43531", + "issue:44464|issue:45491", + "issue:44977|issue:45245", + "issue:29942|issue:36010", + "issue:43262|issue:44206", + "issue:29127|issue:33357", + "issue:29127|issue:35141", + "issue:43761|issue:44079" + ], + "prepared_review_unit_hash": "5c33c4e9035d63135b213318a47a68cd8a5c470de04573f5ceefbdc48f80418a", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 31, + "serialized_chars": 37069, + "estimated_input_tokens": 9268, + "estimated_eval_tokens": 18792 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is noisy and largely heterogeneous: the soft pairs span unrelated bugs across tokenizers, model loading, training, export, quantization, 
vision/audio processing, and docs. I don\u2019t see any true duplicate pair here, so there is no single canonical artifact to merge around.", + "confidence": 0.98, + "canonical_issue_reason": "No clear canonical issue: the items are thematically broad but do not describe the same underlying bug, so there is no safe duplicate target.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue cleanly represents the cluster as a duplicate hub; the set is too heterogeneous to nominate a meaningful global best issue.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44186", + "right": "issue:45198", + "accept": false, + "reason": "Different tokenizer/model families and different failure modes; not the same bug." + }, + { + "left": "issue:43421", + "right": "issue:45491", + "accept": false, + "reason": "Special-token post-processor updates are unrelated to Gemma3 sliding-window NaN embeddings." + }, + { + "left": "issue:43278", + "right": "issue:43381", + "accept": false, + "reason": "Training/eval dtype mismatch is unrelated to gradient-checkpointing being disallowed in eval mode." + }, + { + "left": "issue:44265", + "right": "issue:45198", + "accept": false, + "reason": "torch.export / torch_compilable_check failure is unrelated to Wav2Vec2 save/tokenization issues." + }, + { + "left": "issue:43994", + "right": "issue:45412", + "accept": false, + "reason": "SigLIP2 inference correctness and RT-DETR memory release are different problems." + }, + { + "left": "issue:44361", + "right": "issue:45198", + "accept": false, + "reason": "MLukeTokenizer task AttributeError is not the same as Wav2Vec2 save_pretrained/tokenization failure." + }, + { + "left": "issue:42915", + "right": "issue:42994", + "accept": false, + "reason": "Both touch quantization, but one is FP8 config failure and the other is quantized-model saving." 
+ }, + { + "left": "issue:44206", + "right": "issue:44479", + "accept": false, + "reason": "Feature extractor center-arg regression and Qwen video-input regression are unrelated." + }, + { + "left": "issue:44871", + "right": "issue:44977", + "accept": false, + "reason": "EOS token config inconsistency is not the same as flash-attention generation failure." + }, + { + "left": "issue:43257", + "right": "issue:43278", + "accept": false, + "reason": "Qwen3 MoE loading/conversion with accelerate+deepspeed is unrelated to eval dtype changes." + }, + { + "left": "issue:43756", + "right": "issue:45440", + "accept": false, + "reason": "Different model families and different implementation mismatches; not one concrete bug." + }, + { + "left": "issue:44610", + "right": "issue:45412", + "accept": false, + "reason": "Processor image-size mismatch is unrelated to a model memory leak on deletion." + }, + { + "left": "issue:29942", + "right": "issue:30064", + "accept": false, + "reason": "Flash Attention 2 test failures and void segmentation-map processing are different subsystems." + }, + { + "left": "issue:45103", + "right": "issue:45372", + "accept": false, + "reason": "Auto-docstring future-annotations crash is unrelated to Gemma 4 processor dependency import failure." + }, + { + "left": "issue:43824", + "right": "issue:44279", + "accept": false, + "reason": "Specific Qwen2.5-VL import error is not the same as a generic dependency issue." + }, + { + "left": "issue:33357", + "right": "issue:39692", + "accept": false, + "reason": "MacOS CLIP bus error and SigLIP2 documentation mistakes are unrelated." + }, + { + "left": "issue:45440", + "right": "issue:45593", + "accept": false, + "reason": "DeepSeekV3 implementation divergence is unrelated to D-FINE auxiliary-loss behavior." + }, + { + "left": "issue:29127", + "right": "issue:34567", + "accept": false, + "reason": "LayoutLMv3 error messaging and TrainerState token counters are unrelated." 
+ }, + { + "left": "issue:32090", + "right": "issue:33290", + "accept": false, + "reason": "Trainer broadcast NoneType error and Adafactor+Deepspeed OOM are different runtime issues." + }, + { + "left": "issue:45072", + "right": "issue:45229", + "accept": false, + "reason": "BFloat16 dtype mismatch is not the same as multi-GPU CUDA OOM." + }, + { + "left": "issue:43653", + "right": "issue:44442", + "accept": false, + "reason": "BigBird special-token registration and AutoTokenizer loading failures are different tokenizer bugs." + }, + { + "left": "issue:43381", + "right": "issue:43701", + "accept": false, + "reason": "Eval-mode checkpointing restriction is unrelated to resume checkpoint key mismatch." + }, + { + "left": "issue:43526", + "right": "issue:44186", + "accept": false, + "reason": "BEiT label reduction and LayoutLMv2 tokenizer crashes are different code paths." + }, + { + "left": "issue:43334", + "right": "issue:43531", + "accept": false, + "reason": "Qwen3-VL pad_token_id import/load issue is unrelated to Qwen3-MoE sliding_window behavior." + }, + { + "left": "issue:44464", + "right": "issue:45491", + "accept": false, + "reason": "Compiled-forward chunked generation and Gemma3 NaN embeddings are not the same bug." + }, + { + "left": "issue:44977", + "right": "issue:45245", + "accept": false, + "reason": "Flash-attention generation failure is unrelated to the 2^24 categories runtime error." + }, + { + "left": "issue:29942", + "right": "issue:36010", + "accept": false, + "reason": "Flash Attention 2 test breakage and GenerationMixin import error are distinct issues." + }, + { + "left": "issue:43262", + "right": "issue:44206", + "accept": false, + "reason": "Audio chat-template sample-rate defaulting is unrelated to a feature-extractor center argument crash." + }, + { + "left": "issue:29127", + "right": "issue:33357", + "accept": false, + "reason": "LayoutLMv3 box-validation messaging and CLIP bus error on MacOS are unrelated." 
+ }, + { + "left": "issue:29127", + "right": "issue:35141", + "accept": false, + "reason": "LayoutLMv3 error clarity and output-embedding reinit after resize are different bugs." + }, + { + "left": "issue:43761", + "right": "issue:44079", + "accept": false, + "reason": "CLIPVisionModel hidden_states regression and ModelOutput key assignment are different layers of the stack." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 24, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:33290", + "issue:34567", + "issue:34689", + "issue:39692", + "issue:41628", + "issue:42915", + "issue:43232", + "issue:43316", + "issue:43388", + "issue:43441", + "issue:43452", + "issue:43493", + "issue:43525", + "issue:43526", + "issue:43575", + "issue:43611", + "issue:43646", + "issue:43673", + "issue:43701", + "issue:43723", + "issue:43761", + "issue:43827", + "issue:43873", + "issue:43931", + "issue:43994", + "issue:44077", + "issue:44188", + "issue:44206", + "issue:44387", + "issue:44442", + "issue:44479", + "issue:44610", + "issue:44743", + "issue:44811", + "issue:44857", + "issue:44898", + "issue:44977", + "issue:45003", + "issue:45072", + "issue:45216", + "issue:45290", + "issue:45372", + "issue:45440", + "issue:45479", + "issue:45491", + "issue:45584", + "issue:45588", + "issue:45600" + ], + "soft_pairs": [ + "issue:42915|issue:43931", + "issue:43441|issue:45588", + "issue:43232|issue:43388", + "issue:44442|issue:45072", + "issue:43526|issue:44857", + "issue:43723|issue:45479", + "issue:44479|issue:45290", + "issue:43526|issue:44610", + "issue:45003|issue:45440", + "issue:44743|issue:44811", + 
"issue:43452|issue:45588", + "issue:43931|issue:45216", + "issue:44977|issue:45584", + "issue:45588|issue:45600", + "issue:43761|issue:44188", + "issue:43316|issue:43525", + "issue:43575|issue:43701", + "issue:43646|issue:44077", + "issue:43994|issue:44206", + "issue:39692|issue:45491", + "issue:43575|issue:44387", + "issue:43873|issue:45588", + "issue:43673|issue:44743", + "issue:33290|issue:34567", + "issue:44442|issue:44898", + "issue:34689|issue:43611", + "issue:41628|issue:45372", + "issue:43493|issue:43827" + ], + "prepared_review_unit_hash": "61f2a5d53bbb3014e6ae16299c3819484c6ee76fc21812ff5f2de4e0337608f9", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 28, + "serialized_chars": 36197, + "estimated_input_tokens": 9050, + "estimated_eval_tokens": 18356 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is highly heterogeneous: it mixes unrelated bugs in loading, generation, quantization/memory, processors, docs, and config/API regressions. 
None of the soft edges look like true duplicates, and there are no PRs to assess.", + "confidence": 0.94, + "canonical_issue_reason": "No single canonical issue stands out; the items cover distinct subsystems and failure modes, so treating them as one duplicate cluster would be too aggressive.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43873 is the broadest and most central memory/quantization report in the set, with the most discussion, but it still does not represent the other unrelated bugs well.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42915", + "right": "issue:43931", + "accept": false, + "reason": "Both are model-loading/runtime failures, but one is Qwen3Moe + FineGrainedFP8Config and the other is Qwen3-VL weight-shape mismatch; different models and root causes." + }, + { + "left": "issue:43441", + "right": "issue:45588", + "accept": false, + "reason": "Both mention flash attention, but they hit different code paths: Ministral-3 v5 RC compatibility vs an AttributeError in integrations/flash_attention.py for sink-less models." + }, + { + "left": "issue:43232", + "right": "issue:43388", + "accept": false, + "reason": "Generation kwargs syncing and metric-gathering label truncation are unrelated behaviors in different parts of the trainer stack." + }, + { + "left": "issue:44442", + "right": "issue:45072", + "accept": false, + "reason": "Tokenizer loading failure and bfloat16 dtype mismatch are separate bugs with no shared code path." + }, + { + "left": "issue:43526", + "right": "issue:44857", + "accept": false, + "reason": "BeitImageProcessorFast label reduction and LwDetrImageLoss AMP/CUDA crashes are different subsystems and failure modes." + }, + { + "left": "issue:43723", + "right": "issue:45479", + "accept": false, + "reason": "AutoTokenizer loading in v5 and zero-loss sequence classification are unrelated issues." 
+ }, + { + "left": "issue:44479", + "right": "issue:45290", + "accept": false, + "reason": "A video-input regression for Qwen VL models is not the same as chat template crashing on tool-call assistant messages." + }, + { + "left": "issue:43526", + "right": "issue:44610", + "accept": false, + "reason": "Label reduction in an image processor and a processor/model input-size mismatch are distinct bugs." + }, + { + "left": "issue:45003", + "right": "issue:45440", + "accept": false, + "reason": "Unsafe sys.modules access in modeling_utils is unrelated to DeepseekV3MoE divergence from remote implementation." + }, + { + "left": "issue:44743", + "right": "issue:44811", + "accept": false, + "reason": "Recurrent-state reset in Qwen3.5 cache handling and Whisper batch_decode skip_special_tokens behavior are unrelated." + }, + { + "left": "issue:43452", + "right": "issue:45588", + "accept": false, + "reason": "gguf_file loading breakage for tokenizer/model construction is not the same as flash_attention.py crashing on s_aux=None." + }, + { + "left": "issue:43931", + "right": "issue:45216", + "accept": false, + "reason": "Weight-shape mismatch during model loading and incorrect save_pretrained checkpoint serialization are different defects." + }, + { + "left": "issue:44977", + "right": "issue:45584", + "accept": false, + "reason": "Qwen3.5 flash-attention generation issues do not match Whisper's empty-transcription failure after align_special_tokens." + }, + { + "left": "issue:45588", + "right": "issue:45600", + "accept": false, + "reason": "A flash-attention AttributeError and an auto_mappings stale-config CI break are unrelated." + }, + { + "left": "issue:43761", + "right": "issue:44188", + "accept": false, + "reason": "CLIPVisionModel returning hidden_states=None and attention-kernel divergence under torch.compile are different code paths and symptoms." 
+ }, + { + "left": "issue:43316", + "right": "issue:43525", + "accept": false, + "reason": "Gemma3TextConfig API mismatch and Llama4Config missing pad_token_id are separate config-schema regressions." + }, + { + "left": "issue:43575", + "right": "issue:43701", + "accept": false, + "reason": "Tensor-parallel OOM on a specific Qwen model and resume_from_checkpoint key mismatch are unrelated." + }, + { + "left": "issue:43646", + "right": "issue:44077", + "accept": false, + "reason": "Custom model initialization breakage and patchtsmixer post_init validation are not the same bug." + }, + { + "left": "issue:43994", + "right": "issue:44206", + "accept": false, + "reason": "SigLIP2 nonsensical outputs and a LaserFeatureExtractor center-arg crash are distinct issues." + }, + { + "left": "issue:39692", + "right": "issue:45491", + "accept": false, + "reason": "SigLIP2 documentation example errors and Gemma3 mixed-length NaN embeddings are different concerns." + }, + { + "left": "issue:43575", + "right": "issue:44387", + "accept": false, + "reason": "Both involve memory pressure, but one is TP loading OOM for a specific model and the other is a broader int4 reserved-memory regression; not the same concrete fix." + }, + { + "left": "issue:43873", + "right": "issue:45588", + "accept": false, + "reason": "Quantization/offloading behavior and a flash_attention AttributeError are separate subsystems." + }, + { + "left": "issue:43673", + "right": "issue:44743", + "accept": false, + "reason": "Missing GenerationMixin cache in chunked_prefill and recurrent-state reset in modular_qwen3_5 are different generation bugs." + }, + { + "left": "issue:33290", + "right": "issue:34567", + "accept": false, + "reason": "Adafactor/DeepSpeed OOM and TrainerState.num_input_tokens_seen not updating are unrelated trainer issues." 
+ }, + { + "left": "issue:44442", + "right": "issue:44898", + "accept": false, + "reason": "Tokenizer loading failure and Perceiver non-default-resolution classification failure are different model families and code paths." + }, + { + "left": "issue:34689", + "right": "issue:43611", + "accept": false, + "reason": "Llama 3.2 Vision loading regression and base_model_prefix loading breakage are both loading-related, but the concrete failures differ." + }, + { + "left": "issue:41628", + "right": "issue:45372", + "accept": false, + "reason": "AutoImageProcessor import error and ReasoningEffort import error are separate missing-symbol regressions." + }, + { + "left": "issue:43493", + "right": "issue:43827", + "accept": false, + "reason": "SigLIP2 HF/JAX implementation discrepancy is unrelated to docs still referencing pipeline() after removals." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 25, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:16998", + "issue:22355", + "issue:25251", + "issue:28282", + "issue:33290", + "issue:34689", + "issue:35141", + "issue:36010", + "issue:36331", + "issue:41950", + "issue:42175", + "issue:43065", + "issue:43295", + "issue:43316", + "issue:43381", + "issue:43388", + "issue:43404", + "issue:43441", + "issue:43475", + "issue:43540", + "issue:43643", + "issue:43688", + "issue:43716", + "issue:43720", + "issue:43749", + "issue:43761", + "issue:43825", + "issue:43856", + "issue:43873", + "issue:43931", + "issue:44077", + "issue:44162", + "issue:44190", + "issue:44442", + "issue:44479", + "issue:44496", + "issue:44514", + "issue:44617", + 
"issue:44792", + "issue:44855", + "issue:44871", + "issue:44877", + "issue:44913", + "issue:45127", + "issue:45137", + "issue:45200", + "issue:45584" + ], + "soft_pairs": [ + "issue:43381|issue:43873", + "issue:33290|issue:35141", + "issue:33290|issue:34689", + "issue:44077|issue:44479", + "issue:43720|issue:44442", + "issue:43643|issue:44913", + "issue:44190|issue:44496", + "issue:43749|issue:43931", + "issue:43873|issue:44871", + "issue:43540|issue:44514", + "issue:44162|issue:44792", + "issue:43381|issue:43856", + "issue:33290|issue:36331", + "issue:43316|issue:44877", + "issue:34689|issue:36010", + "issue:44855|issue:45137", + "issue:42175|issue:43316", + "issue:43404|issue:43441", + "issue:43388|issue:43688", + "issue:43065|issue:44617", + "issue:16998|issue:22355", + "issue:25251|issue:28282", + "issue:43316|issue:43716", + "issue:41950|issue:43475", + "issue:45127|issue:45200", + "issue:43761|issue:44479", + "issue:43295|issue:43825", + "issue:44871|issue:45584" + ], + "prepared_review_unit_hash": "7e656ab5c403fbda5b606cd305d44e38c17430309d8b8db28f580e8a025f8539", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 28, + "serialized_chars": 35508, + "estimated_input_tokens": 8877, + "estimated_eval_tokens": 18010 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the soft pairs span unrelated bugs across loading, config, multimodal processing, training, and environment/setup issues. 
None of the soft edges look like the same underlying defect, so all are rejected and there is no clear canonical duplicate to choose.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43381", + "right": "issue:43873", + "accept": false, + "reason": "Different problems: eval-mode gradient checkpointing vs quantization/offloading behavior." + }, + { + "left": "issue:33290", + "right": "issue:35141", + "accept": false, + "reason": "Unrelated issues: DeepSpeed Adafactor OOM vs embedding reinit after resize/tied embeddings." + }, + { + "left": "issue:33290", + "right": "issue:34689", + "accept": false, + "reason": "OOM during optimizer/deepspeed use is not the same as a Llama model-loading regression." + }, + { + "left": "issue:44077", + "right": "issue:44479", + "accept": false, + "reason": "PatchTSMixer post_init API restriction is unrelated to Qwen video-input regression." + }, + { + "left": "issue:43720", + "right": "issue:44442", + "accept": false, + "reason": "Different failure modes: BitNet packed-weight loading vs tokenizer loading for FastSpeech2Conformer." + }, + { + "left": "issue:43643", + "right": "issue:44913", + "accept": false, + "reason": "Both concern config loading, but one is missing fields with trust_remote_code and the other is rotary_pct not persisting on reload." + }, + { + "left": "issue:44190", + "right": "issue:44496", + "accept": false, + "reason": "Dataset script loading failure is unrelated to config/model_type recognition failure." + }, + { + "left": "issue:43749", + "right": "issue:43931", + "accept": false, + "reason": "Both are model-loading related, but one is FSDP CPU RAM-efficient loading and the other is a Qwen3-VL shape mismatch." 
+ }, + { + "left": "issue:43873", + "right": "issue:44871", + "accept": false, + "reason": "Quantization/offloading issue is unrelated to Gemma eos_token_id config inconsistency." + }, + { + "left": "issue:43540", + "right": "issue:44514", + "accept": false, + "reason": "Both touch video/multimodal paths, but one is a video ValueError in Qwen3OmniMoe and the other is batched chat-template padding behavior in Qwen2_5_VLProcessor." + }, + { + "left": "issue:44162", + "right": "issue:44792", + "accept": false, + "reason": "ESM2 model breakage is unrelated to a janus image-generation test failure." + }, + { + "left": "issue:43381", + "right": "issue:43856", + "accept": false, + "reason": "Gradient checkpointing in eval mode is not the same as Qwen3 MoE training memory usage." + }, + { + "left": "issue:33290", + "right": "issue:36331", + "accept": false, + "reason": "DeepSpeed OOM and CustomTrainer compute_loss signature mismatch are different bugs." + }, + { + "left": "issue:43316", + "right": "issue:44877", + "accept": false, + "reason": "Both are config-related, but one is a Gemma3TextConfig API discrepancy and the other is strict config blocking granite_speech loading." + }, + { + "left": "issue:34689", + "right": "issue:36010", + "accept": false, + "reason": "Model-loading regression for Llama 3.2 vision is unrelated to a missing GenerationMixin import." + }, + { + "left": "issue:44855", + "right": "issue:45137", + "accept": false, + "reason": "Python 3.13 torch.jit parsing/IndentationError is unrelated to DeepSpeed ZeRO3 deque underflow." + }, + { + "left": "issue:42175", + "right": "issue:43316", + "accept": false, + "reason": "Package extras/backend installation problem is unrelated to a model config API discrepancy." + }, + { + "left": "issue:43404", + "right": "issue:43441", + "accept": false, + "reason": "Same general family name, but different concrete failures: tied lm_head weights vs FlashAttention runtime failure." 
+ }, + { + "left": "issue:43388", + "right": "issue:43688", + "accept": false, + "reason": "Metric gathering drops tuple labels, while the other issue is auxiliary-loss normalization." + }, + { + "left": "issue:43065", + "right": "issue:44617", + "accept": false, + "reason": "Dummy Conv2d implementation in Sam3PixelDecoder is not the same issue as Sam3Video CUDA OOM." + }, + { + "left": "issue:16998", + "right": "issue:22355", + "accept": false, + "reason": "DeBERTa model_max_length question is unrelated to the missing transformers.onnx import." + }, + { + "left": "issue:25251", + "right": "issue:28282", + "accept": false, + "reason": "Pipeline top_k output nesting is unrelated to AutoModel missing PyTorch." + }, + { + "left": "issue:43316", + "right": "issue:43716", + "accept": false, + "reason": "Gemma3TextConfig API discrepancy is unrelated to Mistral-3 image preprocessor dtype mismatch." + }, + { + "left": "issue:41950", + "right": "issue:43475", + "accept": false, + "reason": "Video-classification pipeline processor selection is unrelated to Sam3VisionEncoderOutput missing fpn_position_embeddings." + }, + { + "left": "issue:45127", + "right": "issue:45200", + "accept": false, + "reason": "LoRA/extended-vocab collapse with tied embeddings is unrelated to Gemma 4 mm_token_type_ids defaults." + }, + { + "left": "issue:43761", + "right": "issue:44479", + "accept": false, + "reason": "Different regressions in different multimodal paths: CLIPVisionModel hidden_states vs Qwen video input regression." + }, + { + "left": "issue:43295", + "right": "issue:43825", + "accept": false, + "reason": "Processor/tokenizer regression is unrelated to the pipeline error message claiming translation support." + }, + { + "left": "issue:44871", + "right": "issue:45584", + "accept": false, + "reason": "Gemma eos_token_id config inconsistency is not the same as Whisper empty-transcription failure after token alignment." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 26, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:32090", + "issue:35141", + "issue:41720", + "issue:42617", + "issue:43065", + "issue:43295", + "issue:43452", + "issue:43550", + "issue:43575", + "issue:43606", + "issue:43646", + "issue:43701", + "issue:43742", + "issue:43761", + "issue:43827", + "issue:43873", + "issue:43906", + "issue:43931", + "issue:43994", + "issue:44060", + "issue:44077", + "issue:44190", + "issue:44242", + "issue:44464", + "issue:44479", + "issue:44496", + "issue:44521", + "issue:44534", + "issue:44610", + "issue:44617", + "issue:44734", + "issue:44792", + "issue:44871", + "issue:44913", + "issue:44933", + "issue:44938", + "issue:44964", + "issue:44977", + "issue:45245", + "issue:45290", + "issue:45362", + "issue:45381", + "issue:45406", + "issue:45542", + "issue:45563", + "issue:45584", + "issue:45600" + ], + "soft_pairs": [ + "issue:44734|issue:45406", + "issue:43295|issue:44871", + "issue:44077|issue:44534", + "issue:43550|issue:43701", + "issue:43606|issue:43701", + "issue:43646|issue:44913", + "issue:43906|issue:45290", + "issue:44617|issue:45584", + "issue:43761|issue:44242", + "issue:43931|issue:44060", + "issue:43452|issue:44496", + "issue:45563|issue:45584", + "issue:44521|issue:45381", + "issue:43575|issue:43742", + "issue:42617|issue:43575", + "issue:44479|issue:44734", + "issue:44933|issue:45362", + "issue:43575|issue:43873", + "issue:32090|issue:35141", + "issue:44464|issue:45563", + "issue:43065|issue:44610", + "issue:44464|issue:44871", + "issue:44792|issue:44977", + 
"issue:43827|issue:43994", + "issue:44534|issue:44913", + "issue:41720|issue:45245", + "issue:44190|issue:44964", + "issue:44938|issue:45542", + "issue:44792|issue:45600" + ], + "prepared_review_unit_hash": "071197f85d3bc66a93662ed6a6e14b28d395d26f3f95397f7200af717786a8db", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 29, + "serialized_chars": 35615, + "estimated_input_tokens": 8904, + "estimated_eval_tokens": 18064 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No soft pair looks like a true duplicate: the titles point to different models, code paths, or failure modes, so this set does not have a safe canonical issue/PR.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44734", + "right": "issue:45406", + "accept": false, + "reason": "Both involve transformers serve/chat-like crashes, but one is KV-cache tensor indexing and the other is a Gemma4Processor attribute regression; different bugs." + }, + { + "left": "issue:43295", + "right": "issue:44871", + "accept": false, + "reason": "One is a processor/tokenizer API regression in custom code, the other is an eos_token_id config mismatch for Gemma-3; not the same defect." + }, + { + "left": "issue:44077", + "right": "issue:44534", + "accept": false, + "reason": "Unrelated issues: patchtsmixer post_init handling vs non-persistent buffer initialization garbage." + }, + { + "left": "issue:43550", + "right": "issue:43701", + "accept": false, + "reason": "Bamba torch.compile + SDPA failure is unrelated to resume_from_checkpoint key mismatch." 
+ }, + { + "left": "issue:43606", + "right": "issue:43701", + "accept": false, + "reason": "CPU offload device mismatch is a different loading/runtime problem than checkpoint key mismatch." + }, + { + "left": "issue:43646", + "right": "issue:44913", + "accept": false, + "reason": "Custom model initialization breakage and GPTNeoX rotary_pct reload drift are separate config/init issues." + }, + { + "left": "issue:43906", + "right": "issue:45290", + "accept": false, + "reason": "A reproduction of an older issue is not the same as apply_chat_template crashing on tool-call assistant messages." + }, + { + "left": "issue:44617", + "right": "issue:45584", + "accept": false, + "reason": "Sam3Video CUDA OOM and Whisper empty-transcription failure are different models and failure modes." + }, + { + "left": "issue:43761", + "right": "issue:44242", + "accept": false, + "reason": "CLIPVision hidden_states regression is unrelated to MoE load-balancing loss omission." + }, + { + "left": "issue:43931", + "right": "issue:44060", + "accept": false, + "reason": "Qwen3-VL weight-shape mismatch and Qwen3-Next tied-weight warning are distinct configuration/loading problems." + }, + { + "left": "issue:43452", + "right": "issue:44496", + "accept": false, + "reason": "gguf_file load breakage is not the same as missing model_type in config.json." + }, + { + "left": "issue:45563", + "right": "issue:45584", + "accept": false, + "reason": "A stale generate warning and Whisper empty-transcription crash are different issues." + }, + { + "left": "issue:44521", + "right": "issue:45381", + "accept": false, + "reason": "assistant_masks all-zero for multimodal templates is separate from Qwen2.5-VL video vision_position_ids being wrong." + }, + { + "left": "issue:43575", + "right": "issue:43742", + "accept": false, + "reason": "Qwen2-57B tensor-parallel OOM and MobileLLM loading key error do not share a concrete code-path bug." 
+ }, + { + "left": "issue:42617", + "right": "issue:43575", + "accept": false, + "reason": "3d_parallel.py execution failure is unrelated to Qwen2-57B TP memory exhaustion." + }, + { + "left": "issue:44479", + "right": "issue:44734", + "accept": false, + "reason": "Video-input regression in Qwen VL models and serve KV-cache indexing crash are different subsystems and symptoms." + }, + { + "left": "issue:44933", + "right": "issue:45362", + "accept": false, + "reason": "Missing image_utils import and Qwen3.5 chat crash are unrelated." + }, + { + "left": "issue:43575", + "right": "issue:43873", + "accept": false, + "reason": "Both mention memory/offload concerns, but one is TP OOM and the other is quantization/offloading behavior; not the same bug." + }, + { + "left": "issue:32090", + "right": "issue:35141", + "accept": false, + "reason": "Trainer _gpu_broadcast_one TypeError and output embedding reinitialization in post_init are separate problems." + }, + { + "left": "issue:44464", + "right": "issue:45563", + "accept": false, + "reason": "Compiled-forward generation inconsistency is unrelated to a stale num_return_sequences warning." + }, + { + "left": "issue:43065", + "right": "issue:44610", + "accept": false, + "reason": "Dummy Conv2d in Sam3PixelDecoder and OmDet-Turbo processor size mismatch are different model-specific issues." + }, + { + "left": "issue:44464", + "right": "issue:44871", + "accept": false, + "reason": "Chunked generation/compiled-forward inconsistency is unrelated to Gemma-3 eos_token_id config mismatch." + }, + { + "left": "issue:44792", + "right": "issue:44977", + "accept": false, + "reason": "Failed janus image-generation test and Qwen3.5 flash-attention generation issue are different models and failure paths." + }, + { + "left": "issue:43827", + "right": "issue:43994", + "accept": false, + "reason": "Docs still referencing pipeline() is not the same as a model producing nonsensical outputs under AutoModel/pipeline." 
+ }, + { + "left": "issue:44534", + "right": "issue:44913", + "accept": false, + "reason": "Non-persistent buffer junk and GPTNeoX rotary_pct reload regression are unrelated." + }, + { + "left": "issue:41720", + "right": "issue:45245", + "accept": false, + "reason": "Qwen3 auto-device-map cuda assert has nothing to do with the 2^24 category limit error." + }, + { + "left": "issue:44190", + "right": "issue:44964", + "accept": false, + "reason": "Local dataset loading in a no-trainer script is unrelated to loading a multimodal model with latest transformers." + }, + { + "left": "issue:44938", + "right": "issue:45542", + "accept": false, + "reason": "Python 3.14 import/load failure and undefined tf backend due to tensorboard-only install are different environment issues." + }, + { + "left": "issue:44792", + "right": "issue:45600", + "accept": false, + "reason": "Janus test failure and auto_mappings referencing removed Sam3LiteText configs are unrelated CI/model issues." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 27, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:36246", + "issue:38175", + "issue:38617", + "issue:39692", + "issue:41628", + "issue:41950", + "issue:42175", + "issue:43065", + "issue:43116", + "issue:43295", + "issue:43388", + "issue:43404", + "issue:43479", + "issue:43540", + "issue:43550", + "issue:43582", + "issue:43644", + "issue:43650", + "issue:43761", + "issue:43976", + "issue:44016", + "issue:44077", + "issue:44079", + "issue:44112", + "issue:44186", + "issue:44190", + "issue:44220", + "issue:44279", + "issue:44355", + "issue:44466", + "issue:44561", + 
"issue:44683", + "issue:44734", + "issue:44743", + "issue:44855", + "issue:44913", + "issue:45127", + "issue:45216", + "issue:45245", + "issue:45276", + "issue:45357", + "issue:45375", + "issue:45406", + "issue:45446", + "issue:45584", + "issue:45588", + "issue:45600", + "issue:45632" + ], + "soft_pairs": [ + "issue:45127|issue:45357", + "issue:43388|issue:43582", + "issue:43761|issue:44743", + "issue:44855|issue:45588", + "issue:43650|issue:44220", + "issue:44355|issue:44855", + "issue:43540|issue:44734", + "issue:43644|issue:44913", + "issue:45276|issue:45584", + "issue:43976|issue:44855", + "issue:44077|issue:44079", + "issue:43404|issue:45216", + "issue:43550|issue:44190", + "issue:43116|issue:44190", + "issue:43295|issue:45276", + "issue:44112|issue:45600", + "issue:45245|issue:45584", + "issue:41950|issue:44016", + "issue:39692|issue:44190", + "issue:36246|issue:41628", + "issue:44734|issue:45632", + "issue:43479|issue:45375", + "issue:36246|issue:38617", + "issue:45406|issue:45632", + "issue:44561|issue:45632", + "issue:44186|issue:44734", + "issue:38175|issue:43065", + "issue:44016|issue:44220", + "issue:42175|issue:44279", + "issue:44683|issue:45446", + "issue:44466|issue:44913" + ], + "prepared_review_unit_hash": "4d2041ec799464ef48dd817daafb571a24bcd3e2d69ff3e1e88f0158e385a412", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 31, + "serialized_chars": 37249, + "estimated_input_tokens": 9313, + "estimated_eval_tokens": 18882 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "All proposed soft edges look like false positives: the paired issues target different subsystems, models, or failure modes, so none should be merged as 
duplicates. There is no clear canonical issue in this mixed set.", + "confidence": 0.95, + "canonical_issue_reason": "No single issue is a good canonical here; the candidate pairs are heterogeneous and do not share one underlying bug or change.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue stands out as a global representative because the set spans unrelated import errors, model regressions, docs problems, and pipeline/config bugs.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45127", + "right": "issue:45357", + "accept": false, + "reason": "Different Qwen3.5-related symptoms: LoRA/weight-tying collapse vs incorrect visual encoder keys on save_pretrained." + }, + { + "left": "issue:43388", + "right": "issue:43582", + "accept": false, + "reason": "Unrelated areas and failures: gather_for_metrics label dropping vs Apple Silicon caching_allocator_warmup TypeError." + }, + { + "left": "issue:43761", + "right": "issue:44743", + "accept": false, + "reason": "CLIPVisionModel hidden_states regression is unrelated to Qwen recurrent state reset with cache." + }, + { + "left": "issue:44855", + "right": "issue:45588", + "accept": false, + "reason": "Python 3.13 indentation/torch.jit parsing issue is not the same as flash_attention s_aux=None AttributeError." + }, + { + "left": "issue:43650", + "right": "issue:44220", + "accept": false, + "reason": "Generic placeholder issue vs audio feature extraction bug; no shared concrete code path." + }, + { + "left": "issue:44355", + "right": "issue:44855", + "accept": false, + "reason": "Compiled Python file errors are unrelated to the DebertaV2Model import parsing failure." + }, + { + "left": "issue:43540", + "right": "issue:44734", + "accept": false, + "reason": "Different multimodal/video processing bug versus serve KV-cache tensor indexing crash." 
+ }, + { + "left": "issue:43644", + "right": "issue:44913", + "accept": false, + "reason": "Non-persistent buffer junk in v5 is unrelated to GPTNeoX rotary_pct config reload behavior." + }, + { + "left": "issue:45276", + "right": "issue:45584", + "accept": false, + "reason": "Gemma4 embedding resize mismatch and Whisper empty-transcription generation failure are separate bugs." + }, + { + "left": "issue:43976", + "right": "issue:44855", + "accept": false, + "reason": "Python version packaging failure is not the same as a DebertaV2 Python 3.13 import parsing error." + }, + { + "left": "issue:44077", + "right": "issue:44079", + "accept": false, + "reason": "Optional post_init enforcement and ModelOutput key assignment are different API/behavior issues." + }, + { + "left": "issue:43404", + "right": "issue:45216", + "accept": false, + "reason": "Weight tying bug in Mistral3ForConditionalGeneration does not match Qwen3.5 checkpoint save_pretrained regression." + }, + { + "left": "issue:43550", + "right": "issue:44190", + "accept": false, + "reason": "torch.compile/SDPA model failure is unrelated to local dataset loading in the image-classification script." + }, + { + "left": "issue:43116", + "right": "issue:44190", + "accept": false, + "reason": "Multi-label classification example returning empty results is a different script and failure mode from dataset loading." + }, + { + "left": "issue:43295", + "right": "issue:45276", + "accept": false, + "reason": "Processor/tokenizer regression is unrelated to Gemma4 token embedding resize behavior." + }, + { + "left": "issue:44112", + "right": "issue:45600", + "accept": false, + "reason": "Two different CI failures with different root causes and code paths." + }, + { + "left": "issue:45245", + "right": "issue:45584", + "accept": false, + "reason": "Category-count runtime limit has nothing to do with Whisper empty transcription handling." 
+ }, + { + "left": "issue:41950", + "right": "issue:44016", + "accept": false, + "reason": "Video-classification pipeline processor lookup bug is unrelated to a notebook syntax error." + }, + { + "left": "issue:39692", + "right": "issue:44190", + "accept": false, + "reason": "SigLIP2 docs example errors are not the same as local dataset loading failure in the no-trainer script." + }, + { + "left": "issue:36246", + "right": "issue:41628", + "accept": false, + "reason": "Both are import errors, but they concern different missing symbols and different modules; not the same bug." + }, + { + "left": "issue:44734", + "right": "issue:45632", + "accept": false, + "reason": "Serve KV-cache continuation crash is unrelated to trust_remote_code cache path collisions." + }, + { + "left": "issue:43479", + "right": "issue:45375", + "accept": false, + "reason": "Phi4MultimodalConfig default-init issue differs from missing deepstack_visual_indexes in Qwen3_5MoeVisionConfig." + }, + { + "left": "issue:36246", + "right": "issue:38617", + "accept": false, + "reason": "Different missing imports in different modules; no shared underlying regression." + }, + { + "left": "issue:45406", + "right": "issue:45632", + "accept": false, + "reason": "Gemma4Processor _tokenizer crash is unrelated to cache-path collision for local trust_remote_code models." + }, + { + "left": "issue:44561", + "right": "issue:45632", + "accept": false, + "reason": "Removal of is_torch_fx_available breaking remote-code models is unrelated to cache path collisions." + }, + { + "left": "issue:44186", + "right": "issue:44734", + "accept": false, + "reason": "Tokenizer padding/truncation crash and serve KV-cache indexing crash are distinct." + }, + { + "left": "issue:38175", + "right": "issue:43065", + "accept": false, + "reason": "SigLIP2 zero probabilities and Sam3PixelDecoder dummy Conv2d are unrelated model bugs." 
+ }, + { + "left": "issue:44016", + "right": "issue:44220", + "accept": false, + "reason": "Notebook syntax error is unrelated to audio feature extraction failure." + }, + { + "left": "issue:42175", + "right": "issue:44279", + "accept": false, + "reason": "Backend dependency packaging issue is too generic and does not match the other transformers dependency report as the same bug." + }, + { + "left": "issue:44683", + "right": "issue:45446", + "accept": false, + "reason": "Both touch flex_attention, but one is a torch>=2.9 compiled-flex_attention failure and the other is an incorrect version check for AuxRequest import; different concrete defects." + }, + { + "left": "issue:44466", + "right": "issue:44913", + "accept": false, + "reason": "Device-dependent lm_head serialization bug is unrelated to GPTNeoX rotary_pct config reload regression." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 28, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:42907", + "issue:43295", + "issue:43299", + "issue:43381", + "issue:43441", + "issue:43479", + "issue:43525", + "issue:43526", + "issue:43531", + "issue:43577", + "issue:43643", + "issue:43673", + "issue:43824", + "issue:43854", + "issue:43901", + "issue:43927", + "issue:43976", + "issue:44016", + "issue:44062", + "issue:44075", + "issue:44112", + "issue:44242", + "issue:44246", + "issue:44279", + "issue:44336", + "issue:44351", + "issue:44368", + "issue:44496", + "issue:44521", + "issue:44683", + "issue:44933", + "issue:44964", + "issue:44987", + "issue:45003", + "issue:45042", + "issue:45200", + "issue:45216", + "issue:45278", + 
"issue:45325", + "issue:45335", + "issue:45357", + "issue:45405", + "issue:45542", + "issue:45561", + "issue:45584", + "issue:45593", + "issue:45600", + "issue:45632" + ], + "soft_pairs": [ + "issue:44246|issue:45003", + "issue:43299|issue:45216", + "issue:43643|issue:44496", + "issue:42907|issue:43441", + "issue:43295|issue:45335", + "issue:44016|issue:44062", + "issue:44964|issue:45600", + "issue:43976|issue:44987", + "issue:43673|issue:44336", + "issue:44112|issue:44964", + "issue:43531|issue:45600", + "issue:45335|issue:45584", + "issue:45561|issue:45632", + "issue:43824|issue:44933", + "issue:43976|issue:44279", + "issue:44496|issue:45042", + "issue:43854|issue:44964", + "issue:43577|issue:45216", + "issue:43299|issue:45357", + "issue:43526|issue:43901", + "issue:44075|issue:44368", + "issue:44521|issue:45325", + "issue:44242|issue:45200", + "issue:43381|issue:45593", + "issue:44351|issue:45278", + "issue:43927|issue:45632", + "issue:43901|issue:44496", + "issue:43976|issue:45405", + "issue:44683|issue:45542", + "issue:43479|issue:43525" + ], + "prepared_review_unit_hash": "2bea3d432659b5501cbfa28aba1955383f9366b860bc588d078b17e6c936e2ab", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36624, + "estimated_input_tokens": 9156, + "estimated_eval_tokens": 18568 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly unrelated Transformers bug reports; none of the suggested soft edges look like true duplicates, so I rejected all of them.", + "confidence": 0.97, + "canonical_issue_reason": "No single canonical issue stands out: the set spans many distinct regressions, model-specific loading/saving bugs, 
and unrelated CI/docs/import problems. If forced, issue 45278 is the broadest umbrella report, but it is not a duplicate target for the rest.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45278 is the broadest general report in the set and could serve as a coarse umbrella for migration/import breakage, but it is still too generic to canonicalize these heterogeneous issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44246", + "right": "issue:45003", + "accept": false, + "reason": "One is a slow-import complaint; the other is about unsafe sys.modules access. Related area, different bug." + }, + { + "left": "issue:43299", + "right": "issue:45216", + "accept": false, + "reason": "Different Qwen versions and different failures: model loading vs save_pretrained checkpoint corruption." + }, + { + "left": "issue:43643", + "right": "issue:44496", + "accept": false, + "reason": "Both involve config/loading, but one is missing fields under trust_remote_code and the other is an unrecognized model missing model_type." + }, + { + "left": "issue:42907", + "right": "issue:43441", + "accept": false, + "reason": "Same model family, but different code paths: saving dequantized weights vs FlashAttention load/runtime failure." + }, + { + "left": "issue:43295", + "right": "issue:45335", + "accept": false, + "reason": "Completely different subsystems: processor/tokenizer regression versus t5gemma embedding resize behavior." + }, + { + "left": "issue:44016", + "right": "issue:44062", + "accept": false, + "reason": "Notebook syntax error versus AddedToken keyword-argument bug; no shared underlying defect." + }, + { + "left": "issue:44964", + "right": "issue:45600", + "accept": false, + "reason": "One is a specific model-load failure; the other is stale auto_mappings references breaking CI. Different root causes." 
+ }, + { + "left": "issue:43976", + "right": "issue:44987", + "accept": false, + "reason": "Version/Python compatibility issue versus a specific model-loading regression." + }, + { + "left": "issue:43673", + "right": "issue:44336", + "accept": false, + "reason": "Generation cache bug and ANSI output formatting bug are unrelated." + }, + { + "left": "issue:44112", + "right": "issue:44964", + "accept": false, + "reason": "CI test flake in GraniteSpeech versus Phi-4 multimodal loading failure; unrelated." + }, + { + "left": "issue:43531", + "right": "issue:45600", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior is unrelated to removed Sam3LiteText mappings." + }, + { + "left": "issue:45335", + "right": "issue:45584", + "accept": false, + "reason": "Embedding resize bug versus Whisper empty-transcription generation failure; different code paths." + }, + { + "left": "issue:45561", + "right": "issue:45632", + "accept": false, + "reason": "Both mention file/path issues, but one is a pytest-xdist race and the other is a trust_remote_code cache collision." + }, + { + "left": "issue:43824", + "right": "issue:44933", + "accept": false, + "reason": "Both are import errors, but they concern different missing symbols and likely different breakages." + }, + { + "left": "issue:43976", + "right": "issue:44279", + "accept": false, + "reason": "Generic dependency complaint does not match the specific Python-version regression." + }, + { + "left": "issue:44496", + "right": "issue:45042", + "accept": false, + "reason": "Unrecognized model/config issue versus PIL backend incorrectly requiring torchvision." + }, + { + "left": "issue:43854", + "right": "issue:44964", + "accept": false, + "reason": "Different models and different failure surfaces; no evidence of the same bug." + }, + { + "left": "issue:43577", + "right": "issue:45216", + "accept": false, + "reason": "Blip2 dtype propagation bug versus Qwen3.5 save_pretrained regression; unrelated." 
+ }, + { + "left": "issue:43299", + "right": "issue:45357", + "accept": false, + "reason": "Same family name, but one is a loading failure and the other is incorrect visual-encoder key serialization." + }, + { + "left": "issue:43526", + "right": "issue:43901", + "accept": false, + "reason": "Image-processor label reduction bug versus pipeline docs mismatch." + }, + { + "left": "issue:44075", + "right": "issue:44368", + "accept": false, + "reason": "Optimizer argument handling and a Qwen warning about tie_word_embeddings are unrelated." + }, + { + "left": "issue:44521", + "right": "issue:45325", + "accept": false, + "reason": "Both are multimodal, but assistant-mask generation and rope-index scaling are distinct bugs." + }, + { + "left": "issue:44242", + "right": "issue:45200", + "accept": false, + "reason": "MoE load-balancing loss logic versus Gemma 4 multimodal token-id defaults; different implementations." + }, + { + "left": "issue:43381", + "right": "issue:45593", + "accept": false, + "reason": "Gradient checkpointing eval-mode restriction is unrelated to D-FINE auxiliary-loss behavior." + }, + { + "left": "issue:44351", + "right": "issue:45278", + "accept": false, + "reason": "Missing HybridCache import may be one symptom of broader breakage, but the reports are not the same concrete bug." + }, + { + "left": "issue:43901", + "right": "issue:44496", + "accept": false, + "reason": "Docs/behavior mismatch for a pipeline versus a model-loading configuration error." + }, + { + "left": "issue:43927", + "right": "issue:45632", + "accept": false, + "reason": "Save/load token-ID corruption and local trust_remote_code cache collisions are different bugs." + }, + { + "left": "issue:43976", + "right": "issue:45405", + "accept": false, + "reason": "The Python-version compatibility regression and the generic dependency/version report affect different packages and symptoms; not the same defect." 
+ }, + { + "left": "issue:44683", + "right": "issue:45542", + "accept": false, + "reason": "Compiled flex_attention on new torch and missing TensorFlow backend are unrelated." + }, + { + "left": "issue:43479", + "right": "issue:43525", + "accept": false, + "reason": "Different config bugs in different models: default subconfig initialization versus missing pad_token_id." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 29, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:38175", + "issue:39692", + "issue:41950", + "issue:42994", + "issue:43065", + "issue:43122", + "issue:43262", + "issue:43295", + "issue:43299", + "issue:43317", + "issue:43441", + "issue:43450", + "issue:43650", + "issue:43653", + "issue:43673", + "issue:43723", + "issue:43761", + "issue:43784", + "issue:43854", + "issue:43901", + "issue:44016", + "issue:44162", + "issue:44261", + "issue:44263", + "issue:44279", + "issue:44291", + "issue:44297", + "issue:44462", + "issue:44466", + "issue:44496", + "issue:44521", + "issue:44617", + "issue:44623", + "issue:44671", + "issue:44683", + "issue:44749", + "issue:44779", + "issue:44877", + "issue:44898", + "issue:44987", + "issue:45020", + "issue:45081", + "issue:45276", + "issue:45290", + "issue:45397", + "issue:45405", + "issue:45542", + "issue:45593" + ], + "soft_pairs": [ + "issue:44462|issue:45081", + "issue:41950|issue:45020", + "issue:44297|issue:44462", + "issue:44279|issue:45405", + "issue:44496|issue:44877", + "issue:41950|issue:43650", + "issue:43299|issue:43854", + "issue:38175|issue:44617", + "issue:44462|issue:44779", + "issue:43295|issue:44779", + 
"issue:39692|issue:43901", + "issue:43650|issue:45290", + "issue:43723|issue:44162", + "issue:43450|issue:44279", + "issue:44016|issue:44291", + "issue:43262|issue:44521", + "issue:42994|issue:43317", + "issue:44261|issue:44263", + "issue:43441|issue:44683", + "issue:43065|issue:44898", + "issue:44466|issue:45276", + "issue:44623|issue:45397", + "issue:43122|issue:43650", + "issue:44623|issue:44749", + "issue:43784|issue:45542", + "issue:44671|issue:44779", + "issue:43673|issue:43761", + "issue:44987|issue:45542", + "issue:43653|issue:45593" + ], + "prepared_review_unit_hash": "6286cb9d9d49ca28b8cf2dd1d3ffeb62630fb73f8b0653d13a787f93e32e0c62", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 29, + "serialized_chars": 36365, + "estimated_input_tokens": 9092, + "estimated_eval_tokens": 18440 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is broadly mixed: several tokenizer/loading regressions, some multimodal processor/chat-template bugs, and a number of model-specific failures. 
None of the proposed soft pairs look like true duplicates of the same concrete bug.", + "confidence": 0.34, + "canonical_issue_reason": "issue:44462 is the clearest representative of the tokenizer/loading regression theme and sits near several other tokenizer-related reports, even though the cluster as a whole is not a tight duplicate set.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44462 is the strongest standalone representative: concrete repro, broad enough to anchor the tokenizer-loading subgroup, and more central than the more model-specific reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44462", + "right": "issue:45081", + "accept": false, + "reason": "Both involve tokenizers, but one is AutoTokenizer ignoring repository tokenizer.json and the other is a Mistral regex patch crash; different failure modes and code paths." + }, + { + "left": "issue:41950", + "right": "issue:45020", + "accept": false, + "reason": "Video-classification processor lookup is unrelated to the broad remote_code loading regressions in v5; not the same bug." + }, + { + "left": "issue:44297", + "right": "issue:44462", + "accept": false, + "reason": "save_pretrained writing the wrong tokenizer_class and AutoTokenizer ignoring tokenizer.json are related areas, but they are distinct serialization vs loading bugs." + }, + { + "left": "issue:44279", + "right": "issue:45405", + "accept": false, + "reason": "These are generic dependency/version issues affecting different packages and symptoms, not one underlying defect." + }, + { + "left": "issue:44496", + "right": "issue:44877", + "accept": false, + "reason": "Missing model_type in config.json and strict config rejection for granite_speech are different config-loading problems." + }, + { + "left": "issue:41950", + "right": "issue:43650", + "accept": false, + "reason": "Video pipeline processor mismatch and a placeholder 'ADD THE DATA' issue are clearly unrelated." 
+ }, + { + "left": "issue:43299", + "right": "issue:43854", + "accept": false, + "reason": "Both are model loading failures, but they concern different models and likely different root causes." + }, + { + "left": "issue:38175", + "right": "issue:44617", + "accept": false, + "reason": "Unexpected zero probabilities in SigLIP2 is unrelated to Sam3Video CUDA OOM." + }, + { + "left": "issue:44462", + "right": "issue:44779", + "accept": false, + "reason": "Both are tokenizer regressions, but one is loading repository tokenizer.json and the other is incorrect Deepseek tokenization; not enough evidence of one shared bug." + }, + { + "left": "issue:43295", + "right": "issue:44779", + "accept": false, + "reason": "Custom code accessing processor.tokenizer and Deepseek tokenizer output regressions are separate issues." + }, + { + "left": "issue:39692", + "right": "issue:43901", + "accept": false, + "reason": "Documentation/example errors in different sections and for different APIs; not the same change or bug." + }, + { + "left": "issue:43650", + "right": "issue:45290", + "accept": false, + "reason": "A placeholder issue and an apply_chat_template crash with tool-call messages are unrelated." + }, + { + "left": "issue:43723", + "right": "issue:44162", + "accept": false, + "reason": "AutoTokenizer.from_pretrained loading regression and ESM2 model breakage are different failures." + }, + { + "left": "issue:43450", + "right": "issue:44279", + "accept": false, + "reason": "Video batch-shape bug and a dependency issue do not share a concrete code path." + }, + { + "left": "issue:44016", + "right": "issue:44291", + "accept": false, + "reason": "Notebook syntax error and init_empty_weights argument mismatch are unrelated." + }, + { + "left": "issue:43262", + "right": "issue:44521", + "accept": false, + "reason": "Both mention apply_chat_template, but one is audio sampling-rate defaults and the other is assistant_mask generation for multimodal inputs." 
+ }, + { + "left": "issue:42994", + "right": "issue:43317", + "accept": false, + "reason": "Quantized model saving and dequantized model offload loading are different parts of the quantization workflow." + }, + { + "left": "issue:44261", + "right": "issue:44263", + "accept": false, + "reason": "One is an MLA layernorm precision/config issue; the other is a torch.split return-value issue in a different model component." + }, + { + "left": "issue:43441", + "right": "issue:44683", + "accept": false, + "reason": "FlashAttention failure on Ministral-3 and compiled flex_attention failure on torch>=2.9 are different attention stack problems." + }, + { + "left": "issue:43065", + "right": "issue:44898", + "accept": false, + "reason": "A dummy Conv2d in Sam3PixelDecoder and a Perceiver resize/interpolation bug are not the same defect." + }, + { + "left": "issue:44466", + "right": "issue:45276", + "accept": false, + "reason": "Inconsistent lm_head.weight serialization and resize_token_embeddings not updating Gemma4 embeddings are related to weights, but not the same concrete bug." + }, + { + "left": "issue:44623", + "right": "issue:45397", + "accept": false, + "reason": "processor.save_pretrained missing files and gemma-4 zero3 from_pretrained are separate save/load failures." + }, + { + "left": "issue:43122", + "right": "issue:43650", + "accept": false, + "reason": "Tokenizer regression across versions and a placeholder issue are unrelated." + }, + { + "left": "issue:44623", + "right": "issue:44749", + "accept": false, + "reason": "Missing processor save files and slow filtering after upgrade are not the same issue." + }, + { + "left": "issue:43784", + "right": "issue:45542", + "accept": false, + "reason": "Import-time NameError in sentence-transformers integration and tensorboard/TensorFlow backend mismatch are distinct environment/import problems." 
+ }, + { + "left": "issue:44671", + "right": "issue:44779", + "accept": false, + "reason": "CamemBERT masked LM predictions and Deepseek tokenizer outputs are different model-specific regressions." + }, + { + "left": "issue:43673", + "right": "issue:43761", + "accept": false, + "reason": "Generation cache missing in chunked_prefill and CLIPVision hidden_states being None are unrelated model/runtime bugs." + }, + { + "left": "issue:44987", + "right": "issue:45542", + "accept": false, + "reason": "A model loading regression for physical-intelligence/fast and a tensorboard backend issue are unrelated." + }, + { + "left": "issue:43653", + "right": "issue:45593", + "accept": false, + "reason": "BigBirdTokenizer special-token registration and D-FINE auxiliary-loss behavior do not share a code path." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 30, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:36246", + "issue:41628", + "issue:41950", + "issue:42175", + "issue:42491", + "issue:42757", + "issue:43295", + "issue:43299", + "issue:43352", + "issue:43475", + "issue:43525", + "issue:43526", + "issue:43531", + "issue:43673", + "issue:43824", + "issue:43881", + "issue:43901", + "issue:43976", + "issue:43994", + "issue:44016", + "issue:44162", + "issue:44188", + "issue:44220", + "issue:44242", + "issue:44246", + "issue:44295", + "issue:44297", + "issue:44336", + "issue:44351", + "issue:44361", + "issue:44373", + "issue:44462", + "issue:44485", + "issue:44496", + "issue:44561", + "issue:44589", + "issue:44617", + "issue:44661", + "issue:45003", + "issue:45200", + "issue:45250", + 
"issue:45356", + "issue:45362", + "issue:45397", + "issue:45542", + "issue:45561", + "issue:45563", + "issue:45593" + ], + "soft_pairs": [ + "issue:42175|issue:43976", + "issue:43901|issue:43994", + "issue:44297|issue:45397", + "issue:44361|issue:45593", + "issue:43475|issue:44016", + "issue:45561|issue:45563", + "issue:43824|issue:43976", + "issue:43295|issue:44617", + "issue:43352|issue:45250", + "issue:43525|issue:43881", + "issue:44589|issue:45362", + "issue:44188|issue:44336", + "issue:41628|issue:43976", + "issue:44295|issue:45397", + "issue:43901|issue:44246", + "issue:42491|issue:43299", + "issue:41950|issue:44220", + "issue:44336|issue:44373", + "issue:44242|issue:45593", + "issue:44462|issue:45356", + "issue:44661|issue:45542", + "issue:44246|issue:44496", + "issue:45003|issue:45561", + "issue:36246|issue:42757", + "issue:36246|issue:44351", + "issue:43531|issue:45362", + "issue:43526|issue:45003", + "issue:44336|issue:44485", + "issue:43673|issue:44561", + "issue:44162|issue:45200" + ], + "prepared_review_unit_hash": "f61dbba875ffbaf9e69a4aedb83506d2a76412f177601d95f986609f10d47c76", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36332, + "estimated_input_tokens": 9083, + "estimated_eval_tokens": 18422 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a grab-bag of distinct Transformers issues (API import failures, model-specific load regressions, tokenizer bugs, docs/tests, and training-loss behavior). 
None of the soft pairs look like the same underlying defect, so all soft edges should be rejected.", + "confidence": 0.96, + "canonical_issue_reason": "issue:43824 is the strongest representative of the cluster by impact and discussion volume, but it is not a duplicate of the other reports.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43824 has the clearest, widely visible user-facing regression and the most reference activity among mostly unrelated issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42175", + "right": "issue:43976", + "accept": false, + "reason": "Different problems: missing TensorFlow in the torch extra vs a Python-version import compatibility issue." + }, + { + "left": "issue:43901", + "right": "issue:43994", + "accept": false, + "reason": "Docs mismatch for `return_all_scores` is unrelated to SigLIP2 model output quality." + }, + { + "left": "issue:44297", + "right": "issue:45397", + "accept": false, + "reason": "Tokenizer class metadata bug is unrelated to Gemma-4 ZeRO3 loading failure." + }, + { + "left": "issue:44361", + "right": "issue:45593", + "accept": false, + "reason": "MLukeTokenizer task AttributeError and D-FINE auxiliary-loss behavior are different model/code paths." + }, + { + "left": "issue:43475", + "right": "issue:44016", + "accept": false, + "reason": "SAM 3 Video attribute error is unrelated to a notebook syntax error." + }, + { + "left": "issue:45561", + "right": "issue:45563", + "accept": false, + "reason": "pytest-xdist file race and stale generation warning are different testing/runtime bugs." + }, + { + "left": "issue:43824", + "right": "issue:43976", + "accept": false, + "reason": "Import failure for a specific Qwen2.5-VL symbol is unrelated to Python version support." + }, + { + "left": "issue:43295", + "right": "issue:44617", + "accept": false, + "reason": "Processor/tokenizer regression is unrelated to SAM3 video OOM." 
+ }, + { + "left": "issue:43352", + "right": "issue:45250", + "accept": false, + "reason": "A model-specific lack of Flash Attention 2 support is not the same as a generic Flash Attention 2 issue." + }, + { + "left": "issue:43525", + "right": "issue:43881", + "accept": false, + "reason": "Llama4Config missing pad_token_id and GLM-4V loading failure are distinct model-loading bugs." + }, + { + "left": "issue:44589", + "right": "issue:45362", + "accept": false, + "reason": "Float8 storage deserialization failure is unrelated to Qwen3.5 chat crashes." + }, + { + "left": "issue:44188", + "right": "issue:44336", + "accept": false, + "reason": "Torch.compile attention-kernel divergence and ANSI code output are unrelated." + }, + { + "left": "issue:41628", + "right": "issue:43976", + "accept": false, + "reason": "AutoImageProcessor import failure is unrelated to the Python 3.9/3.10 compatibility report." + }, + { + "left": "issue:44295", + "right": "issue:45397", + "accept": false, + "reason": "position_ids buffer access bug is unrelated to Gemma-4 ZeRO3 loading." + }, + { + "left": "issue:43901", + "right": "issue:44246", + "accept": false, + "reason": "A docs issue and a slow-import performance issue do not share the same root cause." + }, + { + "left": "issue:42491", + "right": "issue:43299", + "accept": false, + "reason": "Both mention Qwen3 MoE, but one is LoRA compatibility across branches and the other is a model-loading regression for Qwen3VL MoE; not the same bug." + }, + { + "left": "issue:41950", + "right": "issue:44220", + "accept": false, + "reason": "Video classification pipeline processor lookup and `_torch_extract_fbank_features()` are different subsystems." + }, + { + "left": "issue:44336", + "right": "issue:44373", + "accept": false, + "reason": "ANSI color output in loading reports is unrelated to a docstring typo." 
+ }, + { + "left": "issue:44242", + "right": "issue:45593", + "accept": false, + "reason": "Different loss paths and different models; both concern losses, but not the same concrete defect." + }, + { + "left": "issue:44462", + "right": "issue:45356", + "accept": false, + "reason": "Tokenizer repository file selection and Kimi-K2.5 codec/regression handling are separate tokenizer issues." + }, + { + "left": "issue:44661", + "right": "issue:45542", + "accept": false, + "reason": "Model registration logic and missing TensorFlow backend installation are unrelated." + }, + { + "left": "issue:44246", + "right": "issue:44496", + "accept": false, + "reason": "Import-performance bug and unrecognized model config error are not the same issue." + }, + { + "left": "issue:36246", + "right": "issue:42757", + "accept": false, + "reason": "Different missing symbols from different packages; both are import errors but not the same defect." + }, + { + "left": "issue:36246", + "right": "issue:44351", + "accept": false, + "reason": "Missing `Qwen2_5_VLImageProcessor` and missing `HybridCache` are unrelated exported-symbol regressions." + }, + { + "left": "issue:43531", + "right": "issue:45362", + "accept": false, + "reason": "Qwen3-MoE sliding-window behavior and Qwen3.5 chat crashes are distinct failures." + }, + { + "left": "issue:43526", + "right": "issue:45003", + "accept": false, + "reason": "A BeitImageProcessorFast label-reduction bug is unrelated to unsafe `sys.modules` access." + }, + { + "left": "issue:44336", + "right": "issue:44485", + "accept": false, + "reason": "ANSI codes in loading_report and GLM-5 RoPE implementation are unrelated." + }, + { + "left": "issue:43673", + "right": "issue:44561", + "accept": false, + "reason": "Chunked-prefill cache missing and removal of `is_torch_fx_available` break different code paths." 
+ }, + { + "left": "issue:44162", + "right": "issue:45200", + "accept": false, + "reason": "ESM2 breakage and Gemma 4 mm-token defaults are unrelated model-specific issues." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 31, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:36246", + "issue:41628", + "issue:41950", + "issue:42886", + "issue:43065", + "issue:43066", + "issue:43232", + "issue:43257", + "issue:43262", + "issue:43295", + "issue:43408", + "issue:43441", + "issue:43475", + "issue:43540", + "issue:43596", + "issue:43653", + "issue:43901", + "issue:44060", + "issue:44077", + "issue:44162", + "issue:44242", + "issue:44246", + "issue:44295", + "issue:44336", + "issue:44368", + "issue:44466", + "issue:44496", + "issue:44749", + "issue:44779", + "issue:44857", + "issue:44871", + "issue:44877", + "issue:44933", + "issue:44977", + "issue:44987", + "issue:44995", + "issue:45003", + "issue:45137", + "issue:45200", + "issue:45229", + "issue:45292", + "issue:45335", + "issue:45375", + "issue:45459", + "issue:45563", + "issue:45584", + "issue:45588" + ], + "soft_pairs": [ + "issue:44295|issue:44749", + "issue:44242|issue:44857", + "issue:44246|issue:44871", + "issue:44877|issue:45375", + "issue:36246|issue:43596", + "issue:43066|issue:44779", + "issue:43441|issue:43540", + "issue:45292|issue:45584", + "issue:44779|issue:45292", + "issue:44987|issue:45459", + "issue:41628|issue:41950", + "issue:42886|issue:45229", + "issue:43065|issue:43408", + "issue:44995|issue:45200", + "issue:43901|issue:45003", + "issue:43232|issue:44336", + "issue:43257|issue:45137", + 
"issue:44496|issue:45003", + "issue:43295|issue:45292", + "issue:44995|issue:45588", + "issue:44060|issue:44077", + "issue:43262|issue:44246", + "issue:43653|issue:44162", + "issue:43475|issue:43540", + "issue:44368|issue:45563", + "issue:44466|issue:45335", + "issue:44368|issue:44977", + "issue:44933|issue:45459" + ], + "prepared_review_unit_hash": "24561f8c8bcc96f3effbda4f24549cdd58a4546c0f67ef6b3a408b4788b6bc29", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 28, + "serialized_chars": 35643, + "estimated_input_tokens": 8911, + "estimated_eval_tokens": 18078 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous: several unrelated Transformers regressions are grouped together, with only a few loose theme overlaps around config/loading, tokenizers, and embedding resizing. 
I do not see any soft edge that clearly represents the same underlying bug/change.", + "confidence": 0.9, + "canonical_issue_reason": "If a single issue must anchor the cluster, 44877 is the most concrete, self-contained bug report around config loading/strictness; it is the closest fit to the small config-related subset, though the cluster overall is not a true duplicate set.", + "canonical_pr_reason": null, + "best_issue_reason": "44933 is the best representative for triage because it is open, user-facing, and broad enough to stand in for the import/image-utils subset without overcommitting to an exact duplicate relationship.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44295", + "right": "issue:44749", + "accept": false, + "reason": "Different failures: a position_ids buffer access error versus a performance regression after upgrading. No shared code-path bug." + }, + { + "left": "issue:44242", + "right": "issue:44857", + "accept": false, + "reason": "One is about router/load-balancing loss handling; the other is a CUDA AMP crash in LwDetrImageLoss. Same broad ML area, not the same bug." + }, + { + "left": "issue:44246", + "right": "issue:44871", + "accept": false, + "reason": "Import slowdown is unrelated to the Gemma eos_token_id config mismatch." + }, + { + "left": "issue:44877", + "right": "issue:45375", + "accept": false, + "reason": "Both touch config strictness, but one is a loading failure for granite_speech while the other is a missing field being dropped in Qwen3_5MoeVisionConfig. Similar theme, not same bug." + }, + { + "left": "issue:36246", + "right": "issue:43596", + "accept": false, + "reason": "Qwen2.5 image processor import error and a ZeRO3/BertModel index error are unrelated." + }, + { + "left": "issue:43066", + "right": "issue:44779", + "accept": false, + "reason": "Both are tokenizer regressions in v5, but one is a wrong decoder-type warning and the other is incorrect Deepseek tokenization output. 
Different concrete issues." + }, + { + "left": "issue:43441", + "right": "issue:43540", + "accept": false, + "reason": "FlashAttention failure for Ministral-3 is not the same as Qwen3OmniMoe video input processing error." + }, + { + "left": "issue:45292", + "right": "issue:45584", + "accept": false, + "reason": "resize_token_embeddings not updating output embeddings is unrelated to Whisper empty-transcription failure after align_special_tokens." + }, + { + "left": "issue:44779", + "right": "issue:45292", + "accept": false, + "reason": "Tokenizer output regression versus embedding resize propagation bug; no shared fix path." + }, + { + "left": "issue:44987", + "right": "issue:45459", + "accept": false, + "reason": "Loading a specific model and masking protobuf decode errors are different problems affecting different parts of the stack." + }, + { + "left": "issue:41628", + "right": "issue:41950", + "accept": false, + "reason": "Importing AutoImageProcessor and a video-classification pipeline picking image processors are only loosely related by image handling, not the same bug." + }, + { + "left": "issue:42886", + "right": "issue:45229", + "accept": false, + "reason": "Offline cache loading and multi-GPU CUDA OOM are different failure classes." + }, + { + "left": "issue:43065", + "right": "issue:43408", + "accept": false, + "reason": "SAM 3 pixel decoder dummy Conv2d and a model-type warning for sam3_video vs sam3_tracker are not the same underlying issue." + }, + { + "left": "issue:44995", + "right": "issue:45200", + "accept": false, + "reason": "A stale second-forward cache crash and missing mm_token_type_ids defaults are different bugs." + }, + { + "left": "issue:43901", + "right": "issue:45003", + "accept": false, + "reason": "Docs mismatch for return_all_scores is unrelated to unsafe sys.modules access in modeling_utils." 
+ }, + { + "left": "issue:43232", + "right": "issue:44336", + "accept": false, + "reason": "Generation kwargs after sync_gpus and ANSI codes in loading_report are unrelated code paths." + }, + { + "left": "issue:43257", + "right": "issue:45137", + "accept": false, + "reason": "Qwen3 MOE weight conversion under accelerate+deepspeed and a ZeRO3 deque underflow are different DeepSpeed-related bugs." + }, + { + "left": "issue:44496", + "right": "issue:45003", + "accept": false, + "reason": "Unrecognized model/config loading and unsafe sys.modules access are distinct problems." + }, + { + "left": "issue:43295", + "right": "issue:45292", + "accept": false, + "reason": "Processor.tokenizer regression and resize_token_embeddings propagation bug are not the same issue." + }, + { + "left": "issue:44995", + "right": "issue:45588", + "accept": false, + "reason": "Stale indexer cache crash and flash_attention s_aux=None AttributeError are unrelated." + }, + { + "left": "issue:44060", + "right": "issue:44077", + "accept": false, + "reason": "Both are tied to model config warnings, but one is about incorrect tied-weight warnings and the other about post_init being optional for patchtsmixer. Not the same change." + }, + { + "left": "issue:43262", + "right": "issue:44246", + "accept": false, + "reason": "Audio chat-template sampling-rate behavior and import slowness have no shared bug surface." + }, + { + "left": "issue:43653", + "right": "issue:44162", + "accept": false, + "reason": "BigBirdTokenizer special-token registration and ESM2 breakage are unrelated." + }, + { + "left": "issue:43475", + "right": "issue:43540", + "accept": false, + "reason": "Missing fpn_position_embeddings in SAM 3 video output and Qwen3OmniMoe video-input processing errors are different model-specific bugs." 
+ }, + { + "left": "issue:44368", + "right": "issue:45563", + "accept": false, + "reason": "A tied-embeddings warning and a stale num_return_sequences warning in paged generate are unrelated warnings." + }, + { + "left": "issue:44466", + "right": "issue:45335", + "accept": false, + "reason": "Serialization of lm_head.weight depending on device and t5gemma resize_token_embeddings not affecting decoder.embed_tokens are different tied-weights/resize issues." + }, + { + "left": "issue:44368", + "right": "issue:44977", + "accept": false, + "reason": "Tie-word-embeddings warning versus Qwen3.5 flash-attention generation failure are unrelated." + }, + { + "left": "issue:44933", + "right": "issue:45459", + "accept": false, + "reason": "An import from image_utils missing entirely and protobuf-related tokenizer error masking are different failure modes." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 32, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:28282", + "issue:30990", + "issue:41628", + "issue:42222", + "issue:42371", + "issue:42673", + "issue:42831", + "issue:42994", + "issue:43232", + "issue:43475", + "issue:43482", + "issue:43525", + "issue:43643", + "issue:43646", + "issue:43673", + "issue:43701", + "issue:43704", + "issue:43873", + "issue:43931", + "issue:44038", + "issue:44162", + "issue:44222", + "issue:44297", + "issue:44351", + "issue:44361", + "issue:44368", + "issue:44623", + "issue:44661", + "issue:44734", + "issue:44749", + "issue:44869", + "issue:44933", + "issue:44945", + "issue:45081", + "issue:45092", + "issue:45103", + "issue:45137", + "issue:45200", + 
"issue:45230", + "issue:45278", + "issue:45310", + "issue:45356", + "issue:45362", + "issue:45375", + "issue:45381", + "issue:45397", + "issue:45478", + "issue:45600" + ], + "soft_pairs": [ + "issue:43673|issue:44734", + "issue:44869|issue:45356", + "issue:45137|issue:45600", + "issue:43931|issue:44368", + "issue:43232|issue:43673", + "issue:44297|issue:44623", + "issue:43673|issue:45278", + "issue:43646|issue:44162", + "issue:44361|issue:44749", + "issue:42673|issue:43704", + "issue:28282|issue:30990", + "issue:44749|issue:45310", + "issue:45230|issue:45397", + "issue:44038|issue:44623", + "issue:42994|issue:43482", + "issue:43701|issue:44749", + "issue:42371|issue:45092", + "issue:43475|issue:45381", + "issue:45375|issue:45478", + "issue:43475|issue:43643", + "issue:41628|issue:44933", + "issue:45081|issue:45103", + "issue:45310|issue:45375", + "issue:42994|issue:43525", + "issue:42222|issue:45200", + "issue:43873|issue:44945", + "issue:43475|issue:44661", + "issue:42831|issue:44222", + "issue:45310|issue:45397", + "issue:45362|issue:45375", + "issue:41628|issue:44351" + ], + "prepared_review_unit_hash": "0fa2ca43eaf693f982e04d6749bcdce67a4b71c500c55137401d56dc2550ef23", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 31, + "serialized_chars": 36661, + "estimated_input_tokens": 9166, + "estimated_eval_tokens": 18588 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly noisy cluster with two clear duplicate subclusters: the Qwen3ForCausalLM VRAM leak pair (42673/43704) and the Qwen3.5-Moe from_pretrained error pair (45310/45478). 
The remaining soft edges look like broader version-regression overlaps rather than the same concrete bug.", + "confidence": 0.96, + "canonical_issue_reason": "Issue 45310 is the cleanest concrete bug report to anchor the Qwen3.5-Moe from_pretrained failure, and 45478 is an exact duplicate with the same title and failure mode.", + "canonical_pr_reason": null, + "best_issue_reason": "45310 is the best representative issue because it is specific and reproducible, with exact duplicate 45478 confirming the same bug. The cluster also contains a separate exact duplicate subcluster at 42673/43704.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43673", + "right": "issue:44734", + "accept": false, + "reason": "Both involve generation/cache behavior, but they describe different failures and code paths." + }, + { + "left": "issue:44869", + "right": "issue:45356", + "accept": false, + "reason": "Whisper timestamp decoding and Kimi tokenizer regression handling are unrelated bugs." + }, + { + "left": "issue:45137", + "right": "issue:45600", + "accept": false, + "reason": "DeepSpeed ZeRO3 deque error and removed Sam3LiteText config references are different issues." + }, + { + "left": "issue:43931", + "right": "issue:44368", + "accept": false, + "reason": "Model shape mismatch and a tie_word_embeddings warning are not the same bug." + }, + { + "left": "issue:43232", + "right": "issue:43673", + "accept": false, + "reason": "One is a sync_gpus kwargs problem; the other is a missing cache issue in chunked_prefill." + }, + { + "left": "issue:44297", + "right": "issue:44623", + "accept": false, + "reason": "Tokenizer metadata mismatch and missing processor files are distinct save_pretrained failures." + }, + { + "left": "issue:43673", + "right": "issue:45278", + "accept": false, + "reason": "A specific generation/cache regression is not the same as broad import errors after upgrade." 
+ }, + { + "left": "issue:43646", + "right": "issue:44162", + "accept": false, + "reason": "Custom model initialization breakage is broader than the ESM2-specific failure described here." + }, + { + "left": "issue:44361", + "right": "issue:44749", + "accept": false, + "reason": "Tokenizer AttributeError and data filtering slowdown are unrelated." + }, + { + "left": "issue:42673", + "right": "issue:43704", + "accept": true, + "reason": "Exact same title and same Qwen3ForCausalLM multi-thread VRAM leak; this is a clear duplicate." + }, + { + "left": "issue:28282", + "right": "issue:30990", + "accept": false, + "reason": "Missing PyTorch ImportError and Sentence Transformers hanging while loading are different problems." + }, + { + "left": "issue:44749", + "right": "issue:45310", + "accept": false, + "reason": "Performance regression in filtering data is unrelated to the Qwen3.5-Moe from_pretrained error." + }, + { + "left": "issue:45230", + "right": "issue:45397", + "accept": false, + "reason": "Generic bug report vs a specific gemma-4 zero3 from_pretrained failure; not the same bug." + }, + { + "left": "issue:44038", + "right": "issue:44623", + "accept": false, + "reason": "Qwen3-VL-Moe/transformers v5 and missing processor save files are different issues." + }, + { + "left": "issue:42994", + "right": "issue:43482", + "accept": false, + "reason": "Quantized model saving failure and Qwen2.5-GGUF loading failure are separate paths." + }, + { + "left": "issue:43701", + "right": "issue:44749", + "accept": false, + "reason": "Checkpoint key mismatch is unrelated to the data-processing slowdown report." + }, + { + "left": "issue:42371", + "right": "issue:45092", + "accept": false, + "reason": "TF32 API behavior and remote-code checkpoint meta-init incompatibility are different concerns." 
+ }, + { + "left": "issue:43475", + "right": "issue:45381", + "accept": false, + "reason": "SAM 3 Video attribute error and qwen2.5-vl wrong vision_position_ids are different model-specific bugs." + }, + { + "left": "issue:45375", + "right": "issue:45478", + "accept": false, + "reason": "Both touch Qwen3.5/Moe config handling, but one is a missing vision field and the other is a generic from_pretrained failure." + }, + { + "left": "issue:43475", + "right": "issue:43643", + "accept": false, + "reason": "Different failure classes: SAM 3 Video attribute error vs missing fields from trust_remote_code AutoConfig." + }, + { + "left": "issue:41628", + "right": "issue:44933", + "accept": false, + "reason": "Both are import issues, but they concern different modules and different missing symbols." + }, + { + "left": "issue:45081", + "right": "issue:45103", + "accept": false, + "reason": "Tokenizer regex patch crash and auto_docstring __future__ annotations crash are unrelated." + }, + { + "left": "issue:45310", + "right": "issue:45375", + "accept": false, + "reason": "Related only at a broad Qwen3.5/Moe family level, not the same concrete bug." + }, + { + "left": "issue:42994", + "right": "issue:43525", + "accept": false, + "reason": "Quantized model saving failure is not the same as Llama4Config missing pad_token_id." + }, + { + "left": "issue:42222", + "right": "issue:45200", + "accept": false, + "reason": "vitpose breakage and Gemma 4 mm_token_type_ids defaulting are unrelated regressions." + }, + { + "left": "issue:43873", + "right": "issue:44945", + "accept": false, + "reason": "Quantization offloading and pipeline-parallel output corruption are different code paths." + }, + { + "left": "issue:43475", + "right": "issue:44661", + "accept": false, + "reason": "SAM 3 Video attribute error is unrelated to add-new-model-like failing for TOKENIZER_MAPPING_NAMES." 
+ }, + { + "left": "issue:42831", + "right": "issue:44222", + "accept": false, + "reason": "FP8 accuracy degradation and FP8 save_pretrained failure are different bugs." + }, + { + "left": "issue:41628", + "right": "issue:44351", + "accept": false, + "reason": "Missing AutoImageProcessor and missing HybridCache are separate import regressions." + } + ] + }, + "evaluator_result": { + "accept": false, + "feedback": "The analysis is mostly conservative, but the summary is not fully grounded: it says the only clear duplicate pair is 42673/43704, while the packet also shows an exact duplicate pair for 45310/45478 (same title and same Qwen3.5-Moe from_pretrained failure). That omission makes the cluster summary imprecise. The issue anchor choice is otherwise reasonable, but the summary should acknowledge both clear duplicate pairs." + }, + "evaluator_used": true, + "retried": true, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 33, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:30333", + "issue:30990", + "issue:33290", + "issue:35141", + "issue:36010", + "issue:41628", + "issue:42175", + "issue:42898", + "issue:42994", + "issue:43232", + "issue:43278", + "issue:43377", + "issue:43479", + "issue:43504", + "issue:43526", + "issue:43673", + "issue:43723", + "issue:43784", + "issue:43824", + "issue:43874", + "issue:43976", + "issue:44038", + "issue:44060", + "issue:44079", + "issue:44164", + "issue:44188", + "issue:44222", + "issue:44242", + "issue:44297", + "issue:44373", + "issue:44448", + "issue:44496", + "issue:44568", + "issue:44623", + "issue:44734", + "issue:44779", + "issue:44792", + "issue:44869", + "issue:44933", + "issue:44964", + "issue:45092", + "issue:45103", 
+ "issue:45125", + "issue:45137", + "issue:45310", + "issue:45356", + "issue:45459", + "issue:45561" + ], + "soft_pairs": [ + "issue:43232|issue:44188", + "issue:44222|issue:45310", + "issue:42994|issue:44164", + "issue:44038|issue:45310", + "issue:43232|issue:44792", + "issue:44496|issue:45561", + "issue:43526|issue:45561", + "issue:43377|issue:44242", + "issue:43504|issue:45561", + "issue:36010|issue:41628", + "issue:43673|issue:44188", + "issue:30990|issue:35141", + "issue:42898|issue:44568", + "issue:43479|issue:44079", + "issue:43479|issue:44869", + "issue:43976|issue:44933", + "issue:42175|issue:44933", + "issue:44964|issue:45137", + "issue:43784|issue:43824", + "issue:44623|issue:45310", + "issue:44188|issue:44373", + "issue:44060|issue:45125", + "issue:42898|issue:45356", + "issue:43723|issue:44568", + "issue:43278|issue:45137", + "issue:44734|issue:45103", + "issue:42898|issue:44779", + "issue:43874|issue:45092", + "issue:30333|issue:33290", + "issue:42898|issue:44448", + "issue:41628|issue:45459", + "issue:44222|issue:44297" + ], + "prepared_review_unit_hash": "349a280c5afd3e77286169a7ac8d68862baad2fb5b29f994dfca59b4bb36ed78", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 32, + "serialized_chars": 37317, + "estimated_input_tokens": 9330, + "estimated_eval_tokens": 18916 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous; the soft edges are largely false positives from broad v5/model-loading/tokenizer similarity rather than true duplicates. 
I would not merge any of the candidate pairs.", + "confidence": 0.82, + "canonical_issue_reason": "issue:42898 is the closest thematic anchor: it is a broad tokenizer-regression report and has multiple nearby tokenizer-related soft matches, even though the cluster as a whole is not a real duplicate set.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:42898 is the best representative issue for this cluster because it is the most central broad regression report among the candidates, but it should still be treated as a loose topical anchor rather than a duplicate canonical.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43232", + "right": "issue:44188", + "accept": false, + "reason": "Different failure modes: generation kwargs after sync_gpus vs torch.compile attention-kernel branching." + }, + { + "left": "issue:44222", + "right": "issue:45310", + "accept": false, + "reason": "Both mention MoE/save-pretrained/from-pretrained, but one is FP8 save serialization and the other is a Qwen3.5 load error." + }, + { + "left": "issue:42994", + "right": "issue:44164", + "accept": false, + "reason": "Both concern saving/loading, but one is quantized-model saving and the other is extra_state handling in save/from_pretrained." + }, + { + "left": "issue:44038", + "right": "issue:45310", + "accept": false, + "reason": "Same model family, but these appear to be different Qwen MoE loading bugs in different code paths." + }, + { + "left": "issue:43232", + "right": "issue:44792", + "accept": false, + "reason": "Unrelated subsystems: generation bookkeeping vs a janus image-generation test failure." + }, + { + "left": "issue:44496", + "right": "issue:45561", + "accept": false, + "reason": "Unrelated: model config recognition vs a pytest-xdist file race." + }, + { + "left": "issue:43526", + "right": "issue:45561", + "accept": false, + "reason": "Unrelated: Beit label reduction bug vs captured_info.txt race." 
+ }, + { + "left": "issue:43377", + "right": "issue:44242", + "accept": false, + "reason": "Different model behavior bugs: missing padding-mask support vs load-balancing loss gating." + }, + { + "left": "issue:43504", + "right": "issue:45561", + "accept": false, + "reason": "Unrelated: legacy preset loading vs test utility file race." + }, + { + "left": "issue:36010", + "right": "issue:41628", + "accept": false, + "reason": "Both are import errors, but for different symbols and likely different API breakages." + }, + { + "left": "issue:43673", + "right": "issue:44188", + "accept": false, + "reason": "Different code paths: GenerationMixin cache/chunked prefill vs attention kernel divergence under torch.compile." + }, + { + "left": "issue:30990", + "right": "issue:35141", + "accept": false, + "reason": "Unrelated: a loading hang vs embedding reinitialization after resize." + }, + { + "left": "issue:42898", + "right": "issue:44568", + "accept": false, + "reason": "Both tokenizer regressions in v5, but one is clean-up-space behavior and the other is special-token insertion." + }, + { + "left": "issue:43479", + "right": "issue:44079", + "accept": false, + "reason": "Unrelated: multimodal config defaults vs ModelOutput key assignment." + }, + { + "left": "issue:43479", + "right": "issue:44869", + "accept": false, + "reason": "Different areas: multimodal config initialization vs Whisper timestamp decoding crash." + }, + { + "left": "issue:43976", + "right": "issue:44933", + "accept": false, + "reason": "Unrelated: Python-version incompatibility vs missing image_utils import." + }, + { + "left": "issue:42175", + "right": "issue:44933", + "accept": false, + "reason": "Unrelated: backend extras packaging vs missing import symbol." + }, + { + "left": "issue:44964", + "right": "issue:45137", + "accept": false, + "reason": "Unrelated: Phi-4-multimodal loading vs DeepSpeed ZeRO3 deque error." 
+ }, + { + "left": "issue:43784", + "right": "issue:43824", + "accept": false, + "reason": "Both are import failures, but they involve different missing names and different packages/models." + }, + { + "left": "issue:44623", + "right": "issue:45310", + "accept": false, + "reason": "Both involve pretrained I/O, but one is processor save-pretrained missing files and the other is a Qwen MoE load failure." + }, + { + "left": "issue:44188", + "right": "issue:44373", + "accept": false, + "reason": "Unrelated: kernel divergence bug vs docstring typo." + }, + { + "left": "issue:44060", + "right": "issue:45125", + "accept": false, + "reason": "Same model family, but tied-weights warning and missing _tp_plan are different issues." + }, + { + "left": "issue:42898", + "right": "issue:45356", + "accept": false, + "reason": "Both are tokenizer regressions, but they affect different models and different tokenizer internals." + }, + { + "left": "issue:43723", + "right": "issue:44568", + "accept": false, + "reason": "Both are tokenizer loading/behavior issues, but not the same concrete bug." + }, + { + "left": "issue:43278", + "right": "issue:45137", + "accept": false, + "reason": "Different problem domains: dtype drift in evaluate vs DeepSpeed queue underflow." + }, + { + "left": "issue:44734", + "right": "issue:45103", + "accept": false, + "reason": "Unrelated: server KV-cache continuation crash vs auto_docstring annotation parsing." + }, + { + "left": "issue:42898", + "right": "issue:44779", + "accept": false, + "reason": "Both are tokenizer regressions in v5, but they affect different tokenizers/models and different failure mechanisms." + }, + { + "left": "issue:43874", + "right": "issue:45092", + "accept": false, + "reason": "Both are multimodal-related, but one is a missing image-patches method and the other is a meta-init compatibility issue." 
+ }, + { + "left": "issue:30333", + "right": "issue:33290", + "accept": false, + "reason": "Unrelated: MLflow reporting on failed training jobs vs AdaFactor/deepspeed OOM." + }, + { + "left": "issue:42898", + "right": "issue:44448", + "accept": false, + "reason": "Both mention v4-v5 behavior differences, but one is tokenizer cleanup and the other is Pegasus generation output drift." + }, + { + "left": "issue:41628", + "right": "issue:45459", + "accept": false, + "reason": "Unrelated: missing AutoImageProcessor import vs protobuf decode-error masking." + }, + { + "left": "issue:44222", + "right": "issue:44297", + "accept": false, + "reason": "Both involve save_pretrained, but one is MoE FP8 model saving and the other is tokenizer metadata serialization." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 34, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:22355", + "issue:28282", + "issue:30333", + "issue:30990", + "issue:34689", + "issue:35141", + "issue:36331", + "issue:38175", + "issue:41628", + "issue:42757", + "issue:42831", + "issue:42886", + "issue:42898", + "issue:42994", + "issue:43295", + "issue:43479", + "issue:43576", + "issue:43644", + "issue:43723", + "issue:43784", + "issue:43824", + "issue:44038", + "issue:44206", + "issue:44222", + "issue:44261", + "issue:44263", + "issue:44295", + "issue:44297", + "issue:44360", + "issue:44373", + "issue:44442", + "issue:44483", + "issue:44484", + "issue:44485", + "issue:44514", + "issue:44623", + "issue:44871", + "issue:44913", + "issue:44977", + "issue:44991", + "issue:45092", + "issue:45103", + "issue:45278", + "issue:45310", + 
"issue:45357", + "issue:45406", + "issue:45459" + ], + "soft_pairs": [ + "issue:42898|issue:43295", + "issue:43479|issue:43644", + "issue:44442|issue:45459", + "issue:44222|issue:44623", + "issue:42831|issue:44623", + "issue:44206|issue:45103", + "issue:41628|issue:42757", + "issue:44297|issue:45310", + "issue:22355|issue:28282", + "issue:44222|issue:44484", + "issue:44483|issue:45406", + "issue:43723|issue:44977", + "issue:44263|issue:44360", + "issue:30333|issue:36331", + "issue:44263|issue:45092", + "issue:30333|issue:34689", + "issue:30333|issue:35141", + "issue:28282|issue:30333", + "issue:44295|issue:44623", + "issue:44514|issue:45103", + "issue:43479|issue:44913", + "issue:43723|issue:44871", + "issue:44261|issue:45092", + "issue:44373|issue:44485", + "issue:43723|issue:44991", + "issue:43723|issue:44442", + "issue:45092|issue:45357", + "issue:30990|issue:38175", + "issue:42886|issue:42994", + "issue:42831|issue:45310", + "issue:41628|issue:43824", + "issue:42831|issue:44038", + "issue:42757|issue:45278", + "issue:43576|issue:43784" + ], + "prepared_review_unit_hash": "8b516a83ef3993a2ebe5256072682c35a2936efaa38febd18f71151eabbb0f2f", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 34, + "serialized_chars": 37055, + "estimated_input_tokens": 9264, + "estimated_eval_tokens": 18784 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: most items are separate Transformers regressions/import issues rather than true duplicates. 
The closest thing to an umbrella issue is 45278, but it only loosely represents the group.", + "confidence": 0.42, + "canonical_issue_reason": "issue:45278 is the broadest and most central user-facing regression report in the set (many import errors after upgrading to v5.5.0), so it is the best loose anchor if a single issue must represent the cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45278 is the most general, current, and cluster-like issue; it can serve as a high-level representative, but it should not be treated as a strict duplicate of the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42898", + "right": "issue:43295", + "accept": false, + "reason": "Both are v5 regressions around tokenization/processor behavior, but they describe different failures and different code paths." + }, + { + "left": "issue:43479", + "right": "issue:43644", + "accept": false, + "reason": "One is config initialization for Phi4MultimodalConfig; the other is non-persistent buffers in v5. Different bugs." + }, + { + "left": "issue:44442", + "right": "issue:45459", + "accept": false, + "reason": "Different tokenizer-loading failures with different causes; sharing 'load tokenizer' is too broad." + }, + { + "left": "issue:44222", + "right": "issue:44623", + "accept": false, + "reason": "FP8 MoE save_pretrained and processor.save_pretrained missing files are unrelated save-path bugs." + }, + { + "left": "issue:42831", + "right": "issue:44623", + "accept": false, + "reason": "Accuracy issues in FineGrainedFP8 do not match processor save_pretrained file emission failures." + }, + { + "left": "issue:44206", + "right": "issue:45103", + "accept": false, + "reason": "A feature-extractor argument regression and an auto_docstring annotation crash are unrelated." 
+ }, + { + "left": "issue:41628", + "right": "issue:42757", + "accept": false, + "reason": "Both are import errors, but for different missing symbols and different underlying dependency problems." + }, + { + "left": "issue:44297", + "right": "issue:45310", + "accept": false, + "reason": "Tokenizer config mismatch on save_pretrained and Qwen3.5 MoE from_pretrained failure are different issues." + }, + { + "left": "issue:22355", + "right": "issue:28282", + "accept": false, + "reason": "Both are import errors, but one is missing transformers.onnx and the other is missing PyTorch; not the same bug." + }, + { + "left": "issue:44222", + "right": "issue:44484", + "accept": false, + "reason": "Both mention save_pretrained, but one is FP8 MoE checkpoint saving and the other is max_shard_size behavior." + }, + { + "left": "issue:44483", + "right": "issue:45406", + "accept": false, + "reason": "Chat/completions request handling and Gemma4Processor _tokenizer attribute errors are different failures." + }, + { + "left": "issue:43723", + "right": "issue:44977", + "accept": false, + "reason": "AutoTokenizer loading regression and Qwen3.5 flash-attention generation issue are unrelated." + }, + { + "left": "issue:44263", + "right": "issue:44360", + "accept": false, + "reason": "torch.split return handling and missing ReLU in the DSA indexer are distinct implementation bugs." + }, + { + "left": "issue:30333", + "right": "issue:36331", + "accept": false, + "reason": "MLFlow reporting on failed training jobs and CustomTrainer compute_loss signature mismatch are unrelated." + }, + { + "left": "issue:44263", + "right": "issue:45092", + "accept": false, + "reason": "DSA indexer split handling and remote-code/meta-init incompatibility are not the same code path." + }, + { + "left": "issue:30333", + "right": "issue:34689", + "accept": false, + "reason": "MLFlow job state reporting and Llama 3.2 Vision model loading breakage are unrelated." 
+ }, + { + "left": "issue:30333", + "right": "issue:35141", + "accept": false, + "reason": "Training-job reporting and embedding reinitialization on resize are different bugs." + }, + { + "left": "issue:28282", + "right": "issue:30333", + "accept": false, + "reason": "ImportError for AutoModel/PyTorch and MLFlow reporting of failed jobs are not the same issue." + }, + { + "left": "issue:44295", + "right": "issue:44623", + "accept": false, + "reason": "Registering position_ids as a buffer and missing processor.save_pretrained files are unrelated." + }, + { + "left": "issue:44514", + "right": "issue:45103", + "accept": false, + "reason": "Qwen2_5_VLProcessor chat template batching crash and auto_docstring annotation crash are unrelated." + }, + { + "left": "issue:43479", + "right": "issue:44913", + "accept": false, + "reason": "Multimodal config default initialization and GPTNeoX rotary_pct reload behavior affect different components." + }, + { + "left": "issue:43723", + "right": "issue:44871", + "accept": false, + "reason": "AutoTokenizer loading in v5 and Gemma-3 eos_token_id inconsistency are different problems." + }, + { + "left": "issue:44261", + "right": "issue:45092", + "accept": false, + "reason": "MLA q_a_layernorm precision/config issue and InternVL2 meta-initialization incompatibility are not duplicates." + }, + { + "left": "issue:44373", + "right": "issue:44485", + "accept": false, + "reason": "A wrong docstring and a RoPE implementation discussion are clearly different." + }, + { + "left": "issue:43723", + "right": "issue:44991", + "accept": false, + "reason": "Generic tokenizer load failure and EMBEDDIA/est-roberta-specific tokenizer loading failure are not the same bug." + }, + { + "left": "issue:43723", + "right": "issue:44442", + "accept": false, + "reason": "Two different tokenizer-loading failures with different affected tokenizers and root causes." 
+ }, + { + "left": "issue:45092", + "right": "issue:45357", + "accept": false, + "reason": "Remote-code/meta-init incompatibility and incorrect visual encoder keys on save_pretrained are separate regressions." + }, + { + "left": "issue:30990", + "right": "issue:38175", + "accept": false, + "reason": "Sentence Transformers stuck loading and SigLIP2 zero-probability outputs are unrelated." + }, + { + "left": "issue:42886", + "right": "issue:42994", + "accept": false, + "reason": "Offline tokenizer cache loading and quantized model saving failures are different paths." + }, + { + "left": "issue:42831", + "right": "issue:45310", + "accept": false, + "reason": "FineGrainedFP8 accuracy concerns and Qwen3.5 MoE from_pretrained errors do not match." + }, + { + "left": "issue:41628", + "right": "issue:43824", + "accept": false, + "reason": "AutoImageProcessor import failure and Qwen2_5_VLForConditionalGeneration import failure are different missing symbols." + }, + { + "left": "issue:42831", + "right": "issue:44038", + "accept": false, + "reason": "FineGrainedFP8 accuracy issue and Qwen3-VL-Moe bug are unrelated." + }, + { + "left": "issue:42757", + "right": "issue:45278", + "accept": false, + "reason": "An is_offline_mode import error may be one symptom of broader v5 import breakage, but it is still a distinct issue from the broad 'many import errors' report." + }, + { + "left": "issue:43576", + "right": "issue:43784", + "accept": false, + "reason": "Broken transformers env command and sentence-transformers import NameError are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 35, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:29127", + "issue:30990", + "issue:33357", + "issue:36296", + "issue:39401", + "issue:41628", + "issue:42371", + "issue:42831", + "issue:42886", + "issue:42907", + "issue:43066", + "issue:43116", + "issue:43122", + "issue:43232", + "issue:43408", + "issue:43452", + "issue:43475", + "issue:43784", + "issue:43937", + "issue:43976", + "issue:43992", + "issue:44038", + "issue:44117", + "issue:44222", + "issue:44261", + "issue:44263", + "issue:44295", + "issue:44297", + "issue:44360", + "issue:44373", + "issue:44485", + "issue:44488", + "issue:44514", + "issue:44521", + "issue:44625", + "issue:44704", + "issue:44743", + "issue:44779", + "issue:44829", + "issue:44945", + "issue:44964", + "issue:45200", + "issue:45245", + "issue:45278", + "issue:45290", + "issue:45593", + "issue:45600" + ], + "soft_pairs": [ + "issue:43122|issue:44521", + "issue:44263|issue:44373", + "issue:43232|issue:45593", + "issue:44117|issue:44704", + "issue:42886|issue:43066", + "issue:43122|issue:44514", + "issue:42371|issue:44263", + "issue:30990|issue:33357", + "issue:44704|issue:45290", + "issue:44295|issue:44297", + "issue:42907|issue:44945", + "issue:43976|issue:44945", + "issue:43232|issue:44743", + "issue:39401|issue:44488", + "issue:43232|issue:45245", + "issue:42907|issue:43937", + "issue:43408|issue:43475", + "issue:41628|issue:45278", + "issue:43784|issue:45600", + "issue:44625|issue:44829", + "issue:44360|issue:44485", + "issue:43116|issue:45200", + "issue:39401|issue:44521", + 
"issue:43122|issue:44779", + "issue:44038|issue:44297", + "issue:29127|issue:30990", + "issue:44261|issue:44373", + "issue:44038|issue:44295", + "issue:42371|issue:42831", + "issue:42907|issue:44964", + "issue:44038|issue:44222", + "issue:36296|issue:43116", + "issue:42907|issue:43452", + "issue:43992|issue:44704" + ], + "prepared_review_unit_hash": "2caff0631d0b232fb89de3ee318cb233441203d7acb3037a5a1e94f86b33e0ed", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 34, + "serialized_chars": 36860, + "estimated_input_tokens": 9215, + "estimated_eval_tokens": 18686 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "All proposed soft pairs look like false positives: they share at most a broad subsystem label (tokenizers, loading, generation, multimodal, etc.) but describe different concrete bugs, regressions, or even docs issues.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43122", + "right": "issue:44521", + "accept": false, + "reason": "Both mention tokenization/chat templates, but one is a tokenizer regression across versions and the other is an all-zero assistant_mask bug for multimodal inputs; different failure modes and code paths." + }, + { + "left": "issue:44263", + "right": "issue:44373", + "accept": false, + "reason": "One is a torch.split bug in a GLM MoE indexer; the other is a wrong docstring for position_ids. Docs issue vs runtime bug." 
+ }, + { + "left": "issue:43232", + "right": "issue:45593", + "accept": false, + "reason": "Generation kwargs after sync_gpus is unrelated to D-FINE auxiliary losses being disabled; different models and different subsystems." + }, + { + "left": "issue:44117", + "right": "issue:44704", + "accept": false, + "reason": "Both touch from_pretrained/loading internals, but one is TOKENIZER_MAPPING_NAMES returning None and the other is AutoProcessor not forwarding kwargs to cached_file; not the same bug." + }, + { + "left": "issue:42886", + "right": "issue:43066", + "accept": false, + "reason": "Offline cache loading failure and wrong tokenizer decoder type are separate tokenizer issues with different causes." + }, + { + "left": "issue:43122", + "right": "issue:44514", + "accept": false, + "reason": "Both involve tokenization-related APIs, but one is version-dependent tokenization drift and the other is a batched apply_chat_template crash with padding=False." + }, + { + "left": "issue:42371", + "right": "issue:44263", + "accept": false, + "reason": "TF32 behavior settings and a GLM MoE torch.split bug are unrelated." + }, + { + "left": "issue:30990", + "right": "issue:33357", + "accept": false, + "reason": "These are different loading failures: Sentence Transformers hanging vs a MacOS bus error with a community CLIP model." + }, + { + "left": "issue:44704", + "right": "issue:45290", + "accept": false, + "reason": "AutoProcessor/cached_file kwargs forwarding is unrelated to apply_chat_template crashing on tool-call assistant messages." + }, + { + "left": "issue:44295", + "right": "issue:44297", + "accept": false, + "reason": "A position_ids buffer read error and a tokenizer_class mismatch on save_pretrained are distinct issues." + }, + { + "left": "issue:42907", + "right": "issue:44945", + "accept": false, + "reason": "Saving dequantized Ministrals/Devstrals and incorrect pipeline-parallel LLM output are different bugs." 
+ }, + { + "left": "issue:43976", + "right": "issue:44945", + "accept": false, + "reason": "Python version compatibility for transformers 5.1.0 is unrelated to pipeline parallelism output corruption." + }, + { + "left": "issue:43232", + "right": "issue:44743", + "accept": false, + "reason": "Both concern generation-state handling, but one is sync_gpus kwargs update logic and the other is recurrent state reset in Qwen3.5 modular code; not the same defect." + }, + { + "left": "issue:39401", + "right": "issue:44488", + "accept": false, + "reason": "Qwen3 tokenizer offset_mapping is unrelated to loading cjvt/sleng-bert." + }, + { + "left": "issue:43232", + "right": "issue:45245", + "accept": false, + "reason": "Generation kwargs handling has nothing to do with the 2^24 category limit error." + }, + { + "left": "issue:42907", + "right": "issue:43937", + "accept": false, + "reason": "Dequantized model save failure and GLM-5 GenerationConfig validation are different problems." + }, + { + "left": "issue:43408", + "right": "issue:43475", + "accept": false, + "reason": "Both are SAM3-related, but one is a model-type/config mismatch warning and the other is a missing attribute on Sam3VisionEncoderOutput; different concrete bugs." + }, + { + "left": "issue:41628", + "right": "issue:45278", + "accept": false, + "reason": "An AutoImageProcessor import error is not the same as a broad set of import errors after upgrading versions." + }, + { + "left": "issue:43784", + "right": "issue:45600", + "accept": false, + "reason": "Importing sentence-transformers breaking because nn is undefined is unrelated to Sam3LiteText configs breaking CI." + }, + { + "left": "issue:44625", + "right": "issue:44829", + "accept": false, + "reason": "num_labels propagation in Qwen3.5 and degenerate training with flash_attention_3 are separate issues." 
+ }, + { + "left": "issue:44360", + "right": "issue:44485", + "accept": false, + "reason": "A missing ReLU in a DSA indexer and a GLM-5 RoPE implementation issue are unrelated model-internal changes." + }, + { + "left": "issue:43116", + "right": "issue:45200", + "accept": false, + "reason": "Empty results in a classification example and mm_token_type_ids defaulting for Gemma 4 are different bugs and different call paths." + }, + { + "left": "issue:39401", + "right": "issue:44521", + "accept": false, + "reason": "Tokenizer offset mapping and all-zero assistant masks are both tokenization-adjacent, but they are distinct outputs and distinct bugs." + }, + { + "left": "issue:43122", + "right": "issue:44779", + "accept": false, + "reason": "Both are tokenizer regressions in v5, but they affect different model families and different symptoms; too broad to be the same underlying bug." + }, + { + "left": "issue:44038", + "right": "issue:44297", + "accept": false, + "reason": "A Qwen3-VL-Moe bug and a tokenizer save_pretrained tokenizer_class mismatch are unrelated." + }, + { + "left": "issue:29127", + "right": "issue:30990", + "accept": false, + "reason": "A LayoutLMv3 validation/error-message issue is unrelated to Sentence Transformers getting stuck while loading." + }, + { + "left": "issue:44261", + "right": "issue:44373", + "accept": false, + "reason": "A precision bug from missing rms_norm_eps and a wrong position_ids docstring are not the same issue." + }, + { + "left": "issue:44038", + "right": "issue:44295", + "accept": false, + "reason": "Qwen3-VL-Moe behavior and a position_ids buffer read error are separate bugs." + }, + { + "left": "issue:42371", + "right": "issue:42831", + "accept": false, + "reason": "TF32 API settings and FineGrainedFP8 accuracy regression are different precision-related issues with different scopes." 
+ }, + { + "left": "issue:42907", + "right": "issue:44964", + "accept": false, + "reason": "Saving dequantized Ministrals/Devstrals and loading Phi-4 multimodal instruct are different model I/O failures." + }, + { + "left": "issue:44038", + "right": "issue:44222", + "accept": false, + "reason": "Qwen3-VL-Moe compatibility and FP8 save_pretrained for moe are separate problems; same broad area, not the same bug." + }, + { + "left": "issue:36296", + "right": "issue:43116", + "accept": false, + "reason": "Tensor parallel training and an example-script multi-label classification issue are unrelated." + }, + { + "left": "issue:42907", + "right": "issue:43452", + "accept": false, + "reason": "A dequantized model save failure is unrelated to gguf_file breaking from_pretrained for tokenizer/model loading." + }, + { + "left": "issue:43992", + "right": "issue:44704", + "accept": false, + "reason": "UMT5Encoder missing embed_tokens.weight and AutoProcessor forwarding kwargs to cached_file are different from_pretrained bugs affecting different components." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 36, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:22355", + "issue:29127", + "issue:33453", + "issue:38617", + "issue:39401", + "issue:42222", + "issue:42371", + "issue:42673", + "issue:42757", + "issue:42907", + "issue:43064", + "issue:43116", + "issue:43122", + "issue:43278", + "issue:43421", + "issue:43441", + "issue:43493", + "issue:43673", + "issue:43784", + "issue:43824", + "issue:43825", + "issue:43827", + "issue:43874", + "issue:43906", + "issue:43931", + "issue:43937", + "issue:43976", + "issue:44038", + "issue:44075", + "issue:44117", + "issue:44222", + "issue:44351", + "issue:44485", + "issue:44779", + "issue:44936", + "issue:44945", + "issue:44977", + "issue:45072", + "issue:45083", + "issue:45216", + "issue:45278", + "issue:45341", + "issue:45405", + "issue:45446", + "issue:45478", + "issue:45561", + "issue:45563", + "issue:45588" + ], + "soft_pairs": [ + "issue:45216|issue:45478", + "issue:43824|issue:44351", + "issue:43122|issue:44977", + "issue:42907|issue:44222", + "issue:42757|issue:44351", + "issue:43278|issue:44945", + "issue:43784|issue:45446", + "issue:42222|issue:43441", + "issue:42673|issue:45072", + "issue:38617|issue:45278", + "issue:43064|issue:43673", + "issue:43937|issue:44485", + "issue:22355|issue:29127", + "issue:42371|issue:43874", + "issue:43116|issue:43493", + "issue:44075|issue:44936", + "issue:44222|issue:45478", + "issue:44038|issue:45478", + "issue:43116|issue:43825", + "issue:43976|issue:45446", + "issue:43937|issue:45563", + "issue:39401|issue:44779", + "issue:33453|issue:39401", + 
"issue:43931|issue:44945", + "issue:45083|issue:45446", + "issue:44117|issue:45588", + "issue:43827|issue:45446", + "issue:43937|issue:44977", + "issue:45341|issue:45561", + "issue:42673|issue:43906", + "issue:45405|issue:45446", + "issue:39401|issue:43421" + ], + "prepared_review_unit_hash": "c778ae355e11467d04182bc03dcefbf81532465cc9e4a74dc069e4693daf13cb", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 32, + "serialized_chars": 36902, + "estimated_input_tokens": 9226, + "estimated_eval_tokens": 18708 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is mostly a grab-bag of unrelated issues; the soft pairs are mostly broad topical similarities (imports, tokenizers, Qwen3, save/load) rather than the same concrete bug. I would not merge any of the proposed pairs.", + "confidence": 0.23, + "canonical_issue_reason": "No single canonical duplicate stands out: the items split into several unrelated problem areas, and the soft pairs do not share a sufficiently specific root cause.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45278 is the broadest umbrella-style report among the issues (many import errors after upgrading), so it is the closest thing to a representative issue, though it is not a true duplicate target for the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45216", + "right": "issue:45478", + "accept": false, + "reason": "Both involve Qwen3.5 MoE save/load behavior, but one is a save_pretrained checkpoint issue and the other is a from_pretrained loading error; different concrete failure modes." 
+ }, + { + "left": "issue:43824", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but they concern different missing symbols from different modules; same symptom, not the same underlying bug." + }, + { + "left": "issue:43122", + "right": "issue:44977", + "accept": false, + "reason": "Tokenizer regression vs flash-attention generation failure; unrelated code paths and symptoms." + }, + { + "left": "issue:42907", + "right": "issue:44222", + "accept": false, + "reason": "Both are save/export problems, but for different model families and different quantization formats; not the same bug." + }, + { + "left": "issue:42757", + "right": "issue:44351", + "accept": false, + "reason": "Different missing imports from different packages; too generic to treat as one duplicate." + }, + { + "left": "issue:43278", + "right": "issue:44945", + "accept": false, + "reason": "Dtype drift between train/eval is unrelated to trainer.evaluate() failing after trainer.train()." + }, + { + "left": "issue:43784", + "right": "issue:45446", + "accept": false, + "reason": "Separate import/version-guard failures involving different symbols and libraries." + }, + { + "left": "issue:42222", + "right": "issue:43441", + "accept": false, + "reason": "Different model families and different failures; only the broad 'model broke under v5/flash-attention' theme overlaps." + }, + { + "left": "issue:42673", + "right": "issue:45072", + "accept": false, + "reason": "VRAM leak in Qwen3ForCausalLM vs dtype mismatch in unrelated models; no shared concrete root cause." + }, + { + "left": "issue:38617", + "right": "issue:45278", + "accept": false, + "reason": "Both are import-related regressions, but one is a specific missing helper while the other is a broad upgrade report; not the same underlying issue." 
+ }, + { + "left": "issue:43064", + "right": "issue:43673", + "accept": false, + "reason": "Optimizer state corruption in FSDP/PEFT vs missing GenerationMixin cache during chunked prefill; different subsystems." + }, + { + "left": "issue:43937", + "right": "issue:44485", + "accept": false, + "reason": "Invalid GenerationConfig vs RoPE implementation discussion; distinct bugs." + }, + { + "left": "issue:22355", + "right": "issue:29127", + "accept": false, + "reason": "No overlap in functionality or failure mode; unrelated issues." + }, + { + "left": "issue:42371", + "right": "issue:43874", + "accept": false, + "reason": "TF32 behavior/API settings vs multimodal token counting bug; unrelated." + }, + { + "left": "issue:43116", + "right": "issue:43493", + "accept": false, + "reason": "Example-script multi-label output bug vs SigLIP2 implementation discrepancy; unrelated." + }, + { + "left": "issue:44075", + "right": "issue:44936", + "accept": false, + "reason": "Optimizer argument handling vs trainer.evaluate() after train(); not the same bug." + }, + { + "left": "issue:44222", + "right": "issue:45478", + "accept": false, + "reason": "Both mention Qwen3.5/MoE and save/load, but one is FP8 save_pretrained and the other is from_pretrained loading error; too different to merge." + }, + { + "left": "issue:44038", + "right": "issue:45478", + "accept": false, + "reason": "Both involve Qwen3-ish MoE models, but the model/version and failure details differ enough that they are not the same concrete bug." + }, + { + "left": "issue:43116", + "right": "issue:43825", + "accept": false, + "reason": "Example-script classification bug vs pipeline API error messaging; unrelated." + }, + { + "left": "issue:43976", + "right": "issue:45446", + "accept": false, + "reason": "Python version compatibility problem vs a version check bug for a specific import; different issues." 
+ }, + { + "left": "issue:43937", + "right": "issue:45563", + "accept": false, + "reason": "GenerationConfig validation vs a stale warning in paged generate(); not the same failure." + }, + { + "left": "issue:39401", + "right": "issue:44779", + "accept": false, + "reason": "Both are tokenizer regressions, but in different model families and with different symptoms; too broad to call duplicate." + }, + { + "left": "issue:33453", + "right": "issue:39401", + "accept": false, + "reason": "Tokenizer loading regression vs wrong offset mapping; same subsystem but not the same bug." + }, + { + "left": "issue:43931", + "right": "issue:44945", + "accept": false, + "reason": "Model weight-shape mismatch on load vs pipeline-parallelism output issue; unrelated." + }, + { + "left": "issue:45083", + "right": "issue:45446", + "accept": false, + "reason": "Feature-extraction length helper issue in qwen3_omni_moe vs an import/version-check bug; different code paths." + }, + { + "left": "issue:44117", + "right": "issue:45588", + "accept": false, + "reason": "Tokenizer mapping None handling vs flash_attention crash on sink-less models; unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 37, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:38617", + "issue:41628", + "issue:42175", + "issue:42222", + "issue:42371", + "issue:42617", + "issue:42673", + "issue:43012", + "issue:43066", + "issue:43097", + "issue:43122", + "issue:43408", + "issue:43441", + "issue:43450", + "issue:43452", + "issue:43577", + "issue:43582", + "issue:43673", + "issue:43742", + "issue:43874", + "issue:43931", + "issue:43937", + "issue:43986", + "issue:44117", + "issue:44220", + "issue:44261", + "issue:44263", + "issue:44291", + "issue:44410", + "issue:44483", + "issue:44484", + "issue:44485", + "issue:44493", + "issue:44496", + "issue:44829", + "issue:44933", + "issue:44987", + "issue:45003", + "issue:45071", + "issue:45081", + "issue:45216", + "issue:45278", + "issue:45310", + "issue:45341", + "issue:45459", + "issue:45464", + "issue:45478", + "issue:45542" + ], + "soft_pairs": [ + "issue:44291|issue:45071", + "issue:43450|issue:43986", + "issue:44484|issue:45310", + "issue:44829|issue:45216", + "issue:38617|issue:41628", + "issue:44410|issue:45310", + "issue:44117|issue:45081", + "issue:43408|issue:43742", + "issue:42673|issue:43582", + "issue:42222|issue:45542", + "issue:42673|issue:43577", + "issue:42371|issue:44261", + "issue:43937|issue:44496", + "issue:38617|issue:44220", + "issue:43012|issue:43577", + "issue:42175|issue:42222", + "issue:44933|issue:45278", + "issue:44483|issue:45464", + "issue:42617|issue:45341", + "issue:44484|issue:45478", + "issue:43066|issue:43122", + "issue:43673|issue:45341", + "issue:44410|issue:45478", + 
"issue:43097|issue:44493", + "issue:43874|issue:44261", + "issue:42222|issue:45459", + "issue:43441|issue:44829", + "issue:43986|issue:44484", + "issue:43012|issue:43408", + "issue:44263|issue:44485", + "issue:41628|issue:45071", + "issue:45071|issue:45542", + "issue:43931|issue:44410", + "issue:45003|issue:45341", + "issue:44117|issue:44987", + "issue:43986|issue:45341", + "issue:43097|issue:43452" + ], + "prepared_review_unit_hash": "90b9381620f9b37f8e9a38ba16d45db6dd9c4eb82e3dfe3ca764511fdac44944", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 37, + "serialized_chars": 38591, + "estimated_input_tokens": 9648, + "estimated_eval_tokens": 19552 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly thematically similar Transformers regression reports, but they do not appear to be true duplicates. The strongest representative issue is the broad import-regression report, while the rest are separate bugs affecting different code paths or model families.", + "confidence": 0.84, + "canonical_issue_reason": "issue:45278 is the broadest representative of the visible cluster theme: widespread import failures after upgrading Transformers. 
It is generic enough to stand in for the import-regression subset without overfitting to one specific missing symbol.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45278 is the best global issue candidate because it is the most umbrella-like and can anchor the cluster\u2019s import-breakage side, whereas the other issues are narrower model- or API-specific regressions.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44291", + "right": "issue:45071", + "accept": false, + "reason": "Both relate to v5-era model-loading/type issues, but one is about an unexpected _is_hf_initialized argument and the other about PretrainedConfig type checking; they are different failures." + }, + { + "left": "issue:43450", + "right": "issue:43986", + "accept": false, + "reason": "Both involve video-processing paths, but one is a batched-shape bug in processors and the other is a crash when AutoProcessor is used without torchvision." + }, + { + "left": "issue:44484", + "right": "issue:45310", + "accept": false, + "reason": "These are unrelated: max_shard_size/save_pretrained behavior versus a Qwen3.5 MoE from_pretrained load error." + }, + { + "left": "issue:44829", + "right": "issue:45216", + "accept": false, + "reason": "One is a training-degeneration problem with flash_attention_3, the other is a save_pretrained checkpoint regression for Qwen3.5; different code paths and symptoms." + }, + { + "left": "issue:38617", + "right": "issue:41628", + "accept": false, + "reason": "Both are import errors, but for different missing names from different modules; no evidence they are the same underlying bug." + }, + { + "left": "issue:44410", + "right": "issue:45310", + "accept": false, + "reason": "These describe different Qwen-family model loading issues: missing layer projections versus a MoE from_pretrained error." 
+ }, + { + "left": "issue:44117", + "right": "issue:45081", + "accept": false, + "reason": "Tokenization-registry assumptions and Mistral regex patching are separate problems with different failure points." + }, + { + "left": "issue:43408", + "right": "issue:43742", + "accept": false, + "reason": "A model-type warning during instantiation is not the same bug as a key error loading MobileLLM-125M." + }, + { + "left": "issue:42673", + "right": "issue:43582", + "accept": false, + "reason": "VRAM leakage in multi-threaded Qwen3ForCausalLM use is unrelated to an Apple Silicon TypeError in caching_allocator_warmup." + }, + { + "left": "issue:42222", + "right": "issue:45542", + "accept": false, + "reason": "VitPose breakage and a TensorBoard/TensorFlow backend mismatch are different installation/runtime issues." + }, + { + "left": "issue:42673", + "right": "issue:43577", + "accept": false, + "reason": "A multi-thread VRAM leak and BLIP2 dtype propagation are unrelated regressions." + }, + { + "left": "issue:42371", + "right": "issue:44261", + "accept": false, + "reason": "TF32 behavior warnings and MLA q_a_layernorm precision issues are different subsystems with different fixes." + }, + { + "left": "issue:43937", + "right": "issue:44496", + "accept": false, + "reason": "GenerationConfig validation for GLM-5 is not the same as an unrecognized model_type/config.json problem." + }, + { + "left": "issue:38617", + "right": "issue:44220", + "accept": false, + "reason": "These are both import/feature issues, but one is a missing symbol import and the other is an audio feature-extraction problem." + }, + { + "left": "issue:43012", + "right": "issue:43577", + "accept": false, + "reason": "A PyTorch warning during bfloat16 compilation is unrelated to BLIP2 dtype not being propagated." + }, + { + "left": "issue:42175", + "right": "issue:42222", + "accept": false, + "reason": "Missing TensorFlow in a torch-only install and broken VitPose models are separate issues." 
+ }, + { + "left": "issue:44933", + "right": "issue:45278", + "accept": false, + "reason": "The latter is a broad import-regression report, but the former is a specific missing import from image_utils; related theme, not the same concrete bug." + }, + { + "left": "issue:44483", + "right": "issue:45464", + "accept": false, + "reason": "Both mention chat/completions, but one is a generic API acceptance failure and the other is a streaming-inference failure on a specific Qwen3.5 model." + }, + { + "left": "issue:42617", + "right": "issue:45341", + "accept": false, + "reason": "Running 3d_parallel.py and a bug in testing_utils.py are unrelated." + }, + { + "left": "issue:44484", + "right": "issue:45478", + "accept": false, + "reason": "These are different Qwen3.5/save_pretrained vs from_pretrained problems, not the same bug." + }, + { + "left": "issue:43066", + "right": "issue:43122", + "accept": false, + "reason": "Both concern tokenizer behavior in v5, but one is about decoder type and the other about changed tokenization output; too different to merge." + }, + { + "left": "issue:43673", + "right": "issue:45341", + "accept": false, + "reason": "A GenerationMixin cache regression during chunked_prefill is unrelated to a testing_utils bug." + }, + { + "left": "issue:44410", + "right": "issue:45478", + "accept": false, + "reason": "Missing attention projections in qwen3next is not the same as a Qwen3.5 MoE from_pretrained error." + }, + { + "left": "issue:43097", + "right": "issue:44493", + "accept": false, + "reason": "Removal of tie_embeddings_and_encoder_decoder is a config/API change; unexpected key warnings in models are a separate loading issue." + }, + { + "left": "issue:43874", + "right": "issue:44261", + "accept": false, + "reason": "A missing image-patch helper in GLM46V and MLA layernorm epsilon precision are unrelated multimodal/model bugs." 
+ }, + { + "left": "issue:41628", + "right": "issue:45071", + "accept": false, + "reason": "AutoImageProcessor import failure and PretrainedConfig type-checking breakage are different regressions." + }, + { + "left": "issue:45071", + "right": "issue:45542", + "accept": false, + "reason": "PretrainedConfig type checking and TensorBoard-only TF backend handling are unrelated." + }, + { + "left": "issue:43931", + "right": "issue:44410", + "accept": false, + "reason": "Qwen3-VL weight-shape mismatch is a loading/weights issue, while qwen3next missing projections is a model-definition issue." + }, + { + "left": "issue:45003", + "right": "issue:45341", + "accept": false, + "reason": "Unsafe sys.modules access in modeling_utils is not the same as a testing_utils bug." + }, + { + "left": "issue:44117", + "right": "issue:44987", + "accept": false, + "reason": "A None entry in TOKENIZER_MAPPING_NAMES and a specific model-loading failure for physical-intelligence/fast are different issues." + }, + { + "left": "issue:43986", + "right": "issue:45341", + "accept": false, + "reason": "Video-model loading without torchvision and a small testing_utils bug are unrelated." + }, + { + "left": "issue:43097", + "right": "issue:43452", + "accept": false, + "reason": "A removed tie_embeddings_and_encoder_decoder setting is unrelated to gguf_file breaking tokenizer/model loading." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 38, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:38617", + "issue:42757", + "issue:42886", + "issue:42907", + "issue:43012", + "issue:43064", + "issue:43066", + "issue:43097", + "issue:43208", + "issue:43335", + "issue:43352", + "issue:43381", + "issue:43408", + "issue:43502", + "issue:43531", + "issue:43550", + "issue:43824", + "issue:43825", + "issue:43873", + "issue:43874", + "issue:43906", + "issue:43931", + "issue:43976", + "issue:43986", + "issue:43992", + "issue:44038", + "issue:44062", + "issue:44117", + "issue:44261", + "issue:44263", + "issue:44315", + "issue:44351", + "issue:44368", + "issue:44380", + "issue:44410", + "issue:44484", + "issue:44485", + "issue:44655", + "issue:44704", + "issue:44829", + "issue:45071", + "issue:45103", + "issue:45245", + "issue:45341", + "issue:45372", + "issue:45375", + "issue:45479", + "issue:45588" + ], + "soft_pairs": [ + "issue:43931|issue:44038", + "issue:43335|issue:45071", + "issue:43408|issue:44368", + "issue:43874|issue:44263", + "issue:43976|issue:45071", + "issue:44829|issue:45245", + "issue:43986|issue:45372", + "issue:43064|issue:44315", + "issue:43531|issue:45341", + "issue:38617|issue:42757", + "issue:44261|issue:44485", + "issue:44410|issue:45375", + "issue:38617|issue:44351", + "issue:44062|issue:44117", + "issue:44038|issue:44410", + "issue:43097|issue:45588", + "issue:43986|issue:44117", + "issue:44704|issue:45103", + "issue:43502|issue:43992", + "issue:43992|issue:44484", + "issue:43097|issue:43873", + "issue:43824|issue:43986", + "issue:43066|issue:43906", + 
"issue:43352|issue:44380", + "issue:43825|issue:44655", + "issue:38617|issue:43097", + "issue:43012|issue:44368", + "issue:42886|issue:43381", + "issue:43012|issue:43550", + "issue:42907|issue:44655", + "issue:43208|issue:43550", + "issue:44380|issue:45479", + "issue:43352|issue:43976", + "issue:43064|issue:44380" + ], + "prepared_review_unit_hash": "681c0a5e32c3908f1b4cc0e5b4b46229963f380fb240bcf3e00adb9c8fdfb528", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 34, + "serialized_chars": 37911, + "estimated_input_tokens": 9478, + "estimated_eval_tokens": 19212 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The items are mostly unrelated bug reports from the same repo, with only superficial theme overlap (imports, loading, attention, quantization, etc.). None of the soft pairs look like the same underlying fixable bug/change, so I would not merge any of them as duplicates.", + "confidence": 0.91, + "canonical_issue_reason": "No clear canonical issue emerges: the cluster is heterogeneous and the soft-pair overlaps are mostly broad symptom similarity rather than the same root cause.", + "canonical_pr_reason": null, + "best_issue_reason": "No single issue is a strong representative for the cluster because the reports span many distinct subsystems and failure modes.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43931", + "right": "issue:44038", + "accept": false, + "reason": "Both are Qwen3/VL-related model-loading problems, but one is a specific weight-shape mismatch and the other is a version/model-family issue; not the same bug." 
+ }, + { + "left": "issue:43335", + "right": "issue:45071", + "accept": false, + "reason": "One is a SwitchTransformers config edge case; the other is a broad PretrainedConfig type-checking regression. Different failures and code paths." + }, + { + "left": "issue:43408", + "right": "issue:44368", + "accept": false, + "reason": "Different warning classes in different model families; no shared root cause." + }, + { + "left": "issue:43874", + "right": "issue:44263", + "accept": false, + "reason": "Different GLM components and different errors: a missing image-processor method vs a torch.split return-value issue." + }, + { + "left": "issue:43976", + "right": "issue:45071", + "accept": false, + "reason": "A Python-version compatibility report is not the same bug as PretrainedConfig type checking." + }, + { + "left": "issue:44829", + "right": "issue:45245", + "accept": false, + "reason": "Completely different domains: flash-attention training degeneration vs category cardinality limit." + }, + { + "left": "issue:43986", + "right": "issue:45372", + "accept": false, + "reason": "Both involve loading processors, but one is about missing torchvision and the other about a missing mistral_common symbol; different root causes." + }, + { + "left": "issue:43064", + "right": "issue:44315", + "accept": false, + "reason": "Training-state corruption with FSDP2/PEFT is unrelated to Liger Kernel application under model_init." + }, + { + "left": "issue:43531", + "right": "issue:45341", + "accept": false, + "reason": "Qwen3-MoE sliding-window behavior and a testing_utils bug are unrelated." + }, + { + "left": "issue:38617", + "right": "issue:42757", + "accept": false, + "reason": "Both are import errors, but for different symbols from different packages; no shared underlying fix." + }, + { + "left": "issue:44261", + "right": "issue:44485", + "accept": false, + "reason": "Different model-specific numerical/configuration issues (rms_norm_eps precision vs RoPE implementation)." 
+ }, + { + "left": "issue:44410", + "right": "issue:45375", + "accept": false, + "reason": "Different model config problems: missing attention projections vs a missing vision config field." + }, + { + "left": "issue:38617", + "right": "issue:44351", + "accept": false, + "reason": "Different missing exports from transformers; similar symptom, not the same code-path problem." + }, + { + "left": "issue:44062", + "right": "issue:44117", + "accept": false, + "reason": "Tokenizer construction error vs tokenizer mapping lookup assumption; not the same bug." + }, + { + "left": "issue:44038", + "right": "issue:44410", + "accept": false, + "reason": "Different Qwen-related model bugs with different symptoms and fixes." + }, + { + "left": "issue:43097", + "right": "issue:45588", + "accept": false, + "reason": "A config API removal is unrelated to a flash-attention crash on sink-less models." + }, + { + "left": "issue:43986", + "right": "issue:44117", + "accept": false, + "reason": "Different areas: processor loading without torchvision vs tokenizer mapping None-handling." + }, + { + "left": "issue:44704", + "right": "issue:45103", + "accept": false, + "reason": "Different preprocessing/docstring-related failures and different code paths." + }, + { + "left": "issue:43502", + "right": "issue:43992", + "accept": false, + "reason": "Offline/network behavior is unrelated to a missing checkpoint weight during UMT5 loading." + }, + { + "left": "issue:43992", + "right": "issue:44484", + "accept": false, + "reason": "Model-loading weight mismatch and save_pretrained shard-size behavior are unrelated." + }, + { + "left": "issue:43097", + "right": "issue:43873", + "accept": false, + "reason": "Removed config API and quantization offloading behavior are different issues." + }, + { + "left": "issue:43824", + "right": "issue:43986", + "accept": false, + "reason": "Different import/load failures for different multimodal models and dependencies." 
+ }, + { + "left": "issue:43066", + "right": "issue:43906", + "accept": false, + "reason": "Wrong tokenizer decoder type and an isolated reproduction of another issue are not the same bug." + }, + { + "left": "issue:43352", + "right": "issue:44380", + "accept": false, + "reason": "Flash-Attn support gating for Nemotron is unrelated to GPT2 attention scaling being ignored under SDPA/FlashAttention." + }, + { + "left": "issue:43825", + "right": "issue:44655", + "accept": false, + "reason": "Error messaging for pipeline tasks and saving Pipeline objects are different concerns." + }, + { + "left": "issue:38617", + "right": "issue:43097", + "accept": false, + "reason": "Different import/config regressions with no shared root cause." + }, + { + "left": "issue:43012", + "right": "issue:44368", + "accept": false, + "reason": "A PyTorch precision warning on bfloat16 compilation is unrelated to a tie_word_embeddings warning." + }, + { + "left": "issue:42886", + "right": "issue:43381", + "accept": false, + "reason": "Offline cache loading and gradient-checkpointing in eval mode are unrelated." + }, + { + "left": "issue:43012", + "right": "issue:43550", + "accept": false, + "reason": "Different runtime behaviors in different subsystems: warning on compile vs SDPA compile failure." + }, + { + "left": "issue:42907", + "right": "issue:44655", + "accept": false, + "reason": "Saving dequantized models and saving Pipeline objects share only a broad save_pretrained theme, not the same bug." + }, + { + "left": "issue:43208", + "right": "issue:43550", + "accept": false, + "reason": "xLSTM training blockers and Bamba torch.compile/SDPA failure are unrelated." + }, + { + "left": "issue:44380", + "right": "issue:45479", + "accept": false, + "reason": "Attention-backend config handling is unrelated to zero-loss classification degeneration." 
+ }, + { + "left": "issue:43352", + "right": "issue:43976", + "accept": false, + "reason": "Flash-Attention support for a specific model is unrelated to Python-version compatibility." + }, + { + "left": "issue:43064", + "right": "issue:44380", + "accept": false, + "reason": "Distributed training optimizer-state corruption is unrelated to attention scaling config being ignored." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 39, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:36010", + "issue:36683", + "issue:38617", + "issue:42757", + "issue:42994", + "issue:43010", + "issue:43012", + "issue:43064", + "issue:43066", + "issue:43208", + "issue:43299", + "issue:43352", + "issue:43475", + "issue:43502", + "issue:43525", + "issue:43526", + "issue:43582", + "issue:43618", + "issue:43756", + "issue:43761", + "issue:43824", + "issue:43867", + "issue:43901", + "issue:43937", + "issue:43976", + "issue:43992", + "issue:44038", + "issue:44077", + "issue:44188", + "issue:44230", + "issue:44242", + "issue:44263", + "issue:44380", + "issue:44483", + "issue:44485", + "issue:44492", + "issue:44514", + "issue:44655", + "issue:44683", + "issue:44938", + "issue:45216", + "issue:45278", + "issue:45290", + "issue:45310", + "issue:45381", + "issue:45440", + "issue:45478", + "issue:45593" + ], + "soft_pairs": [ + "issue:44483|issue:44514", + "issue:43208|issue:43526", + "issue:43618|issue:44380", + "issue:36683|issue:43582", + "issue:43618|issue:43761", + "issue:44483|issue:45290", + "issue:43012|issue:44683", + "issue:44038|issue:44230", + "issue:42757|issue:43824", + "issue:44483|issue:45381", + 
"issue:43299|issue:43867", + "issue:38617|issue:43502", + "issue:43756|issue:44485", + "issue:43901|issue:44655", + "issue:42994|issue:44655", + "issue:43867|issue:45216", + "issue:36683|issue:43475", + "issue:44655|issue:45478", + "issue:43066|issue:45381", + "issue:43352|issue:43937", + "issue:44077|issue:44380", + "issue:43064|issue:44492", + "issue:44263|issue:45440", + "issue:43976|issue:44938", + "issue:36683|issue:44077", + "issue:36683|issue:43525", + "issue:36010|issue:42757", + "issue:44188|issue:44380", + "issue:43618|issue:45216", + "issue:43352|issue:45593", + "issue:43992|issue:45310", + "issue:36683|issue:44242", + "issue:43010|issue:45278", + "issue:36683|issue:43352" + ], + "prepared_review_unit_hash": "768fe13701556680904c5d2e6175e1334f16348b75634556c033b367abee8fe1", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 34, + "serialized_chars": 37466, + "estimated_input_tokens": 9367, + "estimated_eval_tokens": 18990 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a grab bag of unrelated Transformers issues, mostly around v5 regressions and import/load failures. 
None of the soft pairs look like the same concrete bug or change, so I would not merge any of them as duplicates.", + "confidence": 0.88, + "canonical_issue_reason": "issue:45278 is the broadest umbrella item here: it explicitly describes post-upgrade import failures after moving from 4.57.0 to 5.5.0, which is the closest shared theme among several import-regression reports.", + "canonical_pr_reason": null, + "best_issue_reason": "If a single representative issue is needed, issue:45278 is the most general and cluster-like anchor because it captures the widest version-regression/import-failure pattern rather than a model-specific edge case.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44483", + "right": "issue:44514", + "accept": false, + "reason": "Both mention chat-related behavior, but one is an API request rejection and the other is a tokenizer/template crash; different code paths and symptoms." + }, + { + "left": "issue:43208", + "right": "issue:43526", + "accept": false, + "reason": "Completely different models and bugs: xLSTM training blockers versus BEIT image processor label reduction." + }, + { + "left": "issue:43618", + "right": "issue:44380", + "accept": false, + "reason": "One is a CLIP output regression and the other is GPT2 attention scaling under SDPA/FlashAttention; not the same failure." + }, + { + "left": "issue:36683", + "right": "issue:43582", + "accept": false, + "reason": "A missing Gemma3 config attribute is unrelated to an Apple Silicon allocator TypeError." + }, + { + "left": "issue:43618", + "right": "issue:43761", + "accept": false, + "reason": "Both are CLIP regressions, but one is missing attentions and the other is missing hidden_states; related area, different concrete bugs." + }, + { + "left": "issue:44483", + "right": "issue:45290", + "accept": false, + "reason": "Chat-completions request validation and apply_chat_template tool-call handling are distinct problems." 
+ }, + { + "left": "issue:43012", + "right": "issue:44683", + "accept": false, + "reason": "A bfloat16 compile warning is not the same as a torch>=2.9 compiled flex_attention failure." + }, + { + "left": "issue:44038", + "right": "issue:44230", + "accept": false, + "reason": "One is a Qwen3-VL-Moe bug report, the other is an fp8 support request; not the same underlying issue." + }, + { + "left": "issue:42757", + "right": "issue:43824", + "accept": false, + "reason": "Both are import errors, but for different symbols and different root causes." + }, + { + "left": "issue:44483", + "right": "issue:45381", + "accept": false, + "reason": "A chat/completions acceptance issue is unrelated to qwen2.5-vl video vision_position_ids being wrong." + }, + { + "left": "issue:43299", + "right": "issue:43867", + "accept": false, + "reason": "Qwen3VL-Moe loading failure and a sorted-state_dict load error are not the same bug." + }, + { + "left": "issue:38617", + "right": "issue:43502", + "accept": false, + "reason": "An import error in configuration_utils is unrelated to local_files_only still allowing API requests." + }, + { + "left": "issue:43756", + "right": "issue:44485", + "accept": false, + "reason": "Both mention RoPE, but one is about Smollm3 layer dropping and the other is a GLM-5 RoPE implementation discussion." + }, + { + "left": "issue:43901", + "right": "issue:44655", + "accept": false, + "reason": "Documentation mismatch for return_all_scores is unrelated to save_pretrained on Pipeline objects." + }, + { + "left": "issue:42994", + "right": "issue:44655", + "accept": false, + "reason": "Quantized model saving failure and pipeline save_pretrained failure are different save-path bugs." + }, + { + "left": "issue:43867", + "right": "issue:45216", + "accept": false, + "reason": "A load error with sorted state_dict and a Qwen3.5 save_pretrained regression are not the same defect." 
+ }, + { + "left": "issue:36683", + "right": "issue:43475", + "accept": false, + "reason": "Missing Gemma3Config.vocab_size and missing Sam3VisionEncoderOutput.fpn_position_embeddings are unrelated model-specific attribute bugs." + }, + { + "left": "issue:44655", + "right": "issue:45478", + "accept": false, + "reason": "Saving Pipeline objects and Qwen3.5 MoE from_pretrained errors affect different APIs and code paths." + }, + { + "left": "issue:43066", + "right": "issue:45381", + "accept": false, + "reason": "Tokenizer decoder type selection and qwen2.5-vl video position ids are different issues." + }, + { + "left": "issue:43352", + "right": "issue:43937", + "accept": false, + "reason": "Nemotron FlashAttention support and GLM-5 generation config validation are unrelated." + }, + { + "left": "issue:44077", + "right": "issue:44380", + "accept": false, + "reason": "A PatchTSMixer post_init policy change is not the same as GPT2 attention scaling being ignored under specific backends." + }, + { + "left": "issue:43064", + "right": "issue:44492", + "accept": false, + "reason": "Distributed optimizer-state corruption and a cache-strategy typo are clearly unrelated." + }, + { + "left": "issue:44263", + "right": "issue:45440", + "accept": false, + "reason": "Both are MoE-adjacent, but one is a torch.split indexer bug and the other is a DeepseekV3MoE implementation mismatch." + }, + { + "left": "issue:43976", + "right": "issue:44938", + "accept": false, + "reason": "Python-version compatibility problems in 5.1.0 and 3.14 load failures are not the same bug." + }, + { + "left": "issue:36683", + "right": "issue:44077", + "accept": false, + "reason": "A Gemma3 config attribute error has nothing to do with PatchTSMixer post_init handling." + }, + { + "left": "issue:36683", + "right": "issue:43525", + "accept": false, + "reason": "Both are missing config fields, but they affect different models and different attributes." 
+ }, + { + "left": "issue:36010", + "right": "issue:42757", + "accept": false, + "reason": "Both are import errors, but they come from different packages and symbols with no shared concrete fix." + }, + { + "left": "issue:44188", + "right": "issue:44380", + "accept": false, + "reason": "A torch.compile branching divergence in attention kernels is not the same as GPT2 attention scaling being ignored." + }, + { + "left": "issue:43618", + "right": "issue:45216", + "accept": false, + "reason": "A CLIP attentions regression and a Qwen3.5 checkpoint regression do not share the same underlying bug." + }, + { + "left": "issue:43352", + "right": "issue:45593", + "accept": false, + "reason": "Nemotron FA2 support and D-FINE auxiliary-loss handling are unrelated model issues." + }, + { + "left": "issue:43992", + "right": "issue:45310", + "accept": false, + "reason": "UMT5 missing embed_tokens.weight during loading and Qwen3.5 MoE from_pretrained failure are different loading bugs." + }, + { + "left": "issue:36683", + "right": "issue:44242", + "accept": false, + "reason": "A missing Gemma3Config attribute is unrelated to load balancing loss not being added for MoE routing." + }, + { + "left": "issue:43010", + "right": "issue:45278", + "accept": false, + "reason": "Cache update no_grad decoration and broad import failures after upgrade are different parts of the library." + }, + { + "left": "issue:36683", + "right": "issue:43352", + "accept": false, + "reason": "A Gemma3 config error and Nemotron FlashAttention support are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 40, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:28282", + "issue:30990", + "issue:31795", + "issue:33453", + "issue:36296", + "issue:36683", + "issue:41669", + "issue:42548", + "issue:42890", + "issue:43010", + "issue:43064", + "issue:43065", + "issue:43232", + "issue:43502", + "issue:43575", + "issue:43618", + "issue:43723", + "issue:43867", + "issue:43976", + "issue:43992", + "issue:44062", + "issue:44075", + "issue:44077", + "issue:44112", + "issue:44222", + "issue:44246", + "issue:44351", + "issue:44355", + "issue:44360", + "issue:44387", + "issue:44484", + "issue:44485", + "issue:44556", + "issue:44749", + "issue:44857", + "issue:44938", + "issue:44964", + "issue:45071", + "issue:45125", + "issue:45341", + "issue:45357", + "issue:45375", + "issue:45381", + "issue:45440", + "issue:45478", + "issue:45561", + "issue:45588", + "issue:45600" + ], + "soft_pairs": [ + "issue:41669|issue:44246", + "issue:43867|issue:45375", + "issue:36683|issue:43064", + "issue:43992|issue:45478", + "issue:42890|issue:45561", + "issue:44938|issue:45071", + "issue:43867|issue:45357", + "issue:43010|issue:43976", + "issue:44938|issue:45381", + "issue:44360|issue:45440", + "issue:43618|issue:44077", + "issue:36296|issue:43575", + "issue:43867|issue:44484", + "issue:43618|issue:45588", + "issue:43618|issue:44857", + "issue:28282|issue:33453", + "issue:42890|issue:44964", + "issue:42890|issue:45600", + "issue:42890|issue:44112", + "issue:43010|issue:44075", + "issue:43723|issue:44556", + "issue:43502|issue:44062", + "issue:44222|issue:45125", + 
"issue:42890|issue:45341", + "issue:44387|issue:44556", + "issue:43010|issue:43065", + "issue:43010|issue:43232", + "issue:42548|issue:44351", + "issue:45125|issue:45478", + "issue:44355|issue:44749", + "issue:45125|issue:45375", + "issue:43502|issue:44485", + "issue:30990|issue:31795" + ], + "prepared_review_unit_hash": "1db37b0ccab3b01daa062b2d1faf402c46f70bb0dfeb21ea0575bdcd96059578", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 33, + "serialized_chars": 37033, + "estimated_input_tokens": 9259, + "estimated_eval_tokens": 18774 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: it mixes import/API breakages, tokenizer/model loading regressions, tensor-parallel/training bugs, and unrelated model-specific issues. The soft links are mostly keyword similarity, not the same underlying bug/change, so I reject them all.", + "confidence": 0.93, + "canonical_issue_reason": "issue:42548 is the clearest concrete user-facing import failure in the set and the best representative of the recurring import/load theme, though the cluster is too mixed for it to be a true duplicate hub.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:42548 has the strongest, most actionable signal among the issues: a specific API break (`cannot import name 'PreTrainedModel'`) with direct impact and clear repro surface.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41669", + "right": "issue:44246", + "accept": false, + "reason": "Both concern import performance, but one is about removing `import *` and the other is about slow intermittent `import transformers`; related theme, not the same bug." 
+ }, + { + "left": "issue:43867", + "right": "issue:45375", + "accept": false, + "reason": "Both mention loading/config behavior, but one is a generic state_dict loading error and the other is a missing `deepstack_visual_indexes` config field for Qwen3.5 MoE." + }, + { + "left": "issue:36683", + "right": "issue:43064", + "accept": false, + "reason": "Gemma3 `vocab_size` missing is a model-config attribute issue; FSDP2+PEFT optimizer-state corruption is a distributed training bug." + }, + { + "left": "issue:43992", + "right": "issue:45478", + "accept": false, + "reason": "UMT5 encoder missing `embed_tokens.weight` during load is unrelated to Qwen3.5 MoE `from_pretrained` failure." + }, + { + "left": "issue:42890", + "right": "issue:45561", + "accept": false, + "reason": "One is flaky SAM HQ tests from missing seeds; the other is an xdist race on `captured_info.txt` in testing utils." + }, + { + "left": "issue:44938", + "right": "issue:45071", + "accept": false, + "reason": "Python 3.14 import/load failure is a runtime-compatibility issue; v5.4.0 `PretrainedConfig` type checking is a separate typing regression." + }, + { + "left": "issue:43867", + "right": "issue:45357", + "accept": false, + "reason": "Both involve loading/saving regressions, but one is generic state_dict loading and the other is incorrect visual encoder keys in Qwen3.5 `save_pretrained`." + }, + { + "left": "issue:43010", + "right": "issue:43976", + "accept": false, + "reason": "One is about cache/layer `update()` needing `torch.no_grad`; the other is a Python version compatibility problem in Transformers 5.1.0." + }, + { + "left": "issue:44938", + "right": "issue:45381", + "accept": false, + "reason": "Python 3.14 import failure and Qwen2.5-VL video `vision_position_ids` mismatch are different symptoms and code paths." 
+ }, + { + "left": "issue:44360", + "right": "issue:45440", + "accept": false, + "reason": "The DSA indexer/ReLU discussion is unrelated to the DeepseekV3MoE divergence from remote implementation." + }, + { + "left": "issue:43618", + "right": "issue:44077", + "accept": false, + "reason": "`CLIPOutput.attentions` disappearing is an output-structure regression; `patchtsmixer` `post_init` allowance is a separate model-API validation issue." + }, + { + "left": "issue:36296", + "right": "issue:43575", + "accept": false, + "reason": "Tensor-parallel training bug and Qwen2-57B-A14B-Instruct TP OOM are both TP-related but not the same concrete failure." + }, + { + "left": "issue:43867", + "right": "issue:44484", + "accept": false, + "reason": "A generic load error from sorted state_dict keys is not the same as the `max_shard_size` default question in `save_pretrained`." + }, + { + "left": "issue:43618", + "right": "issue:45588", + "accept": false, + "reason": "`CLIPOutput.attentions` missing is unrelated to `flash_attention.py` crashing on `s_aux=None`." + }, + { + "left": "issue:43618", + "right": "issue:44857", + "accept": false, + "reason": "Output field regression in CLIP is not the same as the float16 AMP crash in `LwDetrImageLoss`." + }, + { + "left": "issue:28282", + "right": "issue:33453", + "accept": false, + "reason": "Both are loading/import problems, but one is a missing PyTorch dependency error and the other is a tokenizer loading regression." + }, + { + "left": "issue:42890", + "right": "issue:44964", + "accept": false, + "reason": "Seed-related test flakiness and inability to load `microsoft/Phi-4-multimodal-instruct` are unrelated." + }, + { + "left": "issue:42890", + "right": "issue:45600", + "accept": false, + "reason": "A flaky integration test and `auto_mappings.py` referencing removed Sam3LiteText configs are different CI failures." 
+ }, + { + "left": "issue:42890", + "right": "issue:44112", + "accept": false, + "reason": "Missing seeds in tests and a stale device-override test failure in GraniteSpeech are different test issues." + }, + { + "left": "issue:43010", + "right": "issue:44075", + "accept": false, + "reason": "`torch.no_grad` on cache/layer updates is unrelated to SGD args not being used." + }, + { + "left": "issue:43723", + "right": "issue:44556", + "accept": false, + "reason": "Tokenizer loading via `AutoTokenizer.from_pretrained` is not the same as checkpoint reload failures after upgrading from v4.57 to v5.x." + }, + { + "left": "issue:43502", + "right": "issue:44062", + "accept": false, + "reason": "`local_files_only=True` still making API requests is a download-policy bug, not the `AddedToken(..., special=...)` argument collision." + }, + { + "left": "issue:44222", + "right": "issue:45125", + "accept": false, + "reason": "FP8 `save_pretrained` issues in MoE and missing `_tp_plan` for Qwen3_5Moe are distinct model-export vs tensor-parallel setup problems." + }, + { + "left": "issue:42890", + "right": "issue:45341", + "accept": false, + "reason": "General seed flakiness in tests is not the same as a specific bug in `testing_utils.py`." + }, + { + "left": "issue:44387", + "right": "issue:44556", + "accept": false, + "reason": "Int4 quantization memory regression is different from checkpoint reload breakage after upgrading versions." + }, + { + "left": "issue:43010", + "right": "issue:43065", + "accept": false, + "reason": "`@torch.no_grad` on cache updates and a dummy `nn.Conv2d` in `Sam3PixelDecoder` do not share the same code-path bug." + }, + { + "left": "issue:43010", + "right": "issue:43232", + "accept": false, + "reason": "Cache/layer update decoration and `_update_model_kwargs_for_generation` after `sync_gpus` are separate generation bugs." 
+ }, + { + "left": "issue:42548", + "right": "issue:44351", + "accept": false, + "reason": "Both are import-name failures, but they target different missing symbols (`PreTrainedModel` vs `HybridCache`) and likely different export paths." + }, + { + "left": "issue:45125", + "right": "issue:45478", + "accept": false, + "reason": "Missing `_tp_plan` and Qwen3.5 MoE `from_pretrained` error are related to the same family, but the reported failures are not the same concrete bug." + }, + { + "left": "issue:44355", + "right": "issue:44749", + "accept": false, + "reason": "Compiled Python file loading errors and slow filtering after upgrading from 4.57.3 to 5.3.0 are unrelated." + }, + { + "left": "issue:45125", + "right": "issue:45375", + "accept": false, + "reason": "Tensor-parallel plan missing is different from `deepstack_visual_indexes` being dropped by strict config handling." + }, + { + "left": "issue:43502", + "right": "issue:44485", + "accept": false, + "reason": "Offline-download leakage and GLM-5 RoPE implementation discussion are not the same underlying bug." + }, + { + "left": "issue:30990", + "right": "issue:31795", + "accept": false, + "reason": "Sentence Transformers loading hangs and confusing `input_ids`/`past_key_values` documentation are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 41, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:30333", + "issue:31795", + "issue:33453", + "issue:34689", + "issue:36296", + "issue:38617", + "issue:41628", + "issue:41669", + "issue:42548", + "issue:42757", + "issue:42831", + "issue:43010", + "issue:43299", + "issue:43502", + "issue:43526", + "issue:43576", + "issue:43673", + "issue:43824", + "issue:43827", + "issue:43874", + "issue:43901", + "issue:44038", + "issue:44075", + "issue:44222", + "issue:44230", + "issue:44261", + "issue:44273", + "issue:44315", + "issue:44351", + "issue:44355", + "issue:44360", + "issue:44368", + "issue:44393", + "issue:44485", + "issue:44492", + "issue:44556", + "issue:44623", + "issue:44655", + "issue:44871", + "issue:44908", + "issue:44961", + "issue:45003", + "issue:45071", + "issue:45092", + "issue:45125", + "issue:45230", + "issue:45468", + "issue:45542" + ], + "soft_pairs": [ + "issue:44075|issue:44871", + "issue:44315|issue:44360", + "issue:30333|issue:33453", + "issue:43010|issue:44368", + "issue:43502|issue:44222", + "issue:43299|issue:44556", + "issue:43502|issue:44961", + "issue:44222|issue:44230", + "issue:44355|issue:44655", + "issue:30333|issue:36296", + "issue:44355|issue:45071", + "issue:42831|issue:43874", + "issue:33453|issue:34689", + "issue:42757|issue:43576", + "issue:33453|issue:36296", + "issue:43827|issue:44961", + "issue:43576|issue:45071", + "issue:43576|issue:43824", + "issue:38617|issue:43576", + "issue:43673|issue:44492", + "issue:44961|issue:45468", + "issue:31795|issue:38617", + "issue:41628|issue:42548", + 
"issue:43576|issue:44351", + "issue:44908|issue:44961", + "issue:44485|issue:44961", + "issue:36296|issue:43901", + "issue:36296|issue:43526", + "issue:41628|issue:44355", + "issue:44273|issue:45003", + "issue:44261|issue:44360", + "issue:44623|issue:45230", + "issue:44273|issue:45542", + "issue:43576|issue:44556", + "issue:44038|issue:45125", + "issue:44393|issue:45071", + "issue:41669|issue:43673", + "issue:44556|issue:45092" + ], + "prepared_review_unit_hash": "5ae75ddd3cf52fbd57b9539fbcf5b84e476e41e863f29198221f5c0ab0b25875", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 38, + "serialized_chars": 38406, + "estimated_input_tokens": 9602, + "estimated_eval_tokens": 19460 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most items are unrelated or only loosely similar v5/Transformers regression reports. 
The soft similarities are mostly broad-topic overlaps (imports, loading, docs, FP8, cache) rather than the same concrete bug, so no soft-edge merge is justified.", + "confidence": 0.69, + "canonical_issue_reason": "issue:43576 is the clearest, most actionable regression report in the set and sits near several v5-related similarity links, while the others are either narrower, more ambiguous, or about different code paths.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43576 is the strongest standalone candidate because it describes a specific, user-visible breakage with a clear surface area and better triage value than the more ambiguous or peripheral reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44075", + "right": "issue:44871", + "accept": false, + "reason": "Different problems: optimizer argument handling vs Gemma eos_token_id config mismatch." + }, + { + "left": "issue:44315", + "right": "issue:44360", + "accept": false, + "reason": "Liger Kernel application bug and DSA indexer ReLU issue are unrelated code paths." + }, + { + "left": "issue:30333", + "right": "issue:33453", + "accept": false, + "reason": "MLFlow failure reporting and tokenizer loading regression are different bugs." + }, + { + "left": "issue:43010", + "right": "issue:44368", + "accept": false, + "reason": "Cache update no_grad decoration and tie_word_embeddings warning are unrelated." + }, + { + "left": "issue:43502", + "right": "issue:44222", + "accept": false, + "reason": "local_files_only network leakage and FP8 save_pretrained behavior are different issues." + }, + { + "left": "issue:43299", + "right": "issue:44556", + "accept": false, + "reason": "Both concern v5 loading regressions, but one is Qwen3VL MoE-specific while the other is a broader checkpoint reload incompatibility." 
+ }, + { + "left": "issue:43502", + "right": "issue:44961", + "accept": false, + "reason": "API requests despite local_files_only and the placeholder racoon issue are unrelated." + }, + { + "left": "issue:44222", + "right": "issue:44230", + "accept": false, + "reason": "save_pretrained FP8 MOE bug vs FP8 inference support; different operations and failure modes." + }, + { + "left": "issue:44355", + "right": "issue:44655", + "accept": false, + "reason": "Compiled-file import errors and Pipeline.save_pretrained are unrelated." + }, + { + "left": "issue:30333", + "right": "issue:36296", + "accept": false, + "reason": "MLFlow reporting failure and tensor parallel training bug are different problems." + }, + { + "left": "issue:44355", + "right": "issue:45071", + "accept": false, + "reason": "Runtime import failure and PretrainedConfig type-checking regression are different code paths." + }, + { + "left": "issue:42831", + "right": "issue:43874", + "accept": false, + "reason": "FineGrainedFP8 accuracy issue and missing image patch method are unrelated." + }, + { + "left": "issue:33453", + "right": "issue:34689", + "accept": false, + "reason": "Both are loading regressions, but they affect different models and failure modes." + }, + { + "left": "issue:42757", + "right": "issue:43576", + "accept": false, + "reason": "Missing hub symbol import and v5 env CLI breakage are distinct surfaces." + }, + { + "left": "issue:33453", + "right": "issue:36296", + "accept": false, + "reason": "Tokenizer loading regression and tensor parallel training bug are unrelated." + }, + { + "left": "issue:43827", + "right": "issue:44961", + "accept": false, + "reason": "Docs about pipeline removals and the placeholder issue are unrelated." + }, + { + "left": "issue:43576", + "right": "issue:45071", + "accept": false, + "reason": "CLI env regression and config type checking are different issues." 
+ }, + { + "left": "issue:43576", + "right": "issue:43824", + "accept": false, + "reason": "CLI env regression and Qwen2.5-VL import error are unrelated." + }, + { + "left": "issue:38617", + "right": "issue:43576", + "accept": false, + "reason": "Missing exported symbol import error and CLI env breakage are different bugs." + }, + { + "left": "issue:43673", + "right": "issue:44492", + "accept": false, + "reason": "Chunked_prefill cache bug and cache-strategy typo are not the same underlying problem." + }, + { + "left": "issue:44961", + "right": "issue:45468", + "accept": false, + "reason": "Placeholder title and Gemma-4 audio positional encoding bug are unrelated." + }, + { + "left": "issue:31795", + "right": "issue:38617", + "accept": false, + "reason": "Documentation confusion and missing import symbol are unrelated." + }, + { + "left": "issue:41628", + "right": "issue:42548", + "accept": false, + "reason": "Both are import/export failures, but they involve different symbols and likely different root causes." + }, + { + "left": "issue:43576", + "right": "issue:44351", + "accept": false, + "reason": "CLI env regression and HybridCache import error are different failure modes." + }, + { + "left": "issue:44908", + "right": "issue:44961", + "accept": false, + "reason": "Scheduler kwargs bug and placeholder issue are unrelated." + }, + { + "left": "issue:44485", + "right": "issue:44961", + "accept": false, + "reason": "GLM-5 RoPE implementation issue and placeholder issue are unrelated." + }, + { + "left": "issue:36296", + "right": "issue:43901", + "accept": false, + "reason": "Tensor parallel training bug and TextClassificationPipeline docs mismatch are unrelated." + }, + { + "left": "issue:36296", + "right": "issue:43526", + "accept": false, + "reason": "Tensor parallel training bug and BeitImageProcessorFast label bug are unrelated." 
+ }, + { + "left": "issue:41628", + "right": "issue:44355", + "accept": false, + "reason": "ImportError for a public API and compiled-file errors are unrelated." + }, + { + "left": "issue:44273", + "right": "issue:45003", + "accept": false, + "reason": "Lazy loading malfunction and unsafe sys.modules access are related only broadly, not the same concrete bug." + }, + { + "left": "issue:44261", + "right": "issue:44360", + "accept": false, + "reason": "MLA layernorm epsilon precision issue and DSA indexer ReLU issue are unrelated." + }, + { + "left": "issue:44623", + "right": "issue:45230", + "accept": false, + "reason": "processor.save_pretrained missing files and a generic bug report are not the same issue." + }, + { + "left": "issue:44273", + "right": "issue:45542", + "accept": false, + "reason": "Lazy loading bug and TensorFlow backend installation issue are unrelated." + }, + { + "left": "issue:43576", + "right": "issue:44556", + "accept": false, + "reason": "v5 env CLI regression and checkpoint reload incompatibility after upgrade are both version-related, but not the same concrete bug." + }, + { + "left": "issue:44038", + "right": "issue:45125", + "accept": false, + "reason": "Qwen3-VL/Qwen3.5-Moe bug and missing tensor-parallel plan are different issues." + }, + { + "left": "issue:44393", + "right": "issue:45071", + "accept": false, + "reason": "Qwen3-VL 2D bbox output problem and PretrainedConfig type checking regression are unrelated." + }, + { + "left": "issue:41669", + "right": "issue:43673", + "accept": false, + "reason": "Removing import * usage and chunked_prefill cache missing are unrelated." + }, + { + "left": "issue:44556", + "right": "issue:45092", + "accept": false, + "reason": "Both are old-checkpoint/v5 compatibility complaints, but the model families and failure contexts differ enough that they do not look like the same bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 42, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:31515", + "issue:31795", + "issue:33453", + "issue:34689", + "issue:41669", + "issue:42371", + "issue:42890", + "issue:42913", + "issue:43066", + "issue:43352", + "issue:43408", + "issue:43502", + "issue:43519", + "issue:43576", + "issue:43606", + "issue:43704", + "issue:43723", + "issue:43827", + "issue:43976", + "issue:44077", + "issue:44246", + "issue:44261", + "issue:44263", + "issue:44273", + "issue:44295", + "issue:44297", + "issue:44315", + "issue:44351", + "issue:44360", + "issue:44393", + "issue:44485", + "issue:44492", + "issue:44623", + "issue:44655", + "issue:44704", + "issue:44829", + "issue:44861", + "issue:44908", + "issue:44945", + "issue:44998", + "issue:45071", + "issue:45092", + "issue:45230", + "issue:45290", + "issue:45310", + "issue:45341", + "issue:45468", + "issue:45478" + ], + "soft_pairs": [ + "issue:42371|issue:44492", + "issue:42890|issue:44908", + "issue:44704|issue:44908", + "issue:44246|issue:44273", + "issue:43408|issue:44315", + "issue:44297|issue:45230", + "issue:43352|issue:44393", + "issue:43576|issue:44908", + "issue:43976|issue:44393", + "issue:44908|issue:45071", + "issue:44295|issue:45230", + "issue:41669|issue:44351", + "issue:41669|issue:45341", + "issue:44492|issue:44945", + "issue:41669|issue:44492", + "issue:44315|issue:44829", + "issue:44393|issue:45478", + "issue:31795|issue:33453", + "issue:43827|issue:44908", + "issue:44077|issue:44861", + "issue:41669|issue:44908", + "issue:43723|issue:44393", + "issue:44655|issue:44861", + 
"issue:43576|issue:44273", + "issue:44360|issue:45468", + "issue:45230|issue:45310", + "issue:44273|issue:44908", + "issue:41669|issue:43704", + "issue:43519|issue:44485", + "issue:43519|issue:44908", + "issue:31795|issue:34689", + "issue:31515|issue:31795", + "issue:44315|issue:45092", + "issue:43704|issue:44485", + "issue:44485|issue:45468", + "issue:42371|issue:43704", + "issue:43502|issue:43519", + "issue:43519|issue:44623", + "issue:43519|issue:43704", + "issue:44263|issue:45468", + "issue:44261|issue:45468", + "issue:43606|issue:45468", + "issue:42913|issue:43066", + "issue:42890|issue:43704", + "issue:44908|issue:44998", + "issue:44861|issue:45290", + "issue:44998|issue:45341", + "issue:44492|issue:44998" + ], + "prepared_review_unit_hash": "910ca75e55aea22a478fbdaee55c4db7ad829a233b2769b1bd08019b7be98c52", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 48, + "serialized_chars": 41396, + "estimated_input_tokens": 10349, + "estimated_eval_tokens": 20954 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is mostly heterogeneous: tokenizer/loading, model init, scheduler, docs, tests, and various model-specific bugs. 
I did not find a strong duplicate cluster; the closest pair is the AutoProcessor/cached_file kwargs vs local_files_only network-request issue, but evidence is still insufficient to merge them as the same bug.", + "confidence": 0.61, + "canonical_issue_reason": "Issue 44704 is the most concrete, actionable bug in the set: it names a specific code path, a likely root cause, and a clear failure mode around kwargs propagation in from_pretrained.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44704 is a strong representative issue because it describes a specific loading-path bug with an identifiable fix surface and good triage value.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42371", + "right": "issue:44492", + "accept": false, + "reason": "TF32 configuration behavior is unrelated to a cache-strategy typo." + }, + { + "left": "issue:42890", + "right": "issue:44908", + "accept": false, + "reason": "Test seed coverage and inverse_sqrt scheduler kwargs are different failure domains." + }, + { + "left": "issue:44704", + "right": "issue:44908", + "accept": false, + "reason": "One is about cached_file kwargs propagation during loading; the other is scheduler kwarg handling. Different code paths." + }, + { + "left": "issue:44246", + "right": "issue:44273", + "accept": false, + "reason": "Import-time slowness and lazy loading not working are related broadly, but not the same concrete bug." + }, + { + "left": "issue:43408", + "right": "issue:44315", + "accept": false, + "reason": "Model-type warning in SAM3 is unrelated to Liger Kernel application with model_init." + }, + { + "left": "issue:44297", + "right": "issue:45230", + "accept": false, + "reason": "Tokenizer-class save_pretrained mismatch is too specific to equate with an unnamed generic bug report." 
+ }, + { + "left": "issue:43352", + "right": "issue:44393", + "accept": false, + "reason": "Flash Attention support and Qwen3-VL bounding-box output errors are distinct model issues." + }, + { + "left": "issue:43576", + "right": "issue:44908", + "accept": false, + "reason": "Broken env command and scheduler kwargs are unrelated." + }, + { + "left": "issue:43976", + "right": "issue:44393", + "accept": false, + "reason": "Python version compatibility is unrelated to Qwen3-VL output correctness." + }, + { + "left": "issue:44908", + "right": "issue:45071", + "accept": false, + "reason": "Scheduler kwargs and PretrainedConfig type checking are different bug classes." + }, + { + "left": "issue:44295", + "right": "issue:45230", + "accept": false, + "reason": "position_ids buffer access and a generic bug report do not show the same underlying defect." + }, + { + "left": "issue:41669", + "right": "issue:44351", + "accept": false, + "reason": "Import-* cleanup causing import cost is not the same as HybridCache import failure." + }, + { + "left": "issue:41669", + "right": "issue:45341", + "accept": false, + "reason": "Import performance refactor and a testing_utils bug are unrelated." + }, + { + "left": "issue:44492", + "right": "issue:44945", + "accept": false, + "reason": "Cache-strategy typo and pipeline parallelism output error are different issues." + }, + { + "left": "issue:41669", + "right": "issue:44492", + "accept": false, + "reason": "Import-system cleanup and cache-strategy typo are not the same code-path bug." + }, + { + "left": "issue:44315", + "right": "issue:44829", + "accept": false, + "reason": "Liger Kernel not applied with model_init is unrelated to degenerate training under flash_attention_3." + }, + { + "left": "issue:44393", + "right": "issue:45478", + "accept": false, + "reason": "Qwen3-VL 2D bbox hallucination and Qwen3.5 MoE from_pretrained error are different model problems." 
+ }, + { + "left": "issue:31795", + "right": "issue:33453", + "accept": false, + "reason": "Forward doc confusion is not the same as a tokenizer loading regression." + }, + { + "left": "issue:43827", + "right": "issue:44908", + "accept": false, + "reason": "Pipeline docs after v5 removals and scheduler kwargs are unrelated." + }, + { + "left": "issue:44077", + "right": "issue:44861", + "accept": false, + "reason": "Optional post_init policy and _tied_weight_keys list crash are different implementation bugs." + }, + { + "left": "issue:41669", + "right": "issue:44908", + "accept": false, + "reason": "Import-time cleanup and scheduler kwarg handling are unrelated." + }, + { + "left": "issue:43723", + "right": "issue:44393", + "accept": false, + "reason": "Tokenizer loading in v5 is unrelated to Qwen3-VL bounding-box output errors." + }, + { + "left": "issue:44655", + "right": "issue:44861", + "accept": false, + "reason": "Saving Pipeline objects and tied-weight key handling do not share the same bug." + }, + { + "left": "issue:43576", + "right": "issue:44273", + "accept": false, + "reason": "Broken env command and lazy loading behavior are different surfaces." + }, + { + "left": "issue:44360", + "right": "issue:45468", + "accept": false, + "reason": "DSA indexer ReLU discussion is unrelated to Gemma-4 audio positional encoding." + }, + { + "left": "issue:45230", + "right": "issue:45310", + "accept": false, + "reason": "A generic bug report is not enough to conclude it duplicates a specific Qwen3.5 MoE from_pretrained error." + }, + { + "left": "issue:44273", + "right": "issue:44908", + "accept": false, + "reason": "Lazy loading and scheduler kwargs are unrelated." + }, + { + "left": "issue:41669", + "right": "issue:43704", + "accept": false, + "reason": "Import-side performance cleanup and VRAM leakage in multi-threaded dataloading are different problems." 
+ }, + { + "left": "issue:43519", + "right": "issue:44485", + "accept": false, + "reason": "Timestamp calculation in Qwen3VL is unrelated to GLM-5 RoPE implementation." + }, + { + "left": "issue:43519", + "right": "issue:44908", + "accept": false, + "reason": "Timestamp calculation and scheduler kwargs are unrelated." + }, + { + "left": "issue:31795", + "right": "issue:34689", + "accept": false, + "reason": "Forward documentation confusion does not match a model-loading breakage regression." + }, + { + "left": "issue:31515", + "right": "issue:31795", + "accept": false, + "reason": "Slow checkpoint loading and forward-API documentation confusion are different issues." + }, + { + "left": "issue:44315", + "right": "issue:45092", + "accept": false, + "reason": "Model_init not applying Liger Kernel and meta-init incompatibility for old InternVL2 are not the same bug." + }, + { + "left": "issue:43704", + "right": "issue:44485", + "accept": false, + "reason": "VRAM leak under dataloader threads and RoPE implementation concerns are unrelated." + }, + { + "left": "issue:44485", + "right": "issue:45468", + "accept": false, + "reason": "GLM-5 RoPE implementation and Gemma-4 audio positional encoding are different model-specific concerns." + }, + { + "left": "issue:42371", + "right": "issue:43704", + "accept": false, + "reason": "TF32 settings and VRAM leakage are unrelated." + }, + { + "left": "issue:43502", + "right": "issue:43519", + "accept": false, + "reason": "local_files_only network requests and Qwen3VL timestamp math are different bugs." + }, + { + "left": "issue:43519", + "right": "issue:44623", + "accept": false, + "reason": "Timestamp math in a processor and missing files on processor save are unrelated." + }, + { + "left": "issue:43519", + "right": "issue:43704", + "accept": false, + "reason": "Processor timestamp math and multi-threaded VRAM leak are unrelated." 
+ }, + { + "left": "issue:44263", + "right": "issue:45468", + "accept": false, + "reason": "torch.split return values in an indexer are unrelated to Gemma-4 audio positional encoding." + }, + { + "left": "issue:44261", + "right": "issue:45468", + "accept": false, + "reason": "MLA q_a_layernorm epsilon precision and Gemma-4 audio positional encoding are different issues." + }, + { + "left": "issue:43606", + "right": "issue:45468", + "accept": false, + "reason": "CPU offload device mismatch and audio positional encoding are unrelated." + }, + { + "left": "issue:42913", + "right": "issue:43066", + "accept": false, + "reason": "Generic tokenizer behavior change and wrong tokenizer decoder type in v5 are related in theme but not clearly the same concrete bug." + }, + { + "left": "issue:42890", + "right": "issue:43704", + "accept": false, + "reason": "Test seeding instability and VRAM leakage are unrelated." + }, + { + "left": "issue:44908", + "right": "issue:44998", + "accept": false, + "reason": "Scheduler kwargs and an irrelevant placeholder issue are unrelated." + }, + { + "left": "issue:44861", + "right": "issue:45290", + "accept": false, + "reason": "Tied-weight key handling and apply_chat_template crashes have different failure surfaces." + }, + { + "left": "issue:44998", + "right": "issue:45341", + "accept": false, + "reason": "Unemployment is not a technical duplicate of a testing_utils bug." + }, + { + "left": "issue:44492", + "right": "issue:44998", + "accept": false, + "reason": "Cache-strategy typo and unrelated placeholder issue are not duplicates." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 43, + "review_unit_count": 43, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-374", + "nodes": [ + "issue:30333", + "issue:31515", + "issue:33453", + "issue:36296", + "issue:38617", + "issue:43298", + "issue:43525", + "issue:43572", + "issue:43618", + "issue:44230", + "issue:44556", + "issue:44623", + "issue:44861", + "issue:44998", + "issue:45310" + ], + "soft_pairs": [ + "issue:43618|issue:44998", + "issue:44556|issue:44998", + "issue:43525|issue:44861", + "issue:43572|issue:44861", + "issue:31515|issue:38617", + "issue:44230|issue:44623", + "issue:31515|issue:36296", + "issue:30333|issue:31515", + "issue:44230|issue:45310", + "issue:31515|issue:33453", + "issue:43298|issue:44230" + ], + "prepared_review_unit_hash": "46450ed417b801420991bf8a4d5b421e4e6ce97b172dd4319aad7740d3d8e31f", + "component_budget": { + "node_count": 374, + "item_count": 374, + "soft_pair_count": 1395, + "serialized_chars": 720193, + "estimated_input_tokens": 180049, + "estimated_eval_tokens": 360354 + }, + "budget": { + "node_count": 15, + "item_count": 15, + "soft_pair_count": 11, + "serialized_chars": 11707, + "estimated_input_tokens": 2927, + "estimated_eval_tokens": 6110 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set; it contains several unrelated Transformers bugs that only loosely overlap around model loading/configuration. 
None of the soft-similarity pairs look like the same underlying bug or a mergeable single PR.", + "confidence": 0.96, + "canonical_issue_reason": "No single canonical issue fits this cluster: the items span distinct failure modes (checkpoint loading speed, tokenizer regression, config attribute errors, processor save gaps, FP8 support, tensor-parallel bugs, and an MLflow reporting issue).", + "canonical_pr_reason": null, + "best_issue_reason": "If a representative issue must be chosen, #44556 is the broadest checkpoint-reload regression, but it still does not cover the other issues in the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43618", + "right": "issue:44998", + "accept": false, + "reason": "Completely unrelated: CLIPOutput attentions regression vs an off-topic unemployment issue." + }, + { + "left": "issue:44556", + "right": "issue:44998", + "accept": false, + "reason": "Checkpoint reload regression has no substantive relation to the unrelated unemployment issue." + }, + { + "left": "issue:43525", + "right": "issue:44861", + "accept": false, + "reason": "Different bugs: missing pad_token_id on Llama4Config vs AttributeError in _get_tied_weight_keys for list-valued _tied_weights_keys." + }, + { + "left": "issue:43572", + "right": "issue:44861", + "accept": false, + "reason": "StableLmConfig pad_token_idx regression and tied-weight key handling are separate code paths and different symptoms." + }, + { + "left": "issue:31515", + "right": "issue:38617", + "accept": false, + "reason": "Slow from_pretrained checkpoint loading is unrelated to an ImportError for layer_type_validation." + }, + { + "left": "issue:44230", + "right": "issue:44623", + "accept": false, + "reason": "FP8 inference support for Qwen models is a different problem from processor.save_pretrained missing files." 
+ }, + { + "left": "issue:31515", + "right": "issue:36296", + "accept": false, + "reason": "Checkpoint load slowness is not the same as a tensor-parallel training bug." + }, + { + "left": "issue:30333", + "right": "issue:31515", + "accept": false, + "reason": "MLflow job-state reporting failure is unrelated to from_pretrained checkpoint loading performance." + }, + { + "left": "issue:44230", + "right": "issue:45310", + "accept": false, + "reason": "Both mention Qwen3.5, but one is FP8 inference support and the other is a from_pretrained error; different concrete bugs." + }, + { + "left": "issue:31515", + "right": "issue:33453", + "accept": false, + "reason": "Tokenizer loading regression and slow checkpoint loading are different failure modes with different likely fixes." + }, + { + "left": "issue:43298", + "right": "issue:44230", + "accept": false, + "reason": "Continuous batching audio-model support is a different feature gap than Qwen FP8 inference support." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 3, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-26350-2", + "nodes": [ + "issue:26350", + "issue:33260" + ], + "soft_pairs": [ + "issue:26350|issue:33260" + ], + "prepared_review_unit_hash": "a9835bd4aabdbc5ad31a2d13ea6404649c591098bafe1fff971374f4cd5c8009", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1589, + "estimated_input_tokens": 398, + "estimated_eval_tokens": 1052 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1589, + "estimated_input_tokens": 398, + "estimated_eval_tokens": 1052 
+ }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are two distinct feature requests that only share a broad \"support for more architectures\" theme: one is about Flash Attention 2, the other about GGUF. They do not describe the same underlying bug or change.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:26350", + "right": "issue:33260", + "accept": false, + "reason": "Different feature areas and likely different implementation paths: Flash Attention 2 support versus GGUF support. The similarity is only generic wording about architectures, not a shared concrete issue." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 4, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-27088-4", + "nodes": [ + "issue:27088", + "issue:43834", + "issue:44365", + "issue:44370" + ], + "soft_pairs": [ + "issue:27088|issue:44370", + "issue:27088|issue:44365", + "issue:27088|issue:43834", + "issue:43834|issue:44370", + "issue:43834|issue:44365", + "issue:44365|issue:44370" + ], + "prepared_review_unit_hash": "3b2f1d1d41167c32fdd1eb99a3f9b136134f7a497ce260d1cd85d3c95cbf1f53", + "component_budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4454, + "estimated_input_tokens": 1114, + "estimated_eval_tokens": 2484 + }, + "budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4454, + "estimated_input_tokens": 
1114, + "estimated_eval_tokens": 2484 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The three 2026 templated i18n translation issues are near-verbatim duplicates of one another. The 2023 Turkish-specific request is similar in theme but distinct enough to keep separate.", + "confidence": 0.78, + "canonical_issue_reason": "issue:43834 is the earliest of the near-identical templated translation issues and is the best representative of that duplicate set.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43834 is the cleanest canonical representative among the duplicate placeholder issues; the Turkish issue is a different concrete request.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:27088", + "right": "issue:44370", + "accept": false, + "reason": "Turkish-specific request versus a generic placeholder-language translation request; not clearly the same concrete issue." + }, + { + "left": "issue:27088", + "right": "issue:44365", + "accept": false, + "reason": "Turkish-specific request versus a generic placeholder-language translation request; not clearly the same concrete issue." + }, + { + "left": "issue:27088", + "right": "issue:43834", + "accept": false, + "reason": "Turkish-specific request versus a generic placeholder-language translation request; not clearly the same concrete issue." + }, + { + "left": "issue:43834", + "right": "issue:44370", + "accept": true, + "reason": "Near-verbatim templated titles/bodies for the same translation-docs request; these look like duplicates." + }, + { + "left": "issue:43834", + "right": "issue:44365", + "accept": true, + "reason": "Near-verbatim templated titles/bodies for the same translation-docs request; these look like duplicates." 
+ }, + { + "left": "issue:44365", + "right": "issue:44370", + "accept": true, + "reason": "Near-verbatim templated titles/bodies for the same translation-docs request; these look like duplicates." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is well grounded in the packet. The three 2026 issues share the same templated title/body pattern, and choosing 43834 as the canonical representative is supported by its earliest creation date. The decision to keep issue:27088 separate is also conservative and consistent with its Turkish-specific title. The soft-edge verdicts do not overstate the evidence." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 5, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-36817-14", + "nodes": [ + "issue:36817", + "issue:37015", + "issue:41115", + "issue:41211", + "issue:42503", + "issue:42738", + "issue:43671", + "issue:43837", + "issue:43909", + "issue:44121", + "issue:44205", + "issue:45027", + "issue:45177", + "issue:45322" + ], + "soft_pairs": [ + "issue:36817|issue:41211", + "issue:36817|issue:43671", + "issue:37015|issue:43837", + "issue:37015|issue:44121", + "issue:37015|issue:43909", + "issue:37015|issue:42503", + "issue:37015|issue:45027", + "issue:41115|issue:43837", + "issue:41115|issue:44121", + "issue:41115|issue:42503", + "issue:41115|issue:45027", + "issue:41211|issue:43671", + "issue:42503|issue:43837", + "issue:42503|issue:43671", + "issue:42503|issue:42738", + "issue:42738|issue:45027", + "issue:43671|issue:43837", + "issue:43671|issue:45027", + "issue:43671|issue:45177", + "issue:43671|issue:44121", + "issue:43671|issue:43909", + "issue:43837|issue:43909", + 
"issue:43837|issue:45322", + "issue:43837|issue:44121", + "issue:43909|issue:44205", + "issue:44121|issue:45322", + "issue:43837|issue:45027", + "issue:43837|issue:45177" + ], + "prepared_review_unit_hash": "388ad34e057a0e6f01fbceea9f76130a726873f9171310f8ff23a236062db4cb", + "component_budget": { + "node_count": 14, + "item_count": 14, + "soft_pair_count": 28, + "serialized_chars": 17647, + "estimated_input_tokens": 4412, + "estimated_eval_tokens": 9080 + }, + "budget": { + "node_count": 14, + "item_count": 14, + "soft_pair_count": 28, + "serialized_chars": 17647, + "estimated_input_tokens": 4412, + "estimated_eval_tokens": 9080 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All items are separate feature requests for different models or modalities (BERT variants, audio ASR/TTS, OCR, sparse transformer, etc.), so there is no true duplicate cluster here.", + "confidence": 0.96, + "canonical_issue_reason": "No single issue is a true canonical duplicate; the requests target different models and code paths.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:37015 is the broadest/most generic model-add request in the set, so it works best as a loose representative, but not as a duplicate canonical.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:36817", + "right": "issue:41211", + "accept": false, + "reason": "Different model families: EuroBert vs DEIMv2; separate integration requests." + }, + { + "left": "issue:36817", + "right": "issue:43671", + "accept": false, + "reason": "EuroBert config request vs Qwen3-TTS audio support; unrelated model/task." + }, + { + "left": "issue:37015", + "right": "issue:43837", + "accept": false, + "reason": "NeoBERT vs Qwen3-ASR; different model and modality." 
+ }, + { + "left": "issue:37015", + "right": "issue:44121", + "accept": false, + "reason": "NeoBERT vs OpenAI weight-sparse transformer; distinct architectures." + }, + { + "left": "issue:37015", + "right": "issue:43909", + "accept": false, + "reason": "NeoBERT vs LFM2.5 Audio; unrelated model targets." + }, + { + "left": "issue:37015", + "right": "issue:42503", + "accept": false, + "reason": "NeoBERT vs ModernVBERT; separate model additions." + }, + { + "left": "issue:37015", + "right": "issue:45027", + "accept": false, + "reason": "NeoBERT vs Voxtral TTS; different model/task and code path." + }, + { + "left": "issue:41115", + "right": "issue:43837", + "accept": false, + "reason": "MiniCPM3 vs Qwen3-ASR; different model families and support work." + }, + { + "left": "issue:41115", + "right": "issue:44121", + "accept": false, + "reason": "MiniCPM3 integration vs weight-sparse transformer request; not the same change." + }, + { + "left": "issue:41115", + "right": "issue:42503", + "accept": false, + "reason": "MiniCPM3 vs ModernVBERT; separate model support requests." + }, + { + "left": "issue:41115", + "right": "issue:45027", + "accept": false, + "reason": "MiniCPM3 vs Voxtral TTS; distinct models and modalities." + }, + { + "left": "issue:41211", + "right": "issue:43671", + "accept": false, + "reason": "DEIMv2 vs Qwen3-TTS; different model families and tasks." + }, + { + "left": "issue:42503", + "right": "issue:43837", + "accept": false, + "reason": "ModernVBERT vs Qwen3-ASR; different model and modality." + }, + { + "left": "issue:42503", + "right": "issue:43671", + "accept": false, + "reason": "ModernVBERT vs Qwen3-TTS; separate support requests." + }, + { + "left": "issue:42503", + "right": "issue:42738", + "accept": false, + "reason": "ModernVBERT vs BERT-like RoPE models; related theme but not the same concrete model addition." 
+ }, + { + "left": "issue:42738", + "right": "issue:45027", + "accept": false, + "reason": "Generic BERT-like RoPE request vs Voxtral TTS; unrelated changes." + }, + { + "left": "issue:43671", + "right": "issue:43837", + "accept": false, + "reason": "Qwen3-TTS vs Qwen3-ASR are related vendor-wise, but they are different concrete models and likely different implementations." + }, + { + "left": "issue:43671", + "right": "issue:45027", + "accept": false, + "reason": "Qwen3-TTS vs Voxtral TTS; both audio, but different model support work." + }, + { + "left": "issue:43671", + "right": "issue:45177", + "accept": false, + "reason": "Qwen3-TTS vs DeepSeek-OCR2; different modalities and model code paths." + }, + { + "left": "issue:43671", + "right": "issue:44121", + "accept": false, + "reason": "Audio TTS support vs weight-sparse transformer architecture; not the same underlying change." + }, + { + "left": "issue:43671", + "right": "issue:43909", + "accept": false, + "reason": "Qwen3-TTS vs LFM2.5 Audio; separate audio models and integrations." + }, + { + "left": "issue:43837", + "right": "issue:43909", + "accept": false, + "reason": "Qwen3-ASR vs LFM2.5 Audio; different audio models and support requirements." + }, + { + "left": "issue:43837", + "right": "issue:45322", + "accept": false, + "reason": "ASR support vs EUPE model request; unrelated model types." + }, + { + "left": "issue:43837", + "right": "issue:44121", + "accept": false, + "reason": "Qwen3-ASR vs sparse transformer; different architectures and tasks." + }, + { + "left": "issue:43909", + "right": "issue:44205", + "accept": false, + "reason": "LFM2.5 Audio vs SAM3-LiteText; audio model vs multimodal segmentation-related request." + }, + { + "left": "issue:44121", + "right": "issue:45322", + "accept": false, + "reason": "Weight-sparse transformer vs EUPE; distinct architectures and model targets." 
+ }, + { + "left": "issue:43837", + "right": "issue:45027", + "accept": false, + "reason": "Qwen3-ASR vs Voxtral TTS; both audio, but not the same model or fix." + }, + { + "left": "issue:43837", + "right": "issue:45177", + "accept": false, + "reason": "Qwen3-ASR vs DeepSeek-OCR2; different modality and implementation." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 6, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-37934-6", + "nodes": [ + "issue:37934", + "issue:42936", + "issue:43240", + "issue:43668", + "issue:43813", + "issue:44031" + ], + "soft_pairs": [ + "issue:37934|issue:43668", + "issue:42936|issue:43240", + "issue:42936|issue:44031", + "issue:42936|issue:43813", + "issue:42936|issue:43668", + "issue:43240|issue:43668", + "issue:43668|issue:44031", + "issue:43668|issue:43813" + ], + "prepared_review_unit_hash": "639f6eb9a2e9138d5296676d59ffad3cc1a0e4e0764a3d2c0b048980388849d5", + "component_budget": { + "node_count": 6, + "item_count": 6, + "soft_pair_count": 8, + "serialized_chars": 6290, + "estimated_input_tokens": 1573, + "estimated_eval_tokens": 3402 + }, + "budget": { + "node_count": 6, + "item_count": 6, + "soft_pair_count": 8, + "serialized_chars": 6290, + "estimated_input_tokens": 1573, + "estimated_eval_tokens": 3402 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The cluster is heterogeneous: these issues touch different models/components and describe different bugs or cleanup tasks. 
The soft similarities look superficial, so none of the candidate pairs should be merged as duplicates.", + "confidence": 0.96, + "canonical_issue_reason": "No true canonical issue: the reports span unrelated problem areas (normalization semantics, Mask2Former loss handling, config typing, typo, tokenizer warning).", + "canonical_pr_reason": null, + "best_issue_reason": "If one issue must anchor the set, 44031 is the strongest standalone report because it describes a broad regression with active discussion, but it is not a duplicate of the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:37934", + "right": "issue:43668", + "accept": false, + "reason": "Different topics: Llama4TextL2Norm/RMS norm semantics vs ModernBERTConfig type-hint typing." + }, + { + "left": "issue:42936", + "right": "issue:43240", + "accept": false, + "reason": "Different bugs in the same broad area; one is Mask2Former ignore_value usage, the other is loss kwargs not being forwarded." + }, + { + "left": "issue:42936", + "right": "issue:44031", + "accept": false, + "reason": "Mask2Former loss/ignore_value bug is unrelated to tokenizer regex warning regression." + }, + { + "left": "issue:42936", + "right": "issue:43813", + "accept": false, + "reason": "Different components: Mask2Former model loss handling vs a typo in a quantization attribute string." + }, + { + "left": "issue:42936", + "right": "issue:43668", + "accept": false, + "reason": "Unrelated issues: Mask2Former ignore_value behavior vs a ModernBERT config annotation problem." + }, + { + "left": "issue:43240", + "right": "issue:43668", + "accept": false, + "reason": "Loss kwargs forwarding and config type-hint correctness are separate concerns with no shared code path." + }, + { + "left": "issue:43668", + "right": "issue:44031", + "accept": false, + "reason": "ModernBERTConfig typing issue is not the same bug as the tokenizer regex warning regression." 
+ }, + { + "left": "issue:43668", + "right": "issue:43813", + "accept": false, + "reason": "Different fix types: incorrect type hint vs a typo in a quantization operation name." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 7, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41084-2", + "nodes": [ + "issue:41084", + "issue:43037" + ], + "soft_pairs": [ + "issue:41084|issue:43037" + ], + "prepared_review_unit_hash": "ae58c05ba2b129c175b77656184d7a1ee0d77a7240ac6cbeb648be4b5c08b685", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1480, + "estimated_input_tokens": 370, + "estimated_eval_tokens": 996 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1480, + "estimated_input_tokens": 370, + "estimated_eval_tokens": 996 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two issues are not duplicates: one is a concrete block decoding change/bug, the other is a request for DeepSeek v3.2 model support. 
They touch different underlying problems with no shared implementation target.", + "confidence": 0.97, + "canonical_issue_reason": "Issue #41084 is the better canonical representative because it is a specific, actionable code-path problem rather than a broad model-support request.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #41084 is the best issue in this cluster to keep as the representative item; #43037 is a separate feature request and not a duplicate.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41084", + "right": "issue:43037", + "accept": false, + "reason": "Reject: the titles and implied scopes are unrelated. #41084 concerns block decoding, while #43037 requests DeepSeek v3.2 support; there is no evidence they describe the same underlying bug or change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 8, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43598-2", + "nodes": [ + "issue:43598", + "issue:43600" + ], + "soft_pairs": [ + "issue:43598|issue:43600" + ], + "prepared_review_unit_hash": "a6d6e202281dc164c14edb34667db2ac30d53f88d963e75eb5ab53da9bac2866", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1584, + "estimated_input_tokens": 396, + "estimated_eval_tokens": 1048 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1584, + "estimated_input_tokens": 396, + "estimated_eval_tokens": 1048 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": 
"The two issues are about different Trainer concerns: one is about `remove_unused_column` customizability, the other about FLOPS estimation for custom trainers. They do not describe the same underlying bug or change.", + "confidence": 0.96, + "canonical_issue_reason": "No canonical issue: the issues address separate Trainer topics and are not duplicates.", + "canonical_pr_reason": null, + "best_issue_reason": "If forced to pick one representative, #43598 is the more concrete Trainer behavior request, but neither issue represents the other.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43598", + "right": "issue:43600", + "accept": false, + "reason": "Different problems: customizability of `remove_unused_column` vs FLOPS accounting for custom trainers. Same broad area, but not the same underlying issue." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 9, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43684-2", + "nodes": [ + "issue:43684", + "issue:44405" + ], + "soft_pairs": [ + "issue:43684|issue:44405" + ], + "prepared_review_unit_hash": "67eb70049922307d8875099ed63951f0d4a1dadf3294482ef48ce485a5474e58", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1612, + "estimated_input_tokens": 403, + "estimated_eval_tokens": 1062 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1612, + "estimated_input_tokens": 403, + "estimated_eval_tokens": 1062 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + 
"analyst_result": { + "summary": "Not duplicates: one asks for Qwen3-Omni registration in AutoModel/AutoModelForConditionalGeneration, the other asks for Qwen3.5 AutoModelForSequenceClassification support. Different model families and different auto-class targets.", + "confidence": 0.95, + "canonical_issue_reason": "No canonical issue: these are different feature requests aimed at different model/config families and different auto classes.", + "canonical_pr_reason": null, + "best_issue_reason": "If forced to pick a representative, 43684 is the broader model-registration request, but it does not cover 44405\u2019s separate sequence-classification support request.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43684", + "right": "issue:44405", + "accept": false, + "reason": "Different underlying changes: Qwen3-Omni auto-registration vs. Qwen3.5 sequence-classification support. They are not the same bug or code-path fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 10, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43835-4", + "nodes": [ + "issue:43835", + "issue:43845", + "issue:43846", + "issue:43859" + ], + "soft_pairs": [ + "issue:43835|issue:43845", + "issue:43835|issue:43846", + "issue:43835|issue:43859", + "issue:43845|issue:43846", + "issue:43845|issue:43859", + "issue:43846|issue:43859" + ], + "prepared_review_unit_hash": "4198f11950bedec72a52d32cbac3a2e0b82ed5fbd0d35cbc6621ed93cea36305", + "component_budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4264, + "estimated_input_tokens": 1066, + "estimated_eval_tokens": 2388 + }, + 
"budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4264, + "estimated_input_tokens": 1066, + "estimated_eval_tokens": 2388 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All four issues are extremely low-information. The two identical \"huggingface \" issues look like the closest pair, but there isn\u2019t enough description-level evidence to confirm a true duplicate. I\u2019m rejecting all soft edges.", + "confidence": 0.61, + "canonical_issue_reason": "Issue 43845 is the earliest of the repeated \"huggingface \" entries, so it is the best representative placeholder among the near-identical issue titles.", + "canonical_pr_reason": null, + "best_issue_reason": "43845 is the most representative issue in this cluster because it is the earliest of the duplicate-looking \"huggingface \" reports; however, the evidence is weak.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43835", + "right": "issue:43845", + "accept": false, + "reason": "Only weak title similarity; no body detail, targets, or other evidence that these are the same underlying issue." + }, + { + "left": "issue:43835", + "right": "issue:43846", + "accept": false, + "reason": "Only weak title similarity; the reports are too underspecified to confirm a true duplicate." + }, + { + "left": "issue:43835", + "right": "issue:43859", + "accept": false, + "reason": "No concrete evidence beyond vague similarity; insufficient to treat as the same issue." + }, + { + "left": "issue:43845", + "right": "issue:43846", + "accept": false, + "reason": "Identical short titles and close timestamps suggest a possible duplicate, but the bodies provide no confirmatory detail, so this is not safe to merge." 
+ }, + { + "left": "issue:43845", + "right": "issue:43859", + "accept": false, + "reason": "Shared generic title is not enough to establish the same bug report." + }, + { + "left": "issue:43846", + "right": "issue:43859", + "accept": false, + "reason": "Shared generic title is not enough to establish the same bug report." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 11, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44371-5", + "nodes": [ + "issue:44371", + "issue:44418", + "issue:44572", + "issue:44573", + "issue:44937" + ], + "soft_pairs": [ + "issue:44371|issue:44572", + "issue:44371|issue:44573", + "issue:44371|issue:44418", + "issue:44371|issue:44937", + "issue:44418|issue:44572", + "issue:44418|issue:44573", + "issue:44572|issue:44573", + "issue:44572|issue:44937", + "issue:44573|issue:44937" + ], + "prepared_review_unit_hash": "790e3534d5202454802f099bc72f4f2c88261e2ba66f570823d83ec2f586a536", + "component_budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 9, + "serialized_chars": 5791, + "estimated_input_tokens": 1448, + "estimated_eval_tokens": 3152 + }, + "budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 9, + "serialized_chars": 5791, + "estimated_input_tokens": 1448, + "estimated_eval_tokens": 3152 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "This cluster is mostly unrelated low-information spam/generic issues. 
The three exact \"\" reports look like the same duplicate placeholder; the documentation suggestion and \"Google LLC\" issue do not match that same underlying content.", + "confidence": 0.82, + "canonical_issue_reason": "issue:44371 is the earliest of the exact \"\" duplicates, so it is the best canonical representative for that small duplicate subgroup.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44371 is the strongest issue representative because it is an exact title/body match with the other spam placeholders and was created first.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44371", + "right": "issue:44572", + "accept": true, + "reason": "Exact same placeholder title/body pattern; these are indistinguishable spam duplicates." + }, + { + "left": "issue:44371", + "right": "issue:44573", + "accept": true, + "reason": "Exact same placeholder title/body pattern; these are indistinguishable spam duplicates." + }, + { + "left": "issue:44371", + "right": "issue:44418", + "accept": false, + "reason": "One is a spam placeholder, the other is a generic documentation suggestion; no evidence of the same underlying issue." + }, + { + "left": "issue:44371", + "right": "issue:44937", + "accept": false, + "reason": "Different titles and apparent intent; \"Google LLC\" is not the same placeholder spam report." + }, + { + "left": "issue:44418", + "right": "issue:44572", + "accept": false, + "reason": "Generic documentation suggestion versus placeholder spam; not the same issue." + }, + { + "left": "issue:44418", + "right": "issue:44573", + "accept": false, + "reason": "Generic documentation suggestion versus placeholder spam; not the same issue." + }, + { + "left": "issue:44572", + "right": "issue:44573", + "accept": true, + "reason": "Exact same placeholder title/body pattern; these are indistinguishable spam duplicates." 
+ }, + { + "left": "issue:44572", + "right": "issue:44937", + "accept": false, + "reason": "Different titles and likely different spam themes; no concrete shared issue." + }, + { + "left": "issue:44573", + "right": "issue:44937", + "accept": false, + "reason": "Different titles and likely different spam themes; no concrete shared issue." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The clustering summary is grounded in the packet. The three issues are the only clearly near-identical items, and selecting the earliest as canonical is conservative and supported by creation time. The non- issues are correctly treated as separate, with the reject decisions staying cautious given only soft similarity evidence." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 12, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45145-2", + "nodes": [ + "issue:45145", + "issue:45151" + ], + "soft_pairs": [ + "issue:45145|issue:45151" + ], + "prepared_review_unit_hash": "4a6442540601583dcddf3e6fb5b4d8779d79f272d9c87b7d81eced31c009ae7e", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1646, + "estimated_input_tokens": 412, + "estimated_eval_tokens": 1080 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1646, + "estimated_input_tokens": 412, + "estimated_eval_tokens": 1080 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two issues appear to be duplicates of the same energy-optimization writeup, with identical titles and nearly 
identical body sizes/content signals. The earlier issue is the better canonical representative.", + "confidence": 0.99, + "canonical_issue_reason": "Issue #45145 was created earlier and already has an inbound reference, making it the better canonical duplicate target for this same report.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #45145 is the clearest representative of the cluster because it is earlier and slightly more established in discussion metadata.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45145", + "right": "issue:45151", + "accept": true, + "reason": "Same title, same apparent topic, and highly similar body footprint; these look like duplicate reports of the same underlying issue." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The conclusion is grounded in the packet: the issues have identical titles, very similar body lengths, and a soft-similarity link, while issue 45145 is earlier and has an inbound reference. The canonical choice is conservative and the soft-edge verdict does not overreach beyond the available evidence." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 13, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "cache", + "cache_hit": true, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45295-2", + "nodes": [ + "issue:45295", + "issue:45373" + ], + "soft_pairs": [], + "prepared_review_unit_hash": "f5fd95fada2f530ab0999e343c17c16266cde0c7182bb22509972ca44827cd49", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 0, + "serialized_chars": 1286, + "estimated_input_tokens": 322, + "estimated_eval_tokens": 900 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 0, + "serialized_chars": 1286, + "estimated_input_tokens": 322, + "estimated_eval_tokens": 900 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two issues describe the same missing Gemma 4 sequence-classification support; the later, more specific open issue is the better cluster representative.", + "confidence": 0.94, + "canonical_issue_reason": "issue #45373 is the clearest canonical tracker: it is open, explicitly names the missing class `Gemma4ForSequenceClassification`, and states the module-level gap relative to Gemma 2/3.", + "canonical_pr_reason": null, + "best_issue_reason": "issue #45373 best captures the concrete request and likely remaining work, while #45295 is a shorter earlier duplicate with less detail and is already closed.", + "best_pr_reason": null, + "soft_edge_verdicts": [] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + 
"component_index": 1, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-39895-13", + "nodes": [ + "pull_request:39895", + "pull_request:41855", + "pull_request:42668", + "pull_request:43251", + "pull_request:43254", + "pull_request:43424", + "pull_request:43823", + "pull_request:43995", + "pull_request:44827", + "pull_request:45170", + "pull_request:45213", + "pull_request:45587", + "pull_request:45622" + ], + "soft_pairs": [ + "pull_request:39895|pull_request:43424", + "pull_request:41855|pull_request:43823", + "pull_request:41855|pull_request:43424", + "pull_request:42668|pull_request:43823", + "pull_request:43251|pull_request:43823", + "pull_request:43251|pull_request:44827", + "pull_request:43424|pull_request:44827", + "pull_request:43424|pull_request:43823", + "pull_request:43823|pull_request:43995", + "pull_request:43823|pull_request:45622", + "pull_request:43995|pull_request:45213", + "pull_request:43995|pull_request:44827", + "pull_request:44827|pull_request:45622", + "pull_request:44827|pull_request:45587", + "pull_request:44827|pull_request:45170", + "pull_request:44827|pull_request:45213", + "pull_request:43823|pull_request:45213", + "pull_request:43251|pull_request:43254" + ], + "prepared_review_unit_hash": "466f1881ca618d9feb0f24683691a5f77c8dab46ddd1154641f611d35685933d", + "component_budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 18, + "serialized_chars": 35181, + "estimated_input_tokens": 8796, + "estimated_eval_tokens": 17848 + }, + "budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 18, + "serialized_chars": 35181, + "estimated_input_tokens": 8796, + "estimated_eval_tokens": 17848 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + 
"summary": "This cluster is mostly heterogeneous. The only true duplicate-style pair is PR #43251 and PR #43254, both implementing the same `fixed_cross_entropy` kwargs passthrough change. All other soft links are topical overlap only and should be rejected.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #43251 is the best canonical PR for the real duplicate pair: it has the explicit issue target (#43240) and a complete, readable description of the same `fixed_cross_entropy` fix.", + "best_issue_reason": null, + "best_pr_reason": "PR #43251 is the best global PR candidate because it anchors the only genuine duplicate in the cluster and is the more explicit issue-linked implementation.", + "soft_edge_verdicts": [ + { + "left": "pull_request:39895", + "right": "pull_request:43424", + "accept": false, + "reason": "Unrelated PRs: VideoPrism model/docs addition vs ExecuTorch dynamic-shape test. Similarity is only superficial." + }, + { + "left": "pull_request:41855", + "right": "pull_request:43823", + "accept": false, + "reason": "Different model work: Mistral tokenizer methods vs adding MobileLLM. No shared concrete bug or change." + }, + { + "left": "pull_request:41855", + "right": "pull_request:43424", + "accept": false, + "reason": "Tokenizer API fix vs ExecuTorch export test; these are unrelated changes." + }, + { + "left": "pull_request:42668", + "right": "pull_request:43823", + "accept": false, + "reason": "Processor auto-loading robustness vs adding a new model. Same broad area, but not the same underlying change." + }, + { + "left": "pull_request:43251", + "right": "pull_request:43823", + "accept": false, + "reason": "Loss-function kwargs passthrough vs MobileLLM model addition are unrelated." + }, + { + "left": "pull_request:43251", + "right": "pull_request:44827", + "accept": false, + "reason": "Loss utility kwargs fix and Mistral4 test/model changes are different code paths and different problems." 
+ }, + { + "left": "pull_request:43424", + "right": "pull_request:44827", + "accept": false, + "reason": "ExecuTorch dynamic-shape testing and Mistral4 test/model changes are unrelated." + }, + { + "left": "pull_request:43424", + "right": "pull_request:43823", + "accept": false, + "reason": "Dynamic export test vs MobileLLM model addition; no same bug or merged patch." + }, + { + "left": "pull_request:43823", + "right": "pull_request:43995", + "accept": false, + "reason": "New MobileLLM model vs Falcon output-interface refactor; only broad model-related overlap." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45622", + "accept": false, + "reason": "MobileLLM addition and PEFT constructor fix are unrelated." + }, + { + "left": "pull_request:43995", + "right": "pull_request:45213", + "accept": false, + "reason": "Falcon refactor vs model-creation skill/docs tooling; different artifact types and goals." + }, + { + "left": "pull_request:43995", + "right": "pull_request:44827", + "accept": false, + "reason": "Falcon interface refactor and Mistral4 test fixes are different model-specific changes." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45622", + "accept": false, + "reason": "Mistral4 test/model updates vs PEFT constructor fix are not the same concrete code-path problem." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45587", + "accept": false, + "reason": "Mistral4-related code changes vs continuous batching docs; only subsystem-level similarity." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45170", + "accept": false, + "reason": "Mistral4 tests vs layrnorm->layernorm renaming across CLIP-like models are unrelated." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45213", + "accept": false, + "reason": "Mistral4 fixes and model-creation skill repo changes do not address the same bug/change." 
+ }, + { + "left": "pull_request:43823", + "right": "pull_request:45213", + "accept": false, + "reason": "Model implementation PR vs skill/tooling PR; no shared concrete fix." + }, + { + "left": "pull_request:43251", + "right": "pull_request:43254", + "accept": true, + "reason": "Both PRs fix the same `fixed_cross_entropy` wrapper by adding/forwarding supported kwargs to `nn.functional.cross_entropy`; they are mergeable into one patch." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The analysis is grounded in the packet: only PR #43251 and #43254 share the same issue target and the same `fixed_cross_entropy` kwargs passthrough fix, so treating them as mergeable duplicates is conservative. The other pairwise rejections stay within the evidence and do not overclaim duplication from topical similarity alone." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 2, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-40546-2", + "nodes": [ + "pull_request:40546", + "pull_request:42781" + ], + "soft_pairs": [ + "pull_request:40546|pull_request:42781" + ], + "prepared_review_unit_hash": "480dee09185110a432e6ef2f8c41027168791e026ee0105dfa98a332123a032f", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 7226, + "estimated_input_tokens": 1807, + "estimated_eval_tokens": 3870 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 7226, + "estimated_input_tokens": 1807, + "estimated_eval_tokens": 3870 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + 
"analyst_result": { + "summary": "These are related VibeVoice PRs but not duplicates: one is the base VibeVoice integration, the other adds VibeVoice Realtime plus an acoustic tokenizer and generation changes.", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 40546 is the better canonical PR because it is the earlier, non-draft foundational VibeVoice integration and is more self-contained than the later realtime extension.", + "best_issue_reason": null, + "best_pr_reason": "PR 40546 is the best representative of the cluster since it establishes the core VibeVoice implementation; PR 42781 is a separate follow-on feature area.", + "soft_edge_verdicts": [ + { + "left": "pull_request:40546", + "right": "pull_request:42781", + "accept": false, + "reason": "Not the same underlying change: 40546 adds core VibeVoice support/docs, while 42781 adds VibeVoice Realtime with additional tokenizer and generation work. They overlap in model names and shared files but are distinct feature sets and would not plausibly be merged into one PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 3, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-40755-2", + "nodes": [ + "pull_request:40755", + "pull_request:41224" + ], + "soft_pairs": [ + "pull_request:40755|pull_request:41224" + ], + "prepared_review_unit_hash": "fc3b593469cccf8507d1d8cabdbfb385185f7be3fa86f433268455301b08dc56", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4675, + "estimated_input_tokens": 1169, + "estimated_eval_tokens": 2594 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4675, + "estimated_input_tokens": 1169, + "estimated_eval_tokens": 2594 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are unrelated feature PRs: one adds covariate forecasting to TimesFM, the other adds DINOv3 image classification support. 
They share only a loose \u201cmodel capability expansion\u201d theme, not a concrete code path or fix.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "Neither PR is a clear duplicate canonical candidate for the other; they target different models, files, APIs, and use cases.", + "best_issue_reason": null, + "best_pr_reason": "No issue is present; as PRs, neither should be treated as the canonical representative of the other because they address distinct changes in separate model families.", + "soft_edge_verdicts": [ + { + "left": "pull_request:40755", + "right": "pull_request:41224", + "accept": false, + "reason": "Reject: TimesFM covariate forecasting and DINOv3 image classification are unrelated features with different code paths, documentation, and tests; they could not plausibly be merged into one PR as the same underlying change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 4, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "cache", + "cache_hit": true, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41356-2", + "nodes": [ + "pull_request:41356", + "pull_request:44339" + ], + "soft_pairs": [ + "pull_request:41356|pull_request:44339" + ], + "prepared_review_unit_hash": "5ca5622c9633bbe7eef9d70a2eb0e3cf7982d42dc10a27ffb1990f021d804327", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6050, + "estimated_input_tokens": 1513, + "estimated_eval_tokens": 3282 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6050, + "estimated_input_tokens": 1513, + "estimated_eval_tokens": 3282 + }, + "overflow_policy": "truncate_then_skip", + 
"trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both items are PRs for adding DEIMv2 support to Transformers. PR #44339 is the more complete later submission and appears to subsume the earlier #41356 work, so it is the better canonical PR candidate. There is no issue in this cluster.", + "confidence": 0.86, + "canonical_issue_reason": null, + "canonical_pr_reason": "#44339 is the broader, later DEIMv2 implementation and includes the core model integration plus additional framework wiring (auto-mappings, conversion, loss/modular updates), making it the best representative PR.", + "best_issue_reason": null, + "best_pr_reason": "#44339 is the most complete and maintainable PR for the DEIMv2 addition; it covers the same feature area as #41356 but with substantially more finished integration work.", + "soft_edge_verdicts": [ + { + "left": "pull_request:41356", + "right": "pull_request:44339", + "accept": true, + "reason": "Both PRs target the same DEIMv2 model addition, and #44339 looks like a superset that could plausibly replace the earlier implementation in one merged change." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet: both items are PRs tied to the same issue target (41211) and the later PR #44339 clearly adds broader DEIMv2 integration while overlapping core files with #41356. The soft-edge verdict is conservative, phrased as a plausible supersession rather than a certainty. No issue is present in this cluster." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 5, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41797-2", + "nodes": [ + "pull_request:41797", + "pull_request:45186" + ], + "soft_pairs": [ + "pull_request:41797|pull_request:45186" + ], + "prepared_review_unit_hash": "bdee097d586db57f55fa4957acd1a88c750427ef412d2af16e8b97a1ba5b8b30", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6723, + "estimated_input_tokens": 1681, + "estimated_eval_tokens": 3618 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6723, + "estimated_input_tokens": 1681, + "estimated_eval_tokens": 3618 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are two separate model-integration PRs that share Transformers boilerplate, but they add different models (DeepSeek OCR vs Isaac) with different code paths, configs, and tests. 
They are not duplicates and should not be clustered together as the same change.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "No canonical PR: the PRs implement different model additions and do not fix or introduce the same underlying change.", + "best_issue_reason": null, + "best_pr_reason": "Neither PR is a better duplicate representative; both are independent additions for different models, so there is no single best PR for this cluster.", + "soft_edge_verdicts": [ + { + "left": "pull_request:41797", + "right": "pull_request:45186", + "accept": false, + "reason": "Reject: both PRs add new model support, but for different models and distinct implementation files. The overlap is generic framework scaffolding, not the same concrete bug or change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 6, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42166-2", + "nodes": [ + "pull_request:42166", + "pull_request:45426" + ], + "soft_pairs": [ + "pull_request:42166|pull_request:45426" + ], + "prepared_review_unit_hash": "0354163fc213dcc129350051495f5dfc250fd9ef9398e5de095b3aa5499414be", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5408, + "estimated_input_tokens": 1352, + "estimated_eval_tokens": 2960 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5408, + "estimated_input_tokens": 1352, + "estimated_eval_tokens": 2960 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + 
"summary": "These are two unrelated feature PRs that both touch Transformers auto-registration boilerplate, but they add different model families (InternVLFlash vs AXK1) and do not appear to fix the same underlying problem.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "No canonical PR: the changes are for different model additions with different code paths, so they should not be deduplicated.", + "best_issue_reason": null, + "best_pr_reason": "Neither PR is a good global representative for the cluster because the cluster is not actually a duplicate set; the overlap is just shared framework registration files.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42166", + "right": "pull_request:45426", + "accept": false, + "reason": "Reject: these are distinct model-introduction PRs for different architectures and model packages, not the same bug or change. Shared auto files are expected boilerplate." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 7, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42227-4", + "nodes": [ + "pull_request:42227", + "pull_request:43291", + "pull_request:43665", + "pull_request:44815" + ], + "soft_pairs": [ + "pull_request:42227|pull_request:43291", + "pull_request:42227|pull_request:43665", + "pull_request:42227|pull_request:44815", + "pull_request:43291|pull_request:43665", + "pull_request:43291|pull_request:44815", + "pull_request:43665|pull_request:44815" + ], + "prepared_review_unit_hash": "9a913c35724b9a68dfe4e6464493dfdd738b6ab5b8c52cb7dc494088a1841379", + "component_budget": { + "node_count": 4, + "item_count": 4, + 
"soft_pair_count": 6, + "serialized_chars": 10164, + "estimated_input_tokens": 2541, + "estimated_eval_tokens": 5338 + }, + "budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 10164, + "estimated_input_tokens": 2541, + "estimated_eval_tokens": 5338 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are four unrelated pull requests with one weakly related Whisper pair. The strongest standalone candidate is the Whisper return-language fix; the others are test-only edits, a bogus test PR, and an unrelated FP8/model-loading change.", + "confidence": 0.92, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #42227 is the clearest real bug fix: it directly addresses Whisper return-language propagation, has an explicit linked issue, and includes a focused code-path change in the ASR pipeline.", + "best_issue_reason": null, + "best_pr_reason": "PR #42227 is the best representative of the cluster because it is the only substantive, targeted fix with a clear user-facing bug and explicit issue linkage. The others are either test scaffolding or unrelated changes.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42227", + "right": "pull_request:43291", + "accept": false, + "reason": "Both are Whisper-related, but they fix different concrete problems: return-language plumbing vs. tokenizer decode/test behavior. Too broad to treat as the same underlying change." + }, + { + "left": "pull_request:42227", + "right": "pull_request:43665", + "accept": false, + "reason": "No shared bug or code path. #43665 is just artificial test edits across CLIP and ViT, unrelated to Whisper return-language handling." + }, + { + "left": "pull_request:42227", + "right": "pull_request:44815", + "accept": false, + "reason": "Completely different subsystems and goals: Whisper ASR pipeline vs. FP8 dequant/model loading changes." 
+ }, + { + "left": "pull_request:43291", + "right": "pull_request:43665", + "accept": false, + "reason": "#43291 is a Whisper tokenizer fix/test PR; #43665 is unrelated test noise in vision model tests. Not the same underlying issue." + }, + { + "left": "pull_request:43291", + "right": "pull_request:44815", + "accept": false, + "reason": "Different domains and fixes. Whisper decoding/tests do not match FP8 quantization/dequantization changes." + }, + { + "left": "pull_request:43665", + "right": "pull_request:44815", + "accept": false, + "reason": "No substantive overlap: fabricated model tests versus a real quantization/loading fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 8, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42975-2", + "nodes": [ + "pull_request:42975", + "pull_request:42976" + ], + "soft_pairs": [ + "pull_request:42975|pull_request:42976" + ], + "prepared_review_unit_hash": "29384b8a8304f31ef7d82932c8ad8088b7f76a4eb0446634842820456ea1de44", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5934, + "estimated_input_tokens": 1484, + "estimated_eval_tokens": 3224 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5934, + "estimated_input_tokens": 1484, + "estimated_eval_tokens": 3224 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two unrelated workflow-maintenance PRs: one upgrades several JS-based GitHub Actions for Node 24 compatibility, the other bumps 
docker/build-push-action and a few other workflow actions to newer versions. They overlap in some workflow files but do not fix the same underlying problem.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 42975 is the better cluster anchor because its title and diff clearly describe a concrete compatibility upgrade across many workflows, making the intent more specific and representative.", + "best_issue_reason": null, + "best_pr_reason": "PR 42975 is the best representative PR for this cluster because it is the more clearly scoped Actions-runtime compatibility update and touches the broadest set of affected workflows.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42975", + "right": "pull_request:42976", + "accept": false, + "reason": "Both are general GitHub Actions version bumps, but they change different action families and address different concerns (Node 24 compatibility vs docker/build-push-action updates). Shared workflow filenames alone are not enough to treat them as the same change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 9, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42978-9", + "nodes": [ + "pull_request:42978", + "pull_request:43448", + "pull_request:43451", + "pull_request:43973", + "pull_request:44601", + "pull_request:45283", + "pull_request:45490", + "pull_request:45626", + "pull_request:45630" + ], + "soft_pairs": [ + "pull_request:42978|pull_request:43448", + "pull_request:42978|pull_request:43451", + "pull_request:42978|pull_request:45490", + "pull_request:43448|pull_request:43451", + "pull_request:43448|pull_request:45490", + "pull_request:43451|pull_request:45490", + "pull_request:43973|pull_request:45283", + "pull_request:44601|pull_request:45626", + "pull_request:45283|pull_request:45626", + "pull_request:45490|pull_request:45626", + "pull_request:45490|pull_request:45630", + "pull_request:45626|pull_request:45630" + ], + "prepared_review_unit_hash": "02a4edf8b96b1885e6b0966fa32e36e16aea8de3b8b6a695ac5a8529461e7f63", + "component_budget": { + "node_count": 9, + "item_count": 9, + "soft_pair_count": 12, + "serialized_chars": 29350, + "estimated_input_tokens": 7338, + "estimated_eval_tokens": 14932 + }, + "budget": { + "node_count": 9, + "item_count": 9, + "soft_pair_count": 12, + "serialized_chars": 29350, + "estimated_input_tokens": 7338, + "estimated_eval_tokens": 14932 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The soft matches are superficial: they mostly reflect boilerplate for new model integrations or unrelated infrastructure work. 
No pair appears to be the same underlying bug/change, so none should be merged as duplicates.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "No canonical PR emerges; the items split into unrelated PRs (different model integrations, PP-loading infra, GGUF support), so none is a duplicate of the others.", + "best_issue_reason": null, + "best_pr_reason": "45626 is the broadest single PR by scope, but it is still a distinct model addition and not a duplicate of the others.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42978", + "right": "pull_request:43448", + "accept": false, + "reason": "Different model additions (ViT NEPA vs Molmo); shared auto/docs files are standard scaffolding, not the same change." + }, + { + "left": "pull_request:42978", + "right": "pull_request:43451", + "accept": false, + "reason": "Different model additions (ViT NEPA vs Molmo2); overlap is only in generic registration/docs files." + }, + { + "left": "pull_request:42978", + "right": "pull_request:45490", + "accept": false, + "reason": "ViT NEPA and CTSM are distinct model integrations with different files and behavior." + }, + { + "left": "pull_request:43448", + "right": "pull_request:43451", + "accept": false, + "reason": "Molmo and Molmo2 are separate model families; similar file patterns do not make them duplicates." + }, + { + "left": "pull_request:43448", + "right": "pull_request:45490", + "accept": false, + "reason": "Molmo and CTSM are unrelated model additions." + }, + { + "left": "pull_request:43451", + "right": "pull_request:45490", + "accept": false, + "reason": "Molmo2 and CTSM are different model integrations, not one concrete fix." + }, + { + "left": "pull_request:43973", + "right": "pull_request:45283", + "accept": false, + "reason": "Lfm2 audio support and Qwen3.5 GGUF loading support are unrelated code paths." 
+ }, + { + "left": "pull_request:44601", + "right": "pull_request:45626", + "accept": false, + "reason": "Native pipeline-parallel loading support is infrastructure work; PP-FormulaNet is a model integration, not the same change." + }, + { + "left": "pull_request:45283", + "right": "pull_request:45626", + "accept": false, + "reason": "GGUF loading support and PP-FormulaNet model support address different subsystems and problems." + }, + { + "left": "pull_request:45490", + "right": "pull_request:45626", + "accept": false, + "reason": "CTSM model support and PP-FormulaNet model support are distinct model additions." + }, + { + "left": "pull_request:45490", + "right": "pull_request:45630", + "accept": false, + "reason": "CTSM and Kimi2-6 are separate model additions with different architectures and files." + }, + { + "left": "pull_request:45626", + "right": "pull_request:45630", + "accept": false, + "reason": "PP-FormulaNet and Kimi2-6 are different model integrations; shared auto-registration is boilerplate." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 10, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43488-3", + "nodes": [ + "pull_request:43488", + "pull_request:43532", + "pull_request:45476" + ], + "soft_pairs": [ + "pull_request:43488|pull_request:43532", + "pull_request:43488|pull_request:45476", + "pull_request:43532|pull_request:45476" + ], + "prepared_review_unit_hash": "80aefc7aff3baf6b2f2da1b615ae283146cac32da69af74feb870edf468f7fa7", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7468, + "estimated_input_tokens": 1867, + "estimated_eval_tokens": 3990 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7468, + "estimated_input_tokens": 1867, + "estimated_eval_tokens": 3990 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Three open synthetic \"[don't merge]\" PRs with unrelated changes: one about setup/version/dependency metadata, one about comment/CI workflow automation, and one about CI workflow calling plus placeholder edits. 
The shared vit test file is incidental and does not indicate the same underlying fix.", + "confidence": 0.92, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #43488 is the clearest standalone bot-check style PR: it is small, explicitly labeled as a repo-bot format test, and has the most coherent single-purpose change set.", + "best_issue_reason": null, + "best_pr_reason": "PR #43488 is the best representative of the cluster because it is the most self-contained and clearly intentional test/dummy change, whereas the others mix broader workflow or CI edits with unrelated placeholder modifications.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43488", + "right": "pull_request:43532", + "accept": false, + "reason": "Different change intents: #43488 tweaks setup/version/dependency metadata, while #43532 rewrites CI/comment workflow automation and testing utilities. Shared vit test edits look incidental." + }, + { + "left": "pull_request:43488", + "right": "pull_request:45476", + "accept": false, + "reason": "#43488 is a setup/version/dependency bot-check PR; #45476 adds a PR-CI caller workflow and unrelated tokenization/test placeholder edits. They do not fix the same concrete problem." + }, + { + "left": "pull_request:43532", + "right": "pull_request:45476", + "accept": false, + "reason": "Both touch workflows/tests, but the concrete changes are unrelated: #43532 changes self-comment/self-scheduled/slack-report logic, while #45476 adds a CI caller workflow plus placeholder code/test edits. Not mergeable as one PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 11, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43612-2", + "nodes": [ + "pull_request:43612", + "pull_request:43613" + ], + "soft_pairs": [ + "pull_request:43612|pull_request:43613" + ], + "prepared_review_unit_hash": "142996ed3743430afe4b1d6ea878cfd8f92c76f051eb71905a5acd3910aee3a4", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6083, + "estimated_input_tokens": 1521, + "estimated_eval_tokens": 3298 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6083, + "estimated_input_tokens": 1521, + "estimated_eval_tokens": 3298 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two separate PRs add different segmentation pipelines: one for promptable concept segmentation and one for promptable visual segmentation. 
They share scaffolding/docs patterns but target different tasks, models, and tests, so they are not duplicates.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "No canonical PR: the two pull requests implement distinct pipeline additions with different task scopes and model integrations.", + "best_issue_reason": null, + "best_pr_reason": "PR #43613 is the broader representative of the cluster because it integrates more model/test coverage and the more general visual segmentation pipeline, but it is still not the same change as #43612.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43612", + "right": "pull_request:43613", + "accept": false, + "reason": "Different underlying features: concept segmentation vs visual segmentation. They share documentation and pipeline-registration boilerplate, but the changed filenames, model integrations, and tests show separate code paths and not a single mergeable PR." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 12, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43651-2", + "nodes": [ + "pull_request:43651", + "pull_request:43663" + ], + "soft_pairs": [ + "pull_request:43651|pull_request:43663" + ], + "prepared_review_unit_hash": "b5f1211e5d86db489e9e70df2946784fef578760d5245ba7018183e9b38dd805", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4292, + "estimated_input_tokens": 1073, + "estimated_eval_tokens": 2402 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4292, + "estimated_input_tokens": 1073, + 
"estimated_eval_tokens": 2402 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs touch `trainer.py` and add extensibility hooks for custom trainers, but they change different code paths: one addresses gradient-accumulation loss scaling, the other column filtering based on model signatures. They are not duplicates.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #43663 is the cleaner canonical representative: it introduces a focused helper for signature-column filtering, exposing a clear override point in `Trainer`.", + "best_issue_reason": null, + "best_pr_reason": "PR #43663 is the best representative of the cluster\u2019s theme of trainer extensibility, with a narrowly scoped API change in a single code path.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43651", + "right": "pull_request:43663", + "accept": false, + "reason": "Different underlying changes: #43651 alters gradient-accumulation loss normalization, while #43663 refactors signature-based column filtering. They do not fix the same bug or implement the same feature." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 13, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43656-3", + "nodes": [ + "pull_request:43656", + "pull_request:43836", + "pull_request:43842" + ], + "soft_pairs": [ + "pull_request:43656|pull_request:43842", + "pull_request:43836|pull_request:43842" + ], + "prepared_review_unit_hash": "400b49e4a193c37c3db695614eab29ec57c14a6c89896a34fc69088b25a4af63", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 5992, + "estimated_input_tokens": 1498, + "estimated_eval_tokens": 3252 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 5992, + "estimated_input_tokens": 1498, + "estimated_eval_tokens": 3252 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All three PRs address the same CLI TypeAdapter NameError / annotation-evaluation problem in `src/transformers/cli/serve.py`, with 43842 being the cleanest minimal fix.", + "confidence": 0.82, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 43842 is the most focused and explicit fix for the runtime `TypeAdapter` NameError when pydantic is absent, with a minimal change set and a clear title.", + "best_issue_reason": null, + "best_pr_reason": "PR 43842 best represents the cluster because it directly targets the concrete failure mode with the smallest, most surgical workaround.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43656", + "right": "pull_request:43842", + "accept": true, + 
"reason": "Both PRs are fixing the same underlying runtime annotation/name-resolution bug in the CLI serve module, and the changes are plausibly mergeable into one implementation." + }, + { + "left": "pull_request:43836", + "right": "pull_request:43842", + "accept": true, + "reason": "These appear to be alternative workarounds for the same `TypeAdapter` NameError in the same file and issue scope, so they represent the same underlying bug/change." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The cluster summary is grounded in the packet: all three PRs touch `src/transformers/cli/serve.py` and are clearly annotation/name-resolution fixes around `TypeAdapter`, with 43842 being the most surgical change. The soft-edge accepts are conservative enough, and the paired PRs do look mergeable as alternative fixes for the same bug family." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 14, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "cache", + "cache_hit": true, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43779-2", + "nodes": [ + "pull_request:43779", + "pull_request:43816" + ], + "soft_pairs": [ + "pull_request:43779|pull_request:43816" + ], + "prepared_review_unit_hash": "e7c674adcc031aa8883109a5dfc678b4407fb20b10764302092e384d031ba83a", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4362, + "estimated_input_tokens": 1091, + "estimated_eval_tokens": 2438 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4362, + "estimated_input_tokens": 1091, + "estimated_eval_tokens": 2438 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + 
"split": false, + "analyst_result": { + "summary": "Both pull requests appear to implement the same SwanLab integration change: adding run id/resume support in `integration_utils.py` for issue #43698, with one variant also expanding the docs. They are strong duplicate PR candidates.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #43816 is the cleaner representative: it carries the same SwanLab id/resume support plus the accompanying documentation updates, making it the more complete version of the shared change.", + "best_issue_reason": null, + "best_pr_reason": "PR #43816 is the best global representative because it appears to include the full feature change and docs for SwanLab run id/resume support, while #43779 is the narrower implementation-focused variant.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43779", + "right": "pull_request:43816", + "accept": true, + "reason": "Same underlying SwanLab callback enhancement for id/resume support in the same file and same issue target; the doc differences do not change the core code-path change, so these are duplicate PRs." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet: both PRs target issue #43698, touch the same file, and the previews indicate the same SwanLab id/resume support with #43816 adding docs. The duplicate/mergeable verdict is conservative enough given the shared target and overlapping code change." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 15, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43996", + "pull_request:43997", + "pull_request:44007", + "pull_request:44010", + "pull_request:44013", + "pull_request:44017", + "pull_request:44018", + "pull_request:44019", + "pull_request:44024", + "pull_request:44044", + "pull_request:44056", + "pull_request:44059", + "pull_request:44066", + "pull_request:44068", + "pull_request:44071", + "pull_request:44072", + "pull_request:44073", + "pull_request:44074", + "pull_request:44076", + "pull_request:44085", + "pull_request:44086", + "pull_request:44098", + "pull_request:44101", + "pull_request:44116", + "pull_request:44129", + "pull_request:44154", + "pull_request:44161", + "pull_request:44722" + ], + "soft_pairs": [ + "pull_request:43996|pull_request:44085", + "pull_request:43996|pull_request:44044", + "pull_request:44066|pull_request:44085", + "pull_request:44007|pull_request:44072", + "pull_request:44072|pull_request:44722", + "pull_request:44066|pull_request:44072", + "pull_request:44013|pull_request:44044", + "pull_request:44066|pull_request:44086", + "pull_request:44066|pull_request:44071", + "pull_request:44018|pull_request:44068", + "pull_request:44066|pull_request:44068", + "pull_request:44018|pull_request:44066", + "pull_request:44019|pull_request:44722", + "pull_request:44086|pull_request:44722", + "pull_request:44019|pull_request:44085", + "pull_request:44018|pull_request:44019", + "pull_request:44019|pull_request:44071", + "pull_request:44068|pull_request:44116", + "pull_request:44068|pull_request:44722", + 
"pull_request:44071|pull_request:44722", + "pull_request:44018|pull_request:44116", + "pull_request:44018|pull_request:44086", + "pull_request:44018|pull_request:44085", + "pull_request:44068|pull_request:44086", + "pull_request:44071|pull_request:44085", + "pull_request:44059|pull_request:44086", + "pull_request:44085|pull_request:44086", + "pull_request:44059|pull_request:44722", + "pull_request:44098|pull_request:44101", + "pull_request:44018|pull_request:44071", + "pull_request:44017|pull_request:44722", + "pull_request:44017|pull_request:44161", + "pull_request:44017|pull_request:44073", + "pull_request:44068|pull_request:44071", + "pull_request:44071|pull_request:44086", + "pull_request:43997|pull_request:44073", + "pull_request:44017|pull_request:44071", + "pull_request:44059|pull_request:44071", + "pull_request:44073|pull_request:44101", + "pull_request:44059|pull_request:44068", + "pull_request:44018|pull_request:44059", + "pull_request:44056|pull_request:44101", + "pull_request:44073|pull_request:44161", + "pull_request:44101|pull_request:44161", + "pull_request:43997|pull_request:44017", + "pull_request:43997|pull_request:44056", + "pull_request:44071|pull_request:44076", + "pull_request:44074|pull_request:44101", + "pull_request:44056|pull_request:44161", + "pull_request:43997|pull_request:44018", + "pull_request:43997|pull_request:44161", + "pull_request:44074|pull_request:44161", + "pull_request:43997|pull_request:44071", + "pull_request:44010|pull_request:44071", + "pull_request:44010|pull_request:44073", + "pull_request:44018|pull_request:44129", + "pull_request:43997|pull_request:44074", + "pull_request:43997|pull_request:44129", + "pull_request:44010|pull_request:44018", + "pull_request:44071|pull_request:44129", + "pull_request:43996|pull_request:44161", + "pull_request:43996|pull_request:44017", + "pull_request:44010|pull_request:44044", + "pull_request:44044|pull_request:44074", + "pull_request:44066|pull_request:44722", + 
"pull_request:44085|pull_request:44722", + "pull_request:44085|pull_request:44129", + "pull_request:43996|pull_request:44007", + "pull_request:43996|pull_request:44129", + "pull_request:44044|pull_request:44129", + "pull_request:44044|pull_request:44154", + "pull_request:44024|pull_request:44076" + ], + "prepared_review_unit_hash": "7620acdd8b47a3867613ef1ee4b0cbd05747e0bdb34e685eebca5b9558d167d3", + "component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 77, + "serialized_chars": 84505, + "estimated_input_tokens": 21127, + "estimated_eval_tokens": 42510 + }, + "budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 72, + "serialized_chars": 76938, + "estimated_input_tokens": 19235, + "estimated_eval_tokens": 38726 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is dominated by model-specific refactors to the new standardized output tracing/capture_outputs interface. 
Most PRs are only thematically related, not true duplicates; the clearest duplicates are within the GPT-J/CodeGen line and the GPT-Neo line.", + "confidence": 0.84, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #44722 is the strongest representative of the cluster\u2019s core theme: it explicitly targets the shared tracking issue, touches the same GPT-J/CodeGen output-tracing code path, and appears to be the most complete/latest version of that specific refactor.", + "best_issue_reason": null, + "best_pr_reason": "PR #44722 best represents the underlying change family because it is explicit about the shared issue, spans the central GPT-J/CodeGen refactor, and is the closest match to the duplicated GPT-J-related PRs in the cluster.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43996", + "right": "pull_request:44085", + "accept": false, + "reason": "Different model families and different code paths; only the broad output-tracing theme is shared." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44044", + "accept": false, + "reason": "Both are output-tracing refactors, but they target different architectures and concrete implementations." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44085", + "accept": true, + "reason": "Both are GPT-J output-tracing refactors on the same file/code path and look like overlapping implementations of the same change." + }, + { + "left": "pull_request:44007", + "right": "pull_request:44072", + "accept": false, + "reason": "Same broad standardized-output refactor theme, but different models and no shared concrete bug/change path." + }, + { + "left": "pull_request:44072", + "right": "pull_request:44722", + "accept": false, + "reason": "EfficientNet output tracing and GPT-J/CodeGen output tracing are unrelated concrete changes." 
+ }, + { + "left": "pull_request:44066", + "right": "pull_request:44072", + "accept": false, + "reason": "Different model implementations; similarity is only at the refactor pattern level." + }, + { + "left": "pull_request:44013", + "right": "pull_request:44044", + "accept": false, + "reason": "MobileNetV2 and DeBERTa are separate model-specific refactors with no shared code path." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT-J/CodeGen versus MGP-STR is not the same underlying bug or change." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44071", + "accept": false, + "reason": "Same refactor style, but MPT and GPT-J/CodeGen are distinct implementations." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44068", + "accept": true, + "reason": "Both are GPT-Neo output-tracing refactors on the same model/code path, so they plausibly duplicate the same change." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44068", + "accept": false, + "reason": "GPT-J/CodeGen and GPT-Neo are different models; shared issue target alone is not enough." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44066", + "accept": false, + "reason": "Different model families and separate concrete implementations." + }, + { + "left": "pull_request:44019", + "right": "pull_request:44722", + "accept": false, + "reason": "ResNet output tracing and GPT-J/CodeGen output tracing are unrelated changes." + }, + { + "left": "pull_request:44086", + "right": "pull_request:44722", + "accept": false, + "reason": "MGP-STR and GPT-J/CodeGen are different code paths with only thematic overlap." + }, + { + "left": "pull_request:44019", + "right": "pull_request:44085", + "accept": false, + "reason": "ResNet and GPT-J are distinct models; not the same underlying fix." 
+ }, + { + "left": "pull_request:44018", + "right": "pull_request:44019", + "accept": false, + "reason": "Both are refactors, but they affect different models and cannot be merged as one concrete fix." + }, + { + "left": "pull_request:44019", + "right": "pull_request:44071", + "accept": false, + "reason": "ResNet and MPT are unrelated model-specific changes." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44116", + "accept": false, + "reason": "GPT-Neo and Flaubert are separate code paths; no shared concrete bug." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44722", + "accept": false, + "reason": "GPT-Neo is a different model from GPT-J/CodeGen, despite similar decorator refactoring." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44722", + "accept": false, + "reason": "MPT and GPT-J/CodeGen are different implementations, not one duplicate fix." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44116", + "accept": false, + "reason": "GPT-Neo and Flaubert do not share the same underlying change." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT-Neo and MGP-STR are unrelated concrete refactors." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44085", + "accept": false, + "reason": "GPT-Neo versus GPT-J is only a loose theme match." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44161", + "accept": false, + "reason": "GPT-Neo and LongT5 are separate model-specific implementations." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44071", + "accept": false, + "reason": "Different models and no shared concrete code-path problem." + }, + { + "left": "pull_request:44059", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT2 and MGP-STR are unrelated beyond using output-tracing helpers." 
+ }, + { + "left": "pull_request:44085", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT-J and MGP-STR are different model refactors." + }, + { + "left": "pull_request:44059", + "right": "pull_request:44722", + "accept": false, + "reason": "GPT2 and GPT-J/CodeGen are distinct concrete changes." + }, + { + "left": "pull_request:44098", + "right": "pull_request:44101", + "accept": false, + "reason": "ViLT and Flaubert/XLM are not the same underlying bug or change." + }, + { + "left": "pull_request:44073", + "right": "pull_request:44161", + "accept": false, + "reason": "VisualBert and LongT5 are separate model-specific output refactors." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44018", + "accept": false, + "reason": "SegFormer and GPT-Neo are different models; shared issue target is not enough." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44056", + "accept": false, + "reason": "SegFormer and MPNet are unrelated concrete changes." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44076", + "accept": false, + "reason": "MPT and ImageGPT are distinct model paths." + }, + { + "left": "pull_request:44074", + "right": "pull_request:44101", + "accept": false, + "reason": "TextNet and Flaubert/XLM are unrelated model-specific refactors." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44161", + "accept": false, + "reason": "MPNet and LongT5 do not appear to be the same bug or change." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44018", + "accept": false, + "reason": "RegNet and GPT-Neo are different model implementations." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44161", + "accept": false, + "reason": "RegNet and LongT5 are unrelated concrete changes." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44085", + "accept": false, + "reason": "MPT and GPT-J are different code paths." 
+ }, + { + "left": "pull_request:43996", + "right": "pull_request:44017", + "accept": false, + "reason": "CVT/FNet and SegFormer are unrelated model-specific refactors." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44071", + "accept": false, + "reason": "SqueezeBert and MPT are different concrete implementations." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44073", + "accept": false, + "reason": "SqueezeBert and VisualBert are separate model changes." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44722", + "accept": true, + "reason": "Same GPT-J/CodeGen output-tracing refactor on the same code path; these are plausible duplicates." + }, + { + "left": "pull_request:44085", + "right": "pull_request:44722", + "accept": true, + "reason": "Both touch GPT-J output tracing in the same area and are close variants of the same refactor." + }, + { + "left": "pull_request:44085", + "right": "pull_request:44129", + "accept": false, + "reason": "GPT-J and SpeechT5 are unrelated model changes." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44007", + "accept": false, + "reason": "CVT/FNet and RegNet/ResNet/RT-DETR are separate model-specific refactors." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44129", + "accept": false, + "reason": "CVT/FNet and SpeechT5 do not share the same concrete change." + }, + { + "left": "pull_request:44044", + "right": "pull_request:44129", + "accept": false, + "reason": "DeBERTa and SpeechT5 are unrelated model implementations." + }, + { + "left": "pull_request:44044", + "right": "pull_request:44154", + "accept": false, + "reason": "DeBERTa and ViT-S are separate model-specific refactors." + }, + { + "left": "pull_request:44024", + "right": "pull_request:44076", + "accept": false, + "reason": "FocalNet and ImageGPT are different model code paths; only the decorator pattern overlaps." 
+ } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "Grounded and conservative overall. The summary matches the packet: most PRs are thematic refactors around standardized output tracing, while the only clearly overlapping clusters are GPT-J/CodeGen and GPT-Neo. The selected representative PR #44722 is well supported by the shared issue target and overlapping code path. The true soft-edge verdicts are limited to pairs that look mergeable as the same bugfix family." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 15, + "component_count": 24, + "review_unit_index": 2, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43997", + "pull_request:44010", + "pull_request:44056", + "pull_request:44073", + "pull_request:44074", + "pull_request:44129" + ], + "soft_pairs": [ + "pull_request:44073|pull_request:44074", + "pull_request:44056|pull_request:44073", + "pull_request:44056|pull_request:44074", + "pull_request:43997|pull_request:44010", + "pull_request:44010|pull_request:44129" + ], + "prepared_review_unit_hash": "5fec5a49e0f7ca5d9e596e94b63853f9de7732bfcbea958f544a6394cf882d20", + "component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 77, + "serialized_chars": 84505, + "estimated_input_tokens": 21127, + "estimated_eval_tokens": 42510 + }, + "budget": { + "node_count": 6, + "item_count": 6, + "soft_pair_count": 5, + "serialized_chars": 13067, + "estimated_input_tokens": 3267, + "estimated_eval_tokens": 6790 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These PRs all implement the same broad 
pattern\u2014migrating different model files to standardized output capture\u2014but they are separate model-specific refactors, not duplicates of the same concrete change.", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44129 is the strongest representative: it has an explicit issue target, a larger and more self-contained output-tracing refactor, and the diff shows a concrete implementation change rather than just a small follow-up tweak.", + "best_issue_reason": null, + "best_pr_reason": "44129 is the best single PR to anchor the cluster because it is issue-linked and clearly expresses the standardized output-capture migration in SpeechT5; the other PRs are similar but model-specific variants of the same refactor pattern.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44073", + "right": "pull_request:44074", + "accept": false, + "reason": "Both are standardized output-tracing refactors, but they touch different models (VisualBert vs TextNet) and distinct code paths, so they are not the same change and would not reasonably merge into one PR." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44073", + "accept": false, + "reason": "Same refactor theme, but MPNet and VisualBert are separate implementations with different model internals and filenames; this is a pattern match, not a duplicate underlying fix." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44074", + "accept": false, + "reason": "These are parallel refactors in different model codebases, not the same bug or code-path fix. The shared capture_outputs migration is too broad to treat as a duplicate." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44010", + "accept": false, + "reason": "RegNet and SqueezeBert are unrelated model implementations. Even though both adopt the same output-capture approach, they do not target the same underlying issue." 
+ }, + { + "left": "pull_request:44010", + "right": "pull_request:44129", + "accept": false, + "reason": "Both migrate output handling, but SqueezeBert and SpeechT5 are different models with different forward paths and no evidence of a shared concrete defect." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 16, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43998-13", + "nodes": [ + "pull_request:43998", + "pull_request:43999", + "pull_request:44000", + "pull_request:44001", + "pull_request:44002", + "pull_request:44003", + "pull_request:44004", + "pull_request:44025", + "pull_request:44026", + "pull_request:44027", + "pull_request:44028", + "pull_request:44029", + "pull_request:44030" + ], + "soft_pairs": [ + "pull_request:43998|pull_request:44002", + "pull_request:43998|pull_request:44000", + "pull_request:43998|pull_request:44001", + "pull_request:43998|pull_request:44003", + "pull_request:43998|pull_request:44029", + "pull_request:43999|pull_request:44001", + "pull_request:43999|pull_request:44002", + "pull_request:43999|pull_request:44025", + "pull_request:43999|pull_request:44026", + "pull_request:43999|pull_request:44027", + "pull_request:44000|pull_request:44001", + "pull_request:44000|pull_request:44002", + "pull_request:44000|pull_request:44003", + "pull_request:44000|pull_request:44025", + "pull_request:44001|pull_request:44002", + "pull_request:44001|pull_request:44003", + "pull_request:44001|pull_request:44025", + "pull_request:44002|pull_request:44029", + "pull_request:44002|pull_request:44003", + "pull_request:44003|pull_request:44028", + "pull_request:44003|pull_request:44004", + 
"pull_request:43998|pull_request:44004", + "pull_request:44001|pull_request:44004", + "pull_request:44002|pull_request:44004", + "pull_request:44000|pull_request:44004", + "pull_request:44025|pull_request:44026", + "pull_request:44025|pull_request:44027", + "pull_request:44025|pull_request:44028", + "pull_request:44025|pull_request:44029", + "pull_request:44025|pull_request:44030", + "pull_request:44026|pull_request:44029", + "pull_request:44026|pull_request:44027", + "pull_request:44026|pull_request:44028", + "pull_request:44026|pull_request:44030", + "pull_request:44027|pull_request:44028", + "pull_request:44027|pull_request:44029", + "pull_request:44027|pull_request:44030", + "pull_request:44028|pull_request:44029", + "pull_request:44028|pull_request:44030", + "pull_request:44029|pull_request:44030" + ], + "prepared_review_unit_hash": "83b95bb0cdb797375bfff98af281829e0ac4f9109bbe603be3d4ea0287cd294b", + "component_budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 40, + "serialized_chars": 40169, + "estimated_input_tokens": 10043, + "estimated_eval_tokens": 20342 + }, + "budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 40, + "serialized_chars": 40169, + "estimated_input_tokens": 10043, + "estimated_eval_tokens": 20342 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These PRs are thematically similar output-tracing refactors, but they target different model implementations and code paths. 
They are not duplicates, so the cluster should not be collapsed.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "No canonical PR: each pull request changes a different model or wrapper, so they are related by theme only, not by the same underlying change.", + "best_issue_reason": null, + "best_pr_reason": "No single best PR for deduping; the set is a collection of separate model-specific refactors rather than one shared fix.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43998", + "right": "pull_request:44002", + "accept": false, + "reason": "Different model files and code paths: timm_backbone vs upernet. Same refactor theme, not the same change." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44000", + "accept": false, + "reason": "Different architectures and wrappers: timm_backbone vs vision_text_dual_encoder. Not a duplicate bug/change." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44001", + "accept": false, + "reason": "Different model implementations: timm_backbone and univnet. Similar output-tracing cleanup only." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44003", + "accept": false, + "reason": "Different subsystems and files: timm_backbone vs falcon_mamba/mamba. Not the same concrete fix." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44029", + "accept": false, + "reason": "Backbone refactor vs RWKV recurrent model refactor. Shared wording, separate code paths." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44001", + "accept": false, + "reason": "MobileNetV1 vs UnivNet are unrelated model families; both refactor tracing, but not the same change." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44002", + "accept": false, + "reason": "MobileNetV1 and UperNet touch different model stacks and outputs. Not duplicates." 
+ }, + { + "left": "pull_request:43999", + "right": "pull_request:44025", + "accept": false, + "reason": "MobileNetV1 vs Depth Anything are different models with different output wrappers; only the refactor theme matches." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44026", + "accept": false, + "reason": "MobileNetV1 and VisionEncoderDecoder are unrelated implementations; no shared underlying bug." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44027", + "accept": false, + "reason": "MobileNetV1 vs SpeechEncoderDecoder are distinct code paths and model families." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44001", + "accept": false, + "reason": "Vision-text dual encoder and UnivNet are unrelated; same refactor style only." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44002", + "accept": false, + "reason": "Vision-text dual encoder vs UperNet are different model wrappers and outputs." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44003", + "accept": false, + "reason": "Vision-text dual encoder is unrelated to Mamba/Falcon-Mamba; not one shared fix." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44025", + "accept": false, + "reason": "Different model domains and files: vision-text dual encoder vs Depth Anything." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44002", + "accept": false, + "reason": "UnivNet vs UperNet are different model families; not the same bug or change." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44003", + "accept": false, + "reason": "UnivNet and Mamba/Falcon-Mamba touch separate code paths, despite similar refactor language." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44025", + "accept": false, + "reason": "UnivNet vs Depth Anything are unrelated model implementations." 
+ }, + { + "left": "pull_request:44002", + "right": "pull_request:44029", + "accept": false, + "reason": "UperNet and RWKV are unrelated architectures; no shared concrete change." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44003", + "accept": false, + "reason": "UperNet vs Mamba/Falcon-Mamba are distinct model stacks with different output behavior." + }, + { + "left": "pull_request:44003", + "right": "pull_request:44028", + "accept": false, + "reason": "Mamba/Falcon-Mamba vs SuperPoint are different domains and files; not mergeable as one PR." + }, + { + "left": "pull_request:44003", + "right": "pull_request:44004", + "accept": false, + "reason": "Mamba/Falcon-Mamba refactor is unrelated to CodeGen output handling." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44004", + "accept": false, + "reason": "timm_backbone and CodeGen are unrelated model code paths; same theme only." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44004", + "accept": false, + "reason": "UnivNet and CodeGen are different models with different output semantics." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44004", + "accept": false, + "reason": "UperNet and CodeGen are separate implementations; no common underlying bug." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44026", + "accept": false, + "reason": "Depth Anything and VisionEncoderDecoder are different model wrappers, not the same change." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44027", + "accept": false, + "reason": "Depth Anything vs SpeechEncoderDecoder are unrelated refactors across different code paths." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44028", + "accept": false, + "reason": "Depth Anything and SuperPoint are distinct vision models; no shared bug fix." 
+ }, + { + "left": "pull_request:44025", + "right": "pull_request:44029", + "accept": false, + "reason": "Depth Anything and RWKV are unrelated model families." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44030", + "accept": false, + "reason": "Depth Anything vs DPR are different model stacks and output contracts." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44027", + "accept": false, + "reason": "VisionEncoderDecoder and SpeechEncoderDecoder are parallel wrappers, but they are separate implementations and PRs." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44028", + "accept": false, + "reason": "VisionEncoderDecoder vs SuperPoint are unrelated models; not the same underlying fix." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44029", + "accept": false, + "reason": "VisionEncoderDecoder and RWKV change different code paths and output handling." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44030", + "accept": false, + "reason": "VisionEncoderDecoder and DPR are separate model families and refactors." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44028", + "accept": false, + "reason": "SpeechEncoderDecoder and SuperPoint are unrelated model implementations." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44029", + "accept": false, + "reason": "SpeechEncoderDecoder vs RWKV are different architectures; no duplicate change." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44030", + "accept": false, + "reason": "SpeechEncoderDecoder and DPR touch different model code and output behavior." + }, + { + "left": "pull_request:44028", + "right": "pull_request:44029", + "accept": false, + "reason": "SuperPoint and RWKV are unrelated subsystems and cannot be merged as one PR." 
+ }, + { + "left": "pull_request:44028", + "right": "pull_request:44030", + "accept": false, + "reason": "SuperPoint vs DPR are separate model implementations with different output paths." + }, + { + "left": "pull_request:44029", + "right": "pull_request:44030", + "accept": false, + "reason": "RWKV and DPR are distinct models; similar refactor theme does not make them duplicates." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 17, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44662-3", + "nodes": [ + "pull_request:44662", + "pull_request:45075", + "pull_request:45401" + ], + "soft_pairs": [ + "pull_request:44662|pull_request:45401", + "pull_request:44662|pull_request:45075", + "pull_request:45075|pull_request:45401" + ], + "prepared_review_unit_hash": "c617a66fbd9f69e1d3001ac8592df288a9d483bcc7bb3cf7516a13169b2ab085", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 10844, + "estimated_input_tokens": 2711, + "estimated_eval_tokens": 5678 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 10844, + "estimated_input_tokens": 2711, + "estimated_eval_tokens": 5678 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All three PRs are separate model integrations (PenguinVL, DeepSeek-OCR-2, and Voxtral TTS). 
They share only generic Transformers plumbing like auto-mapping and docs, not the same underlying code change or bug fix, so none should be clustered as duplicates.", + "confidence": 0.99, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": "PR 45075 is the strongest standalone representative in this set: it is a complete model addition with substantial implementation/test coverage and high review activity, but it is still unrelated to the other two PRs.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44662", + "right": "pull_request:45401", + "accept": false, + "reason": "Different model additions for PenguinVL vs Voxtral TTS; overlap is limited to shared framework registration/docs files, not the same concrete model codepath or fix." + }, + { + "left": "pull_request:44662", + "right": "pull_request:45075", + "accept": false, + "reason": "PenguinVL and DeepSeek-OCR-2 are distinct model implementations with different architectures and files; shared auto-registration and docs plumbing is too generic to treat as a duplicate." + }, + { + "left": "pull_request:45075", + "right": "pull_request:45401", + "accept": false, + "reason": "DeepSeek-OCR-2 and Voxtral TTS address different models and functionality; they only coincide in generic Transformers integration points, so they are not the same change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 18, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44979-2", + "nodes": [ + "pull_request:44979", + "pull_request:45363" + ], + "soft_pairs": [ + "pull_request:44979|pull_request:45363" + ], + "prepared_review_unit_hash": "caf349f15c8fc438109ff978706c49231c840c6091f556ab2bf075a853469268", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4407, + "estimated_input_tokens": 1102, + "estimated_eval_tokens": 2460 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4407, + "estimated_input_tokens": 1102, + "estimated_eval_tokens": 2460 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These PRs are related in the fusion/kernels area, but they are not the same change: one adds a generic module-fusion API scaffold, while the other wires `KernelConfig`-driven n-to-1 fusion into model loading and runtime usage.", + "confidence": 0.93, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45363 is the more concrete, end-user-facing change: it integrates `KernelConfig` fusion into `modeling_utils`/integrations and includes a usage example, while PR 44979 is the lower-level API foundation.", + "best_issue_reason": null, + "best_pr_reason": "PR 45363 is the best representative for the cluster because it covers the actual functional behavior and integration path, not just the API scaffolding.", + "soft_edge_verdicts": [ + { + "left": 
"pull_request:44979", + "right": "pull_request:45363", + "accept": false, + "reason": "Different layers of the feature: 44979 introduces a generic `Module Fusion API` file and tests, while 45363 implements `KernelConfig`-based n-to-1 kernel fusion and runtime wiring. They are related, but not the same concrete fix and would not be merged as one PR." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 19, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45086-2", + "nodes": [ + "pull_request:45086", + "pull_request:45317" + ], + "soft_pairs": [ + "pull_request:45086|pull_request:45317" + ], + "prepared_review_unit_hash": "b1960996ce8915af7052847dd1bacfe742a8c1f1f004d781769b53c64306558d", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4414, + "estimated_input_tokens": 1104, + "estimated_eval_tokens": 2464 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4414, + "estimated_input_tokens": 1104, + "estimated_eval_tokens": 2464 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs fix the same Mistral regex patching AttributeError in the same function and file; 45317 looks like the fuller follow-up with tests, while 45086 is the smaller earlier variant.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45317 is the better canonical PR because it addresses the same bug as 45086 but includes tests and appears to be the more complete implementation of the fix.", 
+ "best_issue_reason": null, + "best_pr_reason": "PR 45317 is the best representative because it contains the same code-path fix plus test coverage, making it the most complete and reviewable version.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45086", + "right": "pull_request:45317", + "accept": true, + "reason": "Same underlying AttributeError in _patch_mistral_regex, same file and code path, with nearly identical fix; these could plausibly be merged into one PR." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 20, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45115-2", + "nodes": [ + "pull_request:45115", + "pull_request:45569" + ], + "soft_pairs": [ + "pull_request:45115|pull_request:45569" + ], + "prepared_review_unit_hash": "11d4f8f78672ff43078b60613d4b6bdc3c1101c1bedf6357332df1bc8e2a9c45", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6066, + "estimated_input_tokens": 1517, + "estimated_eval_tokens": 3290 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6066, + "estimated_input_tokens": 1517, + "estimated_eval_tokens": 3290 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two PRs are both about NemotronH, but they tackle different changes: PR 45115 is a refactor/compatibility and conversion-mapping update for the existing NemotronH architecture, while PR 45569 introduces separate NemotronH Dense/Sparse model variants, docs, and auto/model wiring. 
They are related by subsystem, not the same underlying change.", + "confidence": 0.93, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45569 is the better representative of the cluster because it is the broader, more user-visible NemotronH family change and includes the new dense/sparse model split plus docs and integration updates.", + "best_issue_reason": null, + "best_pr_reason": "PR 45569 is the strongest standalone candidate in this cluster: it covers the larger end-to-end model-family addition, not just an internal refactor/conversion mapping adjustment.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45115", + "right": "pull_request:45569", + "accept": false, + "reason": "They share NemotronH code paths, but 45115 is a refactor/conversion-mapping change and 45569 is a separate feature adding Dense/Sparse model variants and docs; they are not the same concrete fix or mergeable into one PR." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 21, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45332-2", + "nodes": [ + "pull_request:45332", + "pull_request:45333" + ], + "soft_pairs": [ + "pull_request:45332|pull_request:45333" + ], + "prepared_review_unit_hash": "913328da88e1fbcd0f191981b78ae4cf4ea3c169e065253c3acee43d28eb662c", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5397, + "estimated_input_tokens": 1350, + "estimated_eval_tokens": 2956 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5397, + "estimated_input_tokens": 1350, + "estimated_eval_tokens": 2956 + }, 
+ "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are related heterogeneous-support PRs, but they are not duplicates: one introduces per-layer config plumbing, the other wires that config into cache/modeling behavior. The pair should stay separate.", + "confidence": 0.88, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45332 is the broader end-to-end feature implementation, covering model/cache/masking integration on top of heterogeneous config support.", + "best_issue_reason": null, + "best_pr_reason": "PR 45332 is the best representative of the overall feature cluster because it is the most complete user-facing implementation; 45333 is a narrower foundation PR.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45332", + "right": "pull_request:45333", + "accept": false, + "reason": "Related but distinct: 45333 adds the per-layer config API, while 45332 applies heterogeneous behavior across model/cache code paths. They share infrastructure, but they are not the same underlying change and would not naturally collapse into one PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 22, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45394-5", + "nodes": [ + "pull_request:45394", + "pull_request:45487", + "pull_request:45592", + "pull_request:45596", + "pull_request:45615" + ], + "soft_pairs": [ + "pull_request:45394|pull_request:45596", + "pull_request:45487|pull_request:45592", + "pull_request:45487|pull_request:45596", + "pull_request:45487|pull_request:45615", + "pull_request:45592|pull_request:45596", + "pull_request:45592|pull_request:45615", + "pull_request:45596|pull_request:45615" + ], + "prepared_review_unit_hash": "67b18b0b30292a5c02b9c8c41d899a9ab90f81b9fd36847271240838d8055795", + "component_budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 7, + "serialized_chars": 13273, + "estimated_input_tokens": 3319, + "estimated_eval_tokens": 6894 + }, + "budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 7, + "serialized_chars": 13273, + "estimated_input_tokens": 3319, + "estimated_eval_tokens": 6894 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "This cluster is mostly a bundle of unrelated model-specific fixes and test expectation updates. 
The only substantive code fix is the AltCLIP/ChineseCLIP model-parallel patch, but the other PRs target different models and different failure modes, so they should not be merged together.", + "confidence": 0.92, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": "PR #45487 is the strongest canonical candidate because it makes an actual code-path fix for model-parallel behavior, whereas the others are narrower test-only expectation updates or model-specific patches.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45394", + "right": "pull_request:45596", + "accept": false, + "reason": "Different models and different changes: XCLIP no_split_modules fix vs BLT XPU test expectation update. Same general theme, but not the same bug or mergeable PR." + }, + { + "left": "pull_request:45487", + "right": "pull_request:45592", + "accept": false, + "reason": "AltCLIP/ChineseCLIP model-parallel code fix is not the same as FastVLM test padding-side adjustment. Different code paths and failure modes." + }, + { + "left": "pull_request:45487", + "right": "pull_request:45596", + "accept": false, + "reason": "Model-parallel fixes in altclip/chinese_clip are unrelated to BLT XPU expected-output updates. Test-only expectation changes do not match the same underlying bug." + }, + { + "left": "pull_request:45487", + "right": "pull_request:45615", + "accept": false, + "reason": "AltCLIP/ChineseCLIP model code fix and Qianfan OCR XPU expectation additions are different artifacts with different behavior and targets." + }, + { + "left": "pull_request:45592", + "right": "pull_request:45596", + "accept": false, + "reason": "Both are test-related, but they address different models and different issues: FastVLM padding-side handling vs BLT XPU generation expectations." 
+ }, + { + "left": "pull_request:45592", + "right": "pull_request:45615", + "accept": false, + "reason": "FastVLM test padding-side fix and Qianfan OCR XPU expectations are unrelated model-specific test changes." + }, + { + "left": "pull_request:45596", + "right": "pull_request:45615", + "accept": false, + "reason": "Both add XPU expectations, but for different models and outputs; they are separate test updates, not one underlying bug or change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 23, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45415-2", + "nodes": [ + "pull_request:45415", + "pull_request:45425" + ], + "soft_pairs": [ + "pull_request:45415|pull_request:45425" + ], + "prepared_review_unit_hash": "0c2b974bcafabe9e8847de5f9f175733aa8506fb487246b23fc7c6406e0e70f3", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5513, + "estimated_input_tokens": 1379, + "estimated_eval_tokens": 3014 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5513, + "estimated_input_tokens": 1379, + "estimated_eval_tokens": 3014 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two open PRs both touch typing/type-checking, but they address different changes: one is a broad repo-wide typing pass, the other adds specific typing aliases/protocols for modeling_utils and related modules. 
They are not the same underlying fix.", + "confidence": 0.93, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45425 is the more focused, self-contained typing change; PR 45415 is a broad sweep across many files and looks like a separate umbrella cleanup.", + "best_issue_reason": null, + "best_pr_reason": "PR 45425 is the best representative of the cluster because it has a clearer concrete typing objective and narrower scope than the generalized type-checking pass in PR 45415.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45415", + "right": "pull_request:45425", + "accept": false, + "reason": "Both are about typing, but 45415 is a broad type-checking sweep across many files while 45425 adds specific typing protocols/aliases for modeling_utils and related code paths. They do not look like the same concrete bug/change and would not plausibly be merged as one PR." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 24, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45639-2", + "nodes": [ + "pull_request:45639", + "pull_request:45645" + ], + "soft_pairs": [ + "pull_request:45639|pull_request:45645" + ], + "prepared_review_unit_hash": "857f1f468105e11437eef78c31ec91e8db5fd1b95cc91b6a8f3ce560954095a9", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4589, + "estimated_input_tokens": 1148, + "estimated_eval_tokens": 2552 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4589, + "estimated_input_tokens": 1148, + "estimated_eval_tokens": 2552 + }, + "overflow_policy": 
"truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs fix the same xdist collision around captured_info artifacts; PR 45645 is the broader superset and includes the core testing_utils change from 45639 plus CI log-handling updates.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45645 is the better canonical PR because it includes the core xdist-safe captured_info fix from 45639 and also updates the CI workflow to handle multiple per-worker log files, making it the more complete change.", + "best_issue_reason": null, + "best_pr_reason": "PR 45645 best represents the cluster since it subsumes the underlying debug-log collision fix and extends it into the artifact collection path; 45639 is a narrower subset of the same fix.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45639", + "right": "pull_request:45645", + "accept": true, + "reason": "Accepted: 45645 includes the same concrete xdist-safe captured_info fix in testing_utils and tests, and expands it with workflow changes; this is plausibly one combined PR for the same bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + } + ] +} diff --git a/snapshots/20260425T120019Z/analysis-runs/hybrid-model-20260425t120019z/manifest.json b/snapshots/20260425T120019Z/analysis-runs/hybrid-model-20260425t120019z/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..f6b0e8db95cbe92e8ef5ecdf3fd67e9aecce70fc --- /dev/null +++ b/snapshots/20260425T120019Z/analysis-runs/hybrid-model-20260425t120019z/manifest.json @@ -0,0 +1,14 @@ +{ + "analysis_id": "hybrid-model-20260425t120019z", + "artifacts": { + "hybrid": "snapshots/20260425T120019Z/analysis-runs/hybrid-model-20260425t120019z/analysis-report-hybrid.json", + "hybrid_reviews": "snapshots/20260425T120019Z/analysis-runs/hybrid-model-20260425t120019z/analysis-report-hybrid.llm-reviews.json" + }, + "channel": "canonical", + "model": null, + "published_at": "2026-04-25T12:06:36Z", + "repo": "huggingface/transformers", + "schema_version": 1, + "snapshot_id": "20260425T120019Z", + "variant": "hybrid" +} diff --git a/snapshots/20260425T120019Z/manifest.json b/snapshots/20260425T120019Z/manifest.json index 3da8f2355f99d0e80b79d1c8e28bf73de997efc9..31dc4260a1e63b5fcda9d5fa00f4ae57afd91ee2 100644 --- a/snapshots/20260425T120019Z/manifest.json +++ b/snapshots/20260425T120019Z/manifest.json @@ -32,6 +32,24 @@ "timeline_events": 275 }, "extracted_at": "2026-04-25T12:00:19Z", + "published_analysis": { + "canonical_analysis_id": "hybrid-model-20260425t120019z", + "runs": { + "hybrid-model-20260425t120019z": { + "analysis_id": "hybrid-model-20260425t120019z", + "artifacts": { + "hybrid": "snapshots/20260425T120019Z/analysis-runs/hybrid-model-20260425t120019z/analysis-report-hybrid.json", + "hybrid_reviews": "snapshots/20260425T120019Z/analysis-runs/hybrid-model-20260425t120019z/analysis-report-hybrid.llm-reviews.json" + }, + "channel": "canonical", + 
"manifest_path": "snapshots/20260425T120019Z/analysis-runs/hybrid-model-20260425t120019z/manifest.json", + "model": null, + "published_at": "2026-04-25T12:06:36Z", + "variant": "hybrid" + } + }, + "schema_version": 1 + }, "repo": "huggingface/transformers", "snapshot_id": "20260425T120019Z", "watermark": {