diff --git a/analysis/current/analysis-report-hybrid.json b/analysis/current/analysis-report-hybrid.json index bbcd44df7915deb953cfc523aa27ddc2a279642b..8e247fe394d9df4eefa2c6509f663af3cf9c1964 100644 --- a/analysis/current/analysis-report-hybrid.json +++ b/analysis/current/analysis-report-hybrid.json @@ -1,8 +1,8 @@ { "schema_version": "1.0", "repo": "huggingface/transformers", - "snapshot_id": "20260421T000044Z", - "generated_at": "2026-04-21T00:08:11Z", + "snapshot_id": "20260421T060039Z", + "generated_at": "2026-04-21T06:06:35Z", "evidence_quality": "full", "llm_enrichment": true, "meta_bugs": [ @@ -1727,6 +1727,281 @@ ] } ] + }, + { + "cluster_id": "cluster-44018-2", + "summary": "Cluster of 2 related pull requests linked by soft_similarity.", + "status": "open", + "confidence": 0.5, + "canonical_issue_number": null, + "canonical_pr_number": 44068, + "issue_numbers": [], + "pr_numbers": [ + 44018, + 44068 + ], + "evidence_types": [ + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 44018, + "right_pr_number": 44068, + "code_similarity": 0.766, + "size_similarity": 0.939, + "file_overlap": 1.0, + "area_overlap": 0.425, + "patch_similarity": 0.866, + "shared_filenames": [ + "src/transformers/models/gpt_neo/modeling_gpt_neo.py", + "tests/models/gpt_neo/test_modeling_gpt_neo.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/gpt_neo/modeling_gpt_neo.py", + "left_ranges": [ + [ + 26, + 46 + ], + [ + 138, + 143 + ], + [ + 182, + 187 + ], + [ + 283, + 295 + ], + [ + 327, + 341 + ], + [ + 348, + 354 + ], + [ + 360, + 369 + ], + [ + 399, + 406 + ], + [ + 411, + 419 + ], + [ + 428, + 433 + ], + [ + 472, + 492 + ], + [ + 507, + 513 + ], + [ + 519, + 528 + ], + [ + 541, + 559 + ], + [ + 562, + 567 + ], + [ + 595, + 601 + ], + [ + 607, + 614 + ], + [ + 627, + 643 + ], + [ + 685, + 690 + ], + [ + 708, + 714 + ], + [ + 720, + 727 + ], + [ + 740, + 757 + ], + [ + 761, + 766 + ], + [ + 780, + 786 + ], + [ + 791, + 798 + ], + [ + 
807, + 822 + ], + [ + 840, + 845 + ] + ], + "right_ranges": [ + [ + 26, + 31 + ], + [ + 34, + 44 + ], + [ + 136, + 141 + ], + [ + 180, + 185 + ], + [ + 281, + 293 + ], + [ + 325, + 339 + ], + [ + 346, + 352 + ], + [ + 358, + 367 + ], + [ + 397, + 404 + ], + [ + 409, + 417 + ], + [ + 426, + 434 + ], + [ + 463, + 483 + ], + [ + 498, + 504 + ], + [ + 510, + 519 + ], + [ + 532, + 550 + ], + [ + 553, + 564 + ], + [ + 586, + 592 + ], + [ + 598, + 605 + ], + [ + 618, + 634 + ], + [ + 676, + 688 + ], + [ + 699, + 705 + ], + [ + 711, + 718 + ], + [ + 731, + 748 + ], + [ + 752, + 762 + ], + [ + 771, + 777 + ], + [ + 782, + 789 + ], + [ + 798, + 813 + ], + [ + 831, + 836 + ] + ] + }, + { + "filename": "tests/models/gpt_neo/test_modeling_gpt_neo.py", + "left_ranges": [ + [ + 458, + 464 + ] + ], + "right_ranges": [ + [ + 458, + 464 + ] + ] + } + ] + } + ] } ], "duplicate_issues": [], @@ -1795,6 +2070,15 @@ "target_issue_number": 43979, "reason": "PRs in cluster-43979-11 are treated as duplicates because they converge on issue #43979 with closing_reference, shared_issue_target, soft_similarity evidence." }, + { + "cluster_id": "cluster-44018-2", + "canonical_pr_number": 44068, + "duplicate_pr_numbers": [ + 44018 + ], + "target_issue_number": null, + "reason": "PRs in cluster-44018-2 are treated as duplicates because they share soft_similarity evidence." 
+ }, { "cluster_id": "cluster-45081-3", "canonical_pr_number": 45317, diff --git a/analysis/current/analysis-report-hybrid.llm-reviews.json b/analysis/current/analysis-report-hybrid.llm-reviews.json index b0264003c79ab2983fd57333aacede405ad1fd92..77cb05758f7a406dbe0d9db7a56d9cbf7620ce3e 100644 --- a/analysis/current/analysis-report-hybrid.llm-reviews.json +++ b/analysis/current/analysis-report-hybrid.llm-reviews.json @@ -1,8 +1,8 @@ { "schema_version": "1.0", "repo": "huggingface/transformers", - "snapshot_id": "20260421T000044Z", - "generated_at": "2026-04-21T00:08:11Z", + "snapshot_id": "20260421T060039Z", + "generated_at": "2026-04-21T06:06:35Z", "model": "gpt-5.4-mini?service_tier=flex", "reviews": [ { @@ -26,12 +26,12 @@ "issue:13244|issue:16618", "issue:15980|issue:16618" ], - "prepared_review_unit_hash": "d07ee95248d2dbb1121c608dd30143d436aa68c59745ceff065f04f499399ea2", + "prepared_review_unit_hash": "31dd82c6fb7a9693f3ae7988bd88e9fed476e751189d484c1bbf29009a125ea4", "component_budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 2, - "serialized_chars": 2478, + "serialized_chars": 2479, "estimated_input_tokens": 620, "estimated_eval_tokens": 1496 }, @@ -39,7 +39,7 @@ "node_count": 3, "item_count": 3, "soft_pair_count": 2, - "serialized_chars": 2478, + "serialized_chars": 2479, "estimated_input_tokens": 620, "estimated_eval_tokens": 1496 }, @@ -48,24 +48,24 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Only one pair shows possible topical overlap: the generic model-loading failure (#16618) and the private-model auth/error-message issue (#15980). But the titles point to different concrete problems, so I would not merge them as duplicates. 
#13244 is unrelated.", - "confidence": 0.72, - "canonical_issue_reason": "#16618 is the broadest, most central report in the set: it describes a model-loading failure with a general user-facing symptom, whereas #15980 is a narrower auth/error-message case.", + "summary": "These do not look like duplicates: one is about Tapas tokenization differing from TensorFlow, one is about a bad login/auth error when downloading a private model, and one is a general failure to load bert-base-uncased. They touch different code paths and failure modes.", + "confidence": 0.96, + "canonical_issue_reason": "Issue #16618 is the broadest, most general model-loading report, so it is the best single issue to represent the cluster if one must be chosen.", "canonical_pr_reason": null, - "best_issue_reason": "#16618 is the best representative issue for this cluster because it captures the general inability to load a model. #15980 is more specific and likely a distinct root cause; #13244 is unrelated.", + "best_issue_reason": "Issue #16618 is the most general and user-facing of the three; the other two are narrower and clearly about different problems.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:13244", "right": "issue:16618", "accept": false, - "reason": "The Tapas tokenization mismatch is a different subsystem and problem type than a model-loading failure; no shared underlying bug is evident." + "reason": "Tokenization mismatch for Tapas is a different problem from failing to load bert-base-uncased; they involve different subsystems and symptoms." }, { "left": "issue:15980", "right": "issue:16618", "accept": false, - "reason": "Both concern loading models, but #15980 is specifically about a bad private-model/no-login error message while #16618 is a generic failure to load 'bert-base-uncased'; these do not clearly share the same root cause." 
+ "reason": "Private-model download/auth error is a distinct login/authorization issue, not the same as a generic model-loading failure." } ] }, @@ -87,7 +87,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:24643", "issue:30277", @@ -148,8 +148,8 @@ "issue:35545|issue:42915", "issue:35545|issue:41720", "issue:44509|issue:44512", - "issue:43638|issue:44805", "issue:35545|issue:36010", + "issue:43638|issue:44805", "issue:43596|issue:44805", "issue:44805|issue:45161", "issue:44805|issue:44918", @@ -194,8 +194,8 @@ "issue:43425|issue:45070", "issue:39290|issue:43531", "issue:41093|issue:43638", - "issue:45362|issue:45464", "issue:43404|issue:43866", + "issue:45362|issue:45464", "issue:43792|issue:45070", "issue:43541|issue:43828", "issue:36010|issue:39290", @@ -203,20 +203,20 @@ "issue:41093|issue:41762", "issue:43296|issue:43366" ], - "prepared_review_unit_hash": "13cea400d64671ff0736c54708b19757f67d074bd586fa223ef1281d1d6a0436", + "prepared_review_unit_hash": "538109df4c9a9e0dd7be883c0b25881fc90bb4a5eb48c1b663147c582af79d04", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 63, - "serialized_chars": 46479, + "serialized_chars": 46478, "estimated_input_tokens": 11620, "estimated_eval_tokens": 23496 }, @@ -225,444 +225,366 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly heterogeneous: many unrelated model-loading, docs, and training/runtime bugs are mixed together. 
The only clear duplicate pair is the DeepSpeed ZeRO-3 + BERT index-out-of-bounds reports (43596/43638).", - "confidence": 0.78, - "canonical_issue_reason": "issue:43596 is the broadest, clearest description of the only true duplicate pair in the cluster: the same DeepSpeed ZeRO-3/BERT index-0-out-of-bounds failure.", + "summary": "This cluster is mostly heterogeneous: many issues share broad themes like model loading, DeepSpeed/ZeRO, or docs cleanup, but they point to different concrete failures. Only one soft pair looks like a true duplicate: the two ZeRO-3/Bert index-out-of-bounds reports.", + "confidence": 0.89, + "canonical_issue_reason": "issue:43596 is the cleanest representative of the ZeRO-3/Bert \"index 0 is out of bounds for dimension 0 with size 0\" bug; issue:43638 appears to be the same failure with a slightly different wording.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43596 is the best representative issue because it names the underlying ZeRO-3/BERT loading bug in a more general way than the narrower non-pretrained-BERT wording.", + "best_issue_reason": "issue:43596 is the strongest canonical issue for the only clear duplicate subcluster in this set; it is specific, reproducible, and has a near-identical twin in issue:43638.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44322", "right": "issue:45464", "accept": false, - "reason": "Different Qwen3.5 failures: config attribute error vs streaming chat/completions runtime failure." + "reason": "Same Qwen3.5 area, but different failure modes: config attribute error vs streaming chat/completions failure." }, { "left": "issue:43716", "right": "issue:45237", "accept": false, - "reason": "Both mention dtype/GPU issues, but one is an image preprocessor/model mismatch and the other is an AMD GPU support failure." + "reason": "Different models and different bugs; one is a dtype mismatch in Mistral-3 preprocessing, the other is an AMD GPU runtime issue for GPT-OSS." 
}, { "left": "issue:43828", "right": "issue:45237", "accept": false, - "reason": "Autocast dtype mismatch is a different concrete bug from the AMD GPU execution failure." + "reason": "Both are runtime failures, but the concrete code paths and models differ: autocast dtype mismatch vs AMD GPU incompatibility." }, { "left": "issue:45237", "right": "issue:45507", "accept": false, - "reason": "GPT-OSS AMD GPU failure and GraniteMoEHybrid invalid-method call are different code paths." + "reason": "Different model families and different failure types; not the same underlying change or bug." }, { "left": "issue:44292", "right": "issue:45237", "accept": false, - "reason": "Qwen-3 NVFP4 runtime issue is not the same as the GPT-OSS AMD GPU failure." + "reason": "Different model-specific failures; NVFP4 runtime error is unrelated to AMD GPU support." }, { "left": "issue:30277", "right": "issue:35545", "accept": false, - "reason": "DeepSpeed collective mismatch and ModernBERT ONNX export are unrelated failures." + "reason": "DeepSpeed collective mismatch and ONNX export are unrelated code paths." }, { "left": "issue:35545", "right": "issue:42915", "accept": false, - "reason": "ONNX export error and Qwen3Moe FP8 config failure are different bugs." + "reason": "Both mention model export/loading issues, but the concrete problems are unrelated." }, { "left": "issue:35545", "right": "issue:41720", "accept": false, - "reason": "ONNX export failure and Qwen3 auto device-map CUDA assert are unrelated." + "reason": "ONNX export error versus auto device mapping cuda assert; different bugs." }, { "left": "issue:44509", "right": "issue:44512", "accept": false, - "reason": "Both are docs regressions in v5, but they reference different removed features (pipeline tasks vs run command)." + "reason": "Both are v5 docs cleanups, but they target different removed features and would not merge into one fix." 
}, { - "left": "issue:43638", - "right": "issue:44805", + "left": "issue:35545", + "right": "issue:36010", "accept": false, - "reason": "Both are index-related errors, but one is ZeRO-3/BERT loading and the other is a mask/tensor shape mismatch." + "reason": "Model export error and GenerationMixin import failure are unrelated." }, { - "left": "issue:35545", - "right": "issue:36010", + "left": "issue:43638", + "right": "issue:44805", "accept": false, - "reason": "Model export to ONNX is unrelated to the GenerationMixin import error." + "reason": "Both are shape/index errors, but the models and failure contexts differ." }, { "left": "issue:43596", "right": "issue:44805", "accept": false, - "reason": "Different failures: ZeRO-3/BERT out-of-bounds vs mask-shape mismatch." + "reason": "Different symptom details and different model/setup; not the same concrete bug." }, { "left": "issue:44805", "right": "issue:45161", "accept": false, - "reason": "Mask/tensor shape mismatch is not the same as GPT-OSS tensor-parallel failure." + "reason": "Mask-shape IndexError is unrelated to GPT-OSS tensor-parallel behavior." + }, + { + "left": "issue:44805", + "right": "issue:44918", + "accept": false, + "reason": "Different model families and failure points; not a duplicate." }, { "left": "issue:44661", "right": "issue:44805", "accept": false, - "reason": "Tokenizer-mapping tooling bug and runtime mask-shape error are different issues." + "reason": "Tokenizer-mapping guardrail bug is unrelated to the mask/tensor shape error." }, { "left": "issue:30277", "right": "issue:43638", "accept": false, - "reason": "DeepSpeed collective mismatch is not the same bug as the BertModel ZeRO-3 index error." + "reason": "DeepSpeed collective mismatch and Bert zero-3 index error are different bugs." }, { "left": "issue:34634", "right": "issue:35545", "accept": false, - "reason": "BarkProcessor voice preset handling is unrelated to ModernBERT export." 
+ "reason": "Processor preset handling and ONNX export are unrelated." }, { "left": "issue:43596", "right": "issue:43638", "accept": true, - "reason": "Same concrete ZeRO-3/BERT index-0-out-of-bounds failure; 43638 is just a narrower variant of 43596." + "reason": "Same DeepSpeed ZeRO-3/Bert index-0-out-of-bounds failure; these look like the same underlying bug." }, { "left": "issue:43975", "right": "issue:44457", "accept": false, - "reason": "Detokenization bug and LoRA merge/save mismatch are different model behaviors." + "reason": "Different problems: detokenization regression versus LoRA merge/save mismatch." }, { "left": "issue:43039", "right": "issue:45161", "accept": false, - "reason": "Liger Kernel cross-entropy invocation and GPT-OSS tensor-parallel issues are unrelated." + "reason": "Liger Kernel cross-entropy dispatch and GPT-OSS tensor parallel failure are unrelated." }, { "left": "issue:44292", "right": "issue:44457", "accept": false, - "reason": "Qwen-3 NVFP4 runtime failure is unrelated to LoRA merge/save inconsistency." + "reason": "Different models and different failure modes." }, { "left": "issue:34928", "right": "issue:44805", "accept": false, - "reason": "Activation-checkpointing/FSDP recomputation mismatch is different from the mask-shape index error." + "reason": "Activation-checkpointing/FSDP tensor-size recomputation is unrelated to the mask shape IndexError." }, { "left": "issue:24643", "right": "issue:30277", "accept": false, - "reason": "Both are DeepSpeed-related, but one is a weight dimensionality error and the other is a collective mismatch." + "reason": "Both involve DeepSpeed, but the concrete failures differ completely." }, { "left": "issue:43975", "right": "issue:45237", "accept": false, - "reason": "Detokenization bug and AMD GPU runtime failure are unrelated." + "reason": "Unrelated model/runtime issues." 
}, { "left": "issue:43638", "right": "issue:45161", "accept": false, - "reason": "ZeRO-3/BERT index error and GPT-OSS tensor-parallel failure are different bugs." + "reason": "Bert ZeRO-3 index error and GPT-OSS TP failure are different bugs." }, { "left": "issue:34928", "right": "issue:36331", "accept": false, - "reason": "Activation-checkpointing mismatch and Trainer API signature change are unrelated." + "reason": "Training/checkpointing tensor mismatch and custom trainer signature mismatch are unrelated." + }, + { + "left": "issue:43596", + "right": "issue:45161", + "accept": false, + "reason": "Different models and different failure classes." }, { "left": "issue:43716", "right": "issue:45507", "accept": false, - "reason": "Image-preprocessor dtype mismatch and GraniteMoEHybrid invalid-method call are different failures." + "reason": "Image-preprocessor dtype mismatch and invalid method call in GraniteMoEHybrid are unrelated." }, { "left": "issue:39290", "right": "issue:45081", "accept": false, - "reason": "Gemma3 config attribute error and mistral regex patch crash are unrelated." + "reason": "Gemma3 sliding_window_pattern missing and Mistral tokenizer regex patch crash are different model bugs." }, { "left": "issue:43828", "right": "issue:45161", "accept": false, - "reason": "Autocast dtype mismatch is not the same as the GPT-OSS tensor-parallel issue." + "reason": "Autocast dtype mismatch is unrelated to GPT-OSS MoE tensor parallel behavior." }, { "left": "issue:43425", "right": "issue:44292", "accept": false, - "reason": "Torch-version incompatibility is unrelated to the Qwen-3 NVFP4 runtime bug." + "reason": "Torch version compatibility and Qwen NVFP4 runtime failure are unrelated." }, { "left": "issue:34928", "right": "issue:41720", "accept": false, - "reason": "FSDP/activation-checkpointing mismatch and auto device-map CUDA assert are different bugs." 
+ "reason": "FSDP/activation-checkpointing recomputation issue is unrelated to Qwen auto device mapping." }, { "left": "issue:44918", "right": "issue:45161", "accept": false, - "reason": "SFT-trainer embedding unpacking and GPT-OSS tensor-parallel issues are unrelated." + "reason": "TRL SFT embedding unpacking failure and GPT-OSS TP failure are different problems." + }, + { + "left": "issue:41762", + "right": "issue:43638", + "accept": false, + "reason": "Both are ZeRO-3 loading errors, but different models and not clearly the same bug." }, { "left": "issue:43827", "right": "issue:44512", "accept": false, - "reason": "Both are docs regressions, but they concern different removed CLI/pipeline surfaces." + "reason": "Both are docs issues, but they refer to different removed commands/features." }, { "left": "issue:43716", "right": "issue:44560", "accept": false, - "reason": "Dtype mismatch and StopIteration in video loading are different problems." + "reason": "Different models and different error types." }, { "left": "issue:43716", "right": "issue:44918", "accept": false, - "reason": "Image-preprocessor dtype mismatch is unrelated to SFT trainer embedding unpacking." + "reason": "Different model families and unrelated failure paths." }, { "left": "issue:43039", "right": "issue:43828", "accept": false, - "reason": "Liger Kernel cross-entropy and autocast dtype mismatch are different concrete code-path failures." + "reason": "Different runtime problems; no shared concrete bug." }, { "left": "issue:30277", "right": "issue:36331", "accept": false, - "reason": "DeepSpeed collective mismatch and custom Trainer loss-signature change are unrelated." + "reason": "DeepSpeed collective mismatch and trainer API signature mismatch are unrelated." }, { "left": "issue:43039", "right": "issue:44805", "accept": false, - "reason": "Liger Kernel cross-entropy and mask-shape index error are different bugs." 
+ "reason": "Liger Kernel cross-entropy call and mask-shape IndexError are unrelated." }, { "left": "issue:34928", "right": "issue:36010", "accept": false, - "reason": "Activation-checkpointing mismatch and GenerationMixin import error are unrelated." + "reason": "Training shape-recompute bug and GenerationMixin import error are unrelated." }, { "left": "issue:41762", "right": "issue:43596", "accept": false, - "reason": "Both are ZeRO-3 loading failures, but the affected models and error contexts differ enough to treat them as separate bugs." - }, - { - "left": "issue:43827", - "right": "issue:44512", - "accept": false, - "reason": "Duplicate of the docs-regression pair above; still different removed features, not one bug." + "reason": "Similar ZeRO-3 symptoms, but different models and insufficient evidence of the same underlying bug." }, { "left": "issue:43901", "right": "issue:44512", "accept": false, - "reason": "return_all_scores docs mismatch is a different docs issue than the removed run command reference." + "reason": "Different docs updates: text classification docs versus run command removal." }, { "left": "issue:39290", "right": "issue:41720", "accept": false, - "reason": "Gemma3 sliding-window attribute error and Qwen3 auto device-mapping CUDA assert are unrelated." + "reason": "Gemma3/vLLM config attribute error and Qwen device-mapping cuda assert are different issues." + }, + { + "left": "issue:43039", + "right": "issue:44560", + "accept": false, + "reason": "Liger Kernel cross-entropy behavior and Qwen3-vl video StopIteration are unrelated." }, { "left": "issue:43866", "right": "issue:44863", "accept": false, - "reason": "Both mention loading problems, but one is a corrupted checkpoint and the other is a model-implementation loading failure." + "reason": "Corrupted checkpoint and NemotronH loading implementation bug are not the same failure." 
}, { "left": "issue:43854", "right": "issue:43866", "accept": false, - "reason": "Unit-test load failure and corrupted checkpoint are different issues." + "reason": "Unit-test model-loading failure and checkpoint corruption are different problems." }, { "left": "issue:43425", "right": "issue:45070", "accept": false, - "reason": "Torch compatibility and pydantic PretrainedConfig breakage are unrelated." + "reason": "Torch version compatibility and pydantic PretrainedConfig field regression are unrelated." }, { "left": "issue:39290", "right": "issue:43531", "accept": false, - "reason": "Both involve sliding_window, but the models/configs and reported failures are distinct." + "reason": "Both mention sliding_window-like config issues, but they affect different model families and code paths." }, { "left": "issue:41093", "right": "issue:43638", "accept": false, - "reason": "Mask-length mismatch and ZeRO-3 Bert index error are different failure modes." + "reason": "Mask/tensor shape mismatch and Bert ZeRO-3 index error are different failures." }, { - "left": "issue:45362", - "right": "issue:45464", + "left": "issue:43404", + "right": "issue:43866", "accept": false, - "reason": "Both involve Qwen3.5, but one is a chat crash and the other is a streaming inference API failure." + "reason": "Tied-weight bug in Mistral3 and corrupted Ovis2 checkpoint are unrelated." }, { - "left": "issue:43404", - "right": "issue:43866", + "left": "issue:45362", + "right": "issue:45464", "accept": false, - "reason": "Mistral3 lm_head tying bug is unrelated to the Ovis2 checkpoint corruption report." + "reason": "Both are Qwen3.5 chat-related, but one is a chat-template crash and the other is a streaming API failure." }, { "left": "issue:43792", "right": "issue:45070", "accept": false, - "reason": "Whisper runtime failure and pydantic config-field breakage are unrelated." + "reason": "Whisper runtime failure and pydantic model regression are unrelated." 
}, { "left": "issue:43541", "right": "issue:43828", "accept": false, - "reason": "Grouped-mm torch dynamo tracing failure is unrelated to autocast dtype mismatch." + "reason": "Different MoE dtype/tracing failures with different models and code paths." }, { "left": "issue:36010", "right": "issue:39290", "accept": false, - "reason": "GenerationMixin import error and Gemma3 sliding-window attribute error are unrelated." + "reason": "GenerationMixin import failure and Gemma3 sliding_window config regression are unrelated." }, { "left": "issue:44841", "right": "issue:45084", "accept": false, - "reason": "Voxtral processor failure and non-template compilation error are different bugs." - }, - { - "left": "issue:43866", - "right": "issue:45161", - "accept": false, - "reason": "Corrupted Ovis2 checkpoint and GPT-OSS tensor-parallel failure are unrelated." - }, - { - "left": "issue:43854", - "right": "issue:43866", - "accept": false, - "reason": "Test-time model loading failure is not the same as a corrupted checkpoint." - }, - { - "left": "issue:43596", - "right": "issue:45161", - "accept": false, - "reason": "ZeRO-3 Bert loading error and GPT-OSS tensor-parallel issue are unrelated." - }, - { - "left": "issue:43828", - "right": "issue:44292", - "accept": false, - "reason": "Autocast dtype mismatch and Qwen-3 NVFP4 failure are different bugs." - }, - { - "left": "issue:43716", - "right": "issue:44292", - "accept": false, - "reason": "Image-preprocessor dtype mismatch is unrelated to the Qwen-3 NVFP4 runtime problem." - }, - { - "left": "issue:44292", - "right": "issue:44918", - "accept": false, - "reason": "Qwen-3 NVFP4 runtime error and SFT trainer embedding unpacking are unrelated." - }, - { - "left": "issue:43039", - "right": "issue:44918", - "accept": false, - "reason": "Liger Kernel cross-entropy issue and SFT trainer embedding unpacking are different code paths." 
- }, - { - "left": "issue:41762", - "right": "issue:43638", - "accept": false, - "reason": "Both are ZeRO-3 index errors, but they affect different model families and are not clearly the same bug." - }, - { - "left": "issue:43901", - "right": "issue:44512", - "accept": false, - "reason": "Docs mention of return_all_scores is a different documentation regression than the removed run command." - }, - { - "left": "issue:39290", - "right": "issue:41720", - "accept": false, - "reason": "Same as above: distinct model-specific runtime failures." - }, - { - "left": "issue:43828", - "right": "issue:45161", - "accept": false, - "reason": "Same as above: dtype/autocast bug versus tensor-parallel bug." - }, - { - "left": "issue:43039", - "right": "issue:44560", - "accept": false, - "reason": "Liger Kernel cross-entropy and video StopIteration are unrelated." - }, - { - "left": "issue:43866", - "right": "issue:44863", - "accept": false, - "reason": "Different models and different failure modes; not a duplicate." - }, - { - "left": "issue:43425", - "right": "issue:44292", - "accept": false, - "reason": "Same as above: version incompatibility versus model runtime failure." - }, - { - "left": "issue:44661", - "right": "issue:44805", - "accept": false, - "reason": "Tooling failure in add-new-model-like is unrelated to the runtime mask-shape error." + "reason": "Processor failure for Voxtral and template compilation error are unrelated." }, { "left": "issue:41093", "right": "issue:41762", "accept": false, - "reason": "Mask-shape mismatch and Gemma3 ZeRO-3 loading error are different issues." + "reason": "Both are IndexErrors, but the model/setup and actual failure context differ." }, { "left": "issue:43296", "right": "issue:43366", "accept": false, - "reason": "PaddleOCR-VL loading in vLLM and GGUF gpt-oss support are distinct requests/issues." 
- }, - { - "left": "issue:44322", - "right": "issue:45464", - "accept": false, - "reason": "Duplicate of the earlier Qwen3.5 pair: different runtime surfaces, not the same bug." + "reason": "PaddleOCR-VL vLLM loading failure and GGUF gpt-oss support request are different issues." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is grounded in the packet and stays conservative on the soft edges. The only accepted duplicate pair, 43596/43638, is well supported by the titles as the same ZeRO-3/BERT index-out-of-bounds failure, and the rest of the rejections do not overclaim equivalence from superficial similarity." + "feedback": "Grounded and conservative overall. The duplicate call on issue:43596 / issue:43638 is well supported by the very similar ZeRO-3/Bert index-out-of-bounds reports, and the remaining soft-edge rejections stay cautious without overstating equivalence." }, "evaluator_used": true, "retried": false, @@ -681,7 +603,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:36331", "issue:39186", @@ -741,47 +663,47 @@ "issue:43366|issue:45464", "issue:43531|issue:45070", "issue:44661|issue:45464", - "issue:43541|issue:44589", "issue:39290|issue:43742", "issue:43054|issue:43994", + "issue:43541|issue:44589", "issue:41093|issue:43596", "issue:42915|issue:43541", - "issue:39186|issue:44918", "issue:40990|issue:44960", + "issue:39186|issue:44918", "issue:43404|issue:45005", "issue:43645|issue:45399", "issue:44530|issue:45005", "issue:44863|issue:45005", "issue:43873|issue:45005", - "issue:42915|issue:45399", "issue:44360|issue:44512", + "issue:42915|issue:45399", "issue:44918|issue:45507", "issue:43975|issue:44292", "issue:44492|issue:44512", "issue:43950|issue:44534", - "issue:44292|issue:45507", "issue:43644|issue:43950", + "issue:44292|issue:45507", "issue:43828|issue:44928", "issue:39692|issue:43054", 
"issue:43866|issue:44403", "issue:41553|issue:43883", - "issue:44560|issue:45507", - "issue:36331|issue:41762" + "issue:36331|issue:41762", + "issue:44560|issue:45507" ], - "prepared_review_unit_hash": "c418c0343b49b02c7d73268d7e92222c81977d4a33fe81667db69af1d7de56a5", + "prepared_review_unit_hash": "48e9e32137015ae86a38cf6980c8aead85328ce7c25a78c9a89a6869484eb6c8", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 34, - "serialized_chars": 37561, + "serialized_chars": 37563, "estimated_input_tokens": 9391, "estimated_eval_tokens": 19038 }, @@ -790,222 +712,222 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "One clear duplicate cluster emerged around the non-persistent-buffer regression in Transformers 5.x; the other soft pairs are only loosely related by subsystem/model family and should stay separate.", - "confidence": 0.83, - "canonical_issue_reason": "issue:43644 is the earlier, broader report of the same non-persistent-buffer corruption regression in v5; issue:43950 reads like a later, more specific description of that bug.", + "summary": "This cluster is mostly unrelated issue candidates from different regressions; only the non-persistent-buffer corruption reports are clear duplicates. 
I selected that regression as the canonical issue set.", + "confidence": 0.92, + "canonical_issue_reason": "Issue 43950 is the most explicit statement of the bug: `from_pretrained()` silently corrupts non-persistent buffers in transformers 5.x, which matches the closely related reports in 43644 and 44534.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43644 is the best canonical issue here because it is the clearest broad description of the regression and already appears to be the anchor for a follow-up report.", + "best_issue_reason": "Issue 43950 is the best representative because it names the exact failing API and the corruption mechanism, making triage and fixing easier than the shorter duplicate titles.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43572", "right": "issue:45070", "accept": false, - "reason": "Both are v5 config-related regressions, but one is a StableLmConfig field issue and the other is a generic PretrainedConfig/pydantic change; not the same bug." + "reason": "Both involve v5 config-related breakage, but one is a missing `pad_token_idx` field and the other is a pydantic/PretrainedConfig field regression; not the same concrete bug." }, { "left": "issue:44387", "right": "issue:45005", "accept": false, - "reason": "Int4 quantization OOM and tied-weight translation-model bugs are different failure modes with different code paths." + "reason": "Different failures: int4 quantization memory growth vs tied-weights handling for translation models." }, { "left": "issue:43541", "right": "issue:45161", "accept": false, - "reason": "Same broad Qwen3-MoE area, but one is a torch dynamo/grouped_mm tracing error and the other is a tensor-parallel support problem." + "reason": "Different code paths and symptoms: torch dynamo/grouped_mm tracing failure vs tensor-parallel-only failure on GPT-OSS MoE." 
}, { "left": "issue:43828", "right": "issue:45464", "accept": false, - "reason": "Autocast dtype mismatch and streaming chat/completions failure are unrelated symptoms in different execution paths." + "reason": "Different model families and failure modes: autocast dtype mismatch vs streaming chat/completions API failure." }, { "left": "issue:44291", "right": "issue:45070", "accept": false, - "reason": "Unexpected init-empty-weights constructor argument vs. pydantic field breakage are distinct regressions." + "reason": "`init_empty_weights` unexpected-argument bug is unrelated to the pydantic `PretrainedConfig` field issue." }, { "left": "issue:43366", "right": "issue:45464", "accept": false, - "reason": "GGUF/gpt-oss architecture support and Qwen3.5 streaming inference are unrelated issues." + "reason": "GGUF architecture support request is not the same as a Qwen3.5 streaming inference bug." }, { "left": "issue:43531", "right": "issue:45070", "accept": false, - "reason": "Qwen3-MoE sliding_window handling is not the same concrete bug as the StableLmConfig/pydantic regression." + "reason": "Qwen3-MoE sliding-window behavior is a model-specific runtime issue, not the config-field regression in 45070." }, { "left": "issue:44661", "right": "issue:45464", "accept": false, - "reason": "Tokenizer-mapping failure while adding a model is unrelated to a Qwen3.5 streaming inference error." - }, - { - "left": "issue:43541", - "right": "issue:44589", - "accept": false, - "reason": "Grouped_mm tracing failure and missing Float8 storage lookup are different bugs." + "reason": "Tokenizer mapping/add-new-model-like failure is unrelated to the Qwen3.5 streaming inference error." }, { "left": "issue:39290", "right": "issue:43742", "accept": false, - "reason": "Both are model-loading issues, but Gemma3 sliding-window/vLLM and MobileLLM key errors are not the same defect." + "reason": "Different models and failures: Gemma3/vLLM missing attribute vs MobileLLM key error." 
}, { "left": "issue:43054", "right": "issue:43994", "accept": false, - "reason": "Both concern SigLIP2 output quality, but one is a text-embedding regression and the other is a broader AutoModel/pipeline result complaint; not enough to treat as the same concrete bug." + "reason": "Both mention SigLIP2, but one is about worse text embeddings while the other is a broader nonsensical AutoModel/pipeline result; the concrete bug is not established as the same." + }, + { + "left": "issue:43541", + "right": "issue:44589", + "accept": false, + "reason": "Grouped_mm tracing failure and missing Float8 storage are distinct runtime errors." }, { "left": "issue:41093", "right": "issue:43596", "accept": false, - "reason": "Both are IndexErrors, but the shapes/stacks differ and the reported loading paths are different." + "reason": "Both are IndexErrors, but they happen in different loading paths and involve different shapes/components." }, { "left": "issue:42915", "right": "issue:43541", "accept": false, - "reason": "Both touch MoE/advanced execution, but FineGrainedFP8Config failure and dynamo grouped_mm failure are separate issues." + "reason": "FP8 config failure on Qwen3-MoE is unrelated to Mixtral dynamo tracing/grouped_mm." }, { - "left": "issue:39186", - "right": "issue:44918", + "left": "issue:40990", + "right": "issue:44960", "accept": false, - "reason": "FSDP 2-D weight runtime error and TRL SFT unpacking failure are unrelated training bugs." + "reason": "No evidence they share a bug; perplexity regression on GPT-OSS and a generic GLM5 issue are different." }, { - "left": "issue:40990", - "right": "issue:44960", + "left": "issue:39186", + "right": "issue:44918", "accept": false, - "reason": "Different models and different symptoms; no indication of the same root cause." + "reason": "FSDP `'weight' must be 2-D` is unrelated to TRL embedding unpacking." 
}, { "left": "issue:43404", "right": "issue:45005", "accept": false, - "reason": "Both involve tied weights, but one is a model-specific lm_head tying bug and the other is a broader translation-model v5 issue." + "reason": "Both concern tied weights, but the affected models and failure modes differ; this is too broad to treat as the same concrete bug." }, { "left": "issue:43645", "right": "issue:45399", "accept": false, - "reason": "Custom-model notebook initialization and flash-attn fallback gating are unrelated." + "reason": "Custom model/Jupyter notebook initialization and flash-attn2 fallback logic are separate issues." }, { "left": "issue:44530", "right": "issue:45005", "accept": false, - "reason": "PagedAttentionCache linear_attention crash is unrelated to tied-weight translation-model behavior." + "reason": "PagedAttentionCache linear_attention crash is unrelated to tied-weight regressions." }, { "left": "issue:44863", "right": "issue:45005", "accept": false, - "reason": "NemotronH checkpoint loading and translation-model tied weights are different problems." + "reason": "NemotronH checkpoint loading failure is not the same as translation-model tied-weight issues." }, { "left": "issue:43873", "right": "issue:45005", "accept": false, - "reason": "Quantization/offloading behavior is not the same issue as tied weights." + "reason": "Quantization/offloading behavior and tied-weight handling are different problems." }, { - "left": "issue:42915", - "right": "issue:45399", + "left": "issue:44360", + "right": "issue:44512", "accept": false, - "reason": "FP8 Qwen3MoE failure and flash-attn fallback checks are unrelated." + "reason": "A DSA indexer design discussion is unrelated to stale docs mentioning a removed command." }, { - "left": "issue:44360", - "right": "issue:44512", + "left": "issue:42915", + "right": "issue:45399", "accept": false, - "reason": "DSA indexer/relu discussion and a docs-only run-command typo are not the same bug." 
+ "reason": "FP8 config support and flash-attn2 fallback checks are separate subsystems and failures." }, { "left": "issue:44918", "right": "issue:45507", "accept": false, - "reason": "TRL embedding unpacking failure and GraniteMoEHybrid invalid method calls are unrelated." + "reason": "TRL embedding unpacking and GraniteMoEHybrid invalid method calls do not look like the same bug." }, { "left": "issue:43975", "right": "issue:44292", "accept": false, - "reason": "Detokenization regression and NVFP4 model-run failure are separate issues." + "reason": "Incorrect detokenization and Qwen-3-8B-NVFP4 runtime error are different issues." }, { "left": "issue:44492", "right": "issue:44512", "accept": false, - "reason": "Cache-strategy typo and removed-run-command docs are both documentation-level but not the same change." + "reason": "A typo in cache-strategy docs is unrelated to removed CLI-command documentation drift." }, { "left": "issue:43950", "right": "issue:44534", "accept": true, - "reason": "These describe the same non-persistent-buffer corruption regression in Transformers v5, just with slightly different wording." - }, - { - "left": "issue:44292", - "right": "issue:45507", - "accept": false, - "reason": "Qwen-3-8B-NVFP4 runtime failure and GraniteMoEHybrid invalid method are unrelated." + "reason": "Both report the same non-persistent-buffer regression in transformers v5, with the latter phrased as the same junk-filled-buffer symptom." }, { "left": "issue:43644", "right": "issue:43950", "accept": true, - "reason": "Same regression: v5 mishandles non-persistent buffers during load/init; the later issue is a more specific restatement." + "reason": "These describe the same v5 regression where non-persistent buffers are corrupted/fill with junk during `from_pretrained()`." 
+ }, + { + "left": "issue:44292", + "right": "issue:45507", + "accept": false, + "reason": "Qwen-3-8B-NVFP4 execution failure and GraniteMoEHybrid invalid-method calls are different model/runtime issues." }, { "left": "issue:43828", "right": "issue:44928", "accept": false, - "reason": "Both are model/training failures, but autocast dtype mismatch and 3D position_ids-induced gradient explosion are different bugs." + "reason": "Autocast dtype mismatch and RLHF gradient explosion from 3D `position_ids` are different failure mechanisms." }, { "left": "issue:39692", "right": "issue:43054", "accept": false, - "reason": "SigLIP2 docs example problems and SigLIP2 text-embedding quality regression overlap in model family, but not in the same concrete defect." + "reason": "SigLIP2 docs-example errors and SigLIP2 text-embedding quality complaints are related by model family but not clearly the same concrete bug." }, { "left": "issue:43866", "right": "issue:44403", "accept": false, - "reason": "Checkpoint corruption and generic loading noise are not the same underlying issue." + "reason": "A corrupted checkpoint and an unrelated loading-noise issue are not the same underlying defect." }, { "left": "issue:41553", "right": "issue:43883", "accept": false, - "reason": "Bad AutoTokenizer error messaging and missing all_tied_weights_keys are unrelated." + "reason": "Bad AutoTokenizer error messaging and missing `all_tied_weights_keys` are unrelated." }, { - "left": "issue:44560", - "right": "issue:45507", + "left": "issue:36331", + "right": "issue:41762", "accept": false, - "reason": "Qwen3-vl embedding video StopIteration and GraniteMoEHybrid invalid method calls are unrelated." + "reason": "`compute_loss(num_items_in_batch)` compatibility and DeepSpeed ZeRO-3 loading `IndexError` are different bugs." 
}, { - "left": "issue:36331", - "right": "issue:41762", + "left": "issue:44560", + "right": "issue:45507", "accept": false, - "reason": "Trainer keyword-argument breakage and Gemma3 ZeRO-3 loading IndexError are different code paths." + "reason": "Qwen3-vl video StopIteration and GraniteMoEHybrid invalid method calls are separate failures." } ] }, "evaluator_result": { "accept": true, - "feedback": "The duplicate cluster selection is well grounded: issue:43644 and issue:43950/44534 all describe the same non-persistent-buffer corruption regression in Transformers v5.x, with 43644 plausibly serving as the earlier, broader canonical issue. The other pair decisions are conservative and consistent with the packet titles; I didn\u2019t see unsupported overreach." + "feedback": "Grounded in the packet. The canonical issue choice is supported by the titles/body themes, and the only accepted soft-edge pairs are the clear non-persistent-buffer regression duplicates (43950/43644/44534). The remaining verdicts stay conservative and do not overclaim sameness." 
}, "evaluator_used": true, "retried": false, @@ -1024,7 +946,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:36331", "issue:38175", @@ -1084,8 +1006,8 @@ "issue:43881|issue:43883", "issue:44403|issue:44451", "issue:42947|issue:43856", - "issue:43643|issue:45020", "issue:43975|issue:45520", + "issue:43643|issue:45020", "issue:44403|issue:44488", "issue:43782|issue:44960", "issue:42491|issue:44368", @@ -1093,40 +1015,40 @@ "issue:43975|issue:44451", "issue:45020|issue:45356", "issue:44661|issue:45020", - "issue:43824|issue:43872", "issue:43054|issue:43278", + "issue:43824|issue:43872", "issue:44991|issue:45399", - "issue:43792|issue:44220", "issue:43827|issue:44509", + "issue:43792|issue:44220", "issue:45081|issue:45399", "issue:43404|issue:43950", - "issue:42915|issue:45020", "issue:43257|issue:43866", + "issue:42915|issue:45020", "issue:43742|issue:45399", "issue:43742|issue:45020", "issue:41553|issue:44991", "issue:43940|issue:45313", "issue:43872|issue:44291", - "issue:42915|issue:43872", "issue:43854|issue:44863", - "issue:41553|issue:44488", "issue:43257|issue:45305", + "issue:42915|issue:43872", + "issue:41553|issue:44488", "issue:36331|issue:41093" ], - "prepared_review_unit_hash": "99a8b66b2a033b53fd9c4c80f4a5b04e0e299d793c251a92af4bec4ebc32a1d1", + "prepared_review_unit_hash": "cd54162ac02442011d7480b3d90ca64caf687ea69765ee8e33f62b638851dc20", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 36, - "serialized_chars": 38578, + "serialized_chars": 38579, 
"estimated_input_tokens": 9645, "estimated_eval_tokens": 19546 }, @@ -1135,240 +1057,222 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly heterogeneous issue reports with a few tight duplicate subpairs. The strongest true duplicates are the SigLIP2 discrepancy reports, the v5 docs/pipeline-removal reports, and the `_is_hf_initialized` / `init_empty_weights` loading regression reports; most other pairs are only broadly related and should stay separate.", - "confidence": 0.77, - "canonical_issue_reason": "issue:43827 is the cleanest canonical for its subcluster: it matches 44509 on the same removed v5 pipeline docs problem with essentially the same fix scope.", + "summary": "The set is mostly heterogeneous: many pairs share a subsystem or model family but not the same concrete bug. The clearest duplicate clusters are the stale v5 docs references, the `_is_hf_initialized` loading regression, and the SigLIP2 implementation discrepancy pair.", + "confidence": 0.74, + "canonical_issue_reason": "No single canonical issue spans the whole list; the items split into several unrelated bug reports. Among the true duplicate-like pairs, issue 43827 is the cleanest canonical anchor for the docs/pipeline-removal cluster.", "canonical_pr_reason": null, - "best_issue_reason": "There is no strong single global canonical across the full set because the items span many unrelated bugs; 43827 is only the best representative of one small exact-duplicate subcluster.", + "best_issue_reason": "Issue 43827 is the strongest representative issue in this set because 44509 is essentially the same v5 docs problem with outdated pipeline/task references.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43872", "right": "issue:44589", "accept": false, - "reason": "Both are load-time incompatibilities, but one is bitsandbytes `_is_hf_initialized` handling and the other is a Float8 storage lookup failure; different root causes." 
+ "reason": "Both are loading-time type errors, but one is an `_is_hf_initialized`/bitsandbytes incompatibility and the other is a float8 storage lookup failure; different bugs." }, { "left": "issue:41553", "right": "issue:43881", "accept": false, - "reason": "Different models and different failure modes: Voxtral tokenizer messaging vs GLM-4v loading failure." + "reason": "Different models and different failure modes; one is an AutoTokenizer error message issue, the other is a model loading failure." }, { "left": "issue:38175", "right": "issue:43054", "accept": false, - "reason": "One is zero probabilities for SigLIP2, the other is degraded SigLIP2 text embeddings; related model family but not the same concrete bug." + "reason": "Both mention SigLIP2, but one is zero probabilities and the other is lower embedding quality; not enough evidence they are the same concrete defect." }, { "left": "issue:42947", "right": "issue:44387", "accept": false, - "reason": "LoRA gradient checkpointing behavior and int4 CUDA reserved-memory OOM are different training regressions." + "reason": "Both are memory-related, but one is LoRA/gradient-checkpointing ineffectiveness and the other is int4 CUDA reserved-memory growth causing OOM." }, { "left": "issue:43054", "right": "issue:43493", "accept": true, - "reason": "Both point to the same SigLIP2 implementation discrepancy versus the reference/original behavior, with the same user-visible embedding quality problem." + "reason": "Both point to the same SigLIP2 HF-vs-JAX implementation discrepancy, with degraded text embeddings as the symptom." }, { "left": "issue:43881", "right": "issue:43883", "accept": false, - "reason": "A model load failure and a missing `all_tied_weights_keys` attribute are different issues." + "reason": "Different models and different issues: glm-4v loading vs a missing `all_tied_weights_keys` attribute in Molmo." 
}, { "left": "issue:44403", "right": "issue:44451", "accept": false, - "reason": "Generic loading noise and a specific ScandiBERT tokenizer/model load failure are not the same bug." + "reason": "One is about noisy loading warnings, the other is an actual inability to load a specific model." }, { "left": "issue:42947", "right": "issue:43856", "accept": false, - "reason": "PEFT/gradient-checkpointing behavior is distinct from Qwen3 MoE memory usage." + "reason": "Both concern memory, but one is checkpointing/LoRA behavior and the other is Qwen3 MoE training memory usage." }, { - "left": "issue:43643", - "right": "issue:45020", + "left": "issue:43975", + "right": "issue:45520", "accept": false, - "reason": "Both involve remote code, but one is missing fields from `AutoConfig`, while the other is a broader regression across remote-code models." + "reason": "Unrelated problems: tokenizer detokenization regression vs Python 3.13 flash_attn import bookkeeping." }, { - "left": "issue:43975", - "right": "issue:45520", + "left": "issue:43643", + "right": "issue:45020", "accept": false, - "reason": "Tokenizer detokenization regression and a Python 3.13 flash-attn import key error are unrelated." + "reason": "Both involve `remote_code`, but one is missing fields from `AutoConfig` and the other is broad breakage in recent versions." }, { "left": "issue:44403", "right": "issue:44488", "accept": false, - "reason": "Two different model-loading complaints; one is generic noise, the other is a specific model load failure." + "reason": "Both are loading-related, but one is a warning/noise report and the other is a specific model load failure." }, { "left": "issue:43782", "right": "issue:44960", "accept": false, - "reason": "Qwen3VL `weight_only=True` loading and GLM5 are different model-specific failures." + "reason": "Different model families and errors; no clear shared code-path bug." 
}, { "left": "issue:42491", "right": "issue:44368", "accept": false, - "reason": "LoRA compatibility for qwen3_moe across versions is not the same as the `tie_word_embeddings` warning." + "reason": "Both mention Qwen LoRA/tie-word-embeddings, but they describe different behaviors and code paths." }, { "left": "issue:41762", "right": "issue:42915", "accept": false, - "reason": "Gemma3 ZeRO-3 weight loading and Qwen3 MoE fine-grained FP8 failure are separate code paths." + "reason": "Different model-specific DeepSpeed failures; one is a Gemma3 ZeRO-3 load error, the other is Qwen3 MoE plus FP8 config." }, { "left": "issue:43975", "right": "issue:44451", "accept": false, - "reason": "Different bugs: detokenization mismatch vs inability to load a specific checkpoint." + "reason": "Different issues entirely: tokenizer detokenization regression vs model loading failure." }, { "left": "issue:45020", "right": "issue:45356", "accept": false, - "reason": "Remote-code regressions are too broad here; Kimi-K2.5 tokenizer codec/warning behavior is a distinct tokenizer issue." + "reason": "Both involve recent-version regressions, but one is broad `remote_code` breakage and the other is a Kimi-K2.5 tokenizer codec/warning regression." }, { "left": "issue:44661", "right": "issue:45020", "accept": false, - "reason": "`add-new-model-like` tokenizer mapping failure is not the same as general remote-code breakage." - }, - { - "left": "issue:43824", - "right": "issue:43872", - "accept": false, - "reason": "Importing Qwen2.5-VL and the bitsandbytes `_is_hf_initialized` regression are unrelated." + "reason": "`add-new-model-like`/tokenizer mapping failure is a different bug from the broader remote_code regressions." }, { "left": "issue:43054", "right": "issue:43278", "accept": false, - "reason": "SigLIP2 embedding quality and an embedding dtype change during evaluate are different symptoms and code paths." 
+ "reason": "Both mention embeddings, but one is a SigLIP2 quality discrepancy and the other is a dtype change between train and eval." }, { - "left": "issue:44991", - "right": "issue:45399", + "left": "issue:43824", + "right": "issue:43872", "accept": false, - "reason": "Tokenizer loading for est-roberta and flash-attn fallback blocking are unrelated." + "reason": "Different load errors: import failure for Qwen2.5-VL vs bitsandbytes `_is_hf_initialized` incompatibility." }, { - "left": "issue:43792", - "right": "issue:44220", + "left": "issue:44991", + "right": "issue:45399", "accept": false, - "reason": "Both are audio/Whisper-adjacent, but the titles do not establish the same concrete bug or code path." + "reason": "Unrelated: tokenizer loading regression vs flash-attn fallback gating logic." }, { "left": "issue:43827", "right": "issue:44509", "accept": true, - "reason": "Same documentation regression: v5 still references removed pipeline tasks in the summarization/translation docs." + "reason": "Same docs bug: stale v5 references to removed text-generation/summarization/translation pipeline tasks." + }, + { + "left": "issue:43792", + "right": "issue:44220", + "accept": false, + "reason": "Likely related to Whisper audio preprocessing, but the reports are too different to confidently call the same concrete bug." }, { "left": "issue:45081", "right": "issue:45399", "accept": false, - "reason": "A Mistral regex patch crash and flash-attn fallback logic are different problems." + "reason": "Tokenizer regex crash and flash-attn fallback gating are separate issues." }, { "left": "issue:43404", "right": "issue:43950", "accept": false, - "reason": "Untied `lm_head` weights in Mistral3 and silent corruption of non-persistent buffers are unrelated regressions." - }, - { - "left": "issue:42915", - "right": "issue:45020", - "accept": false, - "reason": "Qwen3 MoE FP8 training failure is not the same as broad remote-code compatibility regressions." 
+ "reason": "Different problems: Mistral3 weight tying vs silent corruption of non-persistent buffers." }, { "left": "issue:43257", "right": "issue:43866", "accept": false, - "reason": "Qwen3 MoE weight conversion under accelerate+deepspeed and Ovis2 checkpoint corruption are different issues." + "reason": "Different model and failure mode; Qwen3 MoE weight conversion vs a corrupted Ovis2 checkpoint." }, { - "left": "issue:43742", - "right": "issue:45399", + "left": "issue:43872", + "right": "issue:44291", + "accept": true, + "reason": "Same `_is_hf_initialized` loading regression; one report surfaces through bitsandbytes, the other through `init_empty_weights`." + }, + { + "left": "issue:43854", + "right": "issue:44863", "accept": false, - "reason": "MobileLLM key errors and flash-attn fallback gating are unrelated." + "reason": "Different models and different loading failures; not the same bug." }, { - "left": "issue:43742", - "right": "issue:45020", + "left": "issue:43257", + "right": "issue:45305", "accept": false, - "reason": "MobileLLM loading failure is not the same as the remote-code regression." + "reason": "Both involve Qwen/DeepSpeed, but one is weight conversion and the other is gradient averaging with GAS/model_accepts_loss_kwargs." }, { "left": "issue:41553", - "right": "issue:44991", + "right": "issue:44488", "accept": false, - "reason": "Voxtral AutoTokenizer messaging and est-roberta tokenizer loading failure are different model-specific issues." + "reason": "Different models and different failure modes; one is a bad tokenizer error, the other a model load failure." }, { "left": "issue:43940", "right": "issue:45313", "accept": false, - "reason": "Both are ZeRO-3 loading failures for Qwen models, but the model variants and missing-weight patterns are too different to treat as one concrete bug from titles alone." 
+ "reason": "Both are DeepSpeed ZeRO-3 load failures, but they affect different Qwen model variants and do not read as the same concrete bug." }, { - "left": "issue:43872", - "right": "issue:44291", - "accept": true, - "reason": "Same `_is_hf_initialized` / `init_empty_weights` incompatibility: one reports the bitsandbytes param TypeError directly, the other reports the same root regression while loading models in transformers 5.x." - }, - { - "left": "issue:36331", - "right": "issue:41093", + "left": "issue:43742", + "right": "issue:45399", "accept": false, - "reason": "A custom trainer keyword-argument API break and a tensor mask shape mismatch are unrelated." + "reason": "KeyError during model loading vs flash-attn fallback gating; unrelated." }, { - "left": "issue:43854", - "right": "issue:44863", + "left": "issue:43742", + "right": "issue:45020", "accept": false, - "reason": "Different model-loading failures for different model families; no shared concrete bug is evident from the titles." + "reason": "Different bugs: a specific MobileLLM key error versus broad remote_code regressions." }, { "left": "issue:41553", "right": "issue:44488", "accept": false, - "reason": "Voxtral tokenizer error handling and a separate model load failure for `cjvt/sleng-bert` are not the same issue." + "reason": "Different models and different loading paths; no shared concrete defect." }, { - "left": "issue:43257", - "right": "issue:45305", - "accept": false, - "reason": "Qwen3 MoE weight-conversion/loading behavior and DeepSpeed+GAS gradient averaging are distinct training regressions." - }, - { - "left": "issue:42915", - "right": "issue:43872", - "accept": false, - "reason": "Qwen3 MoE FP8 training and the bitsandbytes `_is_hf_initialized` loading regression are unrelated." 
- }, - { - "left": "issue:43824", - "right": "issue:44291", + "left": "issue:36331", + "right": "issue:41093", "accept": false, - "reason": "Importing Qwen2.5-VL and loading models with `init_empty_weights` hit different failure paths." + "reason": "Trainer loss-signature mismatch and tensor-mask shape mismatch are unrelated." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is grounded in the packet and stays conservative overall. The accepted soft edges are well-supported by the titles: 43827\u219444509 is the same v5 docs/pipeline-removal regression, and 43872\u219444291 is the same `_is_hf_initialized` / `init_empty_weights` incompatibility. The SigLIP2 pairing is also plausible from the titles alone. The remaining verdicts are appropriately cautious and do not overstate evidence." + "feedback": "The summary is grounded in the packet, and the accepted soft-edge pairs are conservative enough to look mergeable into single bugfix PRs. The rejected pairs are appropriately cautious about shared subsystems without enough evidence of the same concrete defect." 
}, "evaluator_used": true, "retried": false, @@ -1387,7 +1291,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:24643", "issue:30064", @@ -1433,23 +1337,23 @@ "issue:44960", "issue:44977", "issue:45030", - "issue:45084", "issue:45313", "issue:45325", - "issue:45357" + "issue:45357", + "issue:45538" ], "soft_pairs": [ "issue:41093|issue:44805", "issue:44479|issue:44560", + "issue:43872|issue:45538", "issue:44530|issue:44863", - "issue:43872|issue:45084", "issue:43366|issue:43828", "issue:24643|issue:30064", "issue:43366|issue:43531", "issue:43550|issue:44910", "issue:43645|issue:43646", - "issue:43366|issue:44589", "issue:43950|issue:45325", + "issue:43366|issue:44589", "issue:44560|issue:44918", "issue:43824|issue:43957", "issue:42915|issue:43957", @@ -1461,29 +1365,29 @@ "issue:43638|issue:44661", "issue:43596|issue:44661", "issue:44877|issue:45030", - "issue:43317|issue:44756", "issue:43644|issue:44534", + "issue:43317|issue:44756", "issue:38175|issue:39692", "issue:45313|issue:45357", "issue:42491|issue:43720", - "issue:44451|issue:44488", "issue:44960|issue:44977", + "issue:44451|issue:44488", "issue:43708|issue:44075" ], - "prepared_review_unit_hash": "20427bd52618cc4515e67739f30236e7aa4057d413bc9f6b5b799ab1c2f66af7", + "prepared_review_unit_hash": "82a22d2b9d6bea5bebaa223468763440acac526f44b2909f668d5be4c321fc28", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 30, - "serialized_chars": 36648, + "serialized_chars": 36645, "estimated_input_tokens": 9162, 
"estimated_eval_tokens": 18580 }, @@ -1492,198 +1396,198 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly a mixed set of unrelated transformer/model-loading issues; only a few pairs look like true duplicates. The clearest duplicate groups are the two non-persistent-buffer regressions, the two custom-model-initialization regressions, and the two generic mask-shape IndexError reports.", - "confidence": 0.79, - "canonical_issue_reason": "issue:43644 is the clearest, most specific regression report in the set and has an almost identical companion (issue:44534), making it the best anchor for the duplicate cluster.", + "summary": "This cluster is mostly heterogeneous, with several unrelated model-loading/regression reports. Only a few soft pairs look like true duplicates; the strongest ones are the Transformers 5 non-persistent-buffer regression and the custom model initialization regression.", + "confidence": 0.52, + "canonical_issue_reason": "issue:43644 is the clearest broad regression in the set and has an obvious duplicate in issue:44534; it best represents a concrete, actionable underlying bug.", "canonical_pr_reason": null, - "best_issue_reason": "Strong representative issue: concrete regression, easy to understand, and tightly scoped to a known broken code path rather than a broad feature request or model-specific anecdote.", + "best_issue_reason": "issue:43644 is the best single representative of the duplicate-prone Transformers 5 regression subset, with a clear and general failure mode.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:41093", "right": "issue:44805", "accept": true, - "reason": "Same generic mask/tensor shape-mismatch IndexError; titles describe the same failure mode with only placeholder dimensions differing." + "reason": "Same IndexError class and the same mask-vs-tensor shape mismatch pattern; these look like the same underlying bug." 
}, { "left": "issue:44479", "right": "issue:44560", "accept": false, - "reason": "Both are Qwen video-related, but they point to different models and different symptoms; not enough evidence of the same underlying bug." + "reason": "Both are Qwen video-related, but the affected models and failure modes differ enough that they do not clearly point to the same concrete bug." }, { - "left": "issue:44530", - "right": "issue:44863", + "left": "issue:43872", + "right": "issue:45538", "accept": false, - "reason": "Completely different model families and failure types: Qwen3.5 PagedAttention/linear_attention vs NemotronH checkpoint loading." + "reason": "Completely different subsystems: bitsandbytes quantization error vs CLIP tokenizer max length behavior." }, { - "left": "issue:43872", - "right": "issue:45084", + "left": "issue:44530", + "right": "issue:44863", "accept": false, - "reason": "Unrelated problems: a bitsandbytes constructor incompatibility vs a template-node compilation error." + "reason": "Different models and different runtime failures; too broad to treat as the same bug." }, { "left": "issue:43366", "right": "issue:43828", "accept": false, - "reason": "One is GGUF/gpt-oss support, the other is a Phi MoE dtype mismatch under autocast; different code paths." + "reason": "GGUF/gpt-oss support is unrelated to the Phi-tiny-MoE autocast dtype mismatch." }, { "left": "issue:24643", "right": "issue:30064", "accept": false, - "reason": "DeepSpeed training weight-shape RuntimeError and void segmentation map processing are unrelated." + "reason": "DeepSpeed training weight-shape error vs image processor void segmentation map handling are unrelated." }, { "left": "issue:43366", "right": "issue:43531", "accept": false, - "reason": "Different issues in the Qwen/GGUF space; one is architecture support, the other is a sliding-window bug." 
+ "reason": "Both mention Qwen-related areas, but one is GGUF/gpt-oss support and the other is a sliding_window issue; not the same bug." }, { "left": "issue:43550", "right": "issue:44910", "accept": false, - "reason": "Different models and different runtime failures: Bamba torch.compile/SDPA vs Qwen3.5 flash-attention position-id handling." + "reason": "Different model families and different code paths: torch.compile/SDPA on Bamba vs flash-attention position_ids on Qwen3.5." }, { "left": "issue:43645", "right": "issue:43646", "accept": true, - "reason": "Same Transformers 5.0 custom-model-initialization regression; the notebook report is a specific reproduction of the broader initialization breakage." + "reason": "These describe the same Transformers 5 custom model initialization regression; the notebook mention in 43645 is just a narrower reproduction." }, { - "left": "issue:43366", - "right": "issue:44589", + "left": "issue:43950", + "right": "issue:45325", "accept": false, - "reason": "Different bugs: GGUF support request vs missing Float8 storage type during loading." + "reason": "Both are regression-style loading issues, but one is non-persistent buffers in from_pretrained and the other is a Qwen2.5-VL rope/position_ids bug." }, { - "left": "issue:43950", - "right": "issue:45325", + "left": "issue:43366", + "right": "issue:44589", "accept": false, - "reason": "Non-persistent buffer corruption and Qwen2.5-VL rope-index scaling are separate loading/position-id issues." + "reason": "GGUF gpt-oss support is unrelated to the Float8 storage lookup failure." }, { "left": "issue:44560", "right": "issue:44918", "accept": false, - "reason": "Both involve Qwen video/embedding flows, but the reported failures and affected trainers/models are different." + "reason": "Both involve Qwen3.5 video/input handling, but the observed failures and affected paths are distinct." 
}, { "left": "issue:43824", "right": "issue:43957", "accept": false, - "reason": "Importing a missing class is not the same as meta-device loading breakage across some models." + "reason": "Importing a missing class is not the same issue as meta-device loading regressions." }, { "left": "issue:42915", "right": "issue:43957", "accept": false, - "reason": "Different root causes: FineGrainedFP8Config on Qwen3Moe vs meta-device loading regressions." + "reason": "Qwen3Moe FP8 config failure and meta-device loading regressions are different code-path problems." }, { "left": "issue:42617", "right": "issue:43366", "accept": false, - "reason": "3D parallel launch failure is unrelated to GGUF support for gpt-oss." + "reason": "3d_parallel.py execution failure is unrelated to GGUF gpt-oss support." }, { "left": "issue:38175", "right": "issue:43994", "accept": false, - "reason": "Same model family, but one is zero probabilities and the other is docs/example mismatch plus quantization failure; not the same concrete bug." + "reason": "Both are SigLIP2-related, but one is zero probabilities while the other is an AutoModel/pipeline loading issue; not clearly the same bug." }, { "left": "issue:43646", "right": "issue:43950", "accept": false, - "reason": "Custom model initialization and non-persistent buffer corruption are different Transformers 5.x regressions." + "reason": "Custom model initialization regression is different from non-persistent buffer corruption during from_pretrained." }, { "left": "issue:44560", "right": "issue:44805", "accept": false, - "reason": "Video StopIteration on Qwen3-VL embedding is unrelated to the generic mask-shape IndexError." + "reason": "Both surface as runtime errors, but one is a Qwen video StopIteration issue and the other is a generic mask/tensor shape mismatch." }, { "left": "issue:36010", "right": "issue:42915", "accept": false, - "reason": "Importing GenerationMixin and Qwen3Moe FP8 failure are unrelated." 
+ "reason": "ImportError for GenerationMixin and Qwen3Moe FP8 failure are unrelated." }, { "left": "issue:43638", "right": "issue:44661", "accept": false, - "reason": "DeepSpeed ZeRO-3/Bert index error is unrelated to TOKENIZER_MAPPING_NAMES handling." + "reason": "Different failures: DeepSpeed zero3 index error vs add-new-model-like tokenizer mapping problem." }, { "left": "issue:43596", "right": "issue:44661", "accept": false, - "reason": "Same as above: different loading path and different failure mode." + "reason": "Both are initialization/config-related, but the concrete bugs and code paths differ." }, { "left": "issue:44877", "right": "issue:45030", "accept": false, - "reason": "Different config-validation regressions affecting different model types." - }, - { - "left": "issue:43317", - "right": "issue:44756", - "accept": false, - "reason": "device_map/offload loading failure and mmap OOM avoidance are not the same bug." + "reason": "Strict config loading for granite_speech and glm4v config validation are separate config-regression issues." }, { "left": "issue:43644", "right": "issue:44534", "accept": true, - "reason": "Near-identical titles describing the same Transformers v5 non-persistent-buffer corruption regression." + "reason": "Near-identical reports of the Transformers v5 non-persistent-buffer corruption regression." + }, + { + "left": "issue:43317", + "right": "issue:44756", + "accept": false, + "reason": "device_map/offload loading failure and Strix Halo mmap OOM are different loading/performance issues." }, { "left": "issue:38175", "right": "issue:39692", "accept": false, - "reason": "SigLIP2 inference oddities and documentation/example errors are different issues." + "reason": "Same model family, but one is wrong outputs and the other is documentation/example errors plus quantization failure." 
}, { "left": "issue:45313", "right": "issue:45357", "accept": false, - "reason": "Both concern Qwen3.5, but one is ZeRO-3 weight loading and the other is save_pretrained visual-encoder key saving." + "reason": "Both are Qwen3.5 regressions, but one is DeepSpeed ZeRO-3 weight loading and the other is save_pretrained key persistence." }, { "left": "issue:42491", "right": "issue:43720", "accept": false, - "reason": "Different problems: Qwen3_moe LoRA compatibility across versions vs BitNet packed-weight loading." + "reason": "LoRA qwen3_moe compatibility on hf5.x and BitNet packed-weight loading are unrelated." }, { - "left": "issue:44451", - "right": "issue:44488", + "left": "issue:44960", + "right": "issue:44977", "accept": false, - "reason": "Both are model-load failures, but there is no evidence they share the same root cause or code path." + "reason": "GLM5 placeholder issue and Qwen3.5 flash-attention generation failure are unrelated." }, { - "left": "issue:44960", - "right": "issue:44977", + "left": "issue:44451", + "right": "issue:44488", "accept": false, - "reason": "GLM5 is not the same issue as Qwen3.5 flash-attention generation failure." + "reason": "Both are model-loading complaints, but the affected checkpoints and likely root causes are different; not enough evidence for a duplicate." }, { "left": "issue:43708", "right": "issue:44075", "accept": false, - "reason": "Trainer resume/max_steps accounting and SGD args not being used are unrelated." + "reason": "Trainer checkpoint resume batch-size math and SGD optimizer args usage are different training bugs." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is grounded in the packet and stays conservative. The three accepted soft pairs are supported by near-identical or clearly matching titles, and the rejections generally avoid conflating similar model families with different failure modes. No obvious overstatement of evidence." 
+ "feedback": "The summary is grounded in the packet and stays conservative about duplicate claims. The accepted soft pairs are well-supported by highly similar titles and failure modes, especially the 43644/44534 non-persistent-buffer regression. No pair is overstated, and the cluster-level characterization as mostly heterogeneous is reasonable." }, "evaluator_used": true, "retried": false, @@ -1702,7 +1606,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:36010", "issue:39404", @@ -1755,17 +1659,17 @@ ], "soft_pairs": [ "issue:43854|issue:43931", - "issue:41720|issue:42915", "issue:43856|issue:43873", - "issue:45362|issue:45406", "issue:42491|issue:45276", + "issue:45362|issue:45406", + "issue:41720|issue:42915", "issue:39692|issue:43994", - "issue:44488|issue:44991", "issue:43257|issue:43854", - "issue:44661|issue:44683", "issue:43856|issue:44387", - "issue:42915|issue:44756", + "issue:44488|issue:44991", "issue:43644|issue:43645", + "issue:44661|issue:44683", + "issue:42915|issue:44756", "issue:42491|issue:43931", "issue:42491|issue:43856", "issue:43404|issue:43454", @@ -1779,297 +1683,303 @@ "issue:44387|issue:44756", "issue:43425|issue:43957", "issue:44292|issue:44912", - "issue:44291|issue:44756", "issue:42947|issue:43381", "issue:43854|issue:45216", + "issue:44291|issue:44756", "issue:45276|issue:45292", "issue:44743|issue:44960", "issue:43381|issue:44928", "issue:43957|issue:44756", "issue:44164|issue:44756", "issue:43646|issue:43708", - "issue:44479|issue:44877", "issue:45292|issue:45335", + "issue:44479|issue:44877", "issue:44402|issue:44451", "issue:43645|issue:44164", "issue:43257|issue:44863", "issue:43425|issue:43828", "issue:43828|issue:43957" ], - "prepared_review_unit_hash": "9694aa72093ad27d0e65bb721d14912864be05bc4efdfb297ebc56e5457ba5c6", + "prepared_review_unit_hash": 
"b911ae06f8b97c8d9e384f2c02aca318c3a9d47f25989add48262015a6eb3b9c", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 41, - "serialized_chars": 39805, - "estimated_input_tokens": 9952, - "estimated_eval_tokens": 20160 + "serialized_chars": 39810, + "estimated_input_tokens": 9953, + "estimated_eval_tokens": 20162 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is mostly heterogeneous, with two clear duplicate subclusters: a resize_token_embeddings/output_embeddings regression set and an lm_head weight-tying regression set. The remaining pairs look like separate model-, runtime-, or version-specific bugs and should stay split.", + "summary": "Most items are unrelated, but there is a small duplicate set around `resize_token_embeddings` not updating model output/decoder embeddings. The generic open issue is the best representative; no PRs are present.", "confidence": 0.82, - "canonical_issue_reason": "Issue 45292 is the broadest description of the resize_token_embeddings regression and can serve as the canonical anchor for the 45276/45335 duplicate subcluster.", + "canonical_issue_reason": "`issue:45292` is the broadest and most general report of the shared bug: `resize_token_embeddings` not affecting output embeddings. 
It cleanly covers the model-specific reports in the accepted soft pairs and is still open.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 45292 is the most generic and reusable report among the clear duplicates, with the widest scope across model variants.", + "best_issue_reason": "`issue:45292` is the best cluster representative because it states the underlying behavior generically, rather than for one specific model family, and matches the duplicate resize-token-embedding reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43854", "right": "issue:43931", "accept": false, - "reason": "Different models and failure modes; not the same bug." + "reason": "Both are model-loading failures, but they involve different models and different failure modes (`GLM-4.7-Flash` unit-test loading vs `Qwen3-VL-30B` weight-shape mismatch)." }, { - "left": "issue:41720", - "right": "issue:42915", + "left": "issue:43856", + "right": "issue:43873", "accept": false, - "reason": "Both are Qwen-family failures, but with different triggers and code paths." + "reason": "Both mention memory/quantization, but one is about Qwen3 MoE training memory usage and the other about offloading behavior with quantization; not the same concrete bug." }, { - "left": "issue:43856", - "right": "issue:43873", + "left": "issue:42491", + "right": "issue:45276", "accept": false, - "reason": "Different memory/offload problems; no concrete shared bug." + "reason": "Different problems: a Qwen3 MoE LoRA compatibility break vs a `resize_token_embeddings` embedding-update bug." }, { "left": "issue:45362", "right": "issue:45406", "accept": false, - "reason": "Unrelated models and errors; one is chat, the other is processor loading." + "reason": "Different regressions in different entry points: `transformers chat` crash for Qwen3.5-35B vs `serve` crash because `Gemma4Processor` lacks `_tokenizer`." 
}, { - "left": "issue:42491", - "right": "issue:45276", + "left": "issue:41720", + "right": "issue:42915", "accept": false, - "reason": "LoRA checkpoint compatibility issue vs resize_token_embeddings regression." + "reason": "Both concern Qwen3/Qwen3-MoE, but one is a CUDA assert during auto device mapping and the other is a FineGrainedFP8Config failure; different code paths." }, { "left": "issue:39692", "right": "issue:43994", "accept": false, - "reason": "Both involve SigLIP2, but one is a docs/example issue and the other is runtime output corruption." - }, - { - "left": "issue:44488", - "right": "issue:44991", - "accept": false, - "reason": "Separate tokenizer-loading failures for different models." + "reason": "Both involve SigLIP2-style usage, but one is a documentation example with model/processor and quantization errors, while the other is nonsensical outputs in AutoModel/pipeline usage." }, { "left": "issue:43257", "right": "issue:43854", "accept": false, - "reason": "Different model-loading problems; no shared concrete code-path bug." - }, - { - "left": "issue:44661", - "right": "issue:44683", - "accept": false, - "reason": "Unrelated areas: tokenizer mapping vs compiled attention on torch 2.9+." + "reason": "Different loading failures: Qwen3 MoE weights not converted under accelerate+deepspeed vs a unit-test load failure for GLM-4.7-Flash." }, { "left": "issue:43856", "right": "issue:44387", "accept": false, - "reason": "Both mention memory, but the underlying bugs are different." + "reason": "One is Qwen3 MoE training memory usage; the other is increased CUDA reserved memory under int4 quantization. Same theme, but not the same bug." }, { - "left": "issue:42915", - "right": "issue:44756", + "left": "issue:44488", + "right": "issue:44991", "accept": false, - "reason": "Different subsystems: FP8 config failure vs mmap/OOM on Strix Halo." + "reason": "Both are tokenizer/model loading issues, but they concern different models and different failure causes." 
}, { "left": "issue:43644", "right": "issue:43645", "accept": false, - "reason": "Both are Transformers 5 regressions, but not the same failure." + "reason": "One is about non-persistent buffers being junk-filled; the other is a notebook-specific custom-model initialization regression." + }, + { + "left": "issue:44661", + "right": "issue:44683", + "accept": false, + "reason": "Different features: `add-new-model-like` failing in tokenizer mapping vs compiled flex_attention failing on newer torch." + }, + { + "left": "issue:42915", + "right": "issue:44756", + "accept": false, + "reason": "Different bug classes: Qwen3 MoE FP8 loading failure vs disabling mmap on Strix Halo to avoid OOM." }, { "left": "issue:42491", "right": "issue:43931", "accept": false, - "reason": "Different models and symptoms; not a duplicate." + "reason": "Different models and different failure surfaces: LoRA incompatibility for Qwen3 MoE vs Qwen3-VL weight-shape mismatch." }, { "left": "issue:42491", "right": "issue:43856", "accept": false, - "reason": "LoRA checkpoint compatibility is unrelated to Qwen3 MoE memory use." + "reason": "Different Qwen3 MoE reports, but one is LoRA compatibility and the other is training memory usage; not the same concrete bug." }, { "left": "issue:43404", "right": "issue:43454", - "accept": true, - "reason": "Same underlying bug: lm_head weights are not tied, causing garbage generation in two model variants." + "accept": false, + "reason": "Both mention lm_head tying in multimodal models, but they are separate model implementations (Mistral3 vs AyaVision) with different reports." }, { "left": "issue:45276", "right": "issue:45335", "accept": true, - "reason": "Same resize_token_embeddings propagation bug affecting different model variants." + "reason": "Both report the same underlying bug: `resize_token_embeddings` does not update model-specific output/decoder embeddings. The model families differ, but the concrete failure is the same." 
}, { "left": "issue:36010", "right": "issue:39404", "accept": false, - "reason": "ImportError for GenerationMixin vs Whisper pipeline return_language regression." + "reason": "An import error for `GenerationMixin` is unrelated to Whisper pipeline `return_language` behavior." }, { "left": "issue:44164", "right": "issue:44291", "accept": false, - "reason": "Different save/load failures with no shared root cause." + "reason": "Both are loading/saving related, but `extra_state` handling and `init_empty_weights` argument errors are different code paths." }, { "left": "issue:41720", "right": "issue:44155", "accept": false, - "reason": "Qwen device-mapping crash vs AudioFlamingo3 track leakage; unrelated." + "reason": "CUDA assert on Qwen3 auto device mapping is unrelated to AudioFlamingo3 batched inference token/embedding leakage." }, { "left": "issue:44402", "right": "issue:44488", "accept": false, - "reason": "Tokenizer vocab mismatch vs tokenizer/model loading failure on a different model." + "reason": "Tokenizer vocab-size mismatch for one model is not the same as the broader model-loading failure in the other issue." }, { "left": "issue:43873", "right": "issue:44387", "accept": false, - "reason": "Quantization/offload issue vs increased reserved memory OOM; too different." + "reason": "Both are memory-related, but offloading-with-quantization and int4 reserved-memory inflation are different defects." }, { "left": "issue:39692", "right": "issue:44960", "accept": false, - "reason": "SigLIP2 docs/runtime bug vs unrelated GLM5 issue." + "reason": "SigLIP2 doc/example issues are unrelated to the generic GLM5 issue." }, { "left": "issue:44387", "right": "issue:44756", "accept": false, - "reason": "Different memory-related bugs in different loading paths." + "reason": "Int4 quantization OOM and disabling mmap on Strix Halo are different memory-management problems." 
}, { "left": "issue:43425", "right": "issue:43957", "accept": false, - "reason": "Torch version incompatibility vs meta-device loading regression." + "reason": "Torch 2.10 incompatibility is unrelated to meta-device model-loading breakage." }, { "left": "issue:44292", "right": "issue:44912", "accept": false, - "reason": "Different quantization formats and different load failures." + "reason": "Both are quantization/model-loading issues, but they affect different models and formats (NVFP4 vs MXFP4) with different symptoms." }, { "left": "issue:42947", "right": "issue:43381", "accept": false, - "reason": "Different gradient-checkpointing issues: inefficiency vs eval-mode restriction." + "reason": "Both mention gradient checkpointing, but one is ineffective under PEFT LoRA and the other says it cannot be used in eval mode." }, { "left": "issue:43854", "right": "issue:45216", "accept": false, - "reason": "Different Qwen/Gemma regressions with different symptoms." + "reason": "Different regressions: GLM-4.7-Flash loading in tests vs Qwen3.5 save_pretrained checkpoint correctness." + }, + { + "left": "issue:44291", + "right": "issue:44756", + "accept": false, + "reason": "`init_empty_weights` argument handling and Strix Halo mmap OOM are unrelated loading/memory issues." }, { "left": "issue:45276", "right": "issue:45292", "accept": true, - "reason": "Same resize_token_embeddings bug; 45292 is the generic form of the same failure." + "reason": "Same bug family: `resize_token_embeddings` fails to propagate to output/decoder embeddings. `issue:45292` is the generic version of the same defect." }, { "left": "issue:44743", "right": "issue:44960", "accept": false, - "reason": "Unrelated recurrent-state bug vs GLM5 issue." + "reason": "A recurrent-state/cache bug in modular_qwen3_5.py is unrelated to the generic GLM5 issue." }, { "left": "issue:43381", "right": "issue:44928", "accept": false, - "reason": "Different gradient-checkpointing and RLHF instability problems." 
+ "reason": "Eval-mode gradient checkpointing and RLHF NaN/position_ids issues are different failure modes." }, { "left": "issue:43957", "right": "issue:44756", "accept": false, - "reason": "Meta-loading failure vs mmap/OOM workaround request; unrelated." + "reason": "Meta-device loading breakage is unrelated to the mmap/OOM workaround issue." }, { "left": "issue:44164", "right": "issue:44756", "accept": false, - "reason": "extra_state save/load bug vs mmap/OOM issue." + "reason": "`extra_state` save/load handling is unrelated to Strix Halo mmap OOM." }, { "left": "issue:43646", "right": "issue:43708", "accept": false, - "reason": "Custom model initialization regression vs Trainer resume_from_checkpoint step calculation." - }, - { - "left": "issue:44479", - "right": "issue:44877", - "accept": false, - "reason": "Video-input regression vs strict config loading failure." + "reason": "Custom model initialization regressions and `resume_from_checkpoint` max_steps miscalculation are different trainer/model lifecycle bugs." }, { "left": "issue:45292", "right": "issue:45335", "accept": true, - "reason": "Same resize_token_embeddings regression across model families." + "reason": "Both describe the same `resize_token_embeddings` regression; `issue:45292` is the generic umbrella report and `issue:45335` is a model-specific instance." + }, + { + "left": "issue:44479", + "right": "issue:44877", + "accept": false, + "reason": "Different model families and different problems: video-input regression for Qwen models vs strict config blocking `granite_speech`." }, { "left": "issue:44402", "right": "issue:44451", "accept": false, - "reason": "Different tokenizer/model loading issues." + "reason": "A tokenizer vocab-size mismatch is not the same as the broader inability to load `vesteinn/ScandiBERT`." }, { "left": "issue:43645", "right": "issue:44164", "accept": false, - "reason": "Notebook custom-model init vs save/from_pretrained extra_state handling." 
+ "reason": "Notebook/custom-model initialization problems are unrelated to `extra_state` save/load failures." }, { "left": "issue:43257", "right": "issue:44863", "accept": false, - "reason": "Different Qwen3 MoE loading problems and likely different code paths." + "reason": "Qwen3 MoE weight conversion under accelerate+deepspeed is unrelated to NemotronH checkpoint loading." }, { "left": "issue:43425", "right": "issue:43828", "accept": false, - "reason": "Torch compatibility issue vs autocast dtype mismatch; unrelated." + "reason": "Torch version incompatibility is unrelated to the Phi-tiny-MoE autocast dtype mismatch." }, { "left": "issue:43828", "right": "issue:43957", "accept": false, - "reason": "Different model/runtime failures with no shared concrete bug." + "reason": "Autocast dtype mismatch and meta-device loading failures are different code paths and symptoms." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is well grounded in the packet: the two accepted duplicate subclusters are supported by the issue titles (resize_token_embeddings/output_embeddings and lm_head weight-tying), and the remaining soft-edge rejections stay conservative given the different models, triggers, or subsystems. The choice of issue:45292 as the canonical anchor is reasonable because it is the broadest resize_token_embeddings report among the related items." + "feedback": "The summary is grounded in the packet: the only accepted soft edges are the three `resize_token_embeddings` pairs, and `issue:45292` is indeed the broadest open representative of that cluster. The rejections are conservative and do not overclaim equivalence across clearly different bug classes. No PRs are present in the packet." 
}, "evaluator_used": true, "retried": false, @@ -2088,7 +1998,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:32090", "issue:34567", @@ -2143,294 +2053,284 @@ "issue:44589|issue:44756", "issue:43906|issue:44530", "issue:43931|issue:44863", - "issue:39404|issue:44912", "issue:43611|issue:43645", + "issue:39404|issue:44912", "issue:43425|issue:43716", "issue:44530|issue:45084", "issue:44265|issue:44898", "issue:43299|issue:43931", "issue:43901|issue:44509", "issue:44479|issue:44805", - "issue:43596|issue:44683", "issue:43994|issue:44079", - "issue:44589|issue:45084", - "issue:44671|issue:44991", "issue:43856|issue:44756", + "issue:44671|issue:44991", "issue:40990|issue:42947", - "issue:43425|issue:44918", "issue:39404|issue:44671", + "issue:43596|issue:44683", + "issue:44589|issue:45084", + "issue:43425|issue:44918", "issue:36032|issue:43452", - "issue:44991|issue:45081", - "issue:32090|issue:39404", "issue:43582|issue:43632", + "issue:32090|issue:39404", + "issue:44991|issue:45081", "issue:43957|issue:44164", - "issue:44488|issue:45081", "issue:44164|issue:44479", "issue:40990|issue:43421", + "issue:44488|issue:45081", "issue:43425|issue:45161", "issue:43883|issue:44488", "issue:43611|issue:43644", - "issue:44488|issue:44987", "issue:44206|issue:44220", "issue:42947|issue:44756", - "issue:43716|issue:45084", + "issue:44488|issue:44987", "issue:34567|issue:40444", - "issue:43883|issue:44991" + "issue:43716|issue:45084" ], - "prepared_review_unit_hash": "d8c846cdf09e707c830f204a83a5cb94c3483a3964216054680ab3b175c9a2fe", + "prepared_review_unit_hash": "bbfd16582678dfdae27099a970c828b67b60ec8491bd8d6a4a68c5fd42e92d7d", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 
364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 36, - "serialized_chars": 38483, - "estimated_input_tokens": 9621, - "estimated_eval_tokens": 19498 + "soft_pair_count": 35, + "serialized_chars": 38185, + "estimated_input_tokens": 9547, + "estimated_eval_tokens": 19350 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a set of unrelated issue reports; only the audio-feature pair looks like a plausible duplicate. The rest span different models, subsystems, and failure modes, so I rejected them conservatively.", - "confidence": 0.34, - "canonical_issue_reason": "If forced to pick one issue anchor, 43611 is the best standalone canonical because it is a broad, concrete Transformers v5 regression report and the clearest general triage target among otherwise unrelated issues.", + "summary": "This set is mostly a collection of distinct regression reports across unrelated subsystems. 
I do not see any pair that is safe to merge as the same concrete bug; the closest audio-fbank pair is still too underspecified to treat as a duplicate with confidence.", + "confidence": 0.52, + "canonical_issue_reason": "Issue 43611 is the best representative anchor because it is a clear, concrete v5 model-loading regression and is broader than the very model-specific reports around it.", "canonical_pr_reason": null, - "best_issue_reason": "43611 is the strongest issue candidate overall: broad regression wording, clear failure mode, and likely to attract related reports.", + "best_issue_reason": "43611 is the strongest standalone issue for triage: specific enough to be actionable, but broad enough to represent the larger v5-loading breakage theme in this set.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44589", "right": "issue:44756", "accept": false, - "reason": "Different problems: a Float8 storage/type error versus an Apple Silicon mmap/OOM workaround." + "reason": "Different failures: float8 storage lookup vs mmap/OOM mitigation on Apple Silicon." }, { "left": "issue:43906", "right": "issue:44530", "accept": false, - "reason": "Unrelated code paths: an isolated reproduction of another issue versus a Qwen3.5 PagedAttentionCache crash." + "reason": "Unrelated domains: isolated reproduction of another issue vs Qwen3.5 PagedAttentionCache crash." }, { "left": "issue:43931", "right": "issue:44863", "accept": false, - "reason": "Both are loading failures, but for different models and different symptoms (shape mismatch vs unsupported NemotronH checkpoint loading)." + "reason": "Both are load failures, but for different model families and different symptoms (shape mismatch vs checkpoint loading support)." }, { - "left": "issue:39404", - "right": "issue:44912", + "left": "issue:43611", + "right": "issue:43645", "accept": false, - "reason": "Whisper pipeline language-return regression is unrelated to GPT-OSS MXFP4 quantization fallback." 
+ "reason": "Both involve v5 model loading, but one is base_model_prefix handling and the other is custom-model initialization in notebooks; not the same bug." }, { - "left": "issue:43611", - "right": "issue:43645", + "left": "issue:39404", + "right": "issue:44912", "accept": false, - "reason": "Both mention v5 breakage, but one is base_model_prefix loading and the other is custom-model notebook initialization; not the same bug." + "reason": "Whisper pipeline return_language regression is unrelated to MXFP4 quantization fallback." }, { "left": "issue:43425", "right": "issue:43716", "accept": false, - "reason": "Torch-version incompatibility is not the same as a Mistral-3 image preprocessor dtype mismatch." + "reason": "Torch-version compatibility and image preprocessor dtype mismatch are unrelated issues." }, { "left": "issue:44530", "right": "issue:45084", "accept": false, - "reason": "Different subsystems: Qwen3.5 cache group-type crash versus tokenizer template compilation error." + "reason": "Different code paths: Qwen3.5 cache/group-type failure vs template compilation error." }, { "left": "issue:44265", "right": "issue:44898", "accept": false, - "reason": "torch.export/torch_compilable_check failure is unrelated to Perceiver image classification interpolation handling." + "reason": "torch.export/torch_compilable_check failure is unrelated to Perceiver interpolation on non-default image sizes." }, { "left": "issue:43299", "right": "issue:43931", "accept": false, - "reason": "Both are Qwen3-VL-related, but one is a v5 loading regression and the other is a checkpoint shape mismatch; not clearly the same defect." + "reason": "Both are Qwen3/VL loading complaints, but one is MoE model loading and the other is checkpoint weight-shape mismatch; not the same concrete bug." }, { "left": "issue:43901", "right": "issue:44509", "accept": false, - "reason": "Both are docs-related, but they cover different pipeline API removals and do not look like the same issue." 
+ "reason": "Different documentation regressions: return_all_scores vs removed pipeline task docs." }, { "left": "issue:44479", "right": "issue:44805", "accept": false, - "reason": "A video-input regression for specific VL models is not the same as a generic mask/tensor shape IndexError." - }, - { - "left": "issue:43596", - "right": "issue:44683", - "accept": false, - "reason": "DeepSpeed ZeRO3/BertModel indexing failure is unrelated to compiled flex_attention on torch>=2.9." + "reason": "Video-input regression for Qwen models is unrelated to a masking shape mismatch error." }, { "left": "issue:43994", "right": "issue:44079", "accept": false, - "reason": "Possible overlap in output handling, but one is a Siglip2 bad-results report and the other is a ModelOutput key-assignment bug; too indirect to merge." + "reason": "One is a model/pipeline quality regression, the other is a ModelOutput key-assignment bug; too different." }, { - "left": "issue:44589", - "right": "issue:45084", + "left": "issue:43856", + "right": "issue:44756", "accept": false, - "reason": "Float8 storage lookup error and template-node compilation error are unrelated." + "reason": "Qwen3 MoE training memory usage and Strix Halo mmap/OOM are different problems." }, { "left": "issue:44671", "right": "issue:44991", "accept": false, - "reason": "CamemBERT masked-LM prediction regression and EST-Roberta tokenizer loading failure are different bugs." + "reason": "CamemBERT masked-LM prediction regression is unrelated to est-roberta tokenizer loading." }, { - "left": "issue:43856", - "right": "issue:44756", + "left": "issue:40990", + "right": "issue:42947", "accept": false, - "reason": "Qwen3 MoE training memory inefficiency is not the same as Strix Halo mmap OOM avoidance." + "reason": "High perplexity on a model is unrelated to LoRA gradient-checkpointing ineffectiveness." 
}, { - "left": "issue:40990", - "right": "issue:42947", + "left": "issue:39404", + "right": "issue:44671", "accept": false, - "reason": "High perplexity on GPT-OSS and gradient-checkpointing ineffectiveness with LoRA are different training/evaluation problems." + "reason": "Whisper return_language and CamemBERT masked-LM prediction issues do not share the same bug." }, { - "left": "issue:43425", - "right": "issue:44918", + "left": "issue:43596", + "right": "issue:44683", "accept": false, - "reason": "Torch version compatibility is unrelated to Qwen3.5 embedding unpacking in TRL SFTTrainer." + "reason": "DeepSpeed ZeRO-3 init index error is unrelated to compiled flex_attention on newer torch." }, { - "left": "issue:39404", - "right": "issue:44671", + "left": "issue:44589", + "right": "issue:45084", + "accept": false, + "reason": "Float8 storage lookup failure and template compilation error are unrelated." + }, + { + "left": "issue:43425", + "right": "issue:44918", "accept": false, - "reason": "Whisper return_language pipeline regression is unrelated to CamemBERT masked-LM prediction errors." + "reason": "Torch 2.10 compatibility and Qwen3.5 embedding unpacking with TRL are different regressions." }, { "left": "issue:36032", "right": "issue:43452", "accept": false, - "reason": "Tokenizer special-token attribute conflict and gguf_file loading breakage are different tokenizer/model-loading bugs." + "reason": "Tokenizer special-tokens API conflict is unrelated to gguf_file loading breakage." }, { - "left": "issue:44991", - "right": "issue:45081", + "left": "issue:43582", + "right": "issue:43632", "accept": false, - "reason": "Different tokenizer-loading failures: EST-Roberta incompatible tokenizer loading versus Mistral regex patch crashing on backend_tokenizer access." + "reason": "Apple Silicon warmup TypeError and _is_hf_initialized flag breakage are different code paths." 
}, { "left": "issue:32090", "right": "issue:39404", "accept": false, - "reason": "Trainer GPU broadcast NoneType error is unrelated to Whisper pipeline language output." + "reason": "Trainer GPU broadcast NoneType error is unrelated to Whisper pipeline return_language behavior." }, { - "left": "issue:43582", - "right": "issue:43632", + "left": "issue:44991", + "right": "issue:45081", "accept": false, - "reason": "Apple Silicon caching allocator warmup TypeError is unrelated to the _is_hf_initialized flag regression." + "reason": "Both affect tokenizer loading, but they hit different models and different failure modes." }, { "left": "issue:43957", "right": "issue:44164", "accept": false, - "reason": "Meta-device model loading and save/from_pretrained extra_state handling are distinct failures." - }, - { - "left": "issue:44488", - "right": "issue:45081", - "accept": false, - "reason": "Generic model-loading failure for cjvt/sleng-bert and Mistral tokenizer regex patch crash do not look like the same issue." + "reason": "Meta-device loading regression is not the same as extra_state save/from_pretrained handling." }, { "left": "issue:44164", "right": "issue:44479", "accept": false, - "reason": "extra_state save/load handling and v5.3.0 video-input regression are unrelated." + "reason": "Save/load extra_state handling is unrelated to video-input regression." }, { "left": "issue:40990", "right": "issue:43421", "accept": false, - "reason": "GPT-OSS perplexity issue and runtime special-token post-processor update bug are different concerns." + "reason": "Perplexity issue and runtime special-token post-processor update bug are unrelated." + }, + { + "left": "issue:44488", + "right": "issue:45081", + "accept": false, + "reason": "Different tokenizer-loading failures for different checkpoints." }, { "left": "issue:43425", "right": "issue:45161", "accept": false, - "reason": "Torch compatibility is not the same as GPT-OSS MoE tensor-parallel loading behavior." 
+ "reason": "Torch compatibility does not match the GPT-OSS tensor-parallel loading problem." }, { "left": "issue:43883", "right": "issue:44488", "accept": false, - "reason": "Missing all_tied_weights_keys on MolmoForCausalLM is not the same as a specific tokenizer/model loading failure." + "reason": "Missing tied-weights metadata and sleng-bert tokenizer loading are unrelated." }, { "left": "issue:43611", "right": "issue:43644", "accept": false, - "reason": "Both are Transformers v5 regressions, but one is about base_model_prefix loading and the other about non-persistent buffer initialization; too different to merge." - }, - { - "left": "issue:44488", - "right": "issue:44987", - "accept": false, - "reason": "Different model-loading failures for different checkpoints and versions; no clear shared bug." + "reason": "Both are v5 loading regressions, but one is base_model_prefix and the other is non-persistent buffer initialization; not the same bug." }, { "left": "issue:44206", "right": "issue:44220", - "accept": true, - "reason": "Both point to the same audio feature-extraction path around _torch_extract_fbank_features; one names the unsupported center argument and the other is the function-level failure report." + "accept": false, + "reason": "Both mention fbank/audio extraction, but the evidence is too thin to say they are the same concrete regression." }, { "left": "issue:42947", "right": "issue:44756", "accept": false, - "reason": "LoRA/gradient checkpointing inefficiency is unrelated to Strix Halo mmap OOM mitigation." + "reason": "LoRA gradient checkpointing and Strix Halo mmap/OOM are unrelated." }, { - "left": "issue:43716", - "right": "issue:45084", + "left": "issue:44488", + "right": "issue:44987", "accept": false, - "reason": "Image preprocessor dtype mismatch and tokenizer template compilation error are unrelated." + "reason": "Different model-loading failures with no shared concrete code-path bug." 
}, { "left": "issue:34567", "right": "issue:40444", "accept": false, - "reason": "TrainerState token-count tracking and Qwen2.5-VL iterable-dataset multi-image failure are unrelated." + "reason": "TrainerState token counting and multi-image iterable dataset finetuning are unrelated." }, { - "left": "issue:43883", - "right": "issue:44991", + "left": "issue:43716", + "right": "issue:45084", "accept": false, - "reason": "Molmo tied-weights attribute error and EST-Roberta tokenizer loading failure are different model-specific problems." + "reason": "Image-preprocessor dtype mismatch and non-template-node compilation error are unrelated." } ] }, - "evaluator_result": { - "accept": true, - "feedback": "The summary is grounded in the packet and stays conservative. The one accepted soft pair (44206/44220) is plausibly the same audio feature-extraction bug, while the rest are reasonably rejected as different models, subsystems, or failure modes. The issue-anchor choice is also defensible from the titles alone." 
- }, - "evaluator_used": true, + "evaluator_result": null, + "evaluator_used": false, "retried": false, - "accepted_nontrivial_soft_edge": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -2445,7 +2345,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:36032", "issue:39404", @@ -2470,11 +2370,11 @@ "issue:43646", "issue:43819", "issue:43825", + "issue:43883", "issue:43994", "issue:44062", "issue:44220", "issue:44265", - "issue:44291", "issue:44403", "issue:44451", "issue:44466", @@ -2497,281 +2397,275 @@ ], "soft_pairs": [ "issue:43644|issue:43646", + "issue:43883|issue:44991", "issue:43643|issue:44530", + "issue:43452|issue:43482", "issue:44451|issue:44987", - "issue:43541|issue:45084", - "issue:43317|issue:43482", "issue:36032|issue:42915", - "issue:43452|issue:43482", + "issue:43317|issue:43482", + "issue:43541|issue:45084", "issue:43262|issue:43377", - "issue:42915|issue:44530", "issue:39404|issue:43825", + "issue:42915|issue:44530", "issue:44849|issue:45325", - "issue:45084|issue:45507", - "issue:36032|issue:43645", "issue:43404|issue:44466", + "issue:36032|issue:43645", "issue:44987|issue:44991", "issue:43646|issue:44534", + "issue:45084|issue:45507", "issue:43994|issue:44610", "issue:43377|issue:43819", "issue:43645|issue:44534", - "issue:44403|issue:45081", "issue:43322|issue:43540", - "issue:44451|issue:45081", "issue:45216|issue:45357", + "issue:44403|issue:45081", + "issue:44451|issue:45081", + "issue:43632|issue:44756", "issue:44220|issue:44683", "issue:44265|issue:44610", - "issue:43632|issue:44756", - "issue:42915|issue:45084", "issue:41553|issue:43582", - "issue:44987|issue:45081", "issue:43404|issue:45127", + "issue:42915|issue:45084", + "issue:44987|issue:45081", "issue:43296|issue:43531", - "issue:44062|issue:44987", "issue:44479|issue:45325", - "issue:42915|issue:44291" + 
"issue:44062|issue:44987" ], - "prepared_review_unit_hash": "bb80ec6f68a65719b93adc4c505c7895e0acfdd9aff0b43b558081fe49988893", + "prepared_review_unit_hash": "1a8adf42b51a7510d31f03d60bea4b54a5fada270f229535353ca0fe27fdcaa5", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 34, - "serialized_chars": 37381, - "estimated_input_tokens": 9346, - "estimated_eval_tokens": 18948 + "serialized_chars": 37326, + "estimated_input_tokens": 9332, + "estimated_eval_tokens": 18920 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly a loose cluster of Transformers v5/v5.x regression reports. A few pairs look like true duplicates, but most are only broadly related by subsystem or model family. I\u2019ve tightened the tokenizer-loading call: 44987/44991 is related, but not conservative enough to mark duplicate from the packet evidence alone.", - "confidence": 0.71, - "canonical_issue_reason": "issue:43646 is still the best umbrella representative: it\u2019s a broad, user-facing v5 regression around custom model initialization, making it a reasonable anchor for the mixed initialization/loading subgroup.", + "summary": "Most pairs are clearly different bugs and should stay separate. 
The only strong duplicate-looking pair is the Qwen3.5 `save_pretrained` regression pair.", + "confidence": 0.76, + "canonical_issue_reason": "issue:43644 is the clearest, most concrete regression in the set and best serves as the representative issue for the v5 buffer/state corruption theme.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43646 is the strongest single issue to keep as the cluster anchor because it is broad, concrete, and representative of the v5 regression flavor in this set, unlike the more model-specific reports.", + "best_issue_reason": "issue:43644 is the most actionable standalone issue here: it states a specific, user-visible regression cleanly and is broad enough to represent the cluster better than the more model-specific follow-ups.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43644", "right": "issue:43646", "accept": false, - "reason": "Related v5 initialization regressions, but the symptoms differ: non-persistent buffer junk vs custom model initialization." + "reason": "Different failures: non-persistent buffer corruption vs custom model initialization breakage." + }, + { + "left": "issue:43883", + "right": "issue:44991", + "accept": false, + "reason": "Unrelated bugs: missing `all_tied_weights_keys` on one model vs tokenizer loading failure for a different model." }, { "left": "issue:43643", "right": "issue:44530", "accept": false, - "reason": "Different code paths and failures: missing AutoConfig fields with trust_remote_code versus a PagedAttentionCache/linear_attention crash." + "reason": "Different code paths: `AutoConfig.from_pretrained` field loss vs PagedAttentionCache crashing on `linear_attention`." + }, + { + "left": "issue:43452", + "right": "issue:43482", + "accept": false, + "reason": "Both involve loading, but one is a generic `gguf_file` API break while the other is a specific Qwen2.5-GGUF load failure; not clearly the same concrete bug." 
}, { "left": "issue:44451", "right": "issue:44987", "accept": false, - "reason": "Both are tokenizer-loading failures, but they target different models and the packet does not show a shared root cause." + "reason": "Different models and likely different loading regressions; not the same underlying issue." }, { - "left": "issue:43541", - "right": "issue:45084", + "left": "issue:36032", + "right": "issue:42915", "accept": false, - "reason": "Unrelated failures: grouped_mm tracing in Mixtral versus a non-template-node compilation TypeError." + "reason": "Tokenizer `add_special_tokens` conflict is unrelated to Qwen3Moe FP8 config failure." }, { "left": "issue:43317", "right": "issue:43482", "accept": false, - "reason": "Both involve loading, but one is dequantized device-map offload and the other is a Qwen2.5-GGUF/v5 load failure." - }, - { - "left": "issue:36032", - "right": "issue:42915", - "accept": false, - "reason": "Tokenizer add_special_tokens conflict versus Qwen3Moe FineGrainedFP8Config failure; clearly different bugs." + "reason": "Dequantized offload/device-map failure is a different path from GGUF loading failure." }, { - "left": "issue:43452", - "right": "issue:43482", + "left": "issue:43541", + "right": "issue:45084", "accept": false, - "reason": "Both mention GGUF/loading, but the failures are different concrete bugs and the packet gives no shared root cause." + "reason": "Both mention tracing/compilation, but they are different errors in different code paths and models." }, { "left": "issue:43262", "right": "issue:43377", "accept": false, - "reason": "Different components and behavior: audio chat-template sampling-rate default vs MIMI encoder padding-mask inconsistency." + "reason": "Audio chat-template sampling-rate default bug is unrelated to MIMI batched-vs-single padding-mask behavior." 
}, { - "left": "issue:42915", - "right": "issue:44530", + "left": "issue:39404", + "right": "issue:43825", "accept": false, - "reason": "Different Qwen-family failures with different mechanisms: FP8 config handling versus invalid group type in PagedAttentionCache." + "reason": "One is a pipeline `return_language` regression; the other is an incorrect error message about translation support." }, { - "left": "issue:39404", - "right": "issue:43825", + "left": "issue:42915", + "right": "issue:44530", "accept": false, - "reason": "Both are pipeline-related, but one is Whisper return_language behavior and the other is an error-message regression." + "reason": "Different bugs: FineGrainedFP8Config failure vs PagedAttentionCache crash on `linear_attention`." }, { "left": "issue:44849", "right": "issue:45325", "accept": false, - "reason": "Different Qwen model families and different bugs: output_hidden_states behavior versus still-image rope-index scaling." + "reason": "Different model behaviors: `output_hidden_states=True` regression vs rope-index scaling for still images." }, { - "left": "issue:45084", - "right": "issue:45507", + "left": "issue:43404", + "right": "issue:44466", "accept": false, - "reason": "Distinct failures: template compilation TypeError versus GraniteMoEHybrid calling an invalid method." + "reason": "Untied `lm_head` on one model is not the same as device-dependent serialization of tied weights." }, { "left": "issue:36032", "right": "issue:43645", "accept": false, - "reason": "Tokenizer special-token conflicts are unrelated to notebook-based custom model initialization failures." - }, - { - "left": "issue:43404", - "right": "issue:44466", - "accept": false, - "reason": "Related to tied weights, but one is a specific Mistral3 tie bug and the other is inconsistent serialization by device." + "reason": "Tokenizer special-token conflict is unrelated to custom model initialization breaking in notebooks." 
}, { "left": "issue:44987", "right": "issue:44991", "accept": false, - "reason": "The packet only shows broad tokenizer-loading similarity across different models (physical-intelligence/fast vs EMBEDDIA/est-roberta), not enough evidence of the same underlying bug." + "reason": "Different failures on different artifacts: model loading regression vs tokenizer loading regression." }, { "left": "issue:43646", "right": "issue:44534", - "accept": true, - "reason": "These describe the same v5 regression about non-persistent buffers being filled with junk, with slightly different wording/version framing." + "accept": false, + "reason": "Custom model initialization breakage is unrelated to non-persistent buffers being filled with junk." + }, + { + "left": "issue:45084", + "right": "issue:45507", + "accept": false, + "reason": "Different errors: template compilation failure vs GraniteMoEHybrid calling an invalid method." }, { "left": "issue:43994", "right": "issue:44610", "accept": false, - "reason": "Different problems: nonsensical SigLIP2 outputs versus an OmDet-Turbo processor/model size mismatch." + "reason": "Nonsensical model outputs are not the same as a processor/model input-size mismatch." }, { "left": "issue:43377", "right": "issue:43819", "accept": false, - "reason": "Different audio/codec bugs: missing padding-mask support in MIMI versus DAC.from_latents not matching forward due to missing STE." + "reason": "MIMI padding-mask inconsistency is unrelated to DAC `from_latents` mismatch without STE." }, { "left": "issue:43645", "right": "issue:44534", "accept": false, - "reason": "Custom model initialization in notebooks is a different bug from the non-persistent buffer initialization regression." - }, - { - "left": "issue:44403", - "right": "issue:45081", - "accept": false, - "reason": "One is vague loading noise/logging; the other is a concrete Mistral regex patch crash from missing backend_tokenizer." 
+ "reason": "Custom model initialization in notebooks is unrelated to non-persistent buffer junk serialization." }, { "left": "issue:43322", "right": "issue:43540", "accept": false, - "reason": "Both affect vision/video loading, but one is a Llava Next segmentation fault and the other is a Qwen3OmniMoe video ValueError." - }, - { - "left": "issue:44451", - "right": "issue:45081", - "accept": false, - "reason": "Different models and failure modes: ScandiBERT tokenizer load failure versus Mistral regex patch crash." + "reason": "Segmentation fault loading Llava Next is a different bug from Qwen3OmniMoe video-input validation." }, { "left": "issue:45216", "right": "issue:45357", "accept": true, - "reason": "Same Qwen3.5 save_pretrained regression, with the later issue reporting the continued incorrect visual encoder key serialization." + "reason": "Same underlying Qwen3.5 `save_pretrained` regression: both report incorrect saved checkpoints/visual encoder keys after v5 changes." }, { - "left": "issue:44220", - "right": "issue:44683", + "left": "issue:44403", + "right": "issue:45081", "accept": false, - "reason": "Different low-level failures: _torch_extract_fbank_features versus compiled flex_attention on torch>=2.9." + "reason": "Unnecessary load-time noise is not the same as a tokenizer crash in `_patch_mistral_regex`." }, { - "left": "issue:44265", - "right": "issue:44610", + "left": "issue:44451", + "right": "issue:45081", "accept": false, - "reason": "torch.export with torch_compilable_check is unrelated to the OmDet-Turbo processor/model input-size mismatch." + "reason": "Different model-loading failure vs Mistral tokenizer regex-patching crash." }, { "left": "issue:43632", "right": "issue:44756", "accept": false, - "reason": "One is a v5 internal flag regression; the other is mmap OOM avoidance on Strix Halo." + "reason": "Broken `_is_hf_initialized` handling is unrelated to disabling mmap on Strix Halo to avoid OOM." 
}, { - "left": "issue:42915", - "right": "issue:45084", + "left": "issue:44220", + "right": "issue:44683", "accept": false, - "reason": "Qwen3Moe FP8 config failure is unrelated to the template compilation TypeError." + "reason": "Feature extraction failure and compiled flex_attention failure are unrelated." }, { - "left": "issue:41553", - "right": "issue:43582", + "left": "issue:44265", + "right": "issue:44610", "accept": false, - "reason": "Bad AutoTokenizer error messaging for Voxtral versus an Apple Silicon TypeError in caching_allocator_warmup; unrelated." + "reason": "`torch.export`/`torch_compilable_check` failure is unrelated to OmDet-Turbo processor producing the wrong input size." }, { - "left": "issue:44987", - "right": "issue:45081", + "left": "issue:41553", + "right": "issue:43582", "accept": false, - "reason": "Tokenizer load failure for EMBEDDIA/est-roberta is unrelated to the Mistral regex patch crash." + "reason": "Bad error messaging for AutoTokenizer loading Voxtral is unrelated to the Apple Silicon `caching_allocator_warmup` TypeError." }, { "left": "issue:43404", "right": "issue:45127", "accept": false, - "reason": "Both involve tied weights, but one is a missing tie in Mistral3ForConditionalGeneration and the other is LoRA merge collapse with extended vocabulary." + "reason": "Untied `lm_head` on Mistral3 is not the same bug as LoRA merge collapse with extended vocabulary." }, { - "left": "issue:43296", - "right": "issue:43531", + "left": "issue:44987", + "right": "issue:45081", "accept": false, - "reason": "PaddleOCR-VL load failure in vLLM and a Qwen3-MoE sliding_window issue are distinct model/runtime bugs." + "reason": "Different models and different failures: loading `physical-intelligence/fast` vs Mistral tokenizer regex patch crash." 
}, { - "left": "issue:44062", - "right": "issue:44987", + "left": "issue:43296", + "right": "issue:43531", "accept": false, - "reason": "AddedToken special-argument conflict is a tokenizer construction bug, not the same as the est-roberta tokenizer loading regression." + "reason": "PaddleOCR-VL load failure and Qwen3-MoE sliding-window issue are separate regressions." }, { "left": "issue:44479", "right": "issue:45325", "accept": false, - "reason": "Both are Qwen visual/video regressions, but they describe different concrete bugs." + "reason": "Video-input regression in several Qwen models is unrelated to still-image temporal position scaling in Qwen2.5-VL." }, { - "left": "issue:42915", - "right": "issue:44291", + "left": "issue:44062", + "right": "issue:44987", "accept": false, - "reason": "FineGrainedFP8Config failure and unexpected _is_hf_initialized during init_empty_weights are different initialization paths and errors." + "reason": "`AddedToken(..., special=...)` constructor conflict is unrelated to the `physical-intelligence/fast` loading regression." } ] }, "evaluator_result": { - "accept": false, - "feedback": "Overall summary is reasonable, but the soft-edge verdict for issue:44987 vs issue:44991 is too assertive for the packet. The two issues are about different models (physical-intelligence/fast vs EMBEDDIA/est-roberta) and the packet does not provide evidence they share the same root cause, only that both are tokenizer-loading failures. That is broad relatedness, not a conservative duplicate call. Because of that overstatement, I can\u2019t accept the cluster as reviewed." + "accept": true, + "feedback": "The summary is grounded in the packet, and the soft-edge verdicts stay conservative overall. The only accepted duplicate-looking pair, issue:45216 vs issue:45357, is well supported by the titles. The other rejections mostly distinguish issues at the title/code-path level without overclaiming shared root cause." 
}, "evaluator_used": true, - "retried": true, + "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null @@ -2787,7 +2681,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:33666", "issue:35707", @@ -2813,14 +2707,11 @@ "issue:43749", "issue:43792", "issue:43818", - "issue:43828", "issue:43856", - "issue:43881", "issue:44079", "issue:44265", "issue:44291", "issue:44403", - "issue:44488", "issue:44534", "issue:44568", "issue:44589", @@ -2836,271 +2727,298 @@ "issue:45357", "issue:45381", "issue:45479", - "issue:45491" + "issue:45491", + "issue:45538", + "issue:45542" ], "soft_pairs": [ + "issue:42175|issue:45542", "issue:40990|issue:44811", - "issue:36032|issue:43742", + "issue:42915|issue:44291", "issue:43611|issue:44534", "issue:43317|issue:43856", - "issue:42915|issue:44589", + "issue:36032|issue:43742", + "issue:43632|issue:44534", "issue:40444|issue:45325", + "issue:42915|issue:44589", "issue:43531|issue:44877", - "issue:43632|issue:44534", - "issue:44683|issue:45020", "issue:43421|issue:44568", - "issue:36032|issue:41720", "issue:43818|issue:45357", - "issue:43425|issue:44403", - "issue:43792|issue:44589", - "issue:44403|issue:44991", + "issue:36032|issue:41720", + "issue:44683|issue:45020", + "issue:43482|issue:45538", "issue:44829|issue:45479", "issue:33666|issue:40444", + "issue:43425|issue:44403", + "issue:44403|issue:44991", "issue:44079|issue:44534", - "issue:43377|issue:45491", + "issue:43792|issue:44589", "issue:35707|issue:40444", + "issue:43377|issue:45491", "issue:43482|issue:43525", "issue:44291|issue:44589", "issue:43749|issue:44079", "issue:43644|issue:44079", "issue:44079|issue:45072", - "issue:44265|issue:45072", "issue:43632|issue:45072", - "issue:43531|issue:44291", + "issue:44265|issue:45072", "issue:40444|issue:45381", - "issue:42175|issue:43296", - "issue:41720|issue:45081", 
"issue:45325|issue:45381", + "issue:43531|issue:44291", "issue:43611|issue:43646", - "issue:43881|issue:44488", - "issue:43828|issue:44683" + "issue:41720|issue:45081", + "issue:42175|issue:43296" ], - "prepared_review_unit_hash": "7233caba22fc3362737a614b6436a733106431443a0a9142dc670c82494a1452", + "prepared_review_unit_hash": "5037f19fbae2871559be5d330eb59fef7f30d894589e07d9c000d5c797577624", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { - "node_count": 48, - "item_count": 48, - "soft_pair_count": 35, - "serialized_chars": 38147, - "estimated_input_tokens": 9537, - "estimated_eval_tokens": 19330 + "node_count": 47, + "item_count": 47, + "soft_pair_count": 36, + "serialized_chars": 37918, + "estimated_input_tokens": 9480, + "estimated_eval_tokens": 19216 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly a heterogeneous cluster of unrelated Transformers/model-loading regressions. The only clear duplicate-like subclusters are the Transformers v5 _is_hf_initialized/init_empty_weights reports and the Qwen2.5-VL position-id regression reports; most other soft edges should be rejected.", - "confidence": 0.77, - "canonical_issue_reason": "Among the near-duplicates, issue 43632 is the clearest canonical for the _is_hf_initialized/init_empty_weights regression; however, the overall cluster is too mixed for one true canonical issue to cover everything.", + "summary": "Mostly a mixed cluster of unrelated bugs, with one clear duplicate pair around missing TensorFlow backend support in torch-only installs. 
Most other pairs share broad subsystems or version contexts but not the same concrete defect.", + "confidence": 0.72, + "canonical_issue_reason": "issue:43644 is the clearest representative of the Transformers v5 non-persistent-buffer regression and is the same report as 44534, with earlier discussion and an inbound reference.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 45020 is the broadest umbrella for the overall theme of recent-version breakages, but it is still not a clean duplicate of most items in this cluster.", + "best_issue_reason": "issue:43644 is the strongest anchor in this set: it is a concrete, well-scoped regression report and matches the duplicate report 44534 closely.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:42175", + "right": "issue:45542", + "accept": true, + "reason": "Both point to the same packaging gap: the torch-only install path leaves the TensorFlow backend unavailable, producing backend/import errors." + }, { "left": "issue:40990", "right": "issue:44811", "accept": false, - "reason": "Different models and failures: perplexity on GPT-OSS vs Whisper decode behavior." + "reason": "Completely different bugs: perplexity evaluation on GPT-OSS vs Whisper batch_decode skipping special tokens." }, { - "left": "issue:36032", - "right": "issue:43742", + "left": "issue:42915", + "right": "issue:44291", "accept": false, - "reason": "Tokenizer init conflict vs MobileLLM loading key error; unrelated code paths." + "reason": "Both involve Transformers v5 loading paths, but one is a Qwen3MoE FP8 config failure and the other is an init_empty_weights/_is_hf_initialized TypeError; different root causes." }, { "left": "issue:43611", "right": "issue:44534", "accept": false, - "reason": "Both are Transformers v5 regressions, but one is base_model_prefix loading and the other is non-persistent buffer corruption." 
+ "reason": "Both are v5 regressions, but one is about base_model_prefix loading and the other about non-persistent buffers being filled with junk." }, { "left": "issue:43317", "right": "issue:43856", "accept": false, - "reason": "Device-map/offload loading bug vs Qwen3 MoE training memory usage; not the same bug." + "reason": "Different symptoms and code paths: dequantized model offload/device_map loading vs Qwen3 MoE training memory usage." }, { - "left": "issue:42915", - "right": "issue:44589", + "left": "issue:36032", + "right": "issue:43742", "accept": false, - "reason": "Both involve float8/quantization-adjacent failures, but the reported symptoms and likely code paths differ." + "reason": "Unrelated tokenizer load error vs a MobileLLM key error when loading a specific model." }, { - "left": "issue:40444", - "right": "issue:45325", + "left": "issue:43632", + "right": "issue:44534", "accept": false, - "reason": "Both are Qwen2.5-VL, but one is multi-image IterableDataset training and the other is position-id scaling for still images." + "reason": "Both are Transformers v5 breakages, but one concerns the _is_hf_initialized flag and the other non-persistent buffer initialization." }, { - "left": "issue:43531", - "right": "issue:44877", + "left": "issue:40444", + "right": "issue:45325", "accept": false, - "reason": "Qwen3-MoE sliding-window behavior vs strict config loading for granite_speech are unrelated." + "reason": "Both are Qwen2.5-VL bugs, but one is multi-image iterable dataset finetuning and the other is still-image temporal position_id scaling." }, { - "left": "issue:43632", - "right": "issue:44534", + "left": "issue:42915", + "right": "issue:44589", "accept": false, - "reason": "Different regressions: _is_hf_initialized/init_empty_weights versus junk-filled non-persistent buffers." + "reason": "Different failures: Qwen3MoE FP8 config handling vs missing Float8 storage object." 
}, { - "left": "issue:44683", - "right": "issue:45020", + "left": "issue:43531", + "right": "issue:44877", "accept": false, - "reason": "Torch >=2.9 compiled flex_attention failure is separate from recent-version remote_code loading breakage." + "reason": "Different model/config issues: Qwen3-MoE sliding_window behavior vs granite_speech strict config loading." }, { "left": "issue:43421", "right": "issue:44568", "accept": false, - "reason": "Both tokenizer-related, but one is runtime post-processor updating and the other is add_special_tokens behavior." - }, - { - "left": "issue:36032", - "right": "issue:41720", - "accept": false, - "reason": "Tokenizer attribute conflict vs Qwen3 device-mapping cuda assert; no shared bug." + "reason": "Both involve special tokens, but one is runtime post-processor updates and the other is add_special_tokens not adding BOS/EOS for a specific tokenizer." }, { "left": "issue:43818", "right": "issue:45357", "accept": false, - "reason": "Video-LLaVA tower/weights issue and Qwen3.5 save_pretrained key regression are distinct." + "reason": "Different multimodal model problems: Video-LLaVA weight/temporal-attention issue vs Qwen3.5 save_pretrained key regression." }, { - "left": "issue:43425", - "right": "issue:44403", + "left": "issue:36032", + "right": "issue:41720", "accept": false, - "reason": "Torch version compatibility vs loading noise; not the same underlying problem." + "reason": "Tokenizer construction error vs Qwen3 auto device mapping CUDA assert; unrelated bugs." }, { - "left": "issue:43792", - "right": "issue:44589", + "left": "issue:44683", + "right": "issue:45020", "accept": false, - "reason": "Whisper runtime failure and float8 storage lookup error are unrelated." + "reason": "Different areas: compiled flex_attention on torch>=2.9 vs remote_code model loading regressions." 
}, { - "left": "issue:44403", - "right": "issue:44991", + "left": "issue:43482", + "right": "issue:45538", "accept": false, - "reason": "Generic loading noise vs tokenizer loading failure for EMBEDDIA/est-roberta; different issues." + "reason": "Different tokenizer/loading issues: Qwen2.5-GGUF on transformers v5 vs CLIPTokenizer model_max_length behavior." }, { "left": "issue:44829", "right": "issue:45479", "accept": false, - "reason": "Both concern degenerate classification behavior, but the triggers and failure modes differ." + "reason": "Both are classification-training degeneracy reports, but one is tied to flash_attention_3 and the other to num_labels=1 loss semantics." }, { "left": "issue:33666", "right": "issue:40444", "accept": false, - "reason": "Qwen2-VL multi-GPU training and Qwen2.5-VL multi-image dataset loading are different requests/failures." + "reason": "Both are Qwen2-VL/Qwen2.5-VL training issues, but they describe different failure modes and data setups." + }, + { + "left": "issue:43425", + "right": "issue:44403", + "accept": false, + "reason": "Unrelated: Torch 2.10 compatibility vs noisy transformer loading messages." + }, + { + "left": "issue:44403", + "right": "issue:44991", + "accept": false, + "reason": "Different bugs: generic loading noise vs tokenizer loading failure for a specific model." }, { "left": "issue:44079", "right": "issue:44534", "accept": false, - "reason": "ModelOutput key assignment bug vs non-persistent buffer junk are unrelated." + "reason": "ModelOutput key assignment bug vs non-persistent buffer junk initialization; not the same defect." }, { - "left": "issue:43377", - "right": "issue:45491", + "left": "issue:43792", + "right": "issue:44589", "accept": false, - "reason": "Both mention batching/padding, but MIMI encoder mask support and Gemma3 NaN embeddings are different code paths." + "reason": "Whisper runtime failure vs missing Float8 storage type; unrelated." 
}, { "left": "issue:35707", "right": "issue:40444", "accept": false, - "reason": "Progressive generation with inputs_embeds/past_key_values is unrelated to Qwen2.5-VL multi-image IterableDataset failure." + "reason": "Progressive generation with inputs_embeds/past_key_values is unrelated to Qwen2.5-VL finetuning with multiple images." + }, + { + "left": "issue:43377", + "right": "issue:45491", + "accept": false, + "reason": "Both are batching/padding issues, but the affected models and mechanisms differ: MIMI padding-mask support vs Gemma3 sliding-window NaNs." }, { "left": "issue:43482", "right": "issue:43525", "accept": false, - "reason": "Qwen2.5-GGUF loading and Llama4Config pad_token_id are separate model/config regressions." + "reason": "Different loading regressions in unrelated models: Qwen2.5-GGUF vs Llama4Config pad_token_id." }, { "left": "issue:44291", "right": "issue:44589", "accept": false, - "reason": "init_empty_weights/_is_hf_initialized TypeError is unrelated to Float8 storage resolution." + "reason": "init_empty_weights/_is_hf_initialized TypeError is unrelated to the Float8 storage lookup failure." }, { "left": "issue:43749", "right": "issue:44079", "accept": false, - "reason": "FSDP CPU RAM efficient loading and ModelOutput key assignment are different subsystems." + "reason": "FSDP CPU RAM efficient loading and ModelOutput key assignment are unrelated bugs." }, { "left": "issue:43644", "right": "issue:44079", "accept": false, - "reason": "Non-persistent buffer initialization and ModelOutput key handling are not the same bug." + "reason": "Although both mention Transformers internals, one is about non-persistent buffers being corrupted and the other about ModelOutput key handling." }, { "left": "issue:44079", "right": "issue:45072", "accept": false, - "reason": "ModelOutput key assignment and bfloat16 dtype mismatch are unrelated failures." + "reason": "ModelOutput key assignment bug vs bf16 dtype mismatches in CI; different defects." 
}, { - "left": "issue:44265", + "left": "issue:43632", "right": "issue:45072", "accept": false, - "reason": "torch.export / torch_compilable_check failure is separate from bfloat16 inference dtype mismatches." + "reason": "_is_hf_initialized flag regression is unrelated to dtype mismatch failures in bf16 inference." }, { - "left": "issue:43632", + "left": "issue:44265", "right": "issue:45072", "accept": false, - "reason": "_is_hf_initialized/init_empty_weights regression is not the same as the bfloat16 dtype mismatch report." + "reason": "torch.export / torch_compilable_check failure is unrelated to SwitchTransformers/TimmWrapperModel dtype mismatches." }, { - "left": "issue:43531", - "right": "issue:44291", + "left": "issue:40444", + "right": "issue:45381", "accept": false, - "reason": "Qwen3-MoE sliding_window bug and init_empty_weights/_is_hf_initialized bug are unrelated." + "reason": "Both are Qwen2.5-VL vision-input bugs, but one is multi-image finetuning and the other is video vision_position_ids." }, { - "left": "issue:40444", + "left": "issue:45325", "right": "issue:45381", "accept": false, - "reason": "Both are Qwen2.5-VL vision issues, but multi-image training and video position-id computation are different bugs." + "reason": "Closely related subsystem, but one is still-image temporal position scaling and the other is video position_id computation; not the same concrete bug." }, { - "left": "issue:42175", - "right": "issue:43296", + "left": "issue:43531", + "right": "issue:44291", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior is unrelated to init_empty_weights/_is_hf_initialized argument handling." + }, + { + "left": "issue:43611", + "right": "issue:43646", "accept": false, - "reason": "Missing TensorFlow extra dependency and PaddleOCR-VL loading in vLLM are unrelated package/model issues." 
+ "reason": "Both are Transformers v5 loading regressions, but they describe different failure points: base_model_prefix loading vs custom model initialization." }, { "left": "issue:41720", "right": "issue:45081", "accept": false, - "reason": "Qwen3 auto device mapping cuda assert and Mistral tokenizer regex patch crash are unrelated." + "reason": "Different components: Qwen3 auto device mapping CUDA assert vs Mistral tokenizer regex patch crash." }, { - "left": "issue:45325", - "right": "issue:45381", - "accept": true, - "reason": "Both describe the same Qwen2.5-VL position-id computation regression in transformers 5.3.0, affecting still-image and video paths." + "left": "issue:42175", + "right": "issue:43296", + "accept": false, + "reason": "Both involve model/backend setup, but one is missing TensorFlow in a torch install and the other is PaddleOCR-VL load failure in vLLM." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is grounded in the packet and stays appropriately conservative. Most soft edges are rejected for distinct symptoms/code paths, and the only accepted duplicate pair (45325/45381) is reasonably supported by the shared Qwen2.5-VL position-id regression theme." + "feedback": "Grounded overall and conservative. The canonical choice around issue:43644/44534 is well supported by the identical titles and the inbound reference on 43644. The only accepted soft edge, issue:42175 vs issue:45542, is a plausible same-failure packaging/backend-installation match. The remaining rejected pairs mostly distinguish different root causes despite shared subsystems or model families, which is appropriately cautious." 
}, "evaluator_used": true, "retried": false, @@ -3119,7 +3037,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:34634", "issue:35707", @@ -3145,6 +3063,7 @@ "issue:43749", "issue:43792", "issue:43819", + "issue:43828", "issue:43873", "issue:43881", "issue:43883", @@ -3155,8 +3074,8 @@ "issue:44265", "issue:44291", "issue:44387", - "issue:44423", "issue:44466", + "issue:44488", "issue:44534", "issue:44610", "issue:44617", @@ -3167,277 +3086,252 @@ "issue:44991", "issue:45072", "issue:45325", - "issue:45357", - "issue:45406" + "issue:45357" ], "soft_pairs": [ - "issue:43994|issue:44898", + "issue:43881|issue:44488", "issue:43454|issue:44466", + "issue:43994|issue:44898", "issue:44387|issue:44617", + "issue:43828|issue:44683", "issue:42915|issue:43482", - "issue:43792|issue:43957", "issue:43526|issue:43883", "issue:43701|issue:45325", + "issue:43792|issue:43957", "issue:43262|issue:43746", "issue:43317|issue:43873", "issue:34634|issue:35707", - "issue:44291|issue:44877", - "issue:43881|issue:44991", "issue:43606|issue:44265", + "issue:43881|issue:44991", "issue:44186|issue:44265", + "issue:44291|issue:44877", "issue:43606|issue:43819", "issue:43632|issue:44164", "issue:35707|issue:41720", "issue:43377|issue:44898", - "issue:44610|issue:45325", - "issue:43377|issue:44610", "issue:43454|issue:45072", + "issue:44610|issue:45325", "issue:43454|issue:43550", + "issue:43377|issue:44610", "issue:39692|issue:40444", "issue:43749|issue:44534", + "issue:44466|issue:45357", "issue:43296|issue:44991", "issue:43296|issue:44877", - "issue:44466|issue:45357", "issue:43792|issue:44683", "issue:43454|issue:43720", "issue:41720|issue:43322", "issue:41720|issue:44977", - "issue:43404|issue:45357", - "issue:44423|issue:45406", - "issue:44186|issue:45072" + "issue:43404|issue:45357" ], - "prepared_review_unit_hash": 
"ff6b1336dc6880ec37d0e8f5970c02b974d1abe904b45df7be534b662c318f5b", + "prepared_review_unit_hash": "7c276a7e3b7a4d8a8b48aef0c934eba5e2d18bb038ebba88d4d194b4ec238d0b", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 34, - "serialized_chars": 37965, - "estimated_input_tokens": 9492, - "estimated_eval_tokens": 19240 + "serialized_chars": 37834, + "estimated_input_tokens": 9459, + "estimated_eval_tokens": 19174 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a set of unrelated closed issues spanning model loading, quantization/offload, multimodal processors, and serialization regressions. I don\u2019t see any true duplicate pair among the soft candidates, so all soft edges should be rejected.", - "confidence": 0.94, - "canonical_issue_reason": "No single issue is a clear duplicate anchor for the cluster; if one must be chosen, issue 43606 is the most concrete and narrowly scoped reproducible failure (device mismatch during CPU offload).", + "summary": "This is a heterogeneous cluster of mostly unrelated issues with only loose similarity around Transformers v5 loading/serialization regressions. 
I do not see any soft pair that should be merged as a true duplicate; the closest subtheme is tied-weights / model-loading behavior, where issue 43454 is the best representative.", + "confidence": 0.43, + "canonical_issue_reason": "No exact duplicate cluster emerges; issue 43454 is the closest representative because it is the most central concrete bug in the tied-weights/model-loading subtheme.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 43606 is the strongest representative because it describes a specific runtime failure with a clear code path and environment trigger, making it the cleanest candidate to anchor triage.", + "best_issue_reason": "43454 is the most actionable anchor issue among the set, but the cluster is too mixed to serve as a broad canonical for all items.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:43994", - "right": "issue:44898", + "left": "issue:43881", + "right": "issue:44488", "accept": false, - "reason": "Both touch vision model behavior, but one is a SigLIP2 pipeline/output quality report and the other is a Perceiver resolution/interpolate_pos_encoding failure; different models and failure modes." + "reason": "Both are load failures, but they involve different models and no shared concrete failure mode." }, { "left": "issue:43454", "right": "issue:44466", "accept": false, - "reason": "Both mention tied lm_head weights, but one is AyaVision-specific missing tying and the other is device-dependent save_pretrained serialization; related theme, not the same bug." + "reason": "Both mention lm_head/tied weights, but one is a generation bug and the other is a device-dependent serialization issue." + }, + { + "left": "issue:43994", + "right": "issue:44898", + "accept": false, + "reason": "Different model families and different bugs; they only share a broad vision-model failure theme." 
}, { "left": "issue:44387", "right": "issue:44617", "accept": false, - "reason": "One is int4 quantization memory growth/OOM in Transformers 5.x, the other is Sam3Video CUDA OOM; no shared concrete code path." + "reason": "Both mention OOM, but one is an int4 quantization memory regression and the other is a model-specific CUDA OOM." }, { - "left": "issue:42915", - "right": "issue:43482", + "left": "issue:43828", + "right": "issue:44683", "accept": false, - "reason": "Different problems: FineGrainedFP8Config failure for Qwen2.5-Moe versus GGUF loading failure for Qwen2.5 in v5." + "reason": "Autocast dtype mismatch and compiled flex_attention failure are distinct code-path problems." }, { - "left": "issue:43792", - "right": "issue:43957", + "left": "issue:42915", + "right": "issue:43482", "accept": false, - "reason": "Whisper runtime failure and meta-device loading regression are distinct issues with different triggers and model paths." + "reason": "Different models and different failure classes: FP8 config failure versus GGUF loading failure." }, { "left": "issue:43526", "right": "issue:43883", "accept": false, - "reason": "BeitImageProcessorFast label reduction bug is unrelated to Molmo missing all_tied_weights_keys." + "reason": "Label-reduction behavior and missing tied-weights metadata are unrelated bugs." }, { "left": "issue:43701", "right": "issue:45325", "accept": false, - "reason": "Checkpoint key mismatch during resume is unrelated to Qwen2.5-VL rope/index temporal scaling." + "reason": "Checkpoint key mismatch is not the same bug as RoPE position-id scaling." + }, + { + "left": "issue:43792", + "right": "issue:43957", + "accept": false, + "reason": "These are separate loading/runtime failures with different underlying mechanisms." }, { "left": "issue:43262", "right": "issue:43746", "accept": false, - "reason": "Audio chat template sampling-rate default and GraniteSpeech PEFT checkpoint loading are different subsystems and failures." 
+ "reason": "Audio chat-template sample-rate defaults and PEFT checkpoint loading are unrelated." }, { "left": "issue:43317", "right": "issue:43873", "accept": false, - "reason": "Both involve offload/quantization, but one is dequantized model loading with device_map=auto and the other is a broader offloading regression; not the same bug." + "reason": "Both involve offloading/quantization, but the specific failures and code paths differ." }, { "left": "issue:34634", "right": "issue:35707", "accept": false, - "reason": "BarkProcessor voice_preset is unrelated to progressive generation with inputs_embeds/past_key_values." + "reason": "Voice preset handling and progressive generation with past key values are different issues." }, { - "left": "issue:44291", - "right": "issue:44877", + "left": "issue:43606", + "right": "issue:44265", "accept": false, - "reason": "Unexpected _is_hf_initialized argument from init_empty_weights is unrelated to strict config rejection for granite_speech." + "reason": "CPU offload device mismatch and torch.export/torch_compilable_check are separate bugs." }, { "left": "issue:43881", "right": "issue:44991", "accept": false, - "reason": "Different components entirely: glm-4v-9b model loading versus tokenizer loading for EMBEDDIA/est-roberta." + "reason": "One is a model load failure, the other a tokenizer-loading failure for a different model." }, { - "left": "issue:43606", + "left": "issue:44186", "right": "issue:44265", "accept": false, - "reason": "CPU offload device mismatch is not the same as torch.export/export failing on torch_compilable_check." + "reason": "Tokenizer padding/truncation crashes and export-time compilers checks are unrelated." }, { - "left": "issue:44186", - "right": "issue:44265", + "left": "issue:44291", + "right": "issue:44877", "accept": false, - "reason": "Tokenizer crash on NER/padding is unrelated to export-time torch_compilable_check failures." 
+ "reason": "Init-empty-weights argument handling and strict config validation are different failure modes." }, { "left": "issue:43606", "right": "issue:43819", "accept": false, - "reason": "Device mismatch during offload and DAC.from_latents mismatch with missing STE are different code paths and different models." + "reason": "Device mismatch on offload and DAC latent/forward mismatch are unrelated." }, { "left": "issue:43632", "right": "issue:44164", "accept": false, - "reason": "_is_hf_initialized flag breakage and save/from_pretrained extra_state handling are separate serialization/regression issues." + "reason": "Both are v5-related loader/save regressions, but one is `_is_hf_initialized` flag handling and the other is extra_state serialization." }, { "left": "issue:35707", "right": "issue:41720", "accept": false, - "reason": "Progressive generation inputs_embeds issue is unrelated to Qwen3 auto device mapping cudaErrorAssert." + "reason": "Progressive generation with embeds/past-key-values is unrelated to an auto-device-map CUDA assert." }, { "left": "issue:43377", "right": "issue:44898", "accept": false, - "reason": "MIMI batched-vs-single padding-mask bug is unrelated to Perceiver image resolution handling." + "reason": "Batched-vs-single encoder mismatch and image-resolution mismatch are different bugs." }, { - "left": "issue:44610", - "right": "issue:45325", + "left": "issue:43454", + "right": "issue:45072", "accept": false, - "reason": "OmDet-Turbo processor/model size mismatch is unrelated to Qwen2.5-VL rope index temporal scaling." + "reason": "Tied-weights/generation failure and bfloat16 dtype mismatch are different problems." }, { - "left": "issue:43377", - "right": "issue:44610", + "left": "issue:44610", + "right": "issue:45325", "accept": false, - "reason": "Different models and failures: MIMI padding-mask output divergence versus OmDet-Turbo processor resolution mismatch." 
+ "reason": "Processor image-size mismatch and temporal position-id scaling are not the same bug." }, { "left": "issue:43454", - "right": "issue:45072", + "right": "issue:43550", "accept": false, - "reason": "AyaVision lm_head tying/serialization is unrelated to dtype mismatches in SwitchTransformers and TimmWrapperModel." + "reason": "Tied-weight generation issues are unrelated to the torch.compile SDPA failure." }, { - "left": "issue:43454", - "right": "issue:43550", + "left": "issue:43377", + "right": "issue:44610", "accept": false, - "reason": "Tied lm_head weights bug is unrelated to Bamba-9B-v2 torch.compile SDPA failure." + "reason": "MIMI batching/padding-mask behavior and OmDet-Turbo input-size mismatch are unrelated." }, { "left": "issue:39692", "right": "issue:40444", "accept": false, - "reason": "SigLIP2 doc example errors and Qwen2.5-VL finetuning with IterableDataset are both multimodal, but they are different bugs with different roots." + "reason": "A documentation example error does not match a finetuning failure on multi-image IterableDataset inputs." }, { "left": "issue:43749", "right": "issue:44534", "accept": false, - "reason": "FSDP_CPU_RAM_EFFICIENT_LOADING breakage and non-persistent buffer junk in v5 are separate loading/state-management regressions." + "reason": "FSDP CPU RAM efficient loading and non-persistent buffer initialization are different regressions." }, { - "left": "issue:43296", - "right": "issue:44991", + "left": "issue:44466", + "right": "issue:45357", "accept": false, - "reason": "PaddleOCR-VL vLLM load failure is unrelated to tokenizer loading for EMBEDDIA/est-roberta." + "reason": "Both are serialization issues, but one concerns lm_head.weight and the other visual encoder keys in a different model." }, { "left": "issue:43296", - "right": "issue:44877", - "accept": false, - "reason": "PaddleOCR-VL loading failure and strict granite_speech config rejection are not the same issue." 
- }, - { - "left": "issue:44466", - "right": "issue:45357", + "right": "issue:44991", "accept": false, - "reason": "Both concern serialization, but one is lm_head weight tying/device dependence and the other is incorrect visual encoder keys in Qwen3.5 save_pretrained." + "reason": "PaddleOCR-VL loading in vLLM and tokenizer loading for EMBEDDIA/est-roberta are unrelated." }, { "left": "issue:43792", "right": "issue:44683", "accept": false, - "reason": "Whisper-large-v2 runtime failure is unrelated to compiled flex_attention breaking on torch >= 2.9." - }, - { - "left": "issue:43454", - "right": "issue:43720", - "accept": false, - "reason": "AyaVision tied-weight bug is unrelated to BitNet packed-weight unpacking during accelerate loading." - }, - { - "left": "issue:41720", - "right": "issue:43322", - "accept": false, - "reason": "Qwen3 auto device mapping cudaErrorAssert and Llava Next segmentation fault are distinct runtime failures." - }, - { - "left": "issue:41720", - "right": "issue:44977", - "accept": false, - "reason": "Auto device mapping assert and flash-attention generation failure affect different code paths and symptoms." + "reason": "Whisper load/run failure and compiled flex_attention on torch 2.9 are distinct bugs." }, { "left": "issue:43404", "right": "issue:45357", "accept": false, - "reason": "Mistral3 lm_head tying bug is unrelated to Qwen3.5 visual encoder key serialization." - }, - { - "left": "issue:44423", - "right": "issue:45406", - "accept": false, - "reason": "Continuous-batching crash from multimodal input handling is unrelated to Gemma4Processor missing _tokenizer." - }, - { - "left": "issue:44186", - "right": "issue:45072", - "accept": false, - "reason": "Tokenizer/padding crashes and bfloat16 dtype mismatches are different bugs in different layers." + "reason": "Mistral3 tied-weight serialization and Qwen3.5 visual-encoder-key serialization are different model-specific issues." 
} ] }, @@ -3459,7 +3353,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:33357", "issue:34567", @@ -3470,12 +3364,10 @@ "issue:41720", "issue:42617", "issue:42915", - "issue:42994", "issue:43316", "issue:43317", "issue:43322", "issue:43388", - "issue:43482", "issue:43526", "issue:43531", "issue:43540", @@ -3492,6 +3384,7 @@ "issue:44186", "issue:44292", "issue:44361", + "issue:44423", "issue:44479", "issue:44514", "issue:44545", @@ -3505,34 +3398,36 @@ "issue:44918", "issue:44991", "issue:45059", + "issue:45072", "issue:45161", "issue:45406", "issue:45464" ], "soft_pairs": [ + "issue:44423|issue:45406", + "issue:44186|issue:45072", "issue:34567|issue:40990", "issue:43526|issue:43881", "issue:43873|issue:44756", "issue:43577|issue:43708", "issue:42617|issue:42915", - "issue:44877|issue:44991", "issue:43550|issue:43720", "issue:44610|issue:44898", + "issue:44877|issue:44991", "issue:44479|issue:44918", "issue:44186|issue:44610", "issue:36331|issue:42915", "issue:36331|issue:42617", - "issue:42994|issue:43482", "issue:44671|issue:44743", "issue:43316|issue:44913", - "issue:42915|issue:43317", "issue:43550|issue:43606", + "issue:42915|issue:43317", "issue:41720|issue:42617", - "issue:43531|issue:43572", "issue:44292|issue:44479", "issue:41720|issue:45161", - "issue:44186|issue:44361", + "issue:43531|issue:43572", "issue:43540|issue:43782", + "issue:44186|issue:44361", "issue:43388|issue:45059", "issue:41720|issue:43873", "issue:45406|issue:45464", @@ -3541,238 +3436,250 @@ "issue:36331|issue:41720", "issue:44514|issue:44545", "issue:33357|issue:35707", - "issue:41720|issue:43746", - "issue:43606|issue:43720" + "issue:43606|issue:43720", + "issue:41720|issue:43746" ], - "prepared_review_unit_hash": "2d58c681a32653a20e996bcc7df5b1c7cdad34e98e406a0c52c3a5e4762dbc54", + "prepared_review_unit_hash": 
"ad5846cd3f9798fe24a4744026f15f426dd6ea94f419aaf2d150b3298e770742", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 47, "item_count": 47, - "soft_pair_count": 33, - "serialized_chars": 36769, - "estimated_input_tokens": 9193, - "estimated_eval_tokens": 18642 + "soft_pair_count": 34, + "serialized_chars": 37226, + "estimated_input_tokens": 9307, + "estimated_eval_tokens": 18870 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is still mostly heterogeneous. The only strong link is the very close near-duplicate pair 44514/44545 about `Qwen2_5_VLProcessor.apply_chat_template` crashing on batched input with `padding=False`; the rest are unrelated bugs or at most broad-theme matches.", - "confidence": 0.87, - "canonical_issue_reason": "No single cluster-wide canonical issue is justified because the items span many unrelated bugs. If one issue must anchor the cluster, 44514 is the best representative for the close 44514/44545 near-duplicate pair: it is earlier and has the clearer title/body for the shared batched-input crash.", + "summary": "This cluster is mostly a set of unrelated issues; only one pair looks like a true duplicate. 
The two Qwen2_5_VLProcessor.apply_chat_template reports describe the same batched-input/padding=False crash.", + "confidence": 0.97, + "canonical_issue_reason": "issue:44545 is the best canonical issue: it states the bug clearly, matches the duplicate pair exactly, and has slightly more supporting references.", "canonical_pr_reason": null, - "best_issue_reason": "44514 is the best issue to represent the only strong duplicate-like pair in the cluster because 44545 is a very close near-duplicate of it, and 44514 is the earlier, cleaner report.", + "best_issue_reason": "issue:44545 is the best overall issue representative for this cluster because it is the only clear duplicate target and has the cleanest title.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:44423", + "right": "issue:45406", + "accept": false, + "reason": "Both are serve crashes, but they affect different processors/models and different AttributeErrors, so they are not the same bug." + }, + { + "left": "issue:44186", + "right": "issue:45072", + "accept": false, + "reason": "Tokenizer NER/padding crash vs bfloat16 dtype mismatch in a different model path; unrelated underlying causes." + }, { "left": "issue:34567", "right": "issue:40990", "accept": false, - "reason": "Different subsystems and failure modes: TrainerState token accounting vs perplexity evaluation." + "reason": "Trainer token-count tracking and model perplexity are unrelated problems." }, { "left": "issue:43526", "right": "issue:43881", "accept": false, - "reason": "Unrelated model/component bugs: BeitImageProcessor label reduction vs glm-4v load failure." + "reason": "BEiT label reduction bug and GLM-4V-9B load failure do not share the same code path." }, { "left": "issue:43873", "right": "issue:44756", "accept": false, - "reason": "Both mention offload/memory, but the concrete bugs differ: quantized offloading behavior vs mmap OOM mitigation." 
+ "reason": "Both mention memory/offload, but one is quantization offloading behavior and the other is mmap/OOM on Strix Halo; different issues." }, { "left": "issue:43577", "right": "issue:43708", "accept": false, - "reason": "Different areas entirely: BLIP2 dtype propagation vs trainer resume/max_steps calculation." + "reason": "Model dtype loading bug vs Trainer checkpoint step calculation; no overlap in the concrete defect." }, { "left": "issue:42617", "right": "issue:42915", "accept": false, - "reason": "No shared code-path problem; one is a parallel script failure and the other a Qwen3Moe FP8 config issue." - }, - { - "left": "issue:44877", - "right": "issue:44991", - "accept": false, - "reason": "Both are loading/config issues, but for different components and models; not the same bug." + "reason": "3d_parallel execution failure and Qwen3-MoE FineGrainedFP8Config failure are separate problems." }, { "left": "issue:43550", "right": "issue:43720", "accept": false, - "reason": "Different concrete failures: torch.compile with SDPA in Bamba vs packed-weight unpacking during accelerate loading in BitNet." + "reason": "torch.compile/SDPA failure in Bamba is unrelated to packed-weight unpacking during accelerate loading in BitNet." }, { "left": "issue:44610", "right": "issue:44898", "accept": false, - "reason": "Both involve image sizing, but the affected models and failure mechanisms differ too much to be the same bug." + "reason": "Different models and different image-size/resolution bugs; not the same underlying change." + }, + { + "left": "issue:44877", + "right": "issue:44991", + "accept": false, + "reason": "Strict config loading and tokenizer loading failures are different bugs in different subsystems." }, { "left": "issue:44479", "right": "issue:44918", "accept": false, - "reason": "Different regressions: video-input handling in Qwen multimodal models vs input-embedding unpacking with TRL SFT trainer." 
+ "reason": "Video-input regression for several Qwen VL models vs TRL SFT embedding unpacking; same family name, different failure mode." }, { "left": "issue:44186", "right": "issue:44610", "accept": false, - "reason": "Tokenizer batching/padding crash vs processor output size mismatch; not the same issue." + "reason": "Tokenizer crash vs processor output-size mismatch; unrelated defects." }, { "left": "issue:36331", "right": "issue:42915", "accept": false, - "reason": "CustomTrainer signature regression vs Qwen3Moe FP8 loading failure." + "reason": "CustomTrainer argument mismatch and Qwen3-MoE FP8 loading failure are unrelated." }, { "left": "issue:36331", "right": "issue:42617", "accept": false, - "reason": "No shared underlying bug; trainer API change vs 3d_parallel script failure." - }, - { - "left": "issue:42994", - "right": "issue:43482", - "accept": false, - "reason": "Both touch quantization/serialization, but one is saving quantized models and the other is loading GGUF under v5." + "reason": "Trainer API breakage is not the same as the 3d_parallel runtime failure." }, { "left": "issue:44671", "right": "issue:44743", "accept": false, - "reason": "Masked-LM prediction regression vs recurrent-state reset in modular_qwen3_5 cache handling." + "reason": "CamemBERT masked-LM prediction issue and Qwen3.5 recurrent-state reset bug are unrelated." }, { "left": "issue:43316", "right": "issue:44913", "accept": false, - "reason": "Both are config serialization discrepancies, but for different config fields and model families." - }, - { - "left": "issue:42915", - "right": "issue:43317", - "accept": false, - "reason": "Both involve loading/offload, but one is FP8 Qwen3Moe and the other is dequantized GPU+CPU offload." + "reason": "Both are config-related, but they concern different model configs and different persistence behaviors." 
}, { "left": "issue:43550", "right": "issue:43606", "accept": false, - "reason": "Device/runtime problems in different models and paths; not the same failure." + "reason": "Bamba compile/SDPA failure and Bark CPU-offload device mismatch are different concrete bugs." }, { - "left": "issue:41720", - "right": "issue:42617", + "left": "issue:42915", + "right": "issue:43317", "accept": false, - "reason": "Different model/runtime issues: auto device mapping cuda assert vs inability to run a parallel script." + "reason": "Qwen3-MoE FP8 loading and dequantized model CPU/GPU offload loading are distinct issues." }, { - "left": "issue:43531", - "right": "issue:43572", + "left": "issue:41720", + "right": "issue:42617", "accept": false, - "reason": "Different model/config bugs with no shared code-path." + "reason": "Different models and different failures: auto device mapping assert vs 3d_parallel execution." }, { "left": "issue:44292", "right": "issue:44479", "accept": false, - "reason": "Distinct Qwen problems: NVFP4 runtime error vs video-input regression." + "reason": "NVFP4 runtime failure and Qwen VL video regression are not the same bug." }, { "left": "issue:41720", "right": "issue:45161", "accept": false, - "reason": "Both mention GPT/Qwen MoE, but one is auto device mapping on A800 and the other is tensor-parallel support for GPT-OSS MoE." + "reason": "Both involve parallelism, but the models and failure modes differ; not mergeable as one fix." }, { - "left": "issue:44186", - "right": "issue:44361", + "left": "issue:43531", + "right": "issue:43572", "accept": false, - "reason": "Tokenizer batching crash vs MLuke task AttributeError; unrelated." + "reason": "Sliding-window behavior in Qwen3-MoE and missing pad_token_idx in StableLmConfig are unrelated." }, { "left": "issue:43540", "right": "issue:43782", "accept": false, - "reason": "Different multimodal Qwen failures and different code paths." 
+ "reason": "Qwen3OmniMoe video-processing ValueError and Qwen3VL weight_only load error are different code paths." + }, + { + "left": "issue:44186", + "right": "issue:44361", + "accept": false, + "reason": "Two tokenizer/task crashes, but on different tokenizers and different failure conditions." }, { "left": "issue:43388", "right": "issue:45059", "accept": false, - "reason": "Metric gathering bug vs SAM3 PCS text/bbox behavior; unrelated." + "reason": "Metric label gathering bug and SAM3 text/bbox behavior are not the same underlying defect." }, { "left": "issue:41720", "right": "issue:43873", "accept": false, - "reason": "Different bugs despite both involving loading/placement; no shared concrete code-path." + "reason": "Auto device mapping CUDA assert and quantization offloading behavior are separate problems." }, { "left": "issue:45406", "right": "issue:45464", "accept": false, - "reason": "Different components: serve-time Gemma4Processor attribute error vs streaming chat/completions failure on Qwen3.5-0.8B." + "reason": "Different serve-time failures on different models/processors and different execution paths." }, { "left": "issue:42915", "right": "issue:43322", "accept": false, - "reason": "Both are load/runtime crashes, but in different models and with different failure modes." + "reason": "Qwen3-MoE FP8 loading and Llava Next segfault are unrelated." }, { "left": "issue:36010", "right": "issue:42617", "accept": false, - "reason": "Import error for GenerationMixin vs a separate script execution issue." + "reason": "GenerationMixin import error and 3d_parallel runtime failure do not match." }, { "left": "issue:36331", "right": "issue:41720", "accept": false, - "reason": "Trainer custom loss API regression vs Qwen3 device-mapping crash." + "reason": "Trainer loss signature mismatch is unrelated to Qwen3 auto device-mapping failure." 
}, { "left": "issue:44514", "right": "issue:44545", "accept": true, - "reason": "Very close near-duplicate reports: same `apply_chat_template` batched-input `padding=False` crash, with only minor wording/title differences." + "reason": "Same exact Qwen2_5_VLProcessor.apply_chat_template crash on batched input with padding=False; clearly the same bug." }, { - "left": "issue:41720", - "right": "issue:43746", + "left": "issue:33357", + "right": "issue:35707", "accept": false, - "reason": "Both involve Qwen/GPT-style model loading, but the concrete issues are unrelated." + "reason": "MacOS CLIP bus error and progressive generation with inputs_embeds/past_key_values are unrelated." }, { "left": "issue:43606", "right": "issue:43720", "accept": false, - "reason": "Different offload-related bugs in different models and loading paths." + "reason": "CPU-offload device mismatch and BitNet packed-weight unpacking are different issues." + }, + { + "left": "issue:41720", + "right": "issue:43746", + "accept": false, + "reason": "Qwen3 auto device mapping CUDA assert and GraniteSpeech PEFT checkpoint loading are unrelated." } ] }, "evaluator_result": { - "accept": false, - "feedback": "The soft-edge verdicts look conservative overall, but the writeup overstates the evidence for 44514/44545: the packet supports a very close near-duplicate, not an exact duplicate pair. Because the summary and canonical/best-issue rationale rely on that stronger claim, I can\u2019t accept it as grounded as written." + "accept": true, + "feedback": "The cluster summary is grounded in the packet and stays conservative. The only accepted duplicate pair, issue:44514 and issue:44545, is well supported by the matching Qwen2_5_VLProcessor.apply_chat_template batched-input/padding=False crash, and the canonical choice is reasonable given the slightly stronger references on issue:44545. The remaining soft-edge rejections are cautious and consistent with the titles." 
}, "evaluator_used": true, - "retried": true, + "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null @@ -3788,7 +3695,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:30064", "issue:34567", @@ -3822,7 +3729,6 @@ "issue:44589", "issue:44610", "issue:44625", - "issue:44661", "issue:44671", "issue:44843", "issue:44898", @@ -3847,248 +3753,232 @@ "issue:43606|issue:43653", "issue:44625|issue:45479", "issue:43421|issue:43927", - "issue:34634|issue:41720", "issue:44898|issue:45072", "issue:43782|issue:45381", - "issue:44451|issue:44991", + "issue:34634|issue:41720", "issue:34567|issue:35707", - "issue:44062|issue:44451", - "issue:42886|issue:44843", "issue:44077|issue:44625", - "issue:43421|issue:43653", + "issue:42886|issue:44843", + "issue:44062|issue:44451", "issue:43381|issue:43844", - "issue:43531|issue:44589", + "issue:44451|issue:44991", + "issue:43421|issue:43653", "issue:44918|issue:44936", + "issue:43531|issue:44589", "issue:44987|issue:45042", "issue:43994|issue:44671", "issue:39692|issue:44610", "issue:43421|issue:44186", "issue:43596|issue:44220", - "issue:43531|issue:43645", "issue:45081|issue:45356", - "issue:43746|issue:45381", + "issue:43531|issue:43645", "issue:45183|issue:45479", - "issue:39692|issue:43873", - "issue:44220|issue:44661" + "issue:43746|issue:45381", + "issue:39692|issue:43873" ], - "prepared_review_unit_hash": "4f988a3466035fb2e16070c07c526a3fd4cea9d801b80760c3d27cb39fc64408", + "prepared_review_unit_hash": "2c82192f7daa3034568ab3ca3efe3ba761146cf4373b2846c60473f206c91079", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + 
"estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { - "node_count": 48, - "item_count": 48, - "soft_pair_count": 30, - "serialized_chars": 36954, - "estimated_input_tokens": 9239, - "estimated_eval_tokens": 18734 + "node_count": 47, + "item_count": 47, + "soft_pair_count": 29, + "serialized_chars": 36079, + "estimated_input_tokens": 9020, + "estimated_eval_tokens": 18296 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly a heterogeneous set of model, tokenizer, and documentation regressions. Only one clear duplicate pair stands out around HF_HUB_OFFLINE tokenizer loading; the rest are too model- or symptom-specific to merge confidently.", - "confidence": 0.9, - "canonical_issue_reason": "Issue 44843 is the clearest, most specific representative of the tokenizer/offline-loading bug class: it names the exact code path (_patch_mistral_regex -> model_info) and the failing HF_HUB_OFFLINE behavior.", + "summary": "This cluster is mostly heterogeneous, but there is a small tokenizer/special-token subset. 
No pair looks like a true duplicate from titles alone, so I would not merge any of the soft edges.", + "confidence": 0.56, + "canonical_issue_reason": "issue:43421 is the most central and actionable issue in the tokenizer/special-token subset: it describes a concrete backend/post-processor update bug with a clear fix path.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 44843 is the best single issue to keep because it pinpoints the concrete failure mode and likely fix location, making it the strongest anchor for this cluster slice.", + "best_issue_reason": "issue:43421 is the best representative issue overall because it is a specific, code-path-level bug rather than a model-specific symptom or documentation problem.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:30064", "right": "issue:34634", "accept": false, - "reason": "Different subsystems and failures: image segmentation map processing vs BarkProcessor voice preset handling." + "reason": "Different components and failures: image processor segmentation maps vs Bark voice_preset handling." }, { "left": "issue:43550", "right": "issue:43653", "accept": false, - "reason": "Unrelated bugs: torch.compile/SDPA behavior in Bamba versus tokenizer special-token registration in BigBirdTokenizer." + "reason": "Unrelated bugs: torch.compile/SDPA model execution vs tokenizer special-token registration." }, { "left": "issue:44368", "right": "issue:45357", "accept": false, - "reason": "Both involve warnings/config tweaks, but one is about tie_word_embeddings during LoRA training and the other is a save_pretrained regression in Qwen3.5 vision keys." + "reason": "Different problems: Qwen3.5 LoRA tie_word_embeddings warning vs incorrect visual encoder keys on save_pretrained." }, { "left": "issue:44492", "right": "issue:44509", "accept": false, - "reason": "A typo in cache-strategy docs is not the same underlying issue as obsolete pipeline-task documentation." 
+ "reason": "Both are docs/wording-related, but they are not the same underlying bug or change." }, { "left": "issue:43606", "right": "issue:43653", "accept": false, - "reason": "Different model/runtime failures: CPU offload device mismatch versus tokenizer mask-token registration." + "reason": "Device-mismatch offload bug vs BigBirdTokenizer special-token decode bug; different code paths." }, { "left": "issue:44625", "right": "issue:45479", "accept": false, - "reason": "Both mention classification config, but one is Qwen3.5 config propagation and the other is a generic loss bug for num_labels=1; not the same concrete defect." + "reason": "Both involve classification config, but one is propagation of num_labels and the other is a zero-loss problem; not the same issue." }, { "left": "issue:43421", "right": "issue:43927", "accept": false, - "reason": "Both touch token/config metadata, but one is runtime post-processor updating and the other is save/load token ID loss causing generation IndexError." - }, - { - "left": "issue:34634", - "right": "issue:41720", - "accept": false, - "reason": "Bark voice_preset and Qwen3 auto device mapping crashes are unrelated." + "reason": "Both touch token/config state, but one is runtime post-processor updates and the other is save/load losing custom token IDs." }, { "left": "issue:44898", "right": "issue:45072", "accept": false, - "reason": "Different issues in different model families: image-size handling for Perceiver versus dtype mismatch in SwitchTransformers/TimmWrapperModel." + "reason": "Different failure classes: image resolution/interpolation vs bfloat16 dtype mismatch." }, { "left": "issue:43782", "right": "issue:45381", "accept": false, - "reason": "Separate Qwen multimodal bugs with different code paths: weight_only loading versus video vision_position_ids." + "reason": "Unrelated multimodal loading error vs video-position-id regression." 
}, { - "left": "issue:44451", - "right": "issue:44991", + "left": "issue:34634", + "right": "issue:41720", "accept": false, - "reason": "Both are tokenizer-loading regressions, but they affect different models and do not point to the same underlying bug." + "reason": "Bark voice_preset bug is unrelated to Qwen3 auto device-mapping CUDA assert." }, { "left": "issue:34567", "right": "issue:35707", "accept": false, - "reason": "TrainerState token counting and progressive generation with inputs_embeds/past_key_values are unrelated." + "reason": "Trainer token-count state bug vs generation with inputs_embeds/past_key_values; different subsystems." }, { - "left": "issue:44062", - "right": "issue:44451", + "left": "issue:44077", + "right": "issue:44625", "accept": false, - "reason": "AddedToken special-flag construction error is not the same as ScandiBERT tokenizer loading failure." + "reason": "Patchtsmixer post_init policy issue is unrelated to Qwen3.5 num_labels propagation." }, { "left": "issue:42886", "right": "issue:44843", - "accept": true, - "reason": "Same offline-tokenizer loading bug: 44843 identifies the concrete _patch_mistral_regex/model_info path that explains 42886's HF_HUB_OFFLINE cache failure." - }, - { - "left": "issue:44077", - "right": "issue:44625", "accept": false, - "reason": "PatchTSMixer post_init validation and Qwen3.5 num_labels propagation are unrelated." + "reason": "Both involve tokenizer loading/offline behavior, but one is cache fallback and the other is an unconditional model_info call in a regex patch." }, { - "left": "issue:43421", - "right": "issue:43653", + "left": "issue:44062", + "right": "issue:44451", "accept": false, - "reason": "Both involve special tokens, but one is post-processor refresh logic and the other is missing mask-token registration; not the same concrete defect." + "reason": "AddedToken keyword collision is a tokenizer-construction bug; ScandiBERT load failure is a different model-loading issue." 
}, { "left": "issue:43381", "right": "issue:43844", "accept": false, - "reason": "Eval-mode gradient checkpointing and ZeRO-3 gradient blow-up are different training behaviors with different causes." + "reason": "Gradient checkpointing eval-mode limitation vs ZeRO-3 gradient growth; different training-path bugs." }, { - "left": "issue:43531", - "right": "issue:44589", + "left": "issue:44451", + "right": "issue:44991", + "accept": false, + "reason": "Both are load failures, but they concern different models and different root causes." + }, + { + "left": "issue:43421", + "right": "issue:43653", "accept": false, - "reason": "Qwen3-MoE sliding_window and Float8 storage lookup errors are unrelated." + "reason": "Related tokenizer/special-token area, but not the same concrete bug: runtime post-processor refresh vs missing special-token registration." }, { "left": "issue:44918", "right": "issue:44936", "accept": false, - "reason": "Different trainer failures: SFT input-embedding unpacking versus evaluate() after train()." + "reason": "Different trainer failures: SFT embedding unpacking vs evaluate-after-train behavior." + }, + { + "left": "issue:43531", + "right": "issue:44589", + "accept": false, + "reason": "Sliding-window Qwen3-MoE bug is unrelated to Float8 storage object lookup." }, { "left": "issue:44987", "right": "issue:45042", "accept": false, - "reason": "Different model-loading problems: physical-intelligence/fast regression versus PIL image processor torchvision dependency." + "reason": "Model-loading regression vs image processor torchvision dependency regression; different code paths." }, { "left": "issue:43994", "right": "issue:44671", "accept": false, - "reason": "Different model-specific output bugs; one concerns SigLIP2 inference, the other CamemBERT masked LM predictions." + "reason": "Both are incorrect model outputs, but they are different models and different symptoms." 
}, { "left": "issue:39692", "right": "issue:44610", "accept": false, - "reason": "Both are vision docs/model-processor mismatches, but they affect different models and different expected input sizes/causes." + "reason": "Both mention processor/model mismatches, but one is a docs example error and the other is an OmDet-Turbo resolution mismatch." }, { "left": "issue:43421", "right": "issue:44186", "accept": false, - "reason": "Tokenizer special-token post-processing and LayoutLMv2 NER padding/truncation crashes are not the same issue." + "reason": "Tokenizer-related, but one is special-token post-processing and the other is NER/batched padding truncation crash." }, { "left": "issue:43596", "right": "issue:44220", "accept": false, - "reason": "Deepspeed ZeRO-3/BertModel index error and audio fbank extraction failure are unrelated." - }, - { - "left": "issue:43531", - "right": "issue:43645", - "accept": false, - "reason": "Sliding-window behavior in Qwen3-MoE is unrelated to Jupyter custom-model initialization in Transformers 5.0.0." + "reason": "IndexError under ZeRO-3 init is unrelated to the fbank feature extraction issue." }, { "left": "issue:45081", "right": "issue:45356", "accept": false, - "reason": "Both mention fix_mistral_regex, but one is an AttributeError crash in the patch helper while the other is a Kimi-K2.5 codec-handling regression; too different to treat as one duplicate." - }, - { - "left": "issue:43746", - "right": "issue:45381", - "accept": false, - "reason": "PEFT local-checkpoint loading and Qwen2.5-VL video position IDs are unrelated." + "reason": "Both mention fix_mistral_regex, but one is an AttributeError crash and the other is a different tokenizer regression/warning; not clearly the same concrete bug." }, { "left": "issue:45183", "right": "issue:45479", "accept": false, - "reason": "Input validation error messaging and degenerate loss for num_labels=1 are different bugs." 
+ "reason": "Validation error messaging bug vs zero-loss classification bug; different underlying causes." }, { - "left": "issue:39692", - "right": "issue:43873", + "left": "issue:43746", + "right": "issue:45381", "accept": false, - "reason": "SigLIP2 docs/model mismatch and quantized offloading behavior are different problem classes." + "reason": "PEFT local checkpoint loading bug is unrelated to Qwen2.5-VL video input position IDs." }, { - "left": "issue:44220", - "right": "issue:44661", + "left": "issue:39692", + "right": "issue:43873", "accept": false, - "reason": "Audio feature extraction failure and add-new-model-like tokenizer-mapping failure are unrelated." + "reason": "SigLIP2 docs/model mismatch and quantization offloading problems are different issues." } ] }, - "evaluator_result": { - "accept": true, - "feedback": "The writeup is grounded in the packet and stays conservative overall. The only accepted soft-edge pair is well-supported by the shared HF_HUB_OFFLINE/tokenizer-loading path, and the rejected pairs mostly distinguish different models or code paths without overclaiming duplication. No mergeability issues apply here because this is an issue-cluster review, not a PR-pair merge decision." 
- }, - "evaluator_used": true, + "evaluator_result": null, + "evaluator_used": false, "retried": false, - "accepted_nontrivial_soft_edge": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -4103,7 +3993,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:30064", "issue:36010", @@ -4136,13 +4026,14 @@ "issue:44008", "issue:44062", "issue:44186", + "issue:44220", "issue:44442", "issue:44448", "issue:44488", "issue:44561", + "issue:44661", "issue:44671", "issue:44811", - "issue:44821", "issue:44843", "issue:44877", "issue:44898", @@ -4150,7 +4041,6 @@ "issue:45020", "issue:45072", "issue:45081", - "issue:45084", "issue:45381" ], "soft_pairs": [ @@ -4158,15 +4048,16 @@ "issue:43746|issue:43873", "issue:40444|issue:43746", "issue:43618|issue:43697", + "issue:44220|issue:44661", "issue:43716|issue:44008", - "issue:44843|issue:45081", "issue:43720|issue:43819", + "issue:44843|issue:45081", "issue:43653|issue:43720", "issue:43335|issue:43756", "issue:44062|issue:44488", - "issue:30064|issue:36010", "issue:43421|issue:44811", "issue:44186|issue:44898", + "issue:30064|issue:36010", "issue:43550|issue:45072", "issue:43540|issue:45381", "issue:43577|issue:43818", @@ -4177,228 +4068,221 @@ "issue:43782|issue:44977", "issue:43644|issue:43749", "issue:44448|issue:44671", - "issue:44561|issue:45020", "issue:43653|issue:43819", - "issue:44821|issue:45084", + "issue:44561|issue:45020", "issue:43825|issue:43994", "issue:43550|issue:43819", "issue:44186|issue:44442", "issue:43825|issue:43827" ], - "prepared_review_unit_hash": "515277295d884e5480577cf5ebdcb072b7387fe5fc9e40c8493eca339169b9ee", + "prepared_review_unit_hash": "38fd18ffe6a92ad3f54bcb445cdaddbf0df954c5e8a3836077129f979c5a7b47", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - 
"estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 30, - "serialized_chars": 36278, - "estimated_input_tokens": 9070, - "estimated_eval_tokens": 18396 + "serialized_chars": 36314, + "estimated_input_tokens": 9079, + "estimated_eval_tokens": 18414 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The set is thematically related to Transformers regressions, but the proposed pairs are mostly different bugs across different models/subsystems. I would not merge any of the soft-edge candidates as duplicates.", - "confidence": 0.94, - "canonical_issue_reason": "issue:45020 is the broadest representative of the recent v5 / `remote_code` regression class and is the best umbrella-style issue in this set, though the cluster is still fairly heterogeneous.", + "summary": "The cluster is very heterogeneous; most items are unrelated model-specific bugs. No soft pair looks like the same concrete underlying bug/change, so all soft edges should be rejected. 
The broadest and most representative issue is the open remote_code regression report.", + "confidence": 0.49, + "canonical_issue_reason": "issue:45020 is the broadest, most central report in the set: it describes recent-version breakage across models using `remote_code`, which matches the general loading/regression theme better than the narrower single-model reports.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45020 is the strongest issue to anchor triage because it is open, broad, and captures the recurring recent-version breakage theme better than the narrower model-specific reports.", + "best_issue_reason": "issue:45020 is the best global representative because it is current, open, and describes an umbrella regression affecting multiple remote-code models rather than one narrow model-specific failure.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43540", "right": "issue:44008", "accept": false, - "reason": "Different models and failure modes: Qwen3OmniMoe video input processing vs Gemma 3n variable-name collision." + "reason": "Different models and failures: Qwen3OmniMoe video-input processing vs Gemma3n variable-name collision in forward()." }, { "left": "issue:43746", "right": "issue:43873", "accept": false, - "reason": "One is PEFT/local checkpoint loading for GraniteSpeech; the other is quantization/offloading behavior." + "reason": "Both touch loading/offloading, but one is PEFT local checkpoint loading and the other is quantization offloading behavior; not the same bug." }, { "left": "issue:40444", "right": "issue:43746", "accept": false, - "reason": "IterableDataset multi-image finetuning failure is unrelated to GraniteSpeech local checkpoint loading." + "reason": "IterableDataset multi-image finetuning failure is unrelated to GraniteSpeech PEFT checkpoint loading." 
}, { "left": "issue:43618", "right": "issue:43697", "accept": false, - "reason": "CLIP attentions regression and RTDetrV2 output drift are different model-specific issues." + "reason": "CLIP output attentions assignment and RTDetr output drift are different code paths and symptoms." }, { - "left": "issue:43716", - "right": "issue:44008", + "left": "issue:44220", + "right": "issue:44661", "accept": false, - "reason": "Image preprocessor/model dtype mismatch is unrelated to the Gemma 3n forward-pass attribute collision." + "reason": "Audio fbank extraction issue is unrelated to tokenizer mapping / add-new-model-like failure." }, { - "left": "issue:44843", - "right": "issue:45081", + "left": "issue:43716", + "right": "issue:44008", "accept": false, - "reason": "Both touch Mistral regex handling, but one is offline model_info access and the other is a backend_tokenizer attribute crash." + "reason": "Different modality bugs: image-preprocessor dtype mismatch vs audio/video tensor name collision." }, { "left": "issue:43720", "right": "issue:43819", "accept": false, - "reason": "BitNet packed-weight loading and DAC.from_latents/STE mismatch are unrelated code paths." + "reason": "BitNet packed-weight loading and DAC STE mismatch are unrelated implementation bugs." + }, + { + "left": "issue:44843", + "right": "issue:45081", + "accept": false, + "reason": "Both involve `_patch_mistral_regex`, but one is offline `model_info()` access and the other is a backend_tokenizer attribute crash; distinct failure modes." }, { "left": "issue:43653", "right": "issue:43720", "accept": false, - "reason": "BigBirdTokenizer special-token decode issue is unrelated to BitNet accelerate-loading unpacking." + "reason": "BigBirdTokenizer special-token decode bug is unrelated to BitNet accelerate loading." }, { "left": "issue:43335", "right": "issue:43756", "accept": false, - "reason": "SwitchTransformers sparse-layer creation and Smollm3 RoPE-layer drop are separate configuration bugs." 
+ "reason": "SwitchTransformers sparse-layer construction and Smollm3 RoPE-layer dropping are different model-specific configuration bugs." }, { "left": "issue:44062", "right": "issue:44488", "accept": false, - "reason": "AddedToken special-argument duplication is not the same bug as loading cjvt/sleng-bert." - }, - { - "left": "issue:30064", - "right": "issue:36010", - "accept": false, - "reason": "Void segmentation-map processing and a GenerationMixin import error are completely different failures." + "reason": "AddedToken constructor conflict is not the same as a model failing to load a specific checkpoint." }, { "left": "issue:43421", "right": "issue:44811", "accept": false, - "reason": "Runtime special-token post-processor updates and Whisper batch_decode skip_special_tokens behavior are different tokenizer issues." + "reason": "Runtime post-processor update for special tokens is different from Whisper batch_decode skipping special tokens." }, { "left": "issue:44186", "right": "issue:44898", "accept": false, - "reason": "LayoutLMv2 NER/padding crashes and Perceiver non-default-resolution failures are unrelated." + "reason": "LayoutLMv2 tokenizer padding/NER crash is unrelated to Perceiver non-default resolution inference." + }, + { + "left": "issue:30064", + "right": "issue:36010", + "accept": false, + "reason": "Image processor segmentation-map handling is unrelated to GenerationMixin import failure." }, { "left": "issue:43550", "right": "issue:45072", "accept": false, - "reason": "torch.compile/SDPA breakage in Bamba is unrelated to dtype mismatches in SwitchTransformers/TimmWrapperModel." + "reason": "Bamba torch.compile+SDPA failure and dtype mismatches in other models are different regressions." }, { "left": "issue:43540", "right": "issue:45381", "accept": false, - "reason": "Both involve video inputs, but they affect different Qwen model families and likely different position/masking logic." 
+ "reason": "Qwen3OmniMoe video-input failure is unrelated to Qwen2.5-VL video position id issues." }, { "left": "issue:43577", "right": "issue:43818", "accept": false, - "reason": "BLIP2 dtype-loading bug is unrelated to Video-LLaVA missing temporal attention and weight-sharing concerns." + "reason": "BLIP2 dtype persistence and Video-LLaVA temporal-attention/weight sharing are unrelated." }, { "left": "issue:43550", "right": "issue:43927", "accept": false, - "reason": "torch.compile/SDPA failure and DiaConfig save/load token-ID loss are unrelated bugs." + "reason": "torch.compile+SDPA failure is unrelated to DiaConfig save/load token-id loss." }, { "left": "issue:43643", "right": "issue:44877", "accept": false, - "reason": "trust_remote_code missing fields and strict config blocking granite_speech config are different config-loading problems." + "reason": "Both involve config loading, but one is missing fields from `trust_remote_code` and the other is strict config blocking granite_speech; not the same defect." }, { "left": "issue:43577", "right": "issue:43582", "accept": false, - "reason": "BLIP2 dtype propagation and Apple Silicon caching_allocator_warmup TypeError do not share a code path." + "reason": "BLIP2 dtype mismatch and Apple Silicon caching allocator TypeError are unrelated." }, { "left": "issue:43531", "right": "issue:43643", "accept": false, - "reason": "Qwen3-MoE sliding_window behavior and trust_remote_code config field loss are unrelated." + "reason": "Sliding-window behavior in Qwen3-MoE is unrelated to missing fields in AutoConfig with `trust_remote_code`." }, { "left": "issue:43782", "right": "issue:44977", "accept": false, - "reason": "Both are Qwen-family issues, but one is a from_pretrained weight_only error and the other is flash-attention generation behavior." + "reason": "Qwen3VL weight_only loading error is different from Qwen3.5 flash-attention generation behavior." 
}, { "left": "issue:43644", "right": "issue:43749", "accept": false, - "reason": "Non-persistent buffer initialization and FSDP CPU RAM efficient loading are different loading regressions." + "reason": "Non-persistent buffer junk in v5 and FSDP CPU RAM efficient loading are different regressions." }, { "left": "issue:44448", "right": "issue:44671", "accept": false, - "reason": "Different model regressions: Pegasus output drift vs CamemBERT masked-LM prediction errors." - }, - { - "left": "issue:44561", - "right": "issue:45020", - "accept": false, - "reason": "Both concern recent-version remote_code breakage, but they are not the same concrete failure; one is FX availability, the other is broader model loading regressions." + "reason": "Both are output regressions in v5, but they affect different models and code paths." }, { "left": "issue:43653", "right": "issue:43819", "accept": false, - "reason": "Tokenizer special-token registration and DAC latent conversion are unrelated." + "reason": "Tokenizer special-token registration bug is unrelated to DAC latent reconstruction mismatch." }, { - "left": "issue:44821", - "right": "issue:45084", + "left": "issue:44561", + "right": "issue:45020", "accept": false, - "reason": "AutoImageProcessor URL loading and template-node compilation errors are different subsystems entirely." + "reason": "44561 is one specific remote_code breakage cause, while 45020 is a broad umbrella report; not the same concrete bug." }, { "left": "issue:43825", "right": "issue:43994", "accept": false, - "reason": "pipeline translation messaging and SigLIP2 output quality are not the same bug." + "reason": "Pipeline error-message wording is unrelated to SigLIP2 nonsensical outputs." }, { "left": "issue:43550", "right": "issue:43819", "accept": false, - "reason": "Bamba torch.compile SDPA failure is unrelated to DAC.from_latents/forward mismatch." + "reason": "SDPA/torch.compile failure and DAC from_latents mismatch are unrelated model implementations." 
}, { "left": "issue:44186", "right": "issue:44442", "accept": false, - "reason": "LayoutLMv2 tokenizer crashes and AutoTokenizer loading FastSpeech2ConformerTokenizer are separate tokenizer issues." - }, - { - "left": "issue:43825", - "right": "issue:43827", - "accept": false, - "reason": "One is an incorrect pipeline error message; the other is outdated docs still referencing pipeline()." + "reason": "LayoutLMv2 tokenizer crashes and FastSpeech2Conformer tokenizer loading failure are different tokenizer bugs." }, { "left": "issue:43825", "right": "issue:43827", "accept": false, - "reason": "One is an incorrect pipeline error message; the other is outdated docs still referencing pipeline()." + "reason": "One is an incorrect pipeline error message; the other is stale docs referencing removed pipelines. Related area, but not the same bug." } ] }, @@ -4420,7 +4304,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:30064", "issue:34634", @@ -4454,248 +4338,244 @@ "issue:44509", "issue:44554", "issue:44568", - "issue:44589", "issue:44610", "issue:44671", "issue:44805", "issue:44811", + "issue:44821", "issue:44857", "issue:44912", "issue:44936", "issue:44991", - "issue:45042", "issue:45072", "issue:45081", + "issue:45084", "issue:45325", "issue:45406", - "issue:45488" + "issue:45488", + "issue:45538" ], "soft_pairs": [ + "issue:44821|issue:45084", "issue:43650|issue:43906", "issue:41720|issue:44991", - "issue:30064|issue:36331", "issue:44805|issue:44936", "issue:45081|issue:45406", - "issue:42175|issue:45081", "issue:43606|issue:45072", + "issue:30064|issue:36331", + "issue:42175|issue:45081", "issue:44610|issue:45072", "issue:43782|issue:45325", "issue:44485|issue:44509", "issue:34634|issue:36010", - "issue:44060|issue:44368", "issue:43720|issue:45072", + "issue:44060|issue:44368", "issue:43262|issue:43819", "issue:43582|issue:44857", 
"issue:43749|issue:44291", "issue:43653|issue:44671", "issue:43526|issue:43697", "issue:43526|issue:44554", + "issue:45084|issue:45538", "issue:43653|issue:44912", "issue:43454|issue:43577", "issue:44568|issue:45488", "issue:34634|issue:36331", "issue:43452|issue:44991", "issue:43638|issue:44936", - "issue:43746|issue:44811", "issue:43257|issue:43329", - "issue:44589|issue:45042" + "issue:43746|issue:44811" ], - "prepared_review_unit_hash": "d6f79a5577496fc46241c45f51f0aacbd7b4d0988acc84ebff4fb434f2f5dd39", + "prepared_review_unit_hash": "2fa2ced6f8b03f58f5db9f2131400b92a7e881cdd9be57f2f98359e9e132ded2", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { - "node_count": 47, - "item_count": 47, - "soft_pair_count": 28, - "serialized_chars": 36084, - "estimated_input_tokens": 9021, - "estimated_eval_tokens": 18298 + "node_count": 48, + "item_count": 48, + "soft_pair_count": 29, + "serialized_chars": 36899, + "estimated_input_tokens": 9225, + "estimated_eval_tokens": 18706 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This looks like a false-positive cluster: the issues span unrelated bugs in tokenizers, trainers, quantization, multimodal processors, loading, and docs. 
I don\u2019t see any pair that clearly describes the same underlying fixable bug, so no soft edge should be accepted.", - "confidence": 0.96, - "canonical_issue_reason": "No true canonical duplicate stands out; if one issue must anchor the cluster, issue 45081 is the best representative because it is a concrete runtime crash with multiple inbound references and a specific code path.", + "summary": "These items are a heterogeneous mix of unrelated bug reports; none of the soft-similarity pairs look like true duplicates or the same fixable code-path change. I would not merge any pair.", + "confidence": 0.97, + "canonical_issue_reason": "No single true canonical duplicate stands out because the cluster is heterogeneous. If one issue must be chosen as the representative anchor, issue #45072 is a concrete, well-scoped loading/inference bug report rather than a vague or documentation-only report.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 45081 is the strongest standalone candidate to keep because it is specific, reproducible, and currently active, but it is not actually a duplicate of the others.", + "best_issue_reason": "Issue #45072 is the best representative issue in this set because it is a specific, reproducible runtime bug with a clear failure mode and actionable description; several other items are similarly about model loading/inference, but none are close enough to be duplicates.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:44821", + "right": "issue:45084", + "accept": false, + "reason": "Different failures: loading AutoImageProcessor from URL versus a Jinja/template compilation TypeError. No shared code-path." + }, { "left": "issue:43650", "right": "issue:43906", "accept": false, - "reason": "Unrelated: one is a generic data request, the other is an isolated reproduction of a separate upstream bug." + "reason": "Unrelated titles and problems; one is a filler issue, the other is a reproduced bug report." 
}, { "left": "issue:41720", "right": "issue:44991", "accept": false, - "reason": "Different subsystems and failures: device-mapping CUDA assert vs tokenizer loading regression." - }, - { - "left": "issue:30064", - "right": "issue:36331", - "accept": false, - "reason": "Image processor segmentation-map handling is unrelated to CustomTrainer loss signature changes." + "reason": "Different models and subsystems: Qwen3 device-mapping CUDA assert versus tokenizer loading for EMBEDDIA/est-roberta." }, { "left": "issue:44805", "right": "issue:44936", "accept": false, - "reason": "Mask shape mismatch during indexing is a different failure from trainer.evaluate() breaking after train()." + "reason": "Mask/index shape error in model forward path versus trainer evaluate failing after train; not the same bug." }, { "left": "issue:45081", "right": "issue:45406", "accept": false, - "reason": "Both are processor/tokenizer crashes, but they affect different models and different missing attributes/code paths." + "reason": "Both are loading-time attribute errors, but they hit different objects and code paths (_patch_mistral_regex/tokenizer backend vs Gemma4Processor _tokenizer)." }, { - "left": "issue:42175", - "right": "issue:45081", + "left": "issue:43606", + "right": "issue:45072", "accept": false, - "reason": "Packaging/backend dependency issue is unrelated to a Mistral tokenizer patch crash." + "reason": "Both involve inference/loading mismatches, but the concrete failures are different: CPU offload device mismatch versus bfloat16 dtype mismatches." }, { - "left": "issue:43606", - "right": "issue:45072", + "left": "issue:30064", + "right": "issue:36331", + "accept": false, + "reason": "Different components and symptoms: image processor segmentation maps versus custom trainer loss signature." 
+ }, + { + "left": "issue:42175", + "right": "issue:45081", "accept": false, - "reason": "CPU offload device mismatch in Bark is not the same bug as bfloat16 dtype mismatches in other models." + "reason": "Package install/backend dependency issue versus tokenizer regex patch crash; unrelated." }, { "left": "issue:44610", "right": "issue:45072", "accept": false, - "reason": "Processor output resolution mismatch is a different concrete problem than dtype inconsistency." + "reason": "Processor output size mismatch versus dtype mismatch in inference; not the same underlying bug." }, { "left": "issue:43782", "right": "issue:45325", "accept": false, - "reason": "Qwen3VL load-time weight_only failure and Qwen2.5-VL rope-index scaling are different model-specific issues." + "reason": "Different Qwen VL bugs: weight_only load error versus RoPE position-id scaling regression." }, { "left": "issue:44485", "right": "issue:44509", "accept": false, - "reason": "GLM-5 RoPE implementation discussion is unrelated to docs still mentioning removed pipeline tasks." + "reason": "One is a model implementation discussion, the other is a docs cleanup issue; no duplicate relation." }, { "left": "issue:34634", "right": "issue:36010", "accept": false, - "reason": "BarkProcessor voice_preset failure is unrelated to the GenerationMixin import error." + "reason": "Different processors and failures: Bark voice_preset versus GenerationMixin import error." }, { - "left": "issue:44060", - "right": "issue:44368", + "left": "issue:43720", + "right": "issue:45072", "accept": false, - "reason": "Both mention tied-weights warnings, but one is a wrong weight tie and the other is a config warning during LoRA fine-tuning." + "reason": "Accelerate loading of packed weights versus bfloat16 inference dtype mismatch; related theme but different concrete bugs." 
}, { - "left": "issue:43720", - "right": "issue:45072", + "left": "issue:44060", + "right": "issue:44368", "accept": false, - "reason": "Packed-weight loading in BitNet is unrelated to dtype mismatches in SwitchTransformers/TimmWrapperModel." + "reason": "Both mention tied-weights warnings, but one reports an incorrect tie to a bias tensor while the other reports a warning emitted during LoRA fine-tuning." }, { "left": "issue:43262", "right": "issue:43819", "accept": false, - "reason": "Audio chat-template sampling-rate defaulting is unrelated to DAC latent reconstruction mismatch." + "reason": "Audio chat-template sampling-rate default bug versus DAC latent/forward mismatch; unrelated." }, { "left": "issue:43582", "right": "issue:44857", "accept": false, - "reason": "Apple Silicon allocator warmup TypeError and CUDA AMP loss crash are different platform-specific bugs." + "reason": "Apple Silicon TypeError in allocator warmup versus CUDA AMP crash in a loss function; different platforms and code paths." }, { "left": "issue:43749", "right": "issue:44291", "accept": false, - "reason": "Both concern loading, but FSDP RAM-efficient loading and init_empty_weights/_is_hf_initialized are distinct code paths and failures." + "reason": "Both are loading-related, but FSDP CPU RAM efficient loading and init_empty_weights unexpected argument are distinct compatibility issues." }, { "left": "issue:43653", "right": "issue:44671", "accept": false, - "reason": "BigBird mask-token decoding and CamemBERT masked-LM prediction regressions are different tokenizer/model issues." + "reason": "Tokenizer special-token registration bug versus masked LM prediction regression; not the same issue." }, { "left": "issue:43526", "right": "issue:43697", "accept": false, - "reason": "BEiT label reduction bug is unrelated to RTDetrV2 output differences." + "reason": "Processor label reduction bug versus RTDetr output divergence in v5; different layers of the stack." 
}, { "left": "issue:43526", "right": "issue:44554", "accept": false, - "reason": "Image label reduction bug and MPS attention correctness issue do not share the same code path." + "reason": "Vision processor label handling versus MPS attention correctness issue; unrelated." + }, + { + "left": "issue:45084", + "right": "issue:45538", + "accept": false, + "reason": "Template-node compilation error versus CLIPTokenizer max-length constant bug; unrelated." }, { "left": "issue:43653", "right": "issue:44912", "accept": false, - "reason": "Tokenizer special-token registration issue is unrelated to MXFP4 quantization loading behavior." + "reason": "Tokenizer special token decoding issue versus MXFP4 quantization fallback; different subsystems." }, { "left": "issue:43454", "right": "issue:43577", "accept": false, - "reason": "AyaVision lm_head tying failure and BLIP2 dtype propagation are separate model-loading bugs." + "reason": "Weight tying bug in AyaVision versus dtype propagation bug in Blip2; different fixes." }, { "left": "issue:44568", "right": "issue:45488", "accept": false, - "reason": "Different tokenizer regressions affecting different models and tokenization behaviors; not the same bug." + "reason": "Both are tokenizer regressions, but they affect different tokenizer implementations and failure modes." }, { "left": "issue:34634", "right": "issue:36331", "accept": false, - "reason": "Bark voice_preset and CustomTrainer compute_loss signature are entirely unrelated." + "reason": "BarkProcessor voice preset issue is unrelated to CustomTrainer.compute_loss signature incompatibility." }, { "left": "issue:43452", "right": "issue:44991", "accept": false, - "reason": "gguf_file support/loading regression is not the same as the EMBEDDIA tokenizer-loading failure." + "reason": "gguf_file/from_pretrained handling versus a specific tokenizer loading regression; not the same code-path." 
}, { "left": "issue:43638", "right": "issue:44936", "accept": false, - "reason": "DeepSpeed ZeRO3/BERT index error is unrelated to evaluate-after-train behavior." - }, - { - "left": "issue:43746", - "right": "issue:44811", - "accept": false, - "reason": "PEFT checkpoint loading for GraniteSpeech is unrelated to Whisper batch_decode skip_special_tokens handling." - }, - { - "left": "issue:43257", - "right": "issue:43329", - "accept": false, - "reason": "Qwen3 MOE weight conversion with accelerate+deepspeed is unrelated to missing video-branch helpers in multimodal token counting." - }, - { - "left": "issue:44589", - "right": "issue:45042", - "accept": false, - "reason": "Float8 storage lookup failure and PIL backend torchvision dependency regression are unrelated." + "reason": "DeepSpeed ZeRO-3 empty-index error versus trainer.evaluate failing after train; different behaviors and likely causes." } ] }, @@ -4717,10 +4597,8 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ - "issue:33357", - "issue:34567", "issue:36010", "issue:36331", "issue:38175", @@ -4730,6 +4608,7 @@ "issue:42915", "issue:43257", "issue:43262", + "issue:43317", "issue:43329", "issue:43450", "issue:43452", @@ -4749,282 +4628,279 @@ "issue:43854", "issue:43927", "issue:43931", - "issue:44062", "issue:44291", "issue:44479", "issue:44560", "issue:44568", + "issue:44589", "issue:44610", "issue:44756", "issue:44811", - "issue:44821", "issue:44912", "issue:44938", "issue:44977", - "issue:44991", + "issue:44987", "issue:45042", "issue:45072", "issue:45356", "issue:45357", - "issue:45381" + "issue:45381", + "issue:45538" ], "soft_pairs": [ "issue:43854|issue:44977", "issue:38175|issue:44610", - "issue:36010|issue:43645", - "issue:43257|issue:43696", + "issue:44589|issue:45042", "issue:39692|issue:44912", + "issue:43257|issue:43696", "issue:43819|issue:43927", "issue:43262|issue:44811", + 
"issue:36010|issue:43645", "issue:43720|issue:44610", "issue:43550|issue:44912", "issue:44568|issue:45356", - "issue:42175|issue:44938", "issue:43749|issue:44756", - "issue:43531|issue:45042", "issue:43329|issue:44479", - "issue:36010|issue:36331", + "issue:43531|issue:45042", "issue:43257|issue:43931", + "issue:36010|issue:36331", "issue:43329|issue:43450", "issue:43257|issue:43756", "issue:43577|issue:45072", "issue:43452|issue:43582", "issue:43697|issue:45357", "issue:41720|issue:43540", + "issue:42175|issue:44938", "issue:44479|issue:45381", - "issue:44291|issue:45042", - "issue:42915|issue:43742", - "issue:43540|issue:44560", - "issue:44062|issue:44991", + "issue:44987|issue:45538", "issue:43927|issue:44912", + "issue:43540|issue:44560", + "issue:43317|issue:45538", + "issue:42915|issue:43742", "issue:43540|issue:44479", - "issue:42915|issue:44821", - "issue:33357|issue:34567" + "issue:44291|issue:45042" ], - "prepared_review_unit_hash": "f3a12a7d78a8ae62bb07c3684fffa92707181e71abd55ece3cd591c1ac64eccc", + "prepared_review_unit_hash": "39fe55d49ec512f122f63e69482b751bcdf4bda86d08e1c76a19ef63b3beae1f", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { - "node_count": 48, - "item_count": 48, + "node_count": 47, + "item_count": 47, "soft_pair_count": 31, - "serialized_chars": 37095, - "estimated_input_tokens": 9274, - "estimated_eval_tokens": 18804 + "serialized_chars": 36508, + "estimated_input_tokens": 9127, + "estimated_eval_tokens": 18510 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly unrelated issue reports with a small, coherent Qwen 
video-input regression subgroup. One soft edge looks like the same underlying regression; the rest are distinct model-specific bugs or unrelated failures.", + "summary": "The cluster is heterogeneous: the soft-similarity pairs mostly connect unrelated bugs across loading, quantization, video/multimodal processing, tokenizer behavior, and environment issues. I reject all soft edges and treat this as a broad mixed cluster rather than a duplicate set. If a single issue must anchor the cluster, 43749 is the strongest representative.", "confidence": 0.79, - "canonical_issue_reason": "Issue 44479 is the broadest umbrella for the only clearly recurring theme here: Qwen video-input regressions in v5.3.0 across several model families.", + "canonical_issue_reason": "No true duplicate canonical spans this cluster; if one representative is needed, issue 43749 is the best anchor because it has the highest activity/inbound references and describes a broad loading/regression problem rather than a narrow model-specific symptom.", "canonical_pr_reason": null, - "best_issue_reason": "44479 is the most representative user-facing report in the cluster, with the widest scope and the clearest regression framing among the related video issues.", + "best_issue_reason": "Issue 43749 is the best cluster representative: high discussion/inbound references and a broad infrastructure loading failure make it a stronger triage anchor than the many isolated model- or feature-specific bugs.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43854", "right": "issue:44977", "accept": false, - "reason": "Different models and different failures: GLM load/test issue vs Qwen3.5 flash-attention generation behavior." + "reason": "Different failures: GLM-4.7 load/test problem vs Qwen3.5 flash-attention generation regression." 
}, { "left": "issue:38175", "right": "issue:44610", "accept": false, - "reason": "SigLIP2 zero-probabilities and OmDet-Turbo processor size mismatch are unrelated bug types." + "reason": "Unrelated symptoms: zero probabilities in SigLIP2 vs processor/model input-size mismatch in OmDet-Turbo." }, { - "left": "issue:36010", - "right": "issue:43645", + "left": "issue:44589", + "right": "issue:45042", "accept": false, - "reason": "Importing GenerationMixin and Jupyter custom model init are separate compatibility problems." + "reason": "Different layers: Float8 storage/type loading error vs PIL image processor incorrectly requiring torchvision." }, { - "left": "issue:43257", - "right": "issue:43696", + "left": "issue:39692", + "right": "issue:44912", "accept": false, - "reason": "Qwen3 MoE weight conversion with accelerate/deepspeed is unrelated to GPT-oss CUDA OOM." + "reason": "Docs/example mistakes for SigLIP2 vs MXFP4 quantization fallback for git-oss-20b." }, { - "left": "issue:39692", - "right": "issue:44912", + "left": "issue:43257", + "right": "issue:43696", "accept": false, - "reason": "SigLIP2 docs/model mismatch vs git-oss MXFP4 loading fallback are different issues." + "reason": "Different problems: Qwen3 MoE loading with accelerate+deepspeed vs plain CUDA out-of-memory on GPT-oss-20b." }, { "left": "issue:43819", "right": "issue:43927", "accept": false, - "reason": "DAC latent/forward mismatch and DiaConfig save/load token ID loss are unrelated." + "reason": "DAC forward/STE mismatch is unrelated to DiaConfig save/load token ID corruption." }, { "left": "issue:43262", "right": "issue:44811", "accept": false, - "reason": "Audio chat-template sampling-rate default and Whisper batch_decode special-token handling are different code paths." + "reason": "Audio chat-template sampling-rate default bug is unrelated to Whisper batch_decode skip_special_tokens handling." 
+ }, + { + "left": "issue:36010", + "right": "issue:43645", + "accept": false, + "reason": "Importing GenerationMixin and Jupyter custom-model initialization are separate breakages." }, { "left": "issue:43720", "right": "issue:44610", "accept": false, - "reason": "BitNet accelerate-loading unpacking bug and OmDet-Turbo image size bug do not match." + "reason": "Packed-weight unpacking during accelerate loading is unrelated to OmDet processor output-shape mismatch." }, { "left": "issue:43550", "right": "issue:44912", "accept": false, - "reason": "Bamba torch.compile/SDPA failure and git-oss MXFP4 quantization fallback are unrelated." + "reason": "torch.compile/SDPA failure in Bamba is not the same as MXFP4 loading fallback in git-oss-20b." }, { "left": "issue:44568", "right": "issue:45356", "accept": false, - "reason": "Both are tokenizer problems, but they affect different models and different tokenizer behavior." + "reason": "Tokenizer add_special_tokens regression is distinct from Kimi-K2.5 codec/warning behavior." }, { - "left": "issue:42175", - "right": "issue:44938", + "left": "issue:43749", + "right": "issue:44756", "accept": false, - "reason": "Missing TensorFlow in extras install and Python 3.14 import failure are unrelated packaging/runtime issues." + "reason": "Both involve loading/performance, but one is FSDP CPU RAM efficient loading and the other is mmap on Strix Halo OOM; not the same bug." }, { - "left": "issue:43749", - "right": "issue:44756", + "left": "issue:43329", + "right": "issue:44479", "accept": false, - "reason": "FSDP CPU RAM efficient loading and Strix Halo mmap OOM are separate memory/loading problems." + "reason": "Undefined video-branch helper variables are a code bug; the other is a broader v5.3 video regression across Qwen models." }, { "left": "issue:43531", "right": "issue:45042", "accept": false, - "reason": "Qwen3-MoE sliding_window and PIL backend torchvision dependency regression are unrelated." 
+ "reason": "Qwen3-MoE sliding_window behavior has no overlap with PIL backend torchvision dependency regression." }, { - "left": "issue:43329", - "right": "issue:44479", + "left": "issue:43257", + "right": "issue:43931", "accept": false, - "reason": "Both involve video paths, but one is a specific undefined-variable bug in token counting while the other is a broader Qwen video regression; too speculative to merge." + "reason": "MoE weights not converted under accelerate+deepspeed is different from Qwen3-VL-30B shape-mismatch loading." }, { "left": "issue:36010", "right": "issue:36331", "accept": false, - "reason": "GenerationMixin import failure and compute_loss signature breakage are unrelated API regressions." - }, - { - "left": "issue:43257", - "right": "issue:43931", - "accept": false, - "reason": "Qwen3 MOE weight conversion and Qwen3-VL weight-shape mismatch are different loading bugs." + "reason": "GenerationMixin import path breakage is unrelated to CustomTrainer.compute_loss signature mismatch." }, { "left": "issue:43329", "right": "issue:43450", "accept": false, - "reason": "Undefined variables in multimodal token counting vs batched video processor shape mismatch are not the same bug." + "reason": "Undefined video token-count helpers differ from batched video processor output-shape issues." }, { "left": "issue:43257", "right": "issue:43756", "accept": false, - "reason": "Qwen3 MOE weight conversion and Smollm3 RoPE-layer drop are unrelated model-specific issues." + "reason": "Qwen3 MoE loading/conversion failure is unrelated to Smollm3 RoPE-layer drop." }, { "left": "issue:43577", "right": "issue:45072", "accept": false, - "reason": "BLIP2 dtype propagation and SwitchTransformers/TimmWrapper bfloat16 dtype mismatches are distinct." + "reason": "Blip2 dtype staying float32 is a different dtype bug than SwitchTransformers/TimmWrapperModel bfloat16 mismatches." 
}, { "left": "issue:43452", "right": "issue:43582", "accept": false, - "reason": "Tokenizer/gguf_file loading and Apple Silicon warmup TypeError are unrelated." + "reason": "gguf_file tokenizer/model loading failure is unrelated to Apple Silicon caching_allocator_warmup TypeError." }, { "left": "issue:43697", "right": "issue:45357", "accept": false, - "reason": "RTDetrV2 output drift and Qwen3.5 visual encoder save_pretrained regression are unrelated." + "reason": "RTDetrV2 output drift is not the same as Qwen3.5 save_pretrained visual-encoder key regression." }, { "left": "issue:41720", "right": "issue:43540", "accept": false, - "reason": "Qwen3 auto device mapping cudaErrorAssert and Qwen3OmniMoe video ValueError are different failure modes." + "reason": "Qwen3 auto device-map CUDA assert on A800 and Qwen3OmniMoe video ValueError are different failures." + }, + { + "left": "issue:42175", + "right": "issue:44938", + "accept": false, + "reason": "Missing TensorFlow in extras install and Python 3.14 import failure are unrelated environment/package issues." }, { "left": "issue:44479", "right": "issue:45381", - "accept": true, - "reason": "Both describe the same v5.3.0 Qwen video-input regression family; 45381 is a narrower symptom report about wrong vision_position_ids." + "accept": false, + "reason": "The former is a broad video-input regression across several Qwen models; the latter is a specific vision_position_ids bug in qwen2.5-vl." }, { - "left": "issue:44291", - "right": "issue:45042", + "left": "issue:44987", + "right": "issue:45538", "accept": false, - "reason": "init_empty_weights/_is_hf_initialized TypeError and PIL backend torchvision dependency regression are unrelated." + "reason": "Loading physical-intelligence/fast on transformers>=5.1.0 and CLIPTokenizer model_max_length are unrelated." 
}, { - "left": "issue:42915", - "right": "issue:43742", + "left": "issue:43927", + "right": "issue:44912", "accept": false, - "reason": "Qwen3Moe FineGrainedFP8Config failure and MobileLLM key error are separate loading issues." + "reason": "DiaConfig token-ID persistence and MXFP4 quantization fallback affect different code paths and model families." }, { "left": "issue:43540", "right": "issue:44560", "accept": false, - "reason": "Both are video-input bugs, but on different Qwen model families with different symptoms; not enough to treat as one fix." + "reason": "Qwen3OmniMoe video processing ValueError is distinct from Qwen3-vl-embedding StopIteration." }, { - "left": "issue:44062", - "right": "issue:44991", + "left": "issue:43317", + "right": "issue:45538", "accept": false, - "reason": "AddedToken duplicate special flag and tokenizer loading failure for est-roberta are different tokenizer bugs." + "reason": "dequantized model offload/device_map=auto failure is unrelated to CLIPTokenizer max-length handling." }, { - "left": "issue:43927", - "right": "issue:44912", + "left": "issue:42915", + "right": "issue:43742", "accept": false, - "reason": "DiaConfig token ID persistence and git-oss MXFP4 loading fallback are unrelated." + "reason": "Qwen3Moe FineGrainedFP8Config failure and MobileLLM key error are different model-loading bugs." }, { "left": "issue:43540", "right": "issue:44479", "accept": false, - "reason": "Related video-input area, but Qwen3OmniMoe ValueError is not the same concrete regression as the broader Qwen2.5/Qwen3 video-input issue." - }, - { - "left": "issue:42915", - "right": "issue:44821", - "accept": false, - "reason": "Qwen3Moe FP8 loading failure and AutoImageProcessor-from-URL failure are unrelated." + "reason": "A specific Qwen3OmniMoe video ValueError is not the same as the broader v5.3 video regression across multiple Qwen models." 
}, { - "left": "issue:33357", - "right": "issue:34567", + "left": "issue:44291", + "right": "issue:45042", "accept": false, - "reason": "MacOS bus error on CLIP model and TrainerState token counting not updating are entirely unrelated." + "reason": "init_empty_weights unexpected argument error is unrelated to the torchvision requirement regression in image processors." } ] }, - "evaluator_result": { - "accept": true, - "feedback": "The clustering is grounded in the packet: the broad Qwen video-input regression issue is a reasonable canonical choice, and the accepted soft edge (44479 \u2194 45381) looks like the same bug family. The remaining verdicts are conservative and the summary does not overclaim beyond the evidence." - }, - "evaluator_used": true, + "evaluator_result": null, + "evaluator_used": false, "retried": false, - "accepted_nontrivial_soft_edge": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -5039,26 +4915,26 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:28282", "issue:29127", + "issue:33357", "issue:34567", "issue:34689", "issue:36010", "issue:36331", "issue:38175", "issue:42175", + "issue:42915", "issue:43452", "issue:43493", "issue:43526", - "issue:43531", "issue:43577", "issue:43582", "issue:43643", "issue:43646", "issue:43688", - "issue:43697", "issue:43742", "issue:43749", "issue:43756", @@ -5076,282 +4952,283 @@ "issue:44704", "issue:44792", "issue:44821", - "issue:44843", "issue:44898", "issue:44964", + "issue:44991", "issue:45042", "issue:45081", "issue:45137", - "issue:45216", "issue:45290", "issue:45325", "issue:45375", "issue:45381", "issue:45405", - "issue:45479" + "issue:45479", + "issue:45538" ], "soft_pairs": [ - "issue:42175|issue:43824", - "issue:42175|issue:44488", + "issue:33357|issue:34567", "issue:45137|issue:45405", + "issue:44062|issue:44991", 
"issue:43906|issue:45381", + "issue:42915|issue:45538", + "issue:42915|issue:44821", + "issue:42175|issue:43824", + "issue:42175|issue:44488", "issue:43526|issue:45479", "issue:38175|issue:44898", "issue:43582|issue:43782", "issue:28282|issue:34689", - "issue:44484|issue:44704", - "issue:36010|issue:42175", - "issue:43742|issue:44291", "issue:43646|issue:43749", "issue:43493|issue:43994", - "issue:28282|issue:29127", + "issue:43742|issue:44291", + "issue:36010|issue:42175", "issue:43643|issue:45375", + "issue:28282|issue:29127", "issue:43756|issue:45325", + "issue:44484|issue:44704", + "issue:34567|issue:36331", "issue:44062|issue:45081", - "issue:44291|issue:44821", "issue:43452|issue:44488", - "issue:34567|issue:36331", + "issue:44291|issue:44821", "issue:44792|issue:44964", "issue:43688|issue:43749", - "issue:43742|issue:44589", "issue:44514|issue:45290", "issue:43452|issue:43577", + "issue:43742|issue:44589", "issue:43643|issue:44561", - "issue:45042|issue:45081", - "issue:43531|issue:44821", "issue:44514|issue:45381", - "issue:43697|issue:45216", - "issue:43452|issue:44843" + "issue:45042|issue:45081" ], - "prepared_review_unit_hash": "62709e333fcbaea1f749a763216799943edcb6c7872eacbeb4e89862cb091ab4", + "prepared_review_unit_hash": "beb48c37b6c02c005b90f9cdecc8251348a8ff7d32cb9792226d58c9399aa3b2", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 31, - "serialized_chars": 37300, - "estimated_input_tokens": 9325, - "estimated_eval_tokens": 18906 + "soft_pair_count": 32, + "serialized_chars": 37451, + "estimated_input_tokens": 9363, + "estimated_eval_tokens": 18982 }, "overflow_policy": 
"truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This set is mostly heterogeneous false-positive similarity matches. The only pair that looks like the same underlying bug is the two SigLIP2 reports, both about incorrect outputs for `google/siglip2-base-patch16-224` / HF-vs-JAX behavior.", - "confidence": 0.64, - "canonical_issue_reason": "`issue:43493` is the best canonical issue candidate for the one likely duplicate pair because it points to the underlying SigLIP2 implementation discrepancy, while `issue:43994` reads more like the downstream symptom report.", + "summary": "Most items are unrelated issue reports, mostly around distinct model-loading, tokenizer, or backend regressions. The only strong duplicate-looking pair is the Qwen2.5-VL apply_chat_template crash reports (44514/45290); the SigLIP2 pair is a plausible same-bug match but less certain. The rest look like false-positive similarity matches.", + "confidence": 0.66, + "canonical_issue_reason": "Issue 44514 is the better canonical representative for the apply_chat_template crash pair: both 44514 and 45290 describe crashes in the Qwen2.5-VL chat-template/tokenization path, and 44514 is the broader repro while 45290 is a more specific trigger.", "canonical_pr_reason": null, - "best_issue_reason": "`issue:43493` is the strongest issue to keep as the representative report: it is specific, root-cause-oriented, and covers the same SigLIP2 failure mode seen in `issue:43994`.", + "best_issue_reason": "44514 is the clearest issue to keep as canonical because it directly names the shared apply_chat_template crash and has the broader repro description.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:42175", - "right": "issue:43824", + "left": "issue:33357", + "right": "issue:34567", "accept": false, - "reason": "Different problems: TensorFlow backend packaging vs a specific Qwen2.5-VL import error." 
+ "reason": "Different subsystems and failures: a CLIP Mac bus error vs TrainerState token-count tracking." }, { - "left": "issue:42175", - "right": "issue:44488", + "left": "issue:45137", + "right": "issue:45405", "accept": false, - "reason": "Package/backend install issue is unrelated to a model-loading failure for `cjvt/sleng-bert`." + "reason": "DeepSpeed ZeRO3 deque errors and a PEFT version bump are unrelated." }, { - "left": "issue:45137", - "right": "issue:45405", + "left": "issue:44062", + "right": "issue:44991", "accept": false, - "reason": "DeepSpeed ZeRO3 deque error and a PEFT version pin regression are unrelated." + "reason": "Both involve tokenizer/model loading, but the reported failures and likely causes differ." }, { "left": "issue:43906", "right": "issue:45381", "accept": false, - "reason": "Unclear relation: one is an isolated repro of an older issue, the other is a Qwen2.5-VL video position-id bug." + "reason": "Different Qwen2.5-VL regressions affecting different behaviors and code paths." + }, + { + "left": "issue:42915", + "right": "issue:45538", + "accept": false, + "reason": "FP8 MoE loading and CLIP tokenizer max length are unrelated." + }, + { + "left": "issue:42915", + "right": "issue:44821", + "accept": false, + "reason": "Different model/load paths; no clear shared underlying bug." + }, + { + "left": "issue:42175", + "right": "issue:43824", + "accept": false, + "reason": "Backend packaging vs missing model-class import are unrelated." + }, + { + "left": "issue:42175", + "right": "issue:44488", + "accept": false, + "reason": "Dependency packaging and a specific model load failure are not the same bug." }, { "left": "issue:43526", "right": "issue:45479", "accept": false, - "reason": "Beit image-label reduction and zero-loss sequence classification are different code paths and symptoms." + "reason": "Beit label reduction and sequence-classification zero loss are different problems." 
}, { "left": "issue:38175", "right": "issue:44898", "accept": false, - "reason": "Both involve vision models, but the failure modes differ: bad probabilities vs Perceiver resolution handling." + "reason": "Same broad vision area, but different models and failure modes." }, { "left": "issue:43582", "right": "issue:43782", "accept": false, - "reason": "Apple Silicon allocator warmup TypeError is unrelated to Qwen3VL `weight_only=True` loading." + "reason": "Apple Silicon allocator TypeError and Qwen3VL weight_only loading error are unrelated." }, { "left": "issue:28282", "right": "issue:34689", "accept": false, - "reason": "PyTorch-missing ImportError and Llama 3.2 Vision loading breakage are different underlying issues." + "reason": "Missing PyTorch ImportError and Llama 3.2 Vision load breakage are different issues." }, { - "left": "issue:44484", - "right": "issue:44704", + "left": "issue:43646", + "right": "issue:43749", "accept": false, - "reason": "Save shard-size behavior and `AutoProcessor` kwarg forwarding are not the same bug." + "reason": "Custom model initialization breakage and FSDP CPU RAM-efficient loading are separate changes." }, { - "left": "issue:36010", - "right": "issue:42175", - "accept": false, - "reason": "`GenerationMixin` import failure is unrelated to TensorFlow backend installation." + "left": "issue:43493", + "right": "issue:43994", + "accept": true, + "reason": "Both report SigLIP2 correctness regressions on the same checkpoint, so they plausibly describe the same underlying model bug." }, { "left": "issue:43742", "right": "issue:44291", "accept": false, - "reason": "Model key lookup failure and `_is_hf_initialized` argument error are different loading bugs." + "reason": "A MobileLLM key error and an init_empty_weights TypeError are not the same failure." 
}, { - "left": "issue:43646", - "right": "issue:43749", + "left": "issue:36010", + "right": "issue:42175", "accept": false, - "reason": "Custom model init breakage and FSDP CPU RAM efficient loading are distinct regressions." + "reason": "GenerationMixin import failure and TensorFlow backend packaging are unrelated." }, { - "left": "issue:43493", - "right": "issue:43994", - "accept": true, - "reason": "Both describe the same SigLIP2 problem: `google/siglip2-base-patch16-224` behaves incorrectly in HF compared with the original implementation / expected outputs." + "left": "issue:43643", + "right": "issue:45375", + "accept": false, + "reason": "Both mention missing config fields, but the model/config contexts differ too much to treat as one bug." }, { "left": "issue:28282", "right": "issue:29127", "accept": false, - "reason": "Missing PyTorch import error is unrelated to LayoutLMv3 box validation messaging." + "reason": "Missing PyTorch dependency error vs LayoutLMv3 message clarity are unrelated." }, { - "left": "issue:43643", - "right": "issue:45375", + "left": "issue:43756", + "right": "issue:45325", "accept": false, - "reason": "Both mention config-field handling, but they are not clearly the same concrete bug or same failing code path." + "reason": "RoPE layer dropping and Qwen2.5-VL temporal position scaling are distinct model-specific bugs." }, { - "left": "issue:43756", - "right": "issue:45325", + "left": "issue:44484", + "right": "issue:44704", "accept": false, - "reason": "Different models and different RoPE/position-id issues." + "reason": "save_pretrained shard sizing and AutoProcessor kwargs propagation are different code paths." }, { - "left": "issue:44062", - "right": "issue:45081", + "left": "issue:34567", + "right": "issue:36331", "accept": false, - "reason": "Tokenizers `AddedToken` argument conflict is unrelated to `_patch_mistral_regex` backend-tokenizer access." 
+ "reason": "TrainerState token counting and compute_loss signature compatibility are separate Trainer issues." }, { - "left": "issue:44291", - "right": "issue:44821", + "left": "issue:44062", + "right": "issue:45081", "accept": false, - "reason": "`init_empty_weights` loading error and `AutoImageProcessor` URL loading are unrelated." + "reason": "AddedToken special-kw handling and Mistral regex patching are unrelated tokenizer bugs." }, { "left": "issue:43452", "right": "issue:44488", "accept": false, - "reason": "GGUF loading bug and a specific model-load failure are not the same issue." + "reason": "gguf_file loading breakage and a specific sleng-bert load failure are not clearly the same bug." }, { - "left": "issue:34567", - "right": "issue:36331", + "left": "issue:44291", + "right": "issue:44821", "accept": false, - "reason": "Trainer token-count tracking and `compute_loss` signature change are different trainer issues." + "reason": "init_empty_weights TypeError and loading AutoImageProcessor from URL are unrelated." }, { "left": "issue:44792", "right": "issue:44964", "accept": false, - "reason": "Janus test failure and Phi-4 multimodal loading failure are different problems." + "reason": "Janus test failure and Phi-4 multimodal model load failure are different multimodal issues." }, { "left": "issue:43688", "right": "issue:43749", "accept": false, - "reason": "Auxiliary-loss normalization and FSDP CPU RAM loading are unrelated." - }, - { - "left": "issue:43742", - "right": "issue:44589", - "accept": false, - "reason": "Model key error and missing `Float8_e4m3fnStorage` are different failures." + "reason": "Auxiliary-loss normalization and FSDP loading are unrelated." }, { "left": "issue:44514", "right": "issue:45290", - "accept": false, - "reason": "Both are chat-template crashes, but the triggers and likely code paths differ." 
+ "accept": true, + "reason": "Both are apply_chat_template/tokenize crashes in the Qwen2.5-VL path; the triggers differ, but they look close enough to be the same underlying bug." }, { "left": "issue:43452", "right": "issue:43577", "accept": false, - "reason": "GGUF tokenizer/model loading and BLIP2 dtype propagation are unrelated." - }, - { - "left": "issue:43643", - "right": "issue:44561", - "accept": false, - "reason": "Both involve `trust_remote_code`, but one is missing fields in `AutoConfig` and the other is a removed helper breaking remote-code models." - }, - { - "left": "issue:45042", - "right": "issue:45081", - "accept": false, - "reason": "PIL backend torchvision requirement and Mistral regex crash are unrelated." - }, - { - "left": "issue:43531", - "right": "issue:44821", - "accept": false, - "reason": "Qwen3-MoE sliding-window behavior and URL-based image-processor loading are unrelated." + "reason": "gguf_file loading and BLIP-2 dtype handling are different bugs." }, { - "left": "issue:44514", - "right": "issue:45381", + "left": "issue:43742", + "right": "issue:44589", "accept": false, - "reason": "Both are Qwen2.5-VL, but one is chat-template batching and the other is video vision-position-id calculation." + "reason": "Model-load key errors and missing Float8 storage errors are unrelated." }, { - "left": "issue:43697", - "right": "issue:45216", + "left": "issue:43643", + "right": "issue:44561", "accept": false, - "reason": "RTDetrV2 output regression and Qwen3.5 checkpoint save regression are unrelated." + "reason": "Missing config fields and removal of is_torch_fx_available are different breakages." }, { "left": "issue:44514", "right": "issue:45381", "accept": false, - "reason": "Same as above: related model family, but different concrete bugs and code paths." + "reason": "An apply_chat_template crash and a position-id scaling bug are unrelated." 
}, { - "left": "issue:43452", - "right": "issue:44843", + "left": "issue:45042", + "right": "issue:45081", "accept": false, - "reason": "GGUF loading failure and offline `model_info()` call in Mistral regex patch are different tokenizer/loading bugs." + "reason": "Image-processor torchvision dependency regression and Mistral regex patching are unrelated." } ] }, "evaluator_result": { - "accept": true, - "feedback": "The summary is grounded in the packet and stays conservative. The only accepted soft-edge pair, issue:43493 vs issue:43994, is plausibly the same SigLIP2 failure mode, while the other pair verdicts are appropriately cautious false negatives." + "accept": false, + "feedback": "The writeup overstates the evidence in the packet. In particular, issue 44514 vs 45290 are not shown as exact duplicates by the titles alone: one is about batched input with `padding=False`, the other about assistant tool-call messages with no content. The canonical-issue reason also claims they match \"almost verbatim\" and specifies a concrete shared failure mode that is not grounded in the packet text. The SigLIP2 pair may be plausible, but the overall summary and the accepted soft-edge verdicts should be phrased more conservatively." 
}, "evaluator_used": true, - "retried": false, + "retried": true, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null @@ -5367,7 +5244,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:24643", "issue:28282", @@ -5383,7 +5260,7 @@ "issue:41762", "issue:43122", "issue:43295", - "issue:43329", + "issue:43452", "issue:43525", "issue:43531", "issue:43577", @@ -5406,6 +5283,7 @@ "issue:44625", "issue:44743", "issue:44811", + "issue:44821", "issue:44843", "issue:44877", "issue:44898", @@ -5413,16 +5291,19 @@ "issue:44977", "issue:44991", "issue:45005", - "issue:45325", + "issue:45216", "issue:45356", "issue:45440" ], "soft_pairs": [ + "issue:43697|issue:45216", + "issue:43531|issue:44821", "issue:44625|issue:44877", + "issue:43452|issue:44843", "issue:43819|issue:44811", "issue:43720|issue:44898", - "issue:43653|issue:44568", "issue:43688|issue:45440", + "issue:43653|issue:44568", "issue:41762|issue:44936", "issue:44464|issue:44743", "issue:44466|issue:45005", @@ -5432,238 +5313,242 @@ "issue:40444|issue:44811", "issue:43525|issue:43643", "issue:44361|issue:44442", - "issue:44843|issue:44991", - "issue:38175|issue:43295", "issue:43577|issue:43697", + "issue:38175|issue:43295", "issue:43653|issue:44361", + "issue:44843|issue:44991", "issue:44479|issue:44936", "issue:28282|issue:32090", "issue:28282|issue:33357", - "issue:30064|issue:33666", "issue:28282|issue:35141", "issue:43122|issue:43906", "issue:44977|issue:45356", + "issue:30064|issue:33666", "issue:44464|issue:44977", "issue:43531|issue:43742", - "issue:43577|issue:44554", "issue:43653|issue:43927", - "issue:43329|issue:45325", - "issue:44479|issue:44625" + "issue:43577|issue:44554" ], - "prepared_review_unit_hash": "34854e51f77c1afa678dcc5fc363eb9703ccfd7847e3f882ca27dae5e55ed0a4", + "prepared_review_unit_hash": 
"94ed7c71b5d7e2361920365a8b2a93d9a93b88c255acd0f0d5b410e3b6333623", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { - "node_count": 47, - "item_count": 47, - "soft_pair_count": 31, - "serialized_chars": 36791, - "estimated_input_tokens": 9198, - "estimated_eval_tokens": 18652 + "node_count": 48, + "item_count": 48, + "soft_pair_count": 32, + "serialized_chars": 37547, + "estimated_input_tokens": 9387, + "estimated_eval_tokens": 19030 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is highly heterogeneous: the soft pairs mostly share broad themes like tokenizers, configs, or multimodal models, but they do not describe the same underlying bug or fix path. I would reject all proposed duplicate edges and treat this as an overbroad similarity cluster rather than a true duplicate set.", - "confidence": 0.91, - "canonical_issue_reason": "No single canonical issue fits the cluster well; the items span unrelated bugs across tokenizer loading, config propagation, multimodal processing, training, and serialization.", + "summary": "This cluster is highly heterogeneous: the soft pairs mostly share only broad Transformer-style themes (tokenizers, configs, model loading, regressions), but they describe different models, code paths, and failure modes. 
I would not merge any of the proposed pairs as duplicates.", + "confidence": 0.95, + "canonical_issue_reason": "No single issue cleanly represents the cluster; the items span unrelated bugs across tokenization, model loading, training, vision, and generation.", "canonical_pr_reason": null, - "best_issue_reason": "No strong global best issue exists because none of the artifacts are close enough to serve as a representative duplicate target for the others.", + "best_issue_reason": "No issue is a strong global duplicate target because the cluster lacks a single underlying bug.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:43697", + "right": "issue:45216", + "accept": false, + "reason": "Both are regressions, but one is RTDetr output mismatch and the other is Qwen3.5 save_pretrained corruption; different code paths and fixes." + }, + { + "left": "issue:43531", + "right": "issue:44821", + "accept": false, + "reason": "Qwen3-MoE sliding window recurrence bug vs AutoImageProcessor URL loading; unrelated subsystems." + }, { "left": "issue:44625", "right": "issue:44877", "accept": false, - "reason": "Both involve configs, but one is about Qwen3.5 num_labels propagation and the other about granite_speech strict config loading; different bug and code paths." + "reason": "Different config propagation issues for Qwen3.5 versus strict Granite speech config loading." + }, + { + "left": "issue:43452", + "right": "issue:44843", + "accept": false, + "reason": "GGUF tokenizer/model loading breakage is not the same as Mistral regex/offline model_info() behavior." }, { "left": "issue:43819", "right": "issue:44811", "accept": false, - "reason": "DAC latent conversion mismatch and Whisper batch_decode skip_special_tokens behavior are unrelated model/decoder bugs." + "reason": "DAC latent reconstruction mismatch is unrelated to Whisper batch_decode skip_special_tokens handling." 
}, { "left": "issue:43720", "right": "issue:44898", "accept": false, - "reason": "BitNet packed-weight loading under accelerate and Perceiver image classification with interpolate_pos_encoding are different failures in different subsystems." + "reason": "BitNet packed-weight loading and Perceiver interpolation are distinct model-specific bugs." }, { - "left": "issue:43653", - "right": "issue:44568", + "left": "issue:43688", + "right": "issue:45440", "accept": false, - "reason": "Both concern special tokens, but one is BigBird mask-token registration and the other is mDeBERTa BOS/EOS addition; not the same underlying defect." + "reason": "Auxiliary loss normalization in MoE models is unrelated to DeepSeekV3 native-vs-remote divergence." }, { - "left": "issue:43688", - "right": "issue:45440", + "left": "issue:43653", + "right": "issue:44568", "accept": false, - "reason": "Both are MoE-related, but one is auxiliary-loss normalization and the other is DeepSeekV3 implementation divergence; distinct bugs." + "reason": "BigBirdTokenizer special-token registration is unrelated to mdeberta-v3 BOS/EOS add_special_tokens behavior." }, { "left": "issue:41762", "right": "issue:44936", "accept": false, - "reason": "Gemma3 ZeRO-3 loading failure and trainer.evaluate after train are unrelated training/runtime issues." + "reason": "Gemma3 ZeRO-3 load failure and trainer.evaluate() after train() are different failure modes." }, { "left": "issue:44464", "right": "issue:44743", "accept": false, - "reason": "Chunked generation inconsistency with compiled forward is a generation/compile issue, while recurrent state reset in qwen3_5 is a cache logic bug." + "reason": "Chunked generation with compiled forward is not the same as Qwen3.5 recurrent-state reset in modular_qwen3_5.py." 
}, { "left": "issue:44466", "right": "issue:45005", "accept": false, - "reason": "Both mention tied weights, but one is serialization/device-dependent lm_head.weight behavior and the other is translation-model tied-weight issues in v5; not the same concrete bug." + "reason": "lm_head serialization/tied-weights issue and translation-model tied-weights issues are related only at a broad level, not the same bug." }, { "left": "issue:43295", "right": "issue:45356", "accept": false, - "reason": "One is a processor.tokenizer regression with images in v4.57.5, the other is Kimi-K2.5 tokenizer codec/regex handling in v5.4.0." + "reason": "Custom model processor/tokenizer regression differs from Kimi-K2.5 tokenizer codec/fix_mistral_regex regression." }, { "left": "issue:24643", "right": "issue:29127", "accept": false, - "reason": "DeepSpeed weight dimensionality error and LayoutLMv3 box-validation messaging are unrelated." + "reason": "DeepSpeed training weight-dimension error is not the same as LayoutLMv3 box validation messaging." }, { "left": "issue:41720", "right": "issue:43906", "accept": false, - "reason": "Qwen3 auto device mapping cudaErrorAssert and an isolated reproduction of another issue are not the same bug." + "reason": "Qwen3 auto device mapping cuda assert and an isolated reproduction of another issue are not the same concrete bug." }, { "left": "issue:40444", "right": "issue:44811", "accept": false, - "reason": "Multi-image IterableDataset finetuning failure and Whisper batch_decode special-token handling affect different pipelines." + "reason": "IterableDataset multi-image finetuning failure is unrelated to Whisper batch_decode special-token decoding." }, { "left": "issue:43525", "right": "issue:43643", "accept": false, - "reason": "Llama4Config missing pad_token_id and AutoConfig trust_remote_code losing fields are related to config metadata, but not the same failure mode or fix." 
+ "reason": "Missing pad_token_id on Llama4Config is distinct from trust_remote_code returning incomplete AutoConfig fields." }, { "left": "issue:44361", "right": "issue:44442", "accept": false, - "reason": "MLukeTokenizer task AttributeError and FastSpeech2ConformerTokenizer loading failure are different tokenizer-specific issues." + "reason": "MLukeTokenizer task AttributeError and FastSpeech2ConformerTokenizer loading failure are different tokenizer implementations." }, { - "left": "issue:44843", - "right": "issue:44991", + "left": "issue:43577", + "right": "issue:43697", "accept": false, - "reason": "Offline model_info() call in _patch_mistral_regex and est-roberta tokenizer loading failure have different causes and code paths." + "reason": "BLIP2 dtype preservation and RTDetr output differences are unrelated model behavior bugs." }, { "left": "issue:38175", "right": "issue:43295", "accept": false, - "reason": "Siglip2 zero probabilities and processor.tokenizer/image regression are unrelated model behavior vs API regression bugs." + "reason": "SigLIP2 zero probabilities and processor/tokenizer regression involve different models and symptoms." }, { - "left": "issue:43577", - "right": "issue:43697", + "left": "issue:43653", + "right": "issue:44361", "accept": false, - "reason": "Blip2 dtype propagation and RTDetrV2 output drift in v5 are separate model correctness issues." + "reason": "BigBirdTokenizer special-token registration and MLukeTokenizer AttributeError are separate tokenizer bugs." }, { - "left": "issue:43653", - "right": "issue:44361", + "left": "issue:44843", + "right": "issue:44991", "accept": false, - "reason": "BigBird mask-token registration and MLukeTokenizer task failure are distinct tokenizer problems." + "reason": "Offline model_info() behavior in _patch_mistral_regex is not the same as tokenizer loading failure for est-roberta." 
}, { "left": "issue:44479", "right": "issue:44936", "accept": false, - "reason": "Qwen v5.3 video-input regression and trainer.evaluate-after-train are unrelated." + "reason": "Qwen video-input regression and trainer lifecycle failure are unrelated." }, { "left": "issue:28282", "right": "issue:32090", "accept": false, - "reason": "PyTorch missing import error and NoneType broadcast failure in Trainer are unrelated environment/runtime issues." + "reason": "Generic PyTorch import failure and Trainer _gpu_broadcast_one NoneType error are clearly different." }, { "left": "issue:28282", "right": "issue:33357", "accept": false, - "reason": "Missing PyTorch import and MacOS bus error on CLIP are different failure classes." - }, - { - "left": "issue:30064", - "right": "issue:33666", - "accept": false, - "reason": "Void segmentation map processing and Qwen2-VL multi-GPU training are unrelated multimodal/image issues." + "reason": "Missing PyTorch import and MacOS bus error with community CLIP are unrelated." }, { "left": "issue:28282", "right": "issue:35141", "accept": false, - "reason": "AutoModel ImportError and tied-word-embedding post_init reinitialization are unrelated." + "reason": "PyTorch missing import versus embedding reinitialization after resizing are different layers of failure." }, { "left": "issue:43122", "right": "issue:43906", "accept": false, - "reason": "Tokenizer behavior changed across versions and an isolated reproduction of another issue do not identify the same bug." + "reason": "Tokenizer version drift and an isolated reproduction of another issue are not the same bug." }, { "left": "issue:44977", "right": "issue:45356", "accept": false, - "reason": "Qwen3.5 flash-attention generation bug and Kimi-K2.5 tokenizer codec regression are different subsystems." + "reason": "Qwen3.5 flash-attention generation regression and Kimi-K2.5 tokenizer regression involve different model families and paths." 
+ }, + { + "left": "issue:30064", + "right": "issue:33666", + "accept": false, + "reason": "Void segmentation map processing and Qwen2-VL multi-GPU training are unrelated vision/training issues." }, { "left": "issue:44464", "right": "issue:44977", "accept": false, - "reason": "Compiled-forward chunked generation inconsistency and flash-attention generation failure are both generation bugs but not the same code-path problem." + "reason": "Compiled-forward chunked generation inconsistency is not the same as Qwen3.5 flash-attention generation failure." }, { "left": "issue:43531", "right": "issue:43742", "accept": false, - "reason": "Qwen3-MoE sliding_window issue and MobileLLM-125M key error are unrelated model-loading/runtime bugs." - }, - { - "left": "issue:43577", - "right": "issue:44554", - "accept": false, - "reason": "Blip2 dtype propagation and MPS attention correctness issue are not the same defect." + "reason": "Qwen3-MoE sliding-window state handling is unrelated to MobileLLM-125M key error on load." }, { "left": "issue:43653", "right": "issue:43927", "accept": false, - "reason": "BigBird special-token registration and DiaConfig save/load custom token IDs are different tokenizer/config persistence bugs." - }, - { - "left": "issue:43329", - "right": "issue:45325", - "accept": false, - "reason": "Undefined helpers in multimodal token counting and Qwen2.5-VL rope-index scaling for still images are separate video/positioning bugs." + "reason": "BigBirdTokenizer special-token registration and DiaConfig custom token IDs save/load corruption are different tokenizer/config issues." }, { - "left": "issue:44479", - "right": "issue:44625", + "left": "issue:43577", + "right": "issue:44554", "accept": false, - "reason": "Qwen video regression and Qwen3.5 num_labels propagation are different issues despite shared family naming." + "reason": "BLIP2 dtype loading bug and MPS attention correctness issue are unrelated." 
} ] }, @@ -5685,7 +5570,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:32090", "issue:33666", @@ -5698,6 +5583,7 @@ "issue:43262", "issue:43278", "issue:43299", + "issue:43329", "issue:43334", "issue:43452", "issue:43493", @@ -5720,11 +5606,10 @@ "issue:44361", "issue:44451", "issue:44464", - "issue:44488", - "issue:44589", + "issue:44479", "issue:44610", + "issue:44625", "issue:44821", - "issue:44843", "issue:44849", "issue:44913", "issue:44964", @@ -5732,281 +5617,264 @@ "issue:45084", "issue:45200", "issue:45229", + "issue:45325", "issue:45357", - "issue:45405", - "issue:45520" + "issue:45405" ], "soft_pairs": [ - "issue:43452|issue:44451", - "issue:41720|issue:44464", + "issue:43329|issue:45325", + "issue:44479|issue:44625", "issue:43012|issue:43577", + "issue:41720|issue:44464", "issue:43550|issue:44361", "issue:43504|issue:43720", + "issue:43452|issue:44451", "issue:43504|issue:43550", "issue:32090|issue:33666", "issue:32090|issue:34567", "issue:43262|issue:45084", "issue:43504|issue:44610", "issue:43278|issue:43688", - "issue:43334|issue:43572", "issue:44849|issue:45357", - "issue:43742|issue:44821", "issue:45200|issue:45229", - "issue:43540|issue:43716", + "issue:43334|issue:43572", "issue:43756|issue:44913", - "issue:43525|issue:43572", + "issue:43540|issue:43716", "issue:43493|issue:43825", + "issue:43742|issue:44821", "issue:33666|issue:36010", + "issue:43525|issue:43572", "issue:43720|issue:43927", "issue:43606|issue:43927", "issue:43688|issue:43873", - "issue:44488|issue:44843", - "issue:44589|issue:45520", "issue:43278|issue:43844", "issue:43540|issue:44977", - "issue:43257|issue:43299", "issue:43720|issue:44361", + "issue:43257|issue:43299", + "issue:44964|issue:45405", "issue:39692|issue:43493", - "issue:44964|issue:45405" + "issue:43329|issue:43606" ], - "prepared_review_unit_hash": 
"20e889d19131ef4faf70a2d58ca3cf3ac0ca2942ecb7c953397a3e4032505701", + "prepared_review_unit_hash": "ef75676425bce8efaec28f2219f33b26c1e20460a0e842dbe7cbb5e499dddd27", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 31, - "serialized_chars": 36887, - "estimated_input_tokens": 9222, - "estimated_eval_tokens": 18700 + "soft_pair_count": 32, + "serialized_chars": 37319, + "estimated_input_tokens": 9330, + "estimated_eval_tokens": 18916 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is a loose collection of unrelated Transformers regressions (loading/config, dtype, generation, training, docs). The soft pairs share broad keywords, but none look like the same underlying bug or a mergeable PR-level change.", - "confidence": 0.93, - "canonical_issue_reason": "No single issue cleanly subsumes the cluster; the items span distinct bugs across model loading, configuration, dtype handling, generation, training, and documentation.", + "summary": "This cluster is mostly a loose semantic grouping of unrelated Transformers regressions (config-loading, multimodal/video bugs, dtype/offload issues, and compile/runtime errors). 
None of the soft pairs look like exact duplicate reports; at best they are related by broad theme, not the same concrete bug.", + "confidence": 0.41, + "canonical_issue_reason": "No single exact duplicate stands out, but issue 43334 is the best anchor because it cleanly represents the recurring v5 config-loading regression where a model-specific config is missing a pad token field and loading fails with an AttributeError.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43452 is the most self-contained and broadly impactful regression here, with a clear reproducible API break (`AutoTokenizer` / `AutoModelForCausalLM` + `gguf_file`).", + "best_issue_reason": "Issue 43334 is the most representative issue for the dominant configuration-regression theme in this set: it is specific, reproducible, and clearly tied to a loader breakage in dev versions.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:43452", - "right": "issue:44451", + "left": "issue:43329", + "right": "issue:45325", "accept": false, - "reason": "Both involve loading models, but one is a gguf_file/from_pretrained regression and the other is a model-specific failure for ScandiBERT; different underlying causes." + "reason": "Both are Qwen VL/video-related, but they describe different bugs: undefined video-token helpers in multimodal token counting vs wrong still-image temporal position scaling." }, { - "left": "issue:41720", - "right": "issue:44464", + "left": "issue:44479", + "right": "issue:44625", "accept": false, - "reason": "Qwen3 auto device mapping CUDA assert and chunked generation with compiled forward are different code paths and symptoms." + "reason": "One is a video-input regression across several Qwen VL models; the other is a Qwen3.5 config propagation bug for num_labels. Different failure modes and fixes." 
}, { "left": "issue:43012", "right": "issue:43577", "accept": false, - "reason": "Both mention bf16/fp32, but one is a compile-time warning and the other is incorrect dtype propagation when loading Blip2." + "reason": "Both mention dtype/precision, but one is a PyTorch warning during compile and the other is model/qformer staying float32 after load. Not the same code-path problem." + }, + { + "left": "issue:41720", + "right": "issue:44464", + "accept": false, + "reason": "Different problems: CUDA assert with auto device mapping for Qwen3 vs inconsistent outputs from chunked generation with compiled forward." }, { "left": "issue:43550", "right": "issue:44361", "accept": false, - "reason": "A torch.compile/SDPA model bug and a tokenizer AttributeError are unrelated." + "reason": "Unrelated subsystems: SDPA/torch.compile failure in Bamba vs tokenizer AttributeError on tasks in MLukeTokenizer." }, { "left": "issue:43504", "right": "issue:43720", "accept": false, - "reason": "Both are loading failures, but they hit different models and mechanisms: legacy field handling vs packed-weight unpacking in accelerate." + "reason": "Both are loading-related, but one is a legacy field in Beit semantic segmentation config and the other is packed-weight unpacking for BitNet during accelerate loading." + }, + { + "left": "issue:43452", + "right": "issue:44451", + "accept": false, + "reason": "Both concern model loading, but the concrete bugs differ: gguf_file handling in tokenizer/model loaders vs failure to load a specific pretrained model due to changed behavior." }, { "left": "issue:43504", "right": "issue:43550", "accept": false, - "reason": "Different models and failure modes; one is pretrained loading, the other is compile+SDPA behavior." + "reason": "Different models and different failures: legacy config field loading vs compile/SDPA runtime failure." 
}, { "left": "issue:32090", "right": "issue:33666", "accept": false, - "reason": "Generic Trainer broadcast TypeError versus Qwen2-VL multi-GPU training issue; not the same bug." + "reason": "Trainer broadcast TypeError and Qwen2-VL multi-GPU training are too broad and do not describe the same underlying issue." }, { "left": "issue:32090", "right": "issue:34567", "accept": false, - "reason": "Trainer broadcast failure and `num_input_tokens_seen` not updating are unrelated Trainer problems." + "reason": "Trainer broadcast NoneType error and TrainerState token counter not updating are unrelated Trainer bugs." }, { "left": "issue:43262", "right": "issue:45084", "accept": false, - "reason": "Audio chat template sampling-rate defaulting and a template compilation TypeError are different subsystems." + "reason": "Audio chat-template sampling-rate defaulting and template-node compilation failure are different components and symptoms." }, { "left": "issue:43504", "right": "issue:44610", "accept": false, - "reason": "Model preset loading failure vs processor input-size mismatch; no shared bug." + "reason": "One is a legacy preset-loading issue in BEiT; the other is an OmDet-Turbo processor/model input-size mismatch." }, { "left": "issue:43278", "right": "issue:43688", "accept": false, - "reason": "Embedding dtype drift during eval and auxiliary-loss normalization are separate training issues." - }, - { - "left": "issue:43334", - "right": "issue:43572", - "accept": false, - "reason": "Both are missing-config-field regressions, but on different models and different fields; not the same concrete defect." + "reason": "Both involve training/inference behavior, but one is dtype drift between training and evaluate while the other is auxiliary loss normalization in MoE models." }, { "left": "issue:44849", "right": "issue:45357", "accept": false, - "reason": "Output_hidden_states behavior and save_pretrained visual-key serialization are unrelated Qwen3.5 bugs." 
- }, - { - "left": "issue:43742", - "right": "issue:44821", - "accept": false, - "reason": "MobileLLM key error and AutoImageProcessor-from-URL loading are different loading paths." + "reason": "Qwen3.5 hidden-states bug and incorrect visual encoder keys on save_pretrained are distinct regressions." }, { "left": "issue:45200", "right": "issue:45229", "accept": false, - "reason": "Text-only fine-tuning token IDs and multi-GPU inference OOM are unrelated Gemma 4 issues." + "reason": "Different Gemma 4 issues: text-only mm_token_type_ids defaults vs multi-GPU inference OOM." }, { - "left": "issue:43540", - "right": "issue:43716", + "left": "issue:43334", + "right": "issue:43572", "accept": false, - "reason": "Video-processing ValueError in Qwen3OmniMoe and Mistral-3 image-preprocessor dtype mismatch are different problems." + "reason": "Both are missing-pad-token-field config regressions, but they affect different config classes and attribute names; the evidence is too broad to treat them as the same bug." }, { "left": "issue:43756", "right": "issue:44913", "accept": false, - "reason": "RoPE layer dropping in Smollm3 and GPTNeoX rotary_pct reload regression are not the same bug." + "reason": "Smollm3 RoPE-layer behavior and GPTNeoX rotary_pct reload behavior are separate architecture/config issues." }, { - "left": "issue:43525", - "right": "issue:43572", + "left": "issue:43540", + "right": "issue:43716", "accept": false, - "reason": "Similar missing-pad-token symptoms, but they affect different configs and fields; too broad to treat as one bug." + "reason": "Different multimodal bugs: Qwen3OmniMoe video processing ValueError vs Mistral-3 image preprocessor/model dtype mismatch." }, { "left": "issue:43493", "right": "issue:43825", "accept": false, - "reason": "SigLIP2 implementation discrepancy and a pipeline error message change are unrelated." + "reason": "SigLIP2 HF-vs-JAX discrepancy and pipeline translation-task error message are unrelated." 
+ }, + { + "left": "issue:43742", + "right": "issue:44821", + "accept": false, + "reason": "MobileLLM key error on load and AutoImageProcessor-from-URL failure are different loader paths with different root causes." }, { "left": "issue:33666", "right": "issue:36010", "accept": false, - "reason": "Multi-GPU training for Qwen2-VL and a GenerationMixin import error do not share a concrete code-path." + "reason": "Qwen2-VL multi-GPU training and GenerationMixin import failure are not the same underlying bug." + }, + { + "left": "issue:43525", + "right": "issue:43572", + "accept": false, + "reason": "Both are config attribute regressions after v5, but they involve different models and fields; not enough evidence of one shared fix." }, { "left": "issue:43720", "right": "issue:43927", "accept": false, - "reason": "Packed-weight loading in BitNet and DiaConfig custom-token-ID persistence are different serialization/loading bugs." + "reason": "Packed-weight unpacking during accelerate loading and DiaConfig custom token IDs lost on save/load are different bugs." }, { "left": "issue:43606", "right": "issue:43927", "accept": false, - "reason": "CPU offload device mismatch for Bark-small and DiaConfig save/load token-ID loss are unrelated." + "reason": "CPU offload device mismatch and DiaConfig save/load token ID loss are unrelated." }, { "left": "issue:43688", "right": "issue:43873", "accept": false, - "reason": "Aux-loss normalization and quantization offload behavior are separate concerns." - }, - { - "left": "issue:44488", - "right": "issue:44843", - "accept": false, - "reason": "A model-loading failure and an offline-mode tokenizer patch bug are different issues." - }, - { - "left": "issue:44589", - "right": "issue:45520", - "accept": false, - "reason": "Missing Float8 storage support and a flash_attn import_utils KeyError are unrelated environment/runtime problems." 
+ "reason": "Aux-loss normalization in MoE models and quantization/offloading behavior are different change areas." }, { "left": "issue:43278", "right": "issue:43844", "accept": false, - "reason": "BF16-to-FP32 dtype drift and ZeRO-3 gradient inflation are different training behaviors." + "reason": "Embedding dtype drift in evaluate and abnormal gradient growth under DeepSpeed ZeRO-3 are different training issues." }, { "left": "issue:43540", "right": "issue:44977", "accept": false, - "reason": "Qwen3OmniMoe video input processing and Qwen3.5 flash-attention generation are distinct." - }, - { - "left": "issue:43257", - "right": "issue:43299", - "accept": false, - "reason": "Both involve Qwen3 MOE loading, but one is accelerate+deepspeed weight conversion and the other is a transformers dev regression; not clearly the same fix." + "reason": "Both concern Qwen3.5-family behavior, but one is video-input processing and the other is flash-attention generation correctness." }, { "left": "issue:43720", "right": "issue:44361", "accept": false, - "reason": "BitNet loading/unpacking and MLukeTokenizer task failures are unrelated." + "reason": "BitNet packed-weight loading and MLukeTokenizer task AttributeError are unrelated." }, { - "left": "issue:39692", - "right": "issue:43493", + "left": "issue:43257", + "right": "issue:43299", "accept": false, - "reason": "SigLIP2 doc-example errors and SigLIP2 implementation mismatch are related only by model family, not the same bug." + "reason": "Both involve Qwen3/MoE loading, but one is accelerate+deepspeed weight conversion and the other is a dev-version loading regression in Qwen3VL MoE. Too different to merge." }, { "left": "issue:44964", "right": "issue:45405", "accept": false, - "reason": "Phi-4 multimodal loading regression and a PEFT version bump are completely unrelated." + "reason": "Model loading failure and a version-pin release issue are not the same bug or change." 
}, { - "left": "issue:44849", - "right": "issue:45357", - "accept": false, - "reason": "Qwen3.5 hidden-state output bug and incorrect save_pretrained visual encoder keys are different failures." - }, - { - "left": "issue:43825", - "right": "issue:43844", - "accept": false, - "reason": "Pipeline translation messaging and ZeRO-3 gradient behavior are unrelated." - }, - { - "left": "issue:43504", - "right": "issue:43572", + "left": "issue:39692", + "right": "issue:43493", "accept": false, - "reason": "Different models and different missing legacy fields; too broad to merge as one bug." + "reason": "SigLIP2 doc example errors and HF-vs-JAX implementation discrepancy are different issues; one is documentation/example correctness, the other is model parity." }, { - "left": "issue:43012", - "right": "issue:43577", + "left": "issue:43329", + "right": "issue:43606", "accept": false, - "reason": "A warning during compilation is not the same issue as dtype preservation on model load." + "reason": "Video token counting bug and Bark CPU offload device mismatch are unrelated." 
} ] }, @@ -6028,7 +5896,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:15354", "issue:33357", @@ -6039,7 +5907,6 @@ "issue:43257", "issue:43278", "issue:43295", - "issue:43329", "issue:43493", "issue:43504", "issue:43540", @@ -6063,16 +5930,16 @@ "issue:44361", "issue:44423", "issue:44466", + "issue:44488", "issue:44493", "issue:44521", "issue:44554", "issue:44589", "issue:44734", "issue:44792", + "issue:44843", "issue:44977", - "issue:45072", "issue:45084", - "issue:45198", "issue:45200", "issue:45290", "issue:45405", @@ -6080,224 +5947,224 @@ "issue:45520" ], "soft_pairs": [ - "issue:43329|issue:43606", + "issue:44488|issue:44843", + "issue:44589|issue:45520", "issue:33666|issue:36331", "issue:43493|issue:43844", "issue:43756|issue:44060", "issue:43646|issue:44466", - "issue:43688|issue:43746", "issue:44423|issue:44734", - "issue:43824|issue:44589", + "issue:43688|issue:43746", "issue:44521|issue:45290", + "issue:43824|issue:44589", "issue:33357|issue:33666", - "issue:43572|issue:44291", "issue:39692|issue:43295", "issue:43994|issue:44493", "issue:43504|issue:43606", - "issue:43575|issue:45405", "issue:43504|issue:43653", + "issue:43575|issue:45405", + "issue:43572|issue:44291", "issue:43927|issue:44361", "issue:43278|issue:43701", "issue:44792|issue:45405", "issue:43257|issue:45405", "issue:43606|issue:44112", - "issue:45084|issue:45520", "issue:15354|issue:38175", + "issue:45084|issue:45520", "issue:44554|issue:45446", "issue:43540|issue:45200", - "issue:43582|issue:44977", - "issue:45072|issue:45198" + "issue:43582|issue:44977" ], - "prepared_review_unit_hash": "e0f835a3e655f025a079ed9d67658e024d86a37489a104b9c21ce07b294e731e", + "prepared_review_unit_hash": "60577a898ffde87ef031e4d79b015b747d8b15dfea12936bb0d1afc557ba0e81", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - 
"serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { - "node_count": 48, - "item_count": 48, + "node_count": 47, + "item_count": 47, "soft_pair_count": 27, - "serialized_chars": 36089, - "estimated_input_tokens": 9023, - "estimated_eval_tokens": 18302 + "serialized_chars": 35425, + "estimated_input_tokens": 8857, + "estimated_eval_tokens": 17970 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The set is heterogeneous: it spans unrelated bugs in training, serving, tokenization, multimodal processors, and model loading. None of the soft pairs look like true duplicates, so there is no clear canonical duplicate target.", - "confidence": 0.97, - "canonical_issue_reason": "The items do not form one coherent duplicate cluster; they cover many unrelated subsystems and model-specific regressions.", + "summary": "This cluster is heterogeneous. A few pairs share a subsystem or API surface, but none look like the same underlying bug/change, so I reject all soft edges. 
No PRs are present.", + "confidence": 0.89, + "canonical_issue_reason": "Issue 44521 is the closest representative of the only coherent subtopic in the set (multimodal/chat-template behavior) and is an active user-facing regression, but it is not a duplicate of the other items.", "canonical_pr_reason": null, - "best_issue_reason": "No single issue cleanly stands out as the canonical representative because the cluster is too mixed.", + "best_issue_reason": "Issue 44521 is the best umbrella issue because it is concrete, current, and in a broadly used path; the rest of the set is too mixed to form a better canonical duplicate target.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:43329", - "right": "issue:43606", + "left": "issue:44488", + "right": "issue:44843", + "accept": false, + "reason": "Both involve loading/tokenizer-related behavior, but one is a model-loading failure and the other is an offline-mode tokenizer patch bug; different failure modes and code paths." + }, + { + "left": "issue:44589", + "right": "issue:45520", "accept": false, - "reason": "Different bugs: undefined video-token helper usage in multimodal token counting vs CPU offload device mismatch in Bark." + "reason": "These are unrelated import/storage/type errors in different areas; no shared bug or change." }, { "left": "issue:33666", "right": "issue:36331", "accept": false, - "reason": "Qwen2-VL multi-GPU training request is unrelated to the CustomTrainer keyword-argument regression." + "reason": "Multi-GPU training for Qwen2-VL and a CustomTrainer signature regression are unrelated." }, { "left": "issue:43493", "right": "issue:43844", "accept": false, - "reason": "SigLIP2 HF/JAX discrepancy is not the same issue as ZeRO-3 gradient inflation during DeepSpeed training." + "reason": "SigLIP2 implementation discrepancy and ZeRO-3 gradient growth are different correctness issues." 
}, { "left": "issue:43756", "right": "issue:44060", "accept": false, - "reason": "RoPE layer-count mismatch in Smollm3 is unrelated to the Qwen3-Next tied-weights warning." + "reason": "Both mention model internals, but one is RoPE layer dropping and the other is a tied-weights warning involving Qwen3-Next; not the same bug." }, { "left": "issue:43646", "right": "issue:44466", "accept": false, - "reason": "Custom model init breakage in v5 is different from inconsistent lm_head serialization behavior." - }, - { - "left": "issue:43688", - "right": "issue:43746", - "accept": false, - "reason": "Aux-loss normalization bug and PEFT local-checkpoint loading bug are separate code paths." + "reason": "Both touch serialization/tied weights, but one is custom model initialization breakage and the other is device-dependent lm_head serialization; different concrete problems." }, { "left": "issue:44423", "right": "issue:44734", "accept": false, - "reason": "Continuous-batching multimodal crash and KV-cache continuation indexing error are distinct serving failures." + "reason": "Both are server crashes, but one is a multimodal string/.to error and the other is KV-cache tensor indexing; different code paths." }, { - "left": "issue:43824", - "right": "issue:44589", + "left": "issue:43688", + "right": "issue:43746", "accept": false, - "reason": "Import failure for Qwen2.5-VL class is unrelated to missing Float8 storage support." + "reason": "Auxiliary-loss normalization and PEFT checkpoint loading are unrelated." }, { "left": "issue:44521", "right": "issue:45290", "accept": false, - "reason": "All-zero assistant masks for multimodal inputs is not the same as tool-call chat-template crashing on empty content." + "reason": "Both use apply_chat_template, but one is wrong assistant mask output for multimodal inputs and the other is a tool-call/no-content crash; distinct bugs." 
}, { - "left": "issue:33357", - "right": "issue:33666", + "left": "issue:43824", + "right": "issue:44589", "accept": false, - "reason": "MacOS bus error on CLIP and Qwen2-VL multi-GPU training are unrelated model/runtime problems." + "reason": "An import error for Qwen2.5-VL and a missing Float8 storage type are unrelated." }, { - "left": "issue:43572", - "right": "issue:44291", + "left": "issue:33357", + "right": "issue:33666", "accept": false, - "reason": "Missing pad_token_idx in StableLmConfig is different from the init_empty_weights unexpected-argument regression." + "reason": "MacOS bus error on a CLIP model and Qwen2-VL multi-GPU training are different issues." }, { "left": "issue:39692", "right": "issue:43295", "accept": false, - "reason": "SigLIP2 doc example errors are documentation/model usage issues, not the processor.tokenizer regression." + "reason": "A SigLIP2 docs example bug and a regression in processor/tokenizer access are not the same underlying problem." }, { "left": "issue:43994", "right": "issue:44493", "accept": false, - "reason": "Nonsensical SigLIP2 outputs and unexpected position-id keys are separate symptoms with different likely causes." + "reason": "Nonsensical SigLIP2 outputs and unexpected position-id keys point to different model/config issues." }, { "left": "issue:43504", "right": "issue:43606", "accept": false, - "reason": "Legacy field loading failure in Beit is unrelated to the Bark device-mismatch issue." + "reason": "Legacy-field load failure for Beit and a CPU offload device mismatch in bark-small are unrelated." + }, + { + "left": "issue:43504", + "right": "issue:43653", + "accept": false, + "reason": "Legacy field loading for Beit and BigBirdTokenizer special-token registration are different bugs." }, { "left": "issue:43575", "right": "issue:45405", "accept": false, - "reason": "Tensor-parallel OOM for a large Qwen model is unrelated to the PEFT minimum-version bump." 
+ "reason": "Tensor-parallel OOM for a model load and a PEFT version bump are not the same issue." }, { - "left": "issue:43504", - "right": "issue:43653", + "left": "issue:43572", + "right": "issue:44291", "accept": false, - "reason": "Beit legacy-field loading and BigBirdTokenizer special-token registration are different bugs." + "reason": "Missing pad_token_idx in a config and an init_empty_weights/_is_hf_initialized argument mismatch are different compatibility problems." }, { "left": "issue:43927", "right": "issue:44361", "accept": false, - "reason": "DiaConfig token ID persistence and MLukeTokenizer AttributeError are unrelated issues." + "reason": "Config token-ID persistence and MLukeTokenizer AttributeError are unrelated." }, { "left": "issue:43278", "right": "issue:43701", "accept": false, - "reason": "Embedding dtype drift during evaluation is not the same as resume_from_checkpoint key mismatch." + "reason": "Embedding dtype drift between train/eval and resume_from_checkpoint key mismatch are different regressions." }, { "left": "issue:44792", "right": "issue:45405", "accept": false, - "reason": "Janus test failure is unrelated to the PEFT version pin bump." + "reason": "A janus image-generation test failure and a PEFT version pin issue are unrelated." }, { "left": "issue:43257", "right": "issue:45405", "accept": false, - "reason": "Qwen3 MoE weight conversion under accelerate+deepspeed is a model-loading bug, not a dependency-version release issue." + "reason": "Qwen3 MOE weight conversion under accelerate/deepspeed and a dependency-version bump are not the same bug." }, { "left": "issue:43606", "right": "issue:44112", "accept": false, - "reason": "CPU offload device mismatch and a stale CI test in GraniteSpeech do not describe the same underlying bug." + "reason": "Both are CI/device-mismatch themed, but one is real CPU offload mismatch and the other is a stale GraniteSpeech test; not duplicates." 
}, { - "left": "issue:45084", - "right": "issue:45520", + "left": "issue:15354", + "right": "issue:38175", "accept": false, - "reason": "Template-node compilation error is unrelated to the flash_attn import_utils KeyError on Python 3.13." + "reason": "JIT scripting/export failure and zero probabilities on SigLIP2 are unrelated." }, { - "left": "issue:15354", - "right": "issue:38175", + "left": "issue:45084", + "right": "issue:45520", "accept": false, - "reason": "TorchScript GeneratorExp export failure is unrelated to SigLIP2 zero-probability outputs." + "reason": "Template-node compilation error and flash_attn import failure on Python 3.13 are different issues." }, { "left": "issue:44554", "right": "issue:45446", "accept": false, - "reason": "MPS attention correctness bug with mismatched head dims is unrelated to the AuxRequest version-check bug." + "reason": "MPS attention correctness with mismatched head dims and a PyTorch version check bug for AuxRequest import are unrelated." }, { "left": "issue:43540", "right": "issue:45200", "accept": false, - "reason": "Video-input ValueError in Qwen3OmniMoe and Gemma 4 mm_token_type_ids defaulting are different multimodal issues." + "reason": "Qwen3OmniMoe video processing error and Gemma 4 mm_token_type_ids defaulting are different multimodal bugs." }, { "left": "issue:43582", "right": "issue:44977", "accept": false, - "reason": "Apple Silicon allocator TypeError and Qwen3.5 flash-attention generation failure are unrelated." - }, - { - "left": "issue:45072", - "right": "issue:45198", - "accept": false, - "reason": "bfloat16 dtype mismatch in inference is unrelated to Wav2Vec2 save_pretrained/tokenization failure." + "reason": "AppleSilicon caching_allocator_warmup TypeError and Qwen3.5 flash-attention generation failure are not the same underlying defect." 
} ] }, @@ -6319,13 +6186,12 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:37428", "issue:38175", "issue:39401", "issue:40990", - "issue:42915", "issue:43232", "issue:43278", "issue:43295", @@ -6363,6 +6229,7 @@ "issue:44936", "issue:44964", "issue:44987", + "issue:45072", "issue:45127", "issue:45198", "issue:45200", @@ -6371,13 +6238,14 @@ "issue:45412" ], "soft_pairs": [ + "issue:45072|issue:45198", "issue:43232|issue:44849", "issue:43844|issue:44964", "issue:39401|issue:44568", - "issue:37428|issue:43957", "issue:44315|issue:44849", - "issue:43334|issue:43525", + "issue:37428|issue:43957", "issue:44554|issue:45356", + "issue:43334|issue:43525", "issue:43295|issue:44610", "issue:43493|issue:43701", "issue:45200|issue:45325", @@ -6385,241 +6253,237 @@ "issue:43873|issue:44464", "issue:44466|issue:45127", "issue:40990|issue:44625", - "issue:37428|issue:43824", - "issue:43572|issue:44987", "issue:43388|issue:43526", "issue:44514|issue:44521", - "issue:43572|issue:44589", "issue:43844|issue:44936", + "issue:37428|issue:43824", "issue:43575|issue:44464", - "issue:43824|issue:43975", + "issue:43572|issue:44987", "issue:43295|issue:43450", + "issue:43572|issue:44589", "issue:44442|issue:45198", + "issue:43824|issue:43975", "issue:43295|issue:43873", "issue:38175|issue:45412", "issue:43278|issue:43856", "issue:43295|issue:43994", - "issue:44360|issue:44561", - "issue:42915|issue:43824" + "issue:44360|issue:44561" ], - "prepared_review_unit_hash": "d1b0b73ac42342681ea1d60489394f1a8e21ec776865a6dabcfcfff5f2a3b625", + "prepared_review_unit_hash": "9ccc3ce2f4f2b3714fe60080e04f08dc563b68bb5843857dfb2ddc3070e85c87", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + 
"soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 30, - "serialized_chars": 36847, - "estimated_input_tokens": 9212, - "estimated_eval_tokens": 18680 + "serialized_chars": 36888, + "estimated_input_tokens": 9222, + "estimated_eval_tokens": 18700 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly heterogeneous and does not look like a single duplicate set. The only strong duplicate-like edge is the shared `pad_token_id` config regression across new model configs; the rest are unrelated or only broadly similar at the subsystem level.", - "confidence": 0.9, - "canonical_issue_reason": "Issue 43334 is the clearest representative of the shared `pad_token_id`-missing config regression and is the more concrete report in the only likely duplicate pair.", + "summary": "The cluster is very heterogeneous: it mixes unrelated tokenizer, multimodal, loading, training, and memory issues. 
None of the soft pairs looks like the same underlying bug or change, so I rejected all of them.", + "confidence": 0.96, + "canonical_issue_reason": "issue:44521 is the best cluster representative: it is active, well-discussed, and describes a concrete regression in a core multimodal API with clear repro details.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 43334 is the best anchor for the cluster\u2019s one real overlap; it states the failure mode directly and maps to the likely shared fix path.", + "best_issue_reason": "issue:44521 has the strongest combination of impact, discussion, and reproducibility among this set, even though it does not duplicate the other issues.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:45072", + "right": "issue:45198", + "accept": false, + "reason": "Different failures: dtype mismatch in bfloat16 inference vs save_pretrained/tokenization problems in Wav2Vec2." + }, { "left": "issue:43232", "right": "issue:44849", "accept": false, - "reason": "Both mention generation/model behavior, but they are different bugs: `sync_gpus` kwargs handling vs `output_hidden_states` on Qwen3.5." + "reason": "Different code paths: generation kwargs sync issue vs Qwen3.5 hidden-states output bug." }, { "left": "issue:43844", "right": "issue:44964", "accept": false, - "reason": "ZeRO-3 gradient growth and Phi-4 multimodal loading are unrelated code paths." + "reason": "Unrelated problems: ZeRO-3 gradient growth vs model-loading failure for Phi-4 multimodal." }, { "left": "issue:39401", "right": "issue:44568", "accept": false, - "reason": "Tokenizer offset mapping and add_special_tokens/BOS-EOS behavior are distinct tokenizer bugs." + "reason": "Both are tokenizer bugs, but they affect different models and behaviors: offset mapping vs missing BOS/EOS on add_special_tokens." 
+ }, + { + "left": "issue:44315", + "right": "issue:44849", + "accept": false, + "reason": "Liger kernel not applied during model_init is unrelated to Qwen3.5 output_hidden_states behavior." }, { "left": "issue:37428", "right": "issue:43957", "accept": false, - "reason": "Flash-attention import error vs `torch.device(\"meta\")` loading failure are different regressions." + "reason": "ImportError for a flash-attention helper is not the same as torch.device('meta') model-loading breakage." }, { - "left": "issue:44315", - "right": "issue:44849", + "left": "issue:44554", + "right": "issue:45356", "accept": false, - "reason": "Liger Kernel application in `model_init` is unrelated to hidden-states output on Qwen3.5." + "reason": "MPS attention correctness issue is unrelated to Kimi-K2.5 tokenizer codec/regression handling." }, { "left": "issue:43334", "right": "issue:43525", - "accept": true, - "reason": "Both report the same missing-`pad_token_id` config regression on new model classes, just surfaced through different models." - }, - { - "left": "issue:44554", - "right": "issue:45356", "accept": false, - "reason": "MPS attention correctness and Kimi tokenizer codec handling are unrelated." + "reason": "Both mention missing pad_token_id, but they are different model families and distinct config objects, not the same bug." }, { "left": "issue:43295", "right": "issue:44610", "accept": false, - "reason": "Processor/tokenizer regression and OmDet image-size mismatch are different processor bugs." + "reason": "Processor.tokenizer/images-to-tokenizer regression is unrelated to OmDet-Turbo image-size mismatch." }, { "left": "issue:43493", "right": "issue:43701", "accept": false, - "reason": "SigLIP2 implementation mismatch and resume-from-checkpoint key mismatch do not share a concrete code path." + "reason": "SigLIP2 implementation discrepancy is unrelated to resume_from_checkpoint key mismatch." 
}, { "left": "issue:45200", "right": "issue:45325", "accept": false, - "reason": "Gemma 4 multimodal token types and Qwen2.5-VL rope indexing are separate model-specific issues." + "reason": "Gemma 4 token-type defaults and Qwen2.5-VL rope-index scaling are different multimodal config bugs." }, { "left": "issue:44898", "right": "issue:45412", "accept": false, - "reason": "Perceiver resize/interpolation failure and RT-DETR memory release are unrelated." + "reason": "Perceiver interpolation failure is unrelated to RT-DETR memory not being released." }, { "left": "issue:43873", "right": "issue:44464", "accept": false, - "reason": "Quantization/offloading behavior and compiled-forward chunked generation are different problems." + "reason": "Quantization/offloading behavior is a different problem from chunked generation with compiled forward." }, { "left": "issue:44466", "right": "issue:45127", "accept": false, - "reason": "Both involve weights, but serialization inconsistency and LoRA-merge collapse are not the same bug." + "reason": "lm_head serialization inconsistency is unrelated to LoRA merge collapse with extended vocabulary." }, { "left": "issue:40990", "right": "issue:44625", "accept": false, - "reason": "High perplexity on gpt-oss-20b and Qwen3.5 `num_labels` propagation are unrelated." - }, - { - "left": "issue:37428", - "right": "issue:43824", - "accept": false, - "reason": "These are both import-related, but they miss different symbols in different model families." - }, - { - "left": "issue:43572", - "right": "issue:44987", - "accept": false, - "reason": "StableLm `pad_token_idx` regression and loading `physical-intelligence/fast` failure are unrelated." + "reason": "High perplexity on gpt-oss-20b is unrelated to Qwen3.5 num_labels propagation." 
}, { "left": "issue:43388", "right": "issue:43526", "accept": false, - "reason": "Both concern labels, but `gather_for_metrics` truncation and `reduce_labels` in BeitImageProcessorFast are different layers and failures." + "reason": "Both involve labels, but one drops tuple labels in gather_for_metrics while the other reduces BEiT labels incorrectly." }, { "left": "issue:44514", "right": "issue:44521", "accept": false, - "reason": "Both touch `apply_chat_template`, but one is a batching/padding crash and the other is wrong assistant masks." + "reason": "Same API area, but different symptoms: batched padding=False crash vs all-zero assistant masks for multimodal inputs." }, { - "left": "issue:43572", - "right": "issue:44589", + "left": "issue:43844", + "right": "issue:44936", "accept": false, - "reason": "Missing `pad_token_idx` and Float8 storage lookup errors are unrelated." + "reason": "ZeRO-3 gradient inflation and trainer.evaluate-after-train failure are unrelated training issues." }, { - "left": "issue:43844", - "right": "issue:44936", + "left": "issue:37428", + "right": "issue:43824", "accept": false, - "reason": "ZeRO-3 gradient growth and trainer evaluate-after-train behavior are different training bugs." + "reason": "Different import/loading failures: flash-attention helper ImportError vs missing Qwen2_5_VLForConditionalGeneration export." }, { "left": "issue:43575", "right": "issue:44464", "accept": false, - "reason": "Tensor-parallel OOM on model load is unrelated to compiled-forward chunked generation output inconsistency." + "reason": "OOM on loading Qwen2-57B with tp is not the same as chunked generation inconsistency with compiled forward." }, { - "left": "issue:43824", - "right": "issue:43975", + "left": "issue:43572", + "right": "issue:44987", "accept": false, - "reason": "Importing `Qwen2_5_VLForConditionalGeneration` and incorrect detokenization are different issues." 
+ "reason": "StableLmConfig pad_token_idx regression is unrelated to loading physical-intelligence/fast on transformers>=5.1.0." }, { "left": "issue:43295", "right": "issue:43450", "accept": false, - "reason": "Processor API regression and batched video shape bug are only broadly related to processors, not the same bug." + "reason": "Custom processor/tokenizer regression is not the same as batched video processor shape handling." + }, + { + "left": "issue:43572", + "right": "issue:44589", + "accept": false, + "reason": "Missing pad_token_idx in StableLmConfig is unrelated to Float8 storage lookup failure." }, { "left": "issue:44442", "right": "issue:45198", "accept": false, - "reason": "FastSpeech2Conformer tokenizer loading and Wav2Vec2 save/tokenization failure are separate tokenizer issues." + "reason": "AutoTokenizer loading failure for FastSpeech2ConformerTokenizer is unrelated to Wav2Vec2 save/tokenization failure." + }, + { + "left": "issue:43824", + "right": "issue:43975", + "accept": false, + "reason": "Qwen2_5_VL import failure is unrelated to deepseek-coder detokenization regression." }, { "left": "issue:43295", "right": "issue:43873", "accept": false, - "reason": "Processor regression and quantization/offloading behavior do not match." + "reason": "Processor/tokenizer regression and quantization offloading bug are different subsystems and different failures." }, { "left": "issue:38175", "right": "issue:45412", "accept": false, - "reason": "SigLIP2 zero probabilities and RT-DETR memory leak are unrelated." + "reason": "SigLIP2 zero-probabilities bug is unrelated to RT-DETR memory release behavior." }, { "left": "issue:43278", "right": "issue:43856", "accept": false, - "reason": "Embedding dtype change during eval and Qwen3 MoE memory usage are different problems." + "reason": "Embedding dtype drift in eval is not the same as Qwen3 MoE training memory inefficiency." 
}, { "left": "issue:43295", "right": "issue:43994", "accept": false, - "reason": "Custom processor/tokenizer regression and SigLIP2 nonsensical outputs are unrelated." + "reason": "Custom processor/tokenizer regression is unrelated to SigLIP2 nonsensical outputs." }, { "left": "issue:44360", "right": "issue:44561", "accept": false, - "reason": "DSA ReLU discussion and removal of `is_torch_fx_available` are unrelated." - }, - { - "left": "issue:42915", - "right": "issue:43824", - "accept": false, - "reason": "Qwen3Moe FP8 failure and Qwen2_5_VL import error are unrelated." + "reason": "DSA indexer/ReLU discussion is unrelated to removal of is_torch_fx_available breaking trust_remote_code models." } ] }, - "evaluator_result": { - "accept": true, - "feedback": "The assessment is grounded in the packet: most pairs are clearly different bugs, and the only accepted edge (43334/43525) is a conservative match on the same missing-`pad_token_id` config regression across model configs. No overstatement evident." 
- }, - "evaluator_used": true, + "evaluator_result": null, + "evaluator_used": false, "retried": false, - "accepted_nontrivial_soft_edge": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -6634,13 +6498,14 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:29942", "issue:32090", "issue:37428", "issue:38175", "issue:39692", + "issue:42915", "issue:43262", "issue:43278", "issue:43335", @@ -6655,12 +6520,12 @@ "issue:43611", "issue:43653", "issue:43819", + "issue:43824", "issue:43825", "issue:43827", "issue:43901", "issue:43931", "issue:44112", - "issue:44292", "issue:44351", "issue:44448", "issue:44462", @@ -6682,13 +6547,13 @@ "issue:45290", "issue:45310", "issue:45381", - "issue:45478", - "issue:45520" + "issue:45478" ], "soft_pairs": [ "issue:43827|issue:43901", "issue:44849|issue:45003", "issue:38175|issue:43493", + "issue:42915|issue:43824", "issue:43653|issue:44112", "issue:44568|issue:44991", "issue:43450|issue:43493", @@ -6699,262 +6564,268 @@ "issue:44462|issue:44561", "issue:44561|issue:44779", "issue:44610|issue:44857", - "issue:37428|issue:43425", "issue:43819|issue:44112", "issue:43262|issue:43450", "issue:44448|issue:44743", "issue:43606|issue:45083", "issue:43278|issue:43825", + "issue:37428|issue:43425", "issue:29942|issue:32090", "issue:45310|issue:45478", - "issue:37428|issue:44351", "issue:43819|issue:44493", "issue:43611|issue:43931", "issue:39692|issue:43526", "issue:43335|issue:43550", + "issue:37428|issue:44351", "issue:43450|issue:43526", - "issue:45200|issue:45381", "issue:44855|issue:45083", + "issue:45200|issue:45381", "issue:43404|issue:45200", - "issue:44292|issue:45520", "issue:39692|issue:44493", + "issue:43335|issue:43653", "issue:44448|issue:44779" ], - "prepared_review_unit_hash": "6e59b9cf73c0db6813a43203d58fb8e4653fd08d286cbef1379ef0908ebdc7a0", + 
"prepared_review_unit_hash": "97499c222d4baee33989bbca5f48ebb47a1e9a6d038cbaf1b4b0e10d6e95b63d", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 33, - "serialized_chars": 37611, - "estimated_input_tokens": 9403, - "estimated_eval_tokens": 19062 + "soft_pair_count": 34, + "serialized_chars": 37953, + "estimated_input_tokens": 9489, + "estimated_eval_tokens": 19234 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly unrelated issues; only one clear duplicate pair appears in the Qwen3.5 MoE from_pretrained regression reports.", - "confidence": 0.91, - "canonical_issue_reason": "issue:45310 is the earlier, broader report of the Qwen3.5 MoE from_pretrained regression, and issue:45478 reads like a later duplicate follow-up.", + "summary": "Mostly heterogeneous issue set. 
The only soft-edge duplicate that is well supported from the packet is the Qwen3.5 MoE from_pretrained pair (issue:45310 / issue:45478); the other candidate pairs look like broader thematic similarities rather than the same underlying bug.", + "confidence": 0.94, + "canonical_issue_reason": "issue:45310 is the best canonical issue because it is the earlier report of the same Qwen3.5 MoE from_pretrained failure and has the same concrete title/scope as issue:45478.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45310 is the best representative issue for the accepted duplicate pair because it is earlier and already captured the same failure mode.", + "best_issue_reason": "issue:45310 is the best representative issue for this duplicate pair: it states the concrete failure directly, predates issue:45478, and the packet supports treating 45478 as the later near-identical report.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43827", "right": "issue:43901", "accept": false, - "reason": "Both are docs-related pipeline complaints, but they target different docs and different problems." + "reason": "Both are v5/docs-related, but they describe different concrete problems: leftover pipeline references versus return_all_scores docs/behavior." }, { "left": "issue:44849", "right": "issue:45003", "accept": false, - "reason": "Different subsystems and failure modes; one is a Qwen3.5 hidden-states bug, the other a sys.modules access issue." + "reason": "Different models and different code paths; one is a Qwen3.5 hidden-states bug, the other is unsafe sys.modules access." }, { "left": "issue:38175", "right": "issue:43493", "accept": false, - "reason": "Same model family, but the reported symptoms differ enough that this looks like a related area, not the same bug." + "reason": "Both mention SigLIP2, but the reported failures are different: zero probabilities versus HF/JAX implementation discrepancy." 
+ }, + { + "left": "issue:42915", + "right": "issue:43824", + "accept": false, + "reason": "Different model families and failure modes; FP8 config failure is unrelated to the Qwen2.5-VL import error." }, { "left": "issue:43653", "right": "issue:44112", "accept": false, - "reason": "Tokenizer special-token decoding and a GraniteSpeech CI test failure are unrelated." + "reason": "BigBirdTokenizer special-token registration and GraniteSpeech CI device override are unrelated bugs." }, { "left": "issue:44568", "right": "issue:44991", "accept": false, - "reason": "Both involve tokenizer loading/behavior, but they are distinct models and distinct breakages." + "reason": "These are separate tokenizer regressions affecting different models and different behaviors." }, { "left": "issue:43450", "right": "issue:43493", "accept": false, - "reason": "Video processor batching shape bug vs SigLIP2 implementation discrepancy; not the same code-path issue." + "reason": "A batched video-shape bug is not the same as the SigLIP2 implementation mismatch." }, { "left": "issue:45127", "right": "issue:45245", "accept": false, - "reason": "LoRA/vocab collapse and a category-limit runtime error are unrelated." + "reason": "LoRA merge/vocab collapse and the category-count runtime error are different defects." }, { "left": "issue:45290", "right": "issue:45381", "accept": false, - "reason": "Chat template tool-call crash and Qwen2.5-VL video position-id bug are different failures." + "reason": "Different multimodal bugs: chat-template tool-call handling versus Qwen2.5-VL video position ids." }, { "left": "issue:43819", "right": "issue:45083", "accept": false, - "reason": "DAC latent conversion mismatch and qwen3_omni_moe feature-length helper behavior are unrelated." + "reason": "DAC latent/STE mismatch and qwen3_omni_moe feature-length behavior do not describe the same code-path problem." 
}, { "left": "issue:43450", "right": "issue:43575", "accept": false, - "reason": "Batched video shape bug and Qwen2-57B-A14B-Instruct tensor-parallel OOM are not the same issue." + "reason": "Video processor batch-shape handling and TP OOM on model load are unrelated." }, { "left": "issue:44462", "right": "issue:44561", "accept": false, - "reason": "Tokenizer.json handling and removal of is_torch_fx_available are unrelated regression classes." + "reason": "Tokenizer.json loading and removal of is_torch_fx_available affect different subsystems." }, { "left": "issue:44561", "right": "issue:44779", "accept": false, - "reason": "Both mention v5 regressions, but one is a missing helper API and the other is tokenizer output corruption." + "reason": "Both are v5 regressions, but one is trust_remote_code compatibility and the other is Deepseek tokenizer correctness." }, { "left": "issue:44610", "right": "issue:44857", "accept": false, - "reason": "Different model processors and different symptoms; shape mismatch is not the same as AMP CUDA crash." - }, - { - "left": "issue:37428", - "right": "issue:43425", - "accept": false, - "reason": "Flash-attention import error and Torch 2.10 incompatibility are related only at a broad ecosystem level, not the same bug." + "reason": "OmDet-Turbo processor sizing and LwDetrImageLoss AMP crash are different problems." }, { "left": "issue:43819", "right": "issue:44112", "accept": false, - "reason": "Unrelated audio/latent conversion issue versus a stale CI device override test." + "reason": "Audio/DAC latent handling is unrelated to GraniteSpeech CI device override behavior." }, { "left": "issue:43262", "right": "issue:43450", "accept": false, - "reason": "Audio chat-template sampling-rate default and video batching shape are different processor bugs." + "reason": "Different processor bugs: audio sampling-rate defaulting versus batched video output shape." 
}, { "left": "issue:44448", "right": "issue:44743", "accept": false, - "reason": "Pegasus v4/v5 output difference and Qwen recurrent-state reset are separate model behaviors." + "reason": "Pegasus v4/v5 output difference and qwen3_5 recurrent-state reset are unrelated." }, { "left": "issue:43606", "right": "issue:45083", "accept": false, - "reason": "CPU offload device mismatch and qwen3_omni_moe feature-length helper behavior are unrelated." + "reason": "CPU offload device mismatch is not the same as qwen3_omni_moe feature-length behavior." }, { "left": "issue:43278", "right": "issue:43825", "accept": false, - "reason": "Embedding dtype drift and a pipeline error-message issue do not share the same underlying change." + "reason": "Embedding dtype regression and pipeline error-message behavior are different issues." + }, + { + "left": "issue:37428", + "right": "issue:43425", + "accept": false, + "reason": "Two version-related errors, but with different missing symbols / incompatibility surfaces." }, { "left": "issue:29942", "right": "issue:32090", "accept": false, - "reason": "Flash Attention 2 test failures and _gpu_broadcast_one NoneType errors are unrelated." + "reason": "Flash Attention 2 test failures and a Trainer _gpu_broadcast_one NoneType error are unrelated." }, { "left": "issue:45310", "right": "issue:45478", "accept": true, - "reason": "Same Qwen3.5 MoE from_pretrained error on transformers>=5.4.0; 45478 is a duplicate report." - }, - { - "left": "issue:37428", - "right": "issue:44351", - "accept": false, - "reason": "Both are import errors, but they involve different missing symbols and different modules." + "reason": "Same title and same concrete Qwen3.5 MoE from_pretrained failure; 45478 reads as a later duplicate report of 45310." }, { "left": "issue:43819", "right": "issue:44493", "accept": false, - "reason": "DAC latent mismatch and unexpected position-id keys are not the same bug." 
+ "reason": "DAC latent/STE mismatch and position-id warnings across many models are different bugs." }, { "left": "issue:43611", "right": "issue:43931", "accept": false, - "reason": "Generic model-loading breakage from base_model_prefix removal vs a specific Qwen3-VL weight-shape mismatch." + "reason": "base_model_prefix loading breakage and Qwen3-VL weight-shape mismatch are distinct model-loading problems." }, { "left": "issue:39692", "right": "issue:43526", "accept": false, - "reason": "SigLIP2 docs/quantization errors and BEiT reduce_labels behavior are unrelated." + "reason": "SigLIP2 documentation/model-processor issues and BEiT fast label reduction are unrelated." }, { "left": "issue:43335", "right": "issue:43550", "accept": false, - "reason": "SwitchTransformers sparse-layer construction and Bamba torch.compile SDPA failure are different concrete bugs." + "reason": "SwitchTransformers sparse-layer config and Bamba SDPA torch.compile failure are different defects." }, { - "left": "issue:43450", - "right": "issue:43526", + "left": "issue:37428", + "right": "issue:44351", "accept": false, - "reason": "Video processor batching shape issue and BEiT label reduction bug do not match." + "reason": "Both are import errors, but for different missing symbols and likely different regressions." }, { - "left": "issue:45200", - "right": "issue:45381", + "left": "issue:43450", + "right": "issue:43526", "accept": false, - "reason": "Both are multimodal token-id/default issues, but they affect different models and different fields." + "reason": "Video batch-shape handling and BEiT label reduction are unrelated." }, { "left": "issue:44855", "right": "issue:45083", "accept": false, - "reason": "Python 3.13 import parsing failure and qwen3_omni_moe helper behavior are unrelated." + "reason": "Python 3.13 torch.jit parsing and qwen3_omni_moe helper behavior are unrelated." 
}, { - "left": "issue:43404", - "right": "issue:45200", + "left": "issue:45200", + "right": "issue:45381", "accept": false, - "reason": "lm_head weight tying and mm_token_type_ids defaults are separate model configuration bugs." + "reason": "Gemma 4 mm_token_type_ids defaulting and Qwen2.5-VL video position ids are different multimodal bugs." }, { - "left": "issue:44292", - "right": "issue:45520", + "left": "issue:43404", + "right": "issue:45200", "accept": false, - "reason": "Qwen-3-8B-NVFP4 runtime error and a flash_attn import KeyError are not the same failure." + "reason": "Mistral3 weight tying and Gemma 4 text-only fine-tuning inputs are unrelated." }, { "left": "issue:39692", "right": "issue:44493", "accept": false, - "reason": "SigLIP2 doc/quantization problems and position-id key warnings are unrelated." + "reason": "SigLIP2 example errors and widespread position-id warnings are not the same underlying issue." + }, + { + "left": "issue:43335", + "right": "issue:43653", + "accept": false, + "reason": "SwitchTransformers sparse-layer creation and BigBirdTokenizer special-token registration are unrelated." }, { "left": "issue:44448", "right": "issue:44779", "accept": false, - "reason": "Pegasus output drift and Deepseek tokenizer regressions are separate issues." + "reason": "Pegasus generation differences and Deepseek tokenizer regressions are distinct." } ] }, "evaluator_result": { - "accept": true, - "feedback": "The summary is well grounded in the packet, and the only accepted pair (issue:45310 / issue:45478) is clearly supported by the titles and timing as a duplicate Qwen3.5 MoE from_pretrained regression. The other verdicts stay conservative and do not overstate evidence." + "accept": false, + "feedback": "The main duplicate call is plausible, but the summary is not fully grounded: issue:45310 is not open in the packet (it is closed), so the canonical-issue rationale overstates the evidence. 
Please revise any status claims and keep the duplicate verdict limited to what the packet supports." }, "evaluator_used": true, - "retried": false, + "retried": true, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null @@ -6970,7 +6841,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:25251", "issue:29127", @@ -6992,7 +6863,6 @@ "issue:43526", "issue:43550", "issue:43606", - "issue:43653", "issue:43701", "issue:43720", "issue:43723", @@ -7000,6 +6870,7 @@ "issue:43976", "issue:43994", "issue:44112", + "issue:44292", "issue:44448", "issue:44451", "issue:44462", @@ -7022,12 +6893,12 @@ "issue:45520" ], "soft_pairs": [ - "issue:29127|issue:32090", - "issue:43335|issue:43653", "issue:44977|issue:45229", + "issue:29127|issue:32090", + "issue:44292|issue:45520", "issue:38175|issue:44493", - "issue:43335|issue:45083", "issue:43335|issue:43504", + "issue:43335|issue:45083", "issue:43550|issue:44112", "issue:43335|issue:45072", "issue:44568|issue:45491", @@ -7038,264 +6909,264 @@ "issue:43335|issue:44112", "issue:34689|issue:35141", "issue:43606|issue:44857", - "issue:45372|issue:45406", "issue:44857|issue:45072", + "issue:45372|issue:45406", "issue:43873|issue:44493", "issue:43066|issue:44462", "issue:43873|issue:45229", - "issue:37428|issue:44987", "issue:43278|issue:43329", "issue:43701|issue:45083", "issue:29942|issue:35141", - "issue:41720|issue:45229", "issue:43550|issue:45083", - "issue:44451|issue:45520", + "issue:41720|issue:45229", + "issue:37428|issue:44987", "issue:32090|issue:33357", "issue:43976|issue:45440", + "issue:44451|issue:45520", "issue:45372|issue:45459", "issue:44448|issue:44466", "issue:43065|issue:45412", "issue:43065|issue:43994", "issue:25251|issue:32090" ], - "prepared_review_unit_hash": "bf3dc7e4f618d31c5147b390fb11bc6e6f96959baf75612d56985900c56850fb", + "prepared_review_unit_hash": 
"c729571a62cc493c123410759a22d1ffce878ede92ddd5ec3b4a66b72674cd2f", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 35, - "serialized_chars": 38386, - "estimated_input_tokens": 9597, - "estimated_eval_tokens": 19450 + "serialized_chars": 38319, + "estimated_input_tokens": 9580, + "estimated_eval_tokens": 19416 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is over-broad: the titles span many unrelated Transformers bugs, and none of the soft-edge pairs look like the same underlying issue. The strongest overlaps are only subsystem-level similarity (e.g. Gemma4, tokenizer, SwitchTransformers, flash-attn), not duplicate bugs.", - "confidence": 0.96, - "canonical_issue_reason": "No clear canonical duplicate emerges; issue 43335 is the most connected by the soft graph, but its matches are mostly superficial and unrelated. ", + "summary": "All suggested soft edges look like false positives: they share broad language (model loading, attention, quantization, CI, etc.) but describe different failures, code paths, or model families. 
No duplicate cluster is strong enough to merge.", + "confidence": 0.93, + "canonical_issue_reason": "No single canonical issue emerges; the candidate pairs are mostly unrelated bugs with only superficial subsystem overlap.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 43335 is the closest thing to a hub in this cluster, but it is still not a good duplicate anchor because the neighboring issues describe different failure modes and code paths.", + "best_issue_reason": "No issue is a safe global representative because the set is heterogeneous across distinct models, loaders, and runtime errors.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:44977", + "right": "issue:45229", + "accept": false, + "reason": "Both are model/inference problems, but one is flash-attention generation corruption and the other is multi-GPU CUDA OOM; different failure modes and fixes." + }, { "left": "issue:29127", "right": "issue:32090", "accept": false, - "reason": "Different problems: LayoutLMv3 error messaging vs Trainer GPU broadcast NoneType failure." + "reason": "One is a LayoutLMv3 validation/error-message issue, the other is a Trainer broadcast TypeError; unrelated code paths." }, { - "left": "issue:43335", - "right": "issue:43653", + "left": "issue:44292", + "right": "issue:45520", "accept": false, - "reason": "SwitchTransformers config bug vs BigBirdTokenizer special-token decode issue; unrelated code paths." - }, - { - "left": "issue:44977", - "right": "issue:45229", - "accept": false, - "reason": "Flash-attention generation issue vs multi-GPU CUDA OOM; not the same bug." + "reason": "Qwen-3 NVFP4 runtime failure and a Python 3.13 flash_attn import KeyError are distinct issues with different causes." }, { "left": "issue:38175", "right": "issue:44493", "accept": false, - "reason": "Siglip2 zero-probabilities vs unexpected position-id key on many models; distinct symptoms and likely causes." 
+ "reason": "SigLIP2 zero probabilities and unexpected position-id keys are separate model-behavior bugs with no clear shared root cause." }, { "left": "issue:43335", - "right": "issue:45083", + "right": "issue:43504", "accept": false, - "reason": "Sparse-layer config bug vs qwen3_omni_moe helper function behavior; not duplicates." + "reason": "A SwitchTransformers config edge case and a BEiT pretrained-load legacy-field issue are unrelated." }, { "left": "issue:43335", - "right": "issue:43504", + "right": "issue:45083", "accept": false, - "reason": "SwitchTransformers sparse-layer creation vs BEiT legacy-field loading failure; different models and failures." + "reason": "Sparse-layer construction in SwitchTransformers and a qwen3_omni_moe feature-length helper bug are different code paths." }, { "left": "issue:43550", "right": "issue:44112", "accept": false, - "reason": "torch.compile/SDPA failure in Bamba vs stale CI test in GraniteSpeech; unrelated." + "reason": "torch.compile/SDPA failure in Bamba is unrelated to a GraniteSpeech CI test stale-device override." }, { "left": "issue:43335", "right": "issue:45072", "accept": false, - "reason": "Same model family, but one is config/sparse-layer creation and the other is bfloat16 dtype mismatch in inference." + "reason": "SwitchTransformers sparse-layer config and dtype mismatch CI failures in other models are not the same bug." }, { "left": "issue:44568", "right": "issue:45491", "accept": false, - "reason": "Tokenizer add_special_tokens regression vs Gemma3 NaN embeddings during mixed-length batching; unrelated." + "reason": "Tokenizer special-token behavior and Gemma3 NaN embeddings under sliding-window attention are unrelated." }, { "left": "issue:43720", "right": "issue:44112", "accept": false, - "reason": "Packed-weight loading bug vs CI test staleness; not the same code-path problem." + "reason": "Packed-weight loading failure in BitNet does not match a GraniteSpeech CI device-override test issue." 
}, { "left": "issue:44448", "right": "issue:44977", "accept": false, - "reason": "Pegasus v4/v5 output regression vs Qwen3.5 flash-attn generation issue; different models and causes." + "reason": "Pegasus v4/v5 output drift is a model regression, while the other issue is Qwen3.5 flash-attention generation failure." }, { "left": "issue:40444", "right": "issue:43526", "accept": false, - "reason": "Qwen2.5-VL multi-image dataset failure vs BEiT label reduction bug; both vision-related but not the same issue." + "reason": "Qwen2.5-VL multi-image iterable dataset finetuning and BEiT reduce_labels returning one label are different preprocessing bugs." }, { "left": "issue:43723", "right": "issue:44554", "accept": false, - "reason": "Tokenizer loading regression vs MPS attention correctness issue; unrelated." + "reason": "AutoTokenizer loading in v5 and MPS attention correctness with mismatched head dims are unrelated." }, { "left": "issue:43335", "right": "issue:44112", "accept": false, - "reason": "Sparse-layer config bug vs stale device-override test; no shared underlying defect." + "reason": "SwitchTransformers sparse-layer config and GraniteSpeech CI test failure are unrelated." }, { "left": "issue:34689", "right": "issue:35141", "accept": false, - "reason": "Llama 3.2 Vision load breakage vs embedding reinit after resize; different symptoms and mechanisms." + "reason": "Llama 3.2 vision model loading breakage and embedding reinitialization after resizing are distinct issues." }, { "left": "issue:43606", "right": "issue:44857", "accept": false, - "reason": "CPU offload device mismatch vs AMP float16 crash; not the same bug." + "reason": "CPU offload device mismatch and float16 AMP CUDA crash are different runtime/device problems." 
}, { - "left": "issue:45372", - "right": "issue:45406", + "left": "issue:44857", + "right": "issue:45072", "accept": false, - "reason": "Gemma4 processor import dependency failure vs Gemma4Processor missing _tokenizer in serve; related area, but different concrete bugs." + "reason": "LwDetrImageLoss AMP crash and SwitchTransformers/TimmWrapperModel bfloat16 dtype mismatch are separate failures." }, { - "left": "issue:44857", - "right": "issue:45072", + "left": "issue:45372", + "right": "issue:45406", "accept": false, - "reason": "Different dtype issues in different models/code paths; not mergeable into one PR." + "reason": "Gemma4 processor loading blocked by a mistral_common import error is not the same as Gemma4Processor missing _tokenizer in serve." }, { "left": "issue:43873", "right": "issue:44493", "accept": false, - "reason": "Quantization offload issue vs unexpected position-id key regression; unrelated." + "reason": "Quantization offloading behavior and unexpected position-id keys are not the same underlying bug." }, { "left": "issue:43066", "right": "issue:44462", "accept": false, - "reason": "Tokenizer decoder type bug vs AutoTokenizer ignoring tokenizer.json; different loading semantics." + "reason": "Wrong tokenizer decoder type in v5 and AutoTokenizer ignoring tokenizer.json are related to tokenizers broadly but describe different failures." }, { "left": "issue:43873", "right": "issue:45229", "accept": false, - "reason": "Quantization/offload problem vs Gemma4 multi-GPU OOM; unrelated." - }, - { - "left": "issue:37428", - "right": "issue:44987", - "accept": false, - "reason": "Missing flash-attention import symbol vs model loading failure for a specific repo; too vague and not clearly the same bug." + "reason": "Quantization offloading and Gemma4 multi-GPU OOM are both memory-related at a high level, but not the same concrete bug." 
}, { "left": "issue:43278", "right": "issue:43329", "accept": false, - "reason": "Embedding dtype mismatch across train/eval vs multimodal token-count helper bug; unrelated." + "reason": "Embedding dtype drift in evaluate and a multimodal token-count helper using undefined video variables are unrelated." }, { "left": "issue:43701", "right": "issue:45083", "accept": false, - "reason": "Resume checkpoint key mismatch vs qwen3_omni_moe helper function behavior; different defects." + "reason": "resume_from_checkpoint key mismatch and qwen3_omni_moe feature-length behavior are different parts of the codebase." }, { "left": "issue:29942", "right": "issue:35141", "accept": false, - "reason": "Flash Attention 2 test failures vs embedding reinitialization after resize; unrelated." + "reason": "Flash Attention 2 test failures and embedding reinitialization after resize are unrelated." }, { - "left": "issue:41720", - "right": "issue:45229", + "left": "issue:43550", + "right": "issue:45083", "accept": false, - "reason": "Qwen3 auto-device assert vs Gemma4 OOM; different failure modes." + "reason": "Bamba SDPA compile failure and qwen3_omni_moe feature-length helper behavior are distinct bugs." }, { - "left": "issue:43550", - "right": "issue:45083", + "left": "issue:41720", + "right": "issue:45229", "accept": false, - "reason": "Bamba torch.compile/SDPA issue vs qwen3_omni_moe length helper bug; unrelated." + "reason": "Qwen3 auto device mapping cudaErrorAssert and Gemma4 multi-GPU OOM are different runtime failures." }, { - "left": "issue:44451", - "right": "issue:45520", + "left": "issue:37428", + "right": "issue:44987", "accept": false, - "reason": "ScandiBERT load failure vs Python 3.13 flash_attn import key error; not the same bug." + "reason": "Flash attention utility import breakage and loading physical-intelligence/fast are not the same issue." 
}, { "left": "issue:32090", "right": "issue:33357", "accept": false, - "reason": "Trainer broadcast NoneType failure vs MacOS bus error on CLIP model; different platform and cause." + "reason": "Trainer _gpu_broadcast_one NoneType error and MacOS bus error with community CLIP are unrelated." }, { "left": "issue:43976", "right": "issue:45440", "accept": false, - "reason": "Python version incompatibility vs DeepSeekV3MoE divergence from remote implementation; unrelated." + "reason": "Python version compatibility issue and DeepseekV3MoE divergence from remote implementation are different classes of bugs." + }, + { + "left": "issue:44451", + "right": "issue:45520", + "accept": false, + "reason": "Cannot load a specific tokenizer/model and flash_attn KeyError on Python 3.13 are unrelated." }, { "left": "issue:45372", "right": "issue:45459", "accept": false, - "reason": "Gemma4 import failure vs protobuf error masking in tokenizer loading; related area but different concrete bug." + "reason": "Gemma4 processor import problem and tokenizer error masking when protobuf is absent are different error paths." }, { "left": "issue:44448", "right": "issue:44466", "accept": false, - "reason": "Pegasus output regression vs lm_head serialization inconsistency; distinct issues." + "reason": "Pegasus output regression and lm_head.weight serialization inconsistency are distinct v4/v5 issues." }, { "left": "issue:43065", "right": "issue:45412", "accept": false, - "reason": "Dummy Conv2d in Sam3PixelDecoder vs RT-DETR memory release issue; unrelated." + "reason": "Dummy Conv2d in Sam3PixelDecoder and RT-DETR memory not being released are unrelated model implementation issues." }, { "left": "issue:43065", "right": "issue:43994", "accept": false, - "reason": "Sam3PixelDecoder dummy layer vs Siglip2 nonsensical outputs; different models and bugs." + "reason": "Sam3PixelDecoder dummy Conv2d and SigLIP2 nonsensical outputs are not the same bug." 
}, { "left": "issue:25251", "right": "issue:32090", "accept": false, - "reason": "Pipeline top_k nesting bug vs Trainer GPU broadcast NoneType error; unrelated." + "reason": "Pipeline top_k nesting change and Trainer GPU broadcast TypeError are unrelated." } ] }, @@ -7317,7 +7188,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:29127", "issue:29942", @@ -7375,10 +7246,10 @@ "issue:44265|issue:45198", "issue:44361|issue:45198", "issue:43257|issue:43278", - "issue:42915|issue:42994", "issue:44871|issue:44977", - "issue:43421|issue:45491", "issue:43994|issue:45412", + "issue:43421|issue:45491", + "issue:42915|issue:42994", "issue:44206|issue:44479", "issue:43756|issue:45440", "issue:43504|issue:44857", @@ -7386,8 +7257,8 @@ "issue:29942|issue:30064", "issue:33357|issue:39692", "issue:45103|issue:45372", - "issue:29127|issue:34567", "issue:32090|issue:33290", + "issue:29127|issue:34567", "issue:45072|issue:45229", "issue:43824|issue:44279", "issue:43381|issue:43701", @@ -7401,20 +7272,20 @@ "issue:29127|issue:33357", "issue:29127|issue:35141" ], - "prepared_review_unit_hash": "ce6f1e2b5ca13463693bfe6e10608b6661f7f633c003746ceee146ad85058820", + "prepared_review_unit_hash": "2bfec5282f4862bf48952da2cb856cb58505e8bf595199b75bb75aab0a579968", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 31, - "serialized_chars": 37066, + "serialized_chars": 37065, "estimated_input_tokens": 9267, "estimated_eval_tokens": 18790 }, @@ -7423,198 +7294,198 @@ "aggressively_trimmed": false, 
"split": true, "analyst_result": { - "summary": "This cluster is not a true duplicate set: it mixes unrelated bugs, regressions, model-specific issues, and docs/runtime problems across many subsystems. All soft edges are name-similarity only and should stay separate.", - "confidence": 0.97, - "canonical_issue_reason": "No clear canonical issue: the items span unrelated bugs and features across different models, tokenizers, processors, and training/runtime paths, so there is no single underlying report.", + "summary": "This cluster is mostly heterogeneous: the soft-similarity pairs share broad themes (tokenizers, Qwen, vision, trainer, quantization) but not the same concrete code-path bug. I would not merge any of the proposed soft pairs as duplicates.", + "confidence": 0.91, + "canonical_issue_reason": "issue:45198 is the most self-contained, concrete bug report in the set (clear failure mode plus save_pretrained/tokenization reproduction), so it is the best single representative if one must be chosen, though it does not subsume the rest.", "canonical_pr_reason": null, - "best_issue_reason": "No best issue candidate; any pick would be arbitrary because the cluster is heterogeneous rather than duplicate-driven.", + "best_issue_reason": "issue:45198 is a strong issue to anchor triage because it describes a specific, reproducible runtime failure with a narrow surface area and clear user impact.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44186", "right": "issue:45198", "accept": false, - "reason": "Different components and failures: LayoutLMv2 tokenizer NER/padding crash vs Wav2Vec2 save_pretrained/tokenization failure." + "reason": "Both are tokenizer failures, but they hit different models and code paths: LayoutLMv2 NER/padding vs Wav2Vec2 save_pretrained/tokenization." 
}, { "left": "issue:43278", "right": "issue:43381", "accept": false, - "reason": "Unrelated bugs: embedding dtype drift during eval vs gradient checkpointing being disallowed in eval mode." + "reason": "One is a dtype change between train/eval, the other is a gradient-checkpointing restriction in eval mode; related context, not the same bug." }, { "left": "issue:43653", "right": "issue:44743", "accept": false, - "reason": "Different code paths: BigBirdTokenizer special-token decode issue vs Qwen3-MoE recurrent-state reset with cache." + "reason": "BigBird tokenizer special-token registration and Qwen3-MoE recurrent-state resetting are unrelated subsystems and failure modes." }, { "left": "issue:44265", "right": "issue:45198", "accept": false, - "reason": "Different failures: torch.export / torch_compilable_check compatibility vs Wav2Vec2 save/tokenization behavior." + "reason": "torch.export with torch_compilable_check is a model export problem; 45198 is a tokenizer/save_pretrained issue." }, { "left": "issue:44361", "right": "issue:45198", "accept": false, - "reason": "Different tokenizer/task error vs Wav2Vec2 save/tokenization failure; no shared underlying bug." + "reason": "Different tokenizer/model families and different failures: MLuke task AttributeError vs Wav2Vec2 save/tokenization failure." }, { "left": "issue:43257", "right": "issue:43278", "accept": false, - "reason": "Qwen3 MoE weight conversion under accelerate+deepspeed is unrelated to BF16/FP32 evaluation dtype drift." + "reason": "Both involve Qwen/precision-adjacent complaints, but one is MoE weight conversion under accelerate+deepspeed and the other is eval-time dtype drift." }, { - "left": "issue:42915", - "right": "issue:42994", + "left": "issue:44871", + "right": "issue:44977", "accept": false, - "reason": "FineGrainedFP8Config failure in Qwen3Moe is not the same as quantized model saving." 
+ "reason": "Gemma EOS config mismatch and Qwen3.5 flash-attention generation issues are separate model-specific bugs." }, { - "left": "issue:44871", - "right": "issue:44977", + "left": "issue:43994", + "right": "issue:45412", "accept": false, - "reason": "Different models and causes: Gemma-3 eos_token_id config mismatch vs Qwen3.5 flash-attention generation bug." + "reason": "SigLIP2 inference nonsense and RT-DETR memory not released are unrelated symptoms and code paths." }, { "left": "issue:43421", "right": "issue:45491", "accept": false, - "reason": "TokenizersBackend post-processor update issue is unrelated to Gemma3 NaN embeddings from sliding-window attention." + "reason": "Runtime special-token post-processor updates and Gemma3 NaN embeddings from sliding-window attention are different tokenizer/model bugs." }, { - "left": "issue:43994", - "right": "issue:45412", + "left": "issue:42915", + "right": "issue:42994", "accept": false, - "reason": "SigLIP2 AutoModel/pipeline wrong outputs vs RT-DETR memory leak on deletion are different problems." + "reason": "Both mention quantization, but one is Qwen3Moe with FineGrainedFP8Config and the other is quantized model saving; not the same concrete defect." }, { "left": "issue:44206", "right": "issue:44479", "accept": false, - "reason": "Unsupported center arg in LasrFeatureExtractor is unrelated to the Qwen video-input regression." + "reason": "Feature extractor unsupported-arg crash and Qwen VL video regression are distinct input/architecture problems." }, { "left": "issue:43756", "right": "issue:45440", "accept": false, - "reason": "Smollm3 RoPE-layer mismatch is a different bug from DeepseekV3MoE divergence from remote implementation." + "reason": "Both are model implementation divergences, but for different models and different missing behaviors; not mergeable as one fix." 
}, { "left": "issue:43504", "right": "issue:44857", "accept": false, - "reason": "BEiT legacy-field loading bug vs LwDetrImageLoss AMP/CUDA crash are different code paths." + "reason": "Beit pretrained-load legacy field handling and LwDetr AMP/CUDA loss crash are unrelated." }, { "left": "issue:44610", "right": "issue:45412", "accept": false, - "reason": "Processor input-size mismatch for OmDet-Turbo is unrelated to RT-DETR garbage-collection memory retention." + "reason": "Processor image-size mismatch and RT-DETR memory retention are different bugs with no shared code-path." }, { "left": "issue:29942", "right": "issue:30064", "accept": false, - "reason": "Flash Attention 2 test failures and void segmentation-map processing are separate issues." + "reason": "Flash Attention 2 test failures and void segmentation-map processing are separate CI/runtime issues." }, { "left": "issue:33357", "right": "issue:39692", "accept": false, - "reason": "MacOS CLIP bus error is not the same as SigLIP2 docs/example errors." + "reason": "Both involve vision-language models, but one is a MacOS bus error on CLIP and the other is a SigLIP2 doc example with model/processor and quantization errors." }, { "left": "issue:45103", "right": "issue:45372", "accept": false, - "reason": "auto_docstring annotation handling has nothing in common with the mistral_common ReasoningEffort import error." + "reason": "auto_docstring annotation handling and Gemma 4 processor loading/import errors are unrelated packaging/runtime problems." }, { - "left": "issue:29127", - "right": "issue:34567", + "left": "issue:32090", + "right": "issue:33290", "accept": false, - "reason": "LayoutLMv3 error-message clarity is unrelated to TrainerState token counter not updating." + "reason": "Trainer broadcast NoneType and deepspeed+adafactor OOM are different training failures." 
}, { - "left": "issue:32090", - "right": "issue:33290", + "left": "issue:29127", + "right": "issue:34567", "accept": false, - "reason": "Trainer NoneType broadcast TypeError vs deepspeed/adafactor OOM are different failure modes." + "reason": "LayoutLMv3 error messaging and TrainerState token-count tracking are unrelated issues." }, { "left": "issue:45072", "right": "issue:45229", "accept": false, - "reason": "dtype mismatches in bfloat16 inference are unrelated to Gemma4 multi-GPU CUDA OOM." + "reason": "dtype mismatch in bfloat16 inference and multi-GPU CUDA OOM are not the same bug." }, { "left": "issue:43824", "right": "issue:44279", "accept": false, - "reason": "Qwen2_5_VL import error is not the same as a general dependency issue with transformers." + "reason": "Missing Qwen2.5-VL import and a generic transformers dependency issue are too broad to treat as the same failure." }, { "left": "issue:43381", "right": "issue:43701", "accept": false, - "reason": "Eval-mode gradient checkpointing restriction is unrelated to resume_from_checkpoint key mismatch." + "reason": "Gradient checkpointing in eval mode and resume-from-checkpoint key mismatch do not share the same root cause." }, { "left": "issue:43653", "right": "issue:44442", "accept": false, - "reason": "BigBirdTokenizer special-token registration bug is unrelated to AutoTokenizer loading FastSpeech2ConformerTokenizer." + "reason": "BigBird tokenizer special-token registration and AutoTokenizer failing to load FastSpeech2ConformerTokenizer are different tokenizer-loading bugs." }, { "left": "issue:43334", "right": "issue:43531", "accept": false, - "reason": "Qwen3-VL pad_token_id loading failure and Qwen3-MoE sliding_window behavior are different problems." + "reason": "Qwen3-VL pad_token_id loading failure and Qwen3-MoE sliding-window behavior are separate model/config issues." 
}, { "left": "issue:43526", "right": "issue:44186", "accept": false, - "reason": "BeitImageProcessorFast reduce_labels bug is unrelated to LayoutLMv2Tokenizer NER/padding crashes." + "reason": "BeitImageProcessorFast label reduction and LayoutLMv2 tokenizer crashes are unrelated preprocessing bugs." }, { "left": "issue:44464", "right": "issue:45491", "accept": false, - "reason": "Compiled forward chunked generation inconsistency is unrelated to Gemma3 NaN embeddings with mixed-length batching." + "reason": "Compiled-forward chunked generation inconsistency and Gemma3 NaN embeddings from mixed-length batching are different generation paths." }, { "left": "issue:43262", "right": "issue:44206", "accept": false, - "reason": "Audio processor chat-template sampling-rate default mismatch is unrelated to LasrFeatureExtractor's unsupported center arg." + "reason": "Audio chat-template sampling-rate defaulting and unsupported center-arg crash are unrelated processor bugs." }, { "left": "issue:44977", "right": "issue:45245", "accept": false, - "reason": "Qwen3.5 flash-attention generation bug has no relation to the category-count limit error." + "reason": "Qwen3.5 flash-attention generation and category-count overflow are entirely different failures." }, { "left": "issue:29942", "right": "issue:36010", "accept": false, - "reason": "Flash Attention 2 test failures and GenerationMixin import error are different regressions." + "reason": "Flash Attention test failures and GenerationMixin import failure are separate issues." }, { "left": "issue:29127", "right": "issue:33357", "accept": false, - "reason": "LayoutLMv3 error-message issue and CLIP MacOS bus error are unrelated." + "reason": "LayoutLMv3 error messaging and CLIP MacOS bus error are unrelated." }, { "left": "issue:29127", "right": "issue:35141", "accept": false, - "reason": "LayoutLMv3 error clarity is unrelated to post_init reinitializing untied output embeddings." 
+ "reason": "LayoutLMv3 box-validation messaging and embedding reinitialization after resize are different code paths and symptoms." } ] }, @@ -7636,7 +7507,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:29942", "issue:33290", @@ -7699,234 +7570,234 @@ "issue:43723|issue:45479", "issue:45003|issue:45440", "issue:43575|issue:43701", - "issue:43316|issue:43525", "issue:43931|issue:45216", "issue:43646|issue:44077", "issue:43761|issue:44188", + "issue:43316|issue:43525", "issue:43575|issue:44387", "issue:43994|issue:44206", "issue:44964|issue:45440", "issue:29942|issue:33666", "issue:39692|issue:45491", - "issue:43673|issue:44743", "issue:44442|issue:44898", "issue:33290|issue:34567", + "issue:43673|issue:44743", "issue:34689|issue:43611", "issue:43381|issue:43873", "issue:43493|issue:43827", "issue:44077|issue:44479", - "issue:33290|issue:34689", "issue:33290|issue:35141", + "issue:33290|issue:34689", "issue:41628|issue:45372" ], - "prepared_review_unit_hash": "0806280343f06ba8b5b6f8d9bcd461c8c36f558725c33152d8dd6c233386d386", + "prepared_review_unit_hash": "5ea9d4784489ce40c2dbdf05b0015ee7a815f5979c09f5a438c9eaf9d68a8ff0", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 30, - "serialized_chars": 36716, - "estimated_input_tokens": 9179, - "estimated_eval_tokens": 18614 + "serialized_chars": 36717, + "estimated_input_tokens": 9180, + "estimated_eval_tokens": 18616 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": 
true, "analyst_result": { - "summary": "This cluster is heterogeneous: most items are unrelated model-specific bugs, regressions, or docs issues. I do not see any strong duplicate pairs among the provided soft edges, so the safe triage outcome is to keep them separate.", - "confidence": 0.94, - "canonical_issue_reason": "If a single representative is needed, issue 43646 is the broadest version-level regression and the most general in scope; however, the cluster is not a true duplicate set.", + "summary": "This cluster is heterogeneous: the soft pairs mostly connect unrelated issues that share only broad keywords (loading, OOM, config, cache, etc.) rather than the same bug. I would not merge any of the candidate pairs.", + "confidence": 0.98, + "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "Issue 43646 is the best generic representative because it describes a broad Transformers 5.0.0 initialization regression rather than a narrow model- or component-specific symptom.", + "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43761", "right": "issue:44079", "accept": false, - "reason": "Different bugs: one is a CLIPVisionModel hidden-states regression, the other is ModelOutput key handling." + "reason": "Different bugs: CLIPVisionModel hidden_states regression vs ModelOutput key assignment behavior." }, { "left": "issue:44442", "right": "issue:45072", "accept": false, - "reason": "Unrelated areas: tokenizer loading failure vs bfloat16 dtype mismatch in inference." + "reason": "Tokenizer loading failure and bfloat16 dtype mismatch are unrelated problems." }, { "left": "issue:42915", "right": "issue:43931", "accept": false, - "reason": "Different model families and failures: FP8 config issue vs Qwen3-VL shape mismatch on load." + "reason": "Different models and failures: Qwen3Moe FP8 config vs Qwen3-VL weight shape mismatch." 
}, { "left": "issue:43526", "right": "issue:44857", "accept": false, - "reason": "Different subsystems: image label reduction bug vs AMP/CUDA loss crash." + "reason": "BeitImageProcessorFast label reduction bug is unrelated to LwDetrImageLoss AMP crash." }, { "left": "issue:43232", "right": "issue:43388", "accept": false, - "reason": "Generation kwargs update bug and metrics gathering bug are not the same code path." + "reason": "Generation kwargs handling and metric label gathering are distinct code paths." }, { "left": "issue:43526", "right": "issue:44610", "accept": false, - "reason": "Different processor bugs: label reduction vs processor output size mismatch." + "reason": "Different processor bugs: label reduction vs image size mismatch." }, { "left": "issue:44479", "right": "issue:45290", "accept": false, - "reason": "Different regressions: video input processing vs chat template tool-call handling." + "reason": "Video input regression and chat template tool-call crash are separate issues." }, { "left": "issue:44743", "right": "issue:44811", "accept": false, - "reason": "Cache/state reset in Qwen3_5 is unrelated to Whisper batch_decode skip_special_tokens handling." + "reason": "Qwen recurrent-state reset and Whisper batch_decode token skipping are unrelated." }, { "left": "issue:43723", "right": "issue:45479", "accept": false, - "reason": "Tokenizer loading regression and sequence-classification loss bug are unrelated." + "reason": "Tokenizer loading error and sequence-classification zero-loss bug do not match." }, { "left": "issue:45003", "right": "issue:45440", "accept": false, - "reason": "Generic sys.modules access bug vs DeepSeekV3MoE divergence are different problems." + "reason": "sys.modules access bug and DeepseekV3MoE divergence are unrelated." }, { "left": "issue:43575", "right": "issue:43701", "accept": false, - "reason": "Tensor-parallel OOM and checkpoint key mismatch are unrelated." 
- }, - { - "left": "issue:43316", - "right": "issue:43525", - "accept": false, - "reason": "Both mention config inconsistencies, but they are different missing-attribute/API issues for different models." + "reason": "Tensor-parallel OOM and resume_from_checkpoint key mismatch are different failures." }, { "left": "issue:43931", "right": "issue:45216", "accept": false, - "reason": "Loading shape mismatch and save_pretrained checkpoint corruption are different failure modes." + "reason": "Both involve model loading/saving, but the concrete bugs differ: shape mismatch vs bad checkpoint save." }, { "left": "issue:43646", "right": "issue:44077", "accept": false, - "reason": "Custom model initialization breakage is broader, but patchtsmixer post_init is a distinct model-specific issue." + "reason": "Custom model initialization regression is not the same as patchtsmixer post_init handling." }, { "left": "issue:43761", "right": "issue:44188", "accept": false, - "reason": "Hidden_states regression and attention-kernel divergence under torch.compile are unrelated." + "reason": "Hidden_states regression and attention-kernel divergence under compile are unrelated." + }, + { + "left": "issue:43316", + "right": "issue:43525", + "accept": false, + "reason": "Different config APIs for different models; not the same missing-attribute bug." }, { "left": "issue:43575", "right": "issue:44387", "accept": false, - "reason": "Both are OOM-related, but one is TP loading OOM and the other is quantization memory growth." + "reason": "Both mention OOM, but one is tensor-parallel load OOM and the other is int4 reserved-memory growth." }, { "left": "issue:43994", "right": "issue:44206", "accept": false, - "reason": "SigLIP2 nonsensical outputs and LasrFeatureExtractor center-arg crash are separate issues." + "reason": "SigLIP2 bad outputs and LasrFeatureExtractor center-arg crash are separate issues." 
}, { "left": "issue:44964", "right": "issue:45440", "accept": false, - "reason": "Phi-4 multimodal loading error and DeepSeekV3MoE divergence are different model problems." + "reason": "Phi-4 multimodal loading and DeepseekV3MoE divergence are unrelated." }, { "left": "issue:29942", "right": "issue:33666", "accept": false, - "reason": "Flash Attention 2 test failures and Qwen2-VL multi-GPU training are unrelated." + "reason": "Flash Attention 2 test failures and Qwen2-VL multi-GPU training are not the same bug." }, { "left": "issue:39692", "right": "issue:45491", "accept": false, - "reason": "SigLIP2 docs errors/quantization failure and Gemma3 NaN embeddings are different bugs." - }, - { - "left": "issue:43673", - "right": "issue:44743", - "accept": false, - "reason": "Generation cache missing in v5 and recurrent-state reset in qwen3_5 are not the same concrete defect." + "reason": "Documentation/example errors are unrelated to Gemma3 NaN embeddings." }, { "left": "issue:44442", "right": "issue:44898", "accept": false, - "reason": "Tokenizer loading failure is unrelated to Perceiver interpolation/image-size failure." + "reason": "Tokenizer load failure and Perceiver non-default-resolution failure are different problems." }, { "left": "issue:33290", "right": "issue:34567", "accept": false, - "reason": "Adafactor OOM in deepspeed and TrainerState token counter not updating are unrelated." + "reason": "Adafactor/DeepSpeed OOM is unrelated to TrainerState token counter not updating." + }, + { + "left": "issue:43673", + "right": "issue:44743", + "accept": false, + "reason": "Both mention cache, but they concern different models and different cache/state logic." }, { "left": "issue:34689", "right": "issue:43611", "accept": false, - "reason": "Both are loading regressions, but the affected model code paths and causes differ." + "reason": "Two separate model-loading regressions with different causes and versions." 
}, { "left": "issue:43381", "right": "issue:43873", "accept": false, - "reason": "Gradient checkpointing eval-mode restriction and quantization offloading behavior are unrelated." + "reason": "Gradient checkpointing eval-mode restriction is unrelated to quantization offloading." }, { "left": "issue:43493", "right": "issue:43827", "accept": false, - "reason": "SigLIP2 implementation discrepancy and docs still referencing pipeline() are different issue classes." + "reason": "SigLIP2 JAX parity and docs still using pipeline() are unrelated." }, { "left": "issue:44077", "right": "issue:44479", "accept": false, - "reason": "PatchTSMixer post_init behavior and Qwen video-input regression do not share a code path." + "reason": "patchtsmixer post_init handling and Qwen video-input regression do not share a code path." }, { "left": "issue:33290", - "right": "issue:34689", + "right": "issue:35141", "accept": false, - "reason": "OOM in optimizer setup and Llama 3.2 vision model loading failure are unrelated." + "reason": "OOM in DeepSpeed AdaFactor and embedding reinit after resize are unrelated." }, { "left": "issue:33290", - "right": "issue:35141", + "right": "issue:34689", "accept": false, - "reason": "Optimizer OOM and embedding reinitialization in post_init are different bugs." + "reason": "Different failures: optimizer OOM vs model-loading breakage." }, { "left": "issue:41628", "right": "issue:45372", "accept": false, - "reason": "Different import failures in different libraries/components; not the same underlying bug." + "reason": "Both are import errors, but for different symbols, modules, and processor-loading flows." 
} ] }, @@ -7948,7 +7819,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:16998", "issue:22355", @@ -7974,13 +7845,13 @@ "issue:43688", "issue:43716", "issue:43720", - "issue:43742", "issue:43749", "issue:43761", "issue:43825", "issue:43856", "issue:43873", "issue:43931", + "issue:44077", "issue:44162", "issue:44190", "issue:44206", @@ -7988,6 +7859,7 @@ "issue:44479", "issue:44496", "issue:44514", + "issue:44534", "issue:44617", "issue:44792", "issue:44855", @@ -8007,202 +7879,202 @@ "issue:44190|issue:44496", "issue:33290|issue:36331", "issue:43381|issue:43856", - "issue:43316|issue:44877", + "issue:44855|issue:45137", "issue:34689|issue:36010", + "issue:43316|issue:44877", "issue:43540|issue:44514", - "issue:44855|issue:45137", - "issue:42175|issue:43316", "issue:44162|issue:44792", "issue:43388|issue:43688", - "issue:43381|issue:43742", + "issue:42175|issue:43316", "issue:43404|issue:43441", "issue:16998|issue:22355", - "issue:43316|issue:43716", "issue:25251|issue:28282", - "issue:41950|issue:43475", + "issue:43316|issue:43716", "issue:43065|issue:44617", + "issue:41950|issue:43475", "issue:43295|issue:43825", "issue:45127|issue:45200", - "issue:43761|issue:44479" + "issue:43761|issue:44479", + "issue:44077|issue:44534" ], - "prepared_review_unit_hash": "e5055e4827cbd998a8424552c181238f25e95d969cbb0cefe4308cd9a86ac08c", + "prepared_review_unit_hash": "0f3b53f93390986d3682abf2eaa5de210c407358b4438bcb9354c12892273305", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { - "node_count": 47, - "item_count": 47, + 
"node_count": 48, + "item_count": 48, "soft_pair_count": 25, - "serialized_chars": 34582, - "estimated_input_tokens": 8646, - "estimated_eval_tokens": 17548 + "serialized_chars": 35154, + "estimated_input_tokens": 8789, + "estimated_eval_tokens": 17834 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "These items are mostly unrelated issues across different models, pipelines, and failure modes. I do not see any soft pair that is clearly the same underlying bug/change, so all soft edges are rejected. No pull requests are present.", - "confidence": 0.93, - "canonical_issue_reason": "No clear duplicate cluster exists here. If one issue must serve as the representative, issue 43749 is the most canonical because it describes a concrete, high-impact regression with a specific failing feature and actionable scope.", + "summary": "This cluster is a heterogeneous grab-bag of unrelated Transformers issues; none of the soft pairs look like the same underlying bug/change closely enough to merge. 
The most representative issue is the high-activity core loading regression around FSDP CPU RAM efficient loading.", + "confidence": 0.34, + "canonical_issue_reason": "issue:43749 is the strongest anchor: it has the highest discussion/inbound activity and describes a core loading-path regression with broad impact and a clear bug report.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 43749 is the best global representative: it is a concrete runtime breakage, clearly stated, and easier to triage than the more ambiguous config/docs/reporting issues.", + "best_issue_reason": "issue:43749 is the best overall representative because it is a substantive, widely impactful runtime bug rather than a niche model-specific, docs, or API-edge case report.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43720", "right": "issue:44442", "accept": false, - "reason": "Different components and failures: BitNet accelerate-loading/unpacked weights vs AutoTokenizer loading a specific tokenizer class." + "reason": "Different subsystems and failures: BitNet packed-weight loading vs AutoTokenizer loading a specific tokenizer class." }, { "left": "issue:43749", "right": "issue:43931", "accept": false, - "reason": "One is FSDP CPU-efficient loading breakage; the other is a Qwen3-VL weight-shape mismatch. Different code paths and symptoms." + "reason": "Both involve loading, but one is FSDP CPU-efficient loading and the other is a Qwen3-VL shape mismatch; not the same bug." }, { "left": "issue:43643", "right": "issue:44913", "accept": false, - "reason": "Both concern config behavior, but one is missing fields with trust_remote_code and the other is GPTNeoX rotary_pct reload persistence. Not the same bug." + "reason": "Unrelated config issues: trust_remote_code missing fields vs GPTNeoX rotary_pct persistence on reload." 
}, { "left": "issue:43873", "right": "issue:44871", "accept": false, - "reason": "Quantization/offloading behavior versus Gemma-3 eos_token_id config inconsistency. No shared underlying failure." + "reason": "Quantization/offloading behavior is unrelated to Gemma-3 eos_token_id config inconsistency." }, { "left": "issue:39692", "right": "issue:44206", "accept": false, - "reason": "SigLIP2 documentation example errors are unrelated to a LASR feature extractor regression in image preprocessing." + "reason": "Docs example errors for SigLIP2 are not the same as LasrFeatureExtractor passing an unsupported argument." }, { "left": "issue:44190", "right": "issue:44496", "accept": false, - "reason": "Local dataset loading in a training script is unrelated to an unrecognized model/config.json loading error." + "reason": "Local dataset loading in a training script is unrelated to an unrecognized model/config loading failure." }, { "left": "issue:33290", "right": "issue:36331", "accept": false, - "reason": "AdaFactor/DeepSpeed OOM is a resource issue; compute_loss num_items_in_batch is an API signature mismatch. Different bugs." + "reason": "OOM during Adafactor+DeepSpeed training is a different problem from a CustomTrainer signature mismatch." }, { "left": "issue:43381", "right": "issue:43856", "accept": false, - "reason": "Gradient checkpointing eval-mode restriction is unrelated to Qwen3 MoE memory inefficiency." + "reason": "Gradient checkpointing in eval mode and Qwen3 MoE memory usage are different code paths and symptoms." }, { - "left": "issue:43316", - "right": "issue:44877", + "left": "issue:44855", + "right": "issue:45137", "accept": false, - "reason": "Both touch config APIs, but one is a Gemma3TextConfig discrepancy and the other is strict loading for granite_speech. Different models and problems." + "reason": "Python 3.13 DeBERTa import parsing bug is unrelated to DeepSpeed ZeRO3 deque underflow." 
}, { "left": "issue:34689", "right": "issue:36010", "accept": false, - "reason": "Llama 3.2 Vision loading regression and GenerationMixin import failure are separate import/load issues with different causes." - }, - { - "left": "issue:43540", - "right": "issue:44514", - "accept": false, - "reason": "Video-input processing in Qwen3OmniMoe and batched chat-template crashes in Qwen2_5_VL are different processor paths." + "reason": "Llama 3.2 model loading regression is not the same as failing to import GenerationMixin from a moved module." }, { - "left": "issue:44855", - "right": "issue:45137", + "left": "issue:43316", + "right": "issue:44877", "accept": false, - "reason": "Python 3.13 Deberta import parsing failure is unrelated to the DeepSpeed ZeRO3 deque error." + "reason": "Gemma3TextConfig API inconsistency is different from strict config rejection for granite_speech." }, { - "left": "issue:42175", - "right": "issue:43316", + "left": "issue:43540", + "right": "issue:44514", "accept": false, - "reason": "Backend dependency packaging issue versus Gemma3TextConfig API discrepancy; no shared bug." + "reason": "Qwen3OmniMoe video-processing ValueError is unrelated to Qwen2_5_VL batched chat-template padding behavior." }, { "left": "issue:44162", "right": "issue:44792", "accept": false, - "reason": "ESM2 model breakage and janus image-generation test failure are unrelated model-specific issues." + "reason": "ESM2 model breakage is not the same as a Janus test failure around image generation." }, { "left": "issue:43388", "right": "issue:43688", "accept": false, - "reason": "Metrics gathering tuple truncation and auxiliary loss normalization are different training bugs." + "reason": "Metric gathering drops tuple labels; auxiliary-loss normalization in OLMoE/GPT Oss is a different training-loss issue." 
}, { - "left": "issue:43381", - "right": "issue:43742", + "left": "issue:42175", + "right": "issue:43316", "accept": false, - "reason": "Eval-mode gradient checkpointing limitation is unrelated to the MobileLLM loading key error." + "reason": "Missing TensorFlow from a torch extra install has nothing to do with Gemma3TextConfig API discrepancies." }, { "left": "issue:43404", "right": "issue:43441", "accept": false, - "reason": "Weight tying in Mistral3ForConditionalGeneration and FlashAttention failure in Ministral-3 are distinct code-path problems." + "reason": "These are different model bugs: lm_head tying in Mistral3 versus FlashAttention failure in Ministral-3." }, { "left": "issue:16998", "right": "issue:22355", "accept": false, - "reason": "A model_max_length question for DeBERTa-V3 is unrelated to the transformers.onnx import error." - }, - { - "left": "issue:43316", - "right": "issue:43716", - "accept": false, - "reason": "Gemma3TextConfig API discrepancy and Mistral-3 image-preprocessor dtype mismatch are unrelated issues." + "reason": "A model_max_length question for DeBERTa-V3 is unrelated to a missing transformers.onnx module import." }, { "left": "issue:25251", "right": "issue:28282", "accept": false, - "reason": "Pipeline top_k output nesting and missing PyTorch import are completely different issues." + "reason": "Pipeline top_k output-shape behavior is unrelated to an AutoModel import error requiring PyTorch." }, { - "left": "issue:41950", - "right": "issue:43475", + "left": "issue:43316", + "right": "issue:43716", "accept": false, - "reason": "Video-classification pipeline processor selection and Sam3VisionEncoderOutput missing attribute are not the same bug." + "reason": "Config API discrepancy and Mistral-3 image-preprocessor dtype mismatch are distinct issues." }, { "left": "issue:43065", "right": "issue:44617", "accept": false, - "reason": "A dummy Conv2d in Sam3PixelDecoder is a different issue from Sam3Video CUDA OOM." 
+ "reason": "A dummy Conv2d in Sam3PixelDecoder is not the same as CUDA OOM in Sam3Video." + }, + { + "left": "issue:41950", + "right": "issue:43475", + "accept": false, + "reason": "Video-classification pipeline processor lookup and missing fpn_position_embeddings are different Sam3/video bugs." }, { "left": "issue:43295", "right": "issue:43825", "accept": false, - "reason": "Processor.tokenizer regression and an incorrect translation-support error message are different v5 issues." + "reason": "Custom processor.tokenizer regression is unrelated to an incorrect translation-task error message." }, { "left": "issue:45127", "right": "issue:45200", "accept": false, - "reason": "LoRA merge collapse with extended vocab is unrelated to Gemma 4 mm_token_type_ids defaults for text-only fine-tuning." + "reason": "LoRA merge collapse with extended vocab is a different model-training issue from Gemma 4 mm_token_type_ids defaults." }, { "left": "issue:43761", "right": "issue:44479", "accept": false, - "reason": "CLIPVisionModel hidden_states regression and Qwen video-input regression are different model families and failure modes." + "reason": "CLIPVisionModel hidden_states regression is unrelated to Qwen video-input regressions." + }, + { + "left": "issue:44077", + "right": "issue:44534", + "accept": false, + "reason": "patchtsmixer post_init validation and non-persistent buffer initialization are different v5 behavior changes." 
} ] }, @@ -8224,7 +8096,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:32090", "issue:35141", @@ -8251,7 +8123,6 @@ "issue:43931", "issue:43994", "issue:44060", - "issue:44077", "issue:44190", "issue:44220", "issue:44242", @@ -8262,245 +8133,239 @@ "issue:44534", "issue:44610", "issue:44734", - "issue:44743", "issue:44792", "issue:44871", "issue:44913", "issue:44933", + "issue:44938", "issue:44964", "issue:44977", "issue:45245", "issue:45290", "issue:45362", "issue:45381", - "issue:45406" + "issue:45406", + "issue:45542" ], "soft_pairs": [ - "issue:44077|issue:44534", - "issue:44734|issue:45406", "issue:43550|issue:43701", + "issue:44734|issue:45406", "issue:43295|issue:44871", "issue:43606|issue:43701", "issue:43452|issue:44496", "issue:43646|issue:44913", "issue:43906|issue:45290", "issue:43761|issue:44242", - "issue:42617|issue:43575", "issue:43575|issue:43742", + "issue:42617|issue:43575", "issue:43931|issue:44060", "issue:44521|issue:45381", "issue:43575|issue:43873", - "issue:43525|issue:43531", "issue:44479|issue:44734", "issue:44792|issue:44977", "issue:32090|issue:35141", "issue:44464|issue:44871", + "issue:43525|issue:43531", "issue:44933|issue:45362", "issue:43065|issue:44610", "issue:43827|issue:43994", "issue:44534|issue:44913", "issue:41720|issue:45245", - "issue:44190|issue:44964", + "issue:44938|issue:45542", "issue:43650|issue:44220", - "issue:43388|issue:43582", - "issue:43761|issue:44743" + "issue:44190|issue:44964", + "issue:43388|issue:43582" ], - "prepared_review_unit_hash": "289ce11460d9c35ae45d82ffa0d2522af48120dcfb3520639025a7b4a14a34bc", + "prepared_review_unit_hash": "7e46caf48aa19e73a253f83321bbcfb32838b198fd003a6b30db31a00cd1de64", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - 
"estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 28, - "serialized_chars": 35941, - "estimated_input_tokens": 8986, - "estimated_eval_tokens": 18228 + "soft_pair_count": 27, + "serialized_chars": 35604, + "estimated_input_tokens": 8901, + "estimated_eval_tokens": 18058 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "These items are largely unrelated individual issues rather than a duplicate cluster. None of the soft edges look like the same underlying bug/change; issue 44521 is the best representative because it is open, specific, and has the strongest discussion/reference activity.", - "confidence": 0.71, - "canonical_issue_reason": "Issue 44521 is the clearest live, well-scoped bug report with the strongest external references, so it is the best anchor if a single representative issue is needed.", + "summary": "The cluster is heterogeneous: the issues span unrelated models, regressions, and subsystems. 
None of the suggested soft pairs look like the same underlying bug or change, so all should be rejected.", + "confidence": 0.96, + "canonical_issue_reason": "No clear canonical issue: the items are distinct bug reports across different code paths, models, and failure modes, with no strong duplicate center.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 44521 is specific, reproducible, and still open, which makes it the most useful tracking issue among this set.", + "best_issue_reason": "No single issue stands out as a global representative because the set is not a cohesive duplicate cluster.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:44077", - "right": "issue:44534", + "left": "issue:43550", + "right": "issue:43701", "accept": false, - "reason": "Both involve v5/post-init behavior, but one is about optional post_init being disallowed and the other about non-persistent buffers being junk; different bugs." + "reason": "Different failures: torch.compile+SDPA crash vs resume_from_checkpoint key mismatch." }, { "left": "issue:44734", "right": "issue:45406", "accept": false, - "reason": "KV-cache continuation tensor indexing in serve is unrelated to Gemma4Processor missing `_tokenizer`." - }, - { - "left": "issue:43550", - "right": "issue:43701", - "accept": false, - "reason": "Torch.compile/SDPA OOM for Bamba is unrelated to resume_from_checkpoint key mismatch." + "reason": "Different areas: KV-cache continuation indexing vs Gemma4Processor missing _tokenizer." }, { "left": "issue:43295", "right": "issue:44871", "accept": false, - "reason": "Processor/tokenizer regression in v4.57.5 is not the same as inconsistent eos_token_id configuration." + "reason": "Both are regression/config issues, but one is processor/tokenizer API breakage and the other is eos_token_id mismatch." 
}, { "left": "issue:43606", "right": "issue:43701", "accept": false, - "reason": "CPU offload device mismatch for bark-small is unrelated to checkpoint key mismatch." + "reason": "CPU offload device mismatch is unrelated to checkpoint resume key mismatch." }, { "left": "issue:43452", "right": "issue:44496", "accept": false, - "reason": "GGUF loading failures and unrecognized model/config key errors are different code paths and different failures." + "reason": "gguf_file loading paths and unrecognized-model config errors are different model-loading problems." }, { "left": "issue:43646", "right": "issue:44913", "accept": false, - "reason": "A broad custom-model initialization regression is not the same as GPTNeoX rotary_pct not persisting on reload." + "reason": "Custom initialization breakage is not the same as GPTNeoX rotary_pct not persisting on reload." }, { "left": "issue:43906", "right": "issue:45290", "accept": false, - "reason": "An isolated reproduction of another issue is not enough to match the specific assistant-message tool-call crash here." + "reason": "An isolated reproduction of another issue is not the same as apply_chat_template crashing on tool-call messages." }, { "left": "issue:43761", "right": "issue:44242", "accept": false, - "reason": "CLIPVisionModel hidden_states handling and MoE load-balancing loss are unrelated." + "reason": "CLIPVision hidden_states regression is unrelated to load balancing loss handling in MoE routing." }, { - "left": "issue:42617", - "right": "issue:43575", + "left": "issue:43575", + "right": "issue:43742", "accept": false, - "reason": "3d_parallel.py startup failure is unrelated to Qwen2 tensor-parallel OOM." + "reason": "OOM on tensor-parallel load and key error on loading MobileLLM are different load-time problems." 
}, { - "left": "issue:43575", - "right": "issue:43742", + "left": "issue:42617", + "right": "issue:43575", "accept": false, - "reason": "Qwen2 TP OOM and MobileLLM key errors are distinct loading problems." + "reason": "3d_parallel.py failure is unrelated to Qwen2-57B tensor-parallel OOM." }, { "left": "issue:43931", "right": "issue:44060", "accept": false, - "reason": "Weight-shape mismatch in Qwen3-VL is not the same as a tied-weights warning in Qwen3-Next." + "reason": "Weight-shape mismatch loading Qwen3-VL differs from tied-weights warning on Qwen3-Next." }, { "left": "issue:44521", "right": "issue:45381", "accept": false, - "reason": "Both are multimodal regressions, but assistant-mask generation and video vision_position_ids are different failure modes and likely different fixes." + "reason": "All-zero assistant masks in chat templating is distinct from Qwen2.5-VL video vision_position_ids regression." }, { "left": "issue:43575", "right": "issue:43873", "accept": false, - "reason": "TP OOM during model load is unrelated to quantization offloading not working as expected." - }, - { - "left": "issue:43525", - "right": "issue:43531", - "accept": false, - "reason": "Missing `pad_token_id` in one config is unrelated to the Qwen3-MoE `sliding_window` issue." + "reason": "Tensor-parallel OOM and quantization offload behavior are not the same bug." }, { "left": "issue:44479", "right": "issue:44734", "accept": false, - "reason": "Video-input regression and KV-cache continuation indexing are different bugs in different paths." + "reason": "Video-input regression for Qwen VL models is unrelated to serve KV-cache continuation indexing." }, { "left": "issue:44792", "right": "issue:44977", "accept": false, - "reason": "A Janus image-generation test failure is not the same underlying issue as Qwen3.5 flash-attention generation breakage." + "reason": "Failed janus image-generation test is not the same as Qwen3.5 flash-attention generation bug." 
}, { "left": "issue:32090", "right": "issue:35141", "accept": false, - "reason": "Trainer `_gpu_broadcast_one` NoneType failure and embedding reinitialization after resize are unrelated." + "reason": "Trainer _gpu_broadcast_one NoneType error and embedding reinit on post_init are different bugs." }, { "left": "issue:44464", "right": "issue:44871", "accept": false, - "reason": "Chunked generation inconsistency under compilation is not the same as eos_token_id config mismatch." + "reason": "Chunked generation with compiled forward is unrelated to eos_token_id configuration inconsistency." + }, + { + "left": "issue:43525", + "right": "issue:43531", + "accept": false, + "reason": "Missing pad_token_id in Llama4Config and sliding_window issues in Qwen3-MoE are different model config bugs." }, { "left": "issue:44933", "right": "issue:45362", "accept": false, - "reason": "A missing import from image_utils is unrelated to Gemma4Processor missing `_tokenizer` in serve." + "reason": "Missing import from image_utils is unrelated to Qwen3.5 chat crashes." }, { "left": "issue:43065", "right": "issue:44610", "accept": false, - "reason": "A dummy Conv2d in Sam3PixelDecoder is unrelated to OmDet-Turbo's processor/model input-size mismatch." + "reason": "Dummy Conv2d in Sam3PixelDecoder and OmDet-Turbo input-size mismatch are different model-specific issues." }, { "left": "issue:43827", "right": "issue:43994", "accept": false, - "reason": "Docs still mentioning pipeline() and SigLIP2 producing nonsense are not the same bug." + "reason": "Docs still referencing pipeline() is unrelated to SigLIP2 nonsensical outputs." }, { "left": "issue:44534", "right": "issue:44913", "accept": false, - "reason": "Non-persistent buffers being filled with junk and GPTNeoX rotary_pct not persisting are separate serialization/regression issues." + "reason": "Non-persistent buffer junk in v5 is not the same as rotary_pct resetting on reload." 
}, { "left": "issue:41720", "right": "issue:45245", "accept": false, - "reason": "Qwen3 auto device-mapping cuda asserts are unrelated to the 2^24 category limit error." + "reason": "Qwen3 auto device mapping cuda assert and category-count limit error are unrelated runtime failures." }, { - "left": "issue:44190", - "right": "issue:44964", + "left": "issue:44938", + "right": "issue:45542", "accept": false, - "reason": "Local dataset loading in a training script is unrelated to loading the Phi-4 multimodal model." + "reason": "Python 3.14 load failure is not the same as missing TensorFlow backend when only tensorboard is installed." }, { "left": "issue:43650", "right": "issue:44220", "accept": false, - "reason": "This placeholder issue is unrelated to `_torch_extract_fbank_features()`." + "reason": "ADD THE DATA is not a meaningful duplicate of _torch_extract_fbank_features()." }, { - "left": "issue:43388", - "right": "issue:43582", + "left": "issue:44190", + "right": "issue:44964", "accept": false, - "reason": "gather_for_metrics label dropping and AppleSilicon allocator TypeError are different subsystems and failures." + "reason": "Local dataset loading in an example script is unrelated to loading Phi-4-multimodal-instruct." }, { - "left": "issue:43761", - "right": "issue:44743", + "left": "issue:43388", + "right": "issue:43582", "accept": false, - "reason": "CLIP hidden_states regression is unrelated to recurrent states resetting with cache." + "reason": "gather_for_metrics tuple-label truncation is unrelated to Apple Silicon warmup TypeError." 
} ] }, @@ -8522,7 +8387,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:36246", "issue:38175", @@ -8534,7 +8399,6 @@ "issue:43065", "issue:43116", "issue:43295", - "issue:43299", "issue:43335", "issue:43404", "issue:43479", @@ -8542,6 +8406,7 @@ "issue:43550", "issue:43643", "issue:43644", + "issue:43761", "issue:43854", "issue:43976", "issue:44016", @@ -8557,6 +8422,7 @@ "issue:44496", "issue:44683", "issue:44734", + "issue:44743", "issue:44792", "issue:44855", "issue:44871", @@ -8573,245 +8439,245 @@ "issue:45446" ], "soft_pairs": [ + "issue:43761|issue:44743", "issue:45127|issue:45357", "issue:44355|issue:44855", "issue:43644|issue:44913", - "issue:44496|issue:45372", - "issue:43540|issue:44734", "issue:44077|issue:44877", + "issue:43540|issue:44734", "issue:43976|issue:44855", - "issue:43550|issue:44190", + "issue:44496|issue:45372", "issue:43295|issue:45276", - "issue:43404|issue:45216", + "issue:43550|issue:44190", "issue:43335|issue:44855", + "issue:43404|issue:45216", "issue:43116|issue:44190", "issue:41950|issue:44016", - "issue:36246|issue:41628", "issue:39692|issue:44190", "issue:43644|issue:44871", + "issue:36246|issue:41628", "issue:43479|issue:45375", - "issue:36246|issue:38617", "issue:44186|issue:44734", + "issue:36246|issue:38617", + "issue:38175|issue:43065", "issue:44246|issue:45003", "issue:44016|issue:44220", - "issue:38175|issue:43065", "issue:44466|issue:44913", "issue:44683|issue:45446", "issue:42175|issue:44279", "issue:44112|issue:44792", "issue:43854|issue:44792", "issue:43295|issue:45335", - "issue:43643|issue:44496", - "issue:43299|issue:45216" + "issue:43643|issue:44496" ], - "prepared_review_unit_hash": "49aeac034336f40ecd6c7408240f771bce2e03255706975b28cc94016d43723d", + "prepared_review_unit_hash": "6612d019d4a883a74825af21ff3eadfe6a609fa6f817e46079492f5915863ae8", "component_budget": { - "node_count": 
362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { - "node_count": 47, - "item_count": 47, + "node_count": 48, + "item_count": 48, "soft_pair_count": 30, - "serialized_chars": 36371, - "estimated_input_tokens": 9093, - "estimated_eval_tokens": 18442 + "serialized_chars": 36997, + "estimated_input_tokens": 9250, + "estimated_eval_tokens": 18756 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This set is not one duplicate cluster; it mixes unrelated import errors, config/model regressions, tokenizer/pipeline bugs, and example/test failures. I would not merge any of the proposed soft edges.", - "confidence": 0.86, - "canonical_issue_reason": "No single canonical issue fits this group: the items span different models, code paths, and bug classes rather than one underlying defect.", + "summary": "This cluster is heterogeneous: most pairs share only broad subsystem similarity (imports, serialization, multimodal, config handling) but not the same concrete bug. I would not merge any of the soft edges, and there is no clear canonical issue/PR for the whole set.", + "confidence": 0.96, + "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "No global best issue stands out because the set is heterogeneous; the safest triage is to keep these as separate reports.", + "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:43761", + "right": "issue:44743", + "accept": false, + "reason": "Both are regression-style model bugs, but one is CLIPVision hidden states and the other is Qwen3.5 recurrent state reset; different code paths." 
+ }, { "left": "issue:45127", "right": "issue:45357", "accept": false, - "reason": "LoRA/extended-vocab merge collapse and incorrect visual-encoder checkpoint keys are different save/merge bugs." + "reason": "Both involve tied/serialized weights, but they affect different models and different save/merge paths; not the same bug." }, { "left": "issue:44355", "right": "issue:44855", "accept": false, - "reason": "Generic compiled-file import errors do not match a specific Python 3.13 torch.jit.script indentation parsing bug." + "reason": "One is compiled Python file errors, the other is a Python 3.13 torch.jit indentation parsing issue; unrelated failures." }, { "left": "issue:43644", "right": "issue:44913", "accept": false, - "reason": "Non-persistent buffer junk on v5 is unrelated to GPTNeoXConfig rotary_pct not persisting on reload." + "reason": "Both concern state persistence on reload, but one is non-persistent buffers and the other is a specific GPTNeoX config field; too different to merge." }, { - "left": "issue:44496", - "right": "issue:45372", + "left": "issue:44077", + "right": "issue:44877", "accept": false, - "reason": "Missing model_type in config.json is a different failure from the ReasoningEffort import breakage in Gemma 4 processor loading." + "reason": "Both mention config handling, but one is a patchtsmixer post_init policy issue and the other is granite_speech loading under strict config; different bugs." }, { "left": "issue:43540", "right": "issue:44734", "accept": false, - "reason": "Qwen3OmniMoe video input processing and serve KV-cache continuation index errors affect different paths." - }, - { - "left": "issue:44077", - "right": "issue:44877", - "accept": false, - "reason": "PatchTSMixer post_init validation and granite_speech strict-config loading are separate model/config issues." + "reason": "Qwen3OmniMoe video-input validation and transformers serve KV-cache indexing are unrelated code paths." 
}, { "left": "issue:43976", "right": "issue:44855", "accept": false, - "reason": "Packaging/Python version compatibility is unrelated to the DebertaV2Model import syntax error on Python 3.13." + "reason": "One is a Python/version compatibility report, the other is a JIT parsing/import syntax problem; not the same underlying issue." }, { - "left": "issue:43550", - "right": "issue:44190", + "left": "issue:44496", + "right": "issue:45372", "accept": false, - "reason": "torch.compile SDPA failure in Bamba is unrelated to local-dataset loading in an image-classification script." + "reason": "Both are model-loading failures, but one is missing model_type and the other is a processor import dependency break; different root causes." }, { "left": "issue:43295", "right": "issue:45276", "accept": false, - "reason": "processor.tokenizer regression and resize_token_embeddings not updating Gemma4 embeddings are different code paths." + "reason": "Both mention embedding/token resizing, but one is a processor/tokenizer regression and the other is Gemma4 embedding propagation; not the same concrete change." }, { - "left": "issue:43404", - "right": "issue:45216", + "left": "issue:43550", + "right": "issue:44190", "accept": false, - "reason": "Mistral3 lm_head weight tying and Qwen3.5 save_pretrained checkpoint corruption are separate regressions." + "reason": "Torch.compile/SDPA model failure and local dataset loading in an image-classification script are unrelated." }, { "left": "issue:43335", "right": "issue:44855", "accept": false, - "reason": "SwitchTransformers sparse-layer construction and a Python 3.13 import parse failure are unrelated." + "reason": "SwitchTransformers sparse-layer config bug is unrelated to the Python 3.13 torch.jit parsing issue." 
+ }, + { + "left": "issue:43404", + "right": "issue:45216", + "accept": false, + "reason": "Both involve model weight handling, but one is Mistral3 lm_head tying and the other is Qwen3.5 save_pretrained checkpoint contents; not mergeable as one fix." }, { "left": "issue:43116", "right": "issue:44190", "accept": false, - "reason": "A multi-label classification example returning empty results is not the same bug as failing to load a local dataset." + "reason": "A multi-label classification example bug and a local dataset loading issue in a different script are unrelated example problems." }, { "left": "issue:41950", "right": "issue:44016", "accept": false, - "reason": "Video-classification pipeline processor selection is unrelated to a notebook syntax error." - }, - { - "left": "issue:36246", - "right": "issue:41628", - "accept": false, - "reason": "Missing Qwen2.5 VL image-processor export and missing AutoImageProcessor import are different import errors." + "reason": "Video-classification pipeline processor lookup and a notebook syntax error are completely different classes of bug." }, { "left": "issue:39692", "right": "issue:44190", "accept": false, - "reason": "SigLIP2 docs/example issues do not match the local-dataset loading failure in image classification." + "reason": "SigLIP2 docs/example issues and image-classification script dataset loading are unrelated example failures." }, { "left": "issue:43644", "right": "issue:44871", "accept": false, - "reason": "Buffer serialization corruption is unrelated to Gemma-3 eos_token_id configuration mismatch." + "reason": "One is a global buffer serialization regression; the other is a Gemma3 eos_token_id config mismatch. Same broad area, different bugs." + }, + { + "left": "issue:36246", + "right": "issue:41628", + "accept": false, + "reason": "Both are import errors, but one is a missing class in a model submodule and the other is a missing top-level AutoImageProcessor export; different symbols and causes." 
}, { "left": "issue:43479", "right": "issue:45375", "accept": false, - "reason": "Phi4MultimodalConfig default-init behavior and Qwen3_5MoeVisionConfig field stripping are different config bugs." + "reason": "Both are config-field handling issues, but one is default vision/audio config initialization and the other is a missing deepstack_visual_indexes field in Qwen3_5MoeVisionConfig." + }, + { + "left": "issue:44186", + "right": "issue:44734", + "accept": false, + "reason": "Tokenizer padding/crash in LayoutLMv2 and KV-cache continuation in transformers serve are unrelated." }, { "left": "issue:36246", "right": "issue:38617", "accept": false, - "reason": "Qwen2_5_VLImageProcessor export failure and layer_type_validation import failure are unrelated." + "reason": "Both are import errors, but they affect different modules and different missing names; no shared underlying fix is evident." }, { - "left": "issue:44186", - "right": "issue:44734", + "left": "issue:38175", + "right": "issue:43065", "accept": false, - "reason": "LayoutLMv2 tokenizer batching crashes and serve KV-cache continuation indexing bugs are different components." + "reason": "Unexpected zero probabilities in SigLIP2 and a dummy Conv2d in Sam3PixelDecoder are unrelated model-specific bugs." }, { "left": "issue:44246", "right": "issue:45003", "accept": false, - "reason": "Import slowness and unsafe sys.modules access may be loosely related, but the titles do not indicate the same concrete bug." + "reason": "Import-time slowness and unsafe sys.modules access are related only loosely; not the same concrete bug or fix." }, { "left": "issue:44016", "right": "issue:44220", "accept": false, - "reason": "A notebook syntax error is unrelated to _torch_extract_fbank_features() behavior." - }, - { - "left": "issue:38175", - "right": "issue:43065", - "accept": false, - "reason": "SigLIP2 zero probabilities and Sam3PixelDecoder dummy Conv2d are different model-specific issues." 
+ "reason": "Notebook syntax error and _torch_extract_fbank_features() failure are unrelated." }, { "left": "issue:44466", "right": "issue:44913", "accept": false, - "reason": "Device-dependent lm_head serialization and GPTNeoX rotary_pct reload loss are different persistence bugs." + "reason": "Both concern serialization/reload behavior, but one is tied-weight serialization and the other is a GPTNeoX config default reset; different fixes." }, { "left": "issue:44683", "right": "issue:45446", "accept": false, - "reason": "Both mention flex_attention, but one is a broad compiled-failure report and the other is a specific PyTorch version-check/import bug; not the same concrete fix." + "reason": "Compiled flex_attention on torch>=2.9 and a PyTorch version check for AuxRequest import are different compatibility bugs." }, { "left": "issue:42175", "right": "issue:44279", "accept": false, - "reason": "Missing TensorFlow in the torch extra and a generic dependency issue are not the same dependency bug." + "reason": "TensorFlow missing from a torch extra install and a generic dependency issue are not the same dependency bug." }, { "left": "issue:44112", "right": "issue:44792", "accept": false, - "reason": "GraniteSpeech CI stale-device test and janus image-generation test failure are unrelated test cases." + "reason": "CI stale device override failure in GraniteSpeech and janus image-generation test failure are different test issues." }, { "left": "issue:43854", "right": "issue:44792", "accept": false, - "reason": "GLM-4.7-Flash load correctness in unit tests is unrelated to the janus model test failure." + "reason": "Model loading failure for GLM-4.7-Flash and a janus generate-images test failure are unrelated." }, { "left": "issue:43295", "right": "issue:45335", "accept": false, - "reason": "processor.tokenizer regression and t5gemma resize_token_embeddings not updating decoder.embed_tokens are different APIs and failures." 
+ "reason": "Both are resize_token_embeddings regressions, but they hit different model architectures and different internal embeddings." }, { "left": "issue:43643", "right": "issue:44496", "accept": false, - "reason": "trust_remote_code returning missing fields is a different config-loading problem from unrecognized model_type in config.json." - }, - { - "left": "issue:43299", - "right": "issue:45216", - "accept": false, - "reason": "Qwen3VL MoE loading breakage and Qwen3.5 save_pretrained corruption affect different model families and code paths." + "reason": "A trust_remote_code config-field loss and an unrecognized-model error are separate loading issues with different root causes." } ] }, @@ -8833,9 +8699,8 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ - "issue:38175", "issue:41950", "issue:42907", "issue:43295", @@ -8866,7 +8731,7 @@ "issue:44464", "issue:44496", "issue:44521", - "issue:44617", + "issue:44683", "issue:44779", "issue:44871", "issue:44877", @@ -8882,262 +8747,270 @@ "issue:45278", "issue:45325", "issue:45357", - "issue:45405" + "issue:45405", + "issue:45542" ], "soft_pairs": [ + "issue:43299|issue:45216", "issue:42907|issue:43441", "issue:44016|issue:44062", "issue:44112|issue:44964", "issue:44871|issue:45245", "issue:43673|issue:44336", "issue:43976|issue:44987", - "issue:44496|issue:45042", "issue:43526|issue:43901", + "issue:44496|issue:45042", "issue:43824|issue:44933", - "issue:43299|issue:45357", "issue:43976|issue:44279", + "issue:43299|issue:45357", "issue:43577|issue:45216", - "issue:44075|issue:44368", "issue:43901|issue:44496", + "issue:44075|issue:44368", "issue:44521|issue:45325", "issue:43854|issue:44964", + "issue:44683|issue:45542", "issue:44242|issue:45200", - "issue:41950|issue:45020", "issue:44464|issue:45245", + "issue:43976|issue:45405", "issue:44351|issue:45278", "issue:41950|issue:43650", - 
"issue:43976|issue:45405", "issue:43479|issue:43525", + "issue:41950|issue:45020", "issue:44496|issue:44877", "issue:44462|issue:45081", "issue:44297|issue:44462", - "issue:44279|issue:45405", "issue:43550|issue:44964", - "issue:44462|issue:44779", + "issue:44279|issue:45405", "issue:43299|issue:43854", "issue:43295|issue:44779", - "issue:38175|issue:44617" + "issue:44462|issue:44779" ], - "prepared_review_unit_hash": "ff1c6ea3c387ffb7b5f843595813e2ddcda2006d2639890436ea85d44e247e64", + "prepared_review_unit_hash": "5212e2e145c5ba8d397fad818a7bc701447cdd0a5d1bc0842a59f4bedc4bdd31", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 32, - "serialized_chars": 37242, - "estimated_input_tokens": 9311, - "estimated_eval_tokens": 18878 + "soft_pair_count": 33, + "serialized_chars": 37579, + "estimated_input_tokens": 9395, + "estimated_eval_tokens": 19046 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is highly heterogeneous: the suggested soft pairs mostly share only superficial themes like version regressions, model loading, or tokenizer issues, but they describe different bugs and code paths. 
I would not merge any of the proposed pairs.", - "confidence": 0.93, - "canonical_issue_reason": "issue:45020 is the broadest open issue in the set and best matches the loose theme of recent-version model-loading regressions, though it is still too general to be a precise duplicate anchor.", + "summary": "This set is a loose collection of unrelated Transformers issues, mostly version-specific regressions and model-loading bugs. None of the soft pairs look like true duplicates or mergeable PR-equivalents, so I would reject all soft edges.", + "confidence": 0.2, + "canonical_issue_reason": "issue:43824 is the strongest representative issue here: it is concrete, well-scoped, has the most discussion/inbound references, and clearly describes a reproducible import failure with a specific model class.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45020 is the most suitable umbrella issue because it is open, broad, and central to the shared regression theme across several of these reports.", + "best_issue_reason": "issue:43824 is the best single issue to keep as the representative bug report because it is precise, high-signal, and appears to have broader community impact than the other items.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:43299", + "right": "issue:45216", + "accept": false, + "reason": "Different regressions on different Qwen model families: one is loading Qwen3VL MoE models, the other is save_pretrained corruption for Qwen3.5." + }, { "left": "issue:42907", "right": "issue:43441", "accept": false, - "reason": "Different failures: saving dequantized Ministral/Devstral models vs FlashAttention load/runtime breakage in Ministral-3." + "reason": "Both mention Ministral, but one is about saving dequantized models while the other is a FlashAttention loading/runtime failure; not the same bug." 
}, { "left": "issue:44016", "right": "issue:44062", "accept": false, - "reason": "Unrelated bugs: a notebook syntax error vs an AddedToken keyword-argument conflict." + "reason": "Notebook syntax error and AddedToken keyword collision are unrelated failures in different code paths." }, { "left": "issue:44112", "right": "issue:44964", "accept": false, - "reason": "Different targets and code paths: GraniteSpeech CI device-override test vs Phi-4 multimodal model loading." + "reason": "A stale CI test in GraniteSpeech and a Phi-4 multimodal loading failure are distinct issues." }, { "left": "issue:44871", "right": "issue:45245", "accept": false, - "reason": "Different symptoms: eos_token_id config mismatch vs a runtime category-count limit error." + "reason": "Gemma eos_token_id mismatch and a categories-limit runtime error are unrelated." }, { "left": "issue:43673", "right": "issue:44336", "accept": false, - "reason": "Different subsystems: chunked-prefill generation cache vs ANSI formatting in loading_report." + "reason": "Chunked_prefill cache handling and ANSI formatting in loading_report are different subsystems and symptoms." }, { "left": "issue:43976", "right": "issue:44987", "accept": false, - "reason": "Different issues: Python version compatibility vs loading a specific model checkpoint." + "reason": "Python version compatibility and loading physical-intelligence/fast are not the same underlying bug." }, { - "left": "issue:44496", - "right": "issue:45042", + "left": "issue:43526", + "right": "issue:43901", "accept": false, - "reason": "Different code paths: unrecognized model loading vs PIL image processors wrongly requiring torchvision." + "reason": "BeitImageProcessorFast label reduction is unrelated to a docs mismatch about return_all_scores." 
}, { - "left": "issue:43526", - "right": "issue:43901", + "left": "issue:44496", + "right": "issue:45042", "accept": false, - "reason": "Different classes of bug: processor label reduction vs documentation mentioning a removed return_all_scores option." + "reason": "One is an unrecognized model/config issue; the other is an image processor torchvision dependency regression." }, { "left": "issue:43824", "right": "issue:44933", "accept": false, - "reason": "Both are import errors, but for different symbols and modules; not the same underlying bug." + "reason": "Different import failures: missing Qwen2_5_VL export versus a nonexistent image_utils import." }, { - "left": "issue:43299", - "right": "issue:45357", + "left": "issue:43976", + "right": "issue:44279", "accept": false, - "reason": "Different Qwen regressions: model loading failure vs incorrect save_pretrained visual-encoder keys." + "reason": "A Python support issue and a generic dependency issue are too broad and distinct to be duplicates." }, { - "left": "issue:43976", - "right": "issue:44279", + "left": "issue:43299", + "right": "issue:45357", "accept": false, - "reason": "Only loosely related by dependency/version complaints; the concrete failures are different." + "reason": "Qwen3VL loading and Qwen3.5 visual-encoder save_pretrained corruption are separate regressions." }, { "left": "issue:43577", "right": "issue:45216", "accept": false, - "reason": "Different model behaviors: BLIP2 dtype propagation vs Qwen3.5 checkpoint save corruption." + "reason": "Blip2 dtype propagation and Qwen3.5 checkpoint saving are unrelated." }, { - "left": "issue:44075", - "right": "issue:44368", + "left": "issue:43901", + "right": "issue:44496", "accept": false, - "reason": "Different functionality: SGD optimizer argument handling vs a config warning about tied embeddings." + "reason": "Documentation drift for return_all_scores is not the same as a model auto-mapping/load failure." 
}, { - "left": "issue:43901", - "right": "issue:44496", + "left": "issue:44075", + "right": "issue:44368", "accept": false, - "reason": "Docs mismatch vs model loading failure; not the same bug." + "reason": "SGD optimizer args being ignored and a tie_word_embeddings warning are unrelated." }, { "left": "issue:44521", "right": "issue:45325", "accept": false, - "reason": "Different multimodal bugs: assistant mask generation vs Qwen2.5-VL rope-index scaling." + "reason": "Assistant mask generation for multimodal chat templates is unrelated to Qwen2.5-VL rope index scaling." }, { "left": "issue:43854", "right": "issue:44964", "accept": false, - "reason": "Different model-loading failures for different models and causes." + "reason": "Both are model-loading issues, but they affect different models and failure points; no evidence they share the same code path." }, { - "left": "issue:44242", - "right": "issue:45200", + "left": "issue:44683", + "right": "issue:45542", "accept": false, - "reason": "Different Mixture-of-Experts issues: missing load-balancing loss vs mm_token_type_ids defaults." + "reason": "Compiled flex_attention on torch>=2.9 and a missing TensorFlow backend import are unrelated." }, { - "left": "issue:41950", - "right": "issue:45020", + "left": "issue:44242", + "right": "issue:45200", "accept": false, - "reason": "One is a specific pipeline processor lookup bug; the other is a broad remote_code loading regression report." + "reason": "Load-balancing loss omission and Gemma 4 mm_token_type_ids defaulting are different training-time issues." }, { "left": "issue:44464", "right": "issue:45245", "accept": false, - "reason": "Different bugs: inconsistent chunked generation under compilation vs category cardinality overflow." + "reason": "Chunked generation inconsistency and a categories cardinality limit error are unrelated." 
+ }, + { + "left": "issue:43976", + "right": "issue:45405", + "accept": false, + "reason": "Python compatibility and an unreleased PEFT version bump are different dependency problems." }, { "left": "issue:44351", "right": "issue:45278", "accept": false, - "reason": "Related only at a very broad level; one missing import is not the same as a general post-upgrade import-error report." + "reason": "A specific missing HybridCache export may be one symptom of broader import breakage, but the evidence is too weak to call it the same bug as the generic multi-import failure." }, { "left": "issue:41950", "right": "issue:43650", "accept": false, - "reason": "Completely unrelated: a pipeline bug vs a malformed/non-informative issue title." + "reason": "Video-classification processor lookup and a vague 'ADD THE DATA' issue are not related." }, { - "left": "issue:43976", - "right": "issue:45405", + "left": "issue:43479", + "right": "issue:43525", "accept": false, - "reason": "Different concerns: Python support regression vs an unrelated PEFT version bump issue." + "reason": "Phi4MultimodalConfig default initialization and a Llama4Config pad_token_id AttributeError are separate model config bugs." }, { - "left": "issue:43479", - "right": "issue:43525", + "left": "issue:41950", + "right": "issue:45020", "accept": false, - "reason": "Different config bugs in different models: default multimodal subconfigs vs missing pad_token_id on Llama4Config." + "reason": "A specific video-classification pipeline bug and a broad remote_code regression are not the same concrete issue." }, { "left": "issue:44496", "right": "issue:44877", "accept": false, - "reason": "Both are load-time/config problems, but they affect different models and failure modes." + "reason": "Unrecognized model loading and strict granite_speech config validation are different failures." 
}, { "left": "issue:44462", "right": "issue:45081", "accept": false, - "reason": "Different tokenizer code paths: AutoTokenizer ignoring tokenizer.json vs a Mistral regex patch crash." + "reason": "Tokenizer JSON loading and a Mistral regex patch crash are unrelated tokenizer issues." }, { "left": "issue:44297", "right": "issue:44462", "accept": false, - "reason": "Related to tokenizer save/load, but one is a wrong tokenizer_class written on save and the other is AutoTokenizer ignoring tokenizer.json." - }, - { - "left": "issue:44279", - "right": "issue:45405", - "accept": false, - "reason": "A vague dependency complaint is not the same bug as bumping the required PEFT version." + "reason": "Tokenizer class metadata mismatch on save and AutoTokenizer ignoring tokenizer.json are different load/save bugs." }, { "left": "issue:43550", "right": "issue:44964", "accept": false, - "reason": "Different failures: torch.compile/SDPA on Bamba vs Phi-4 multimodal loading." + "reason": "Bamba torch.compile/SDPA failure and Phi-4 multimodal loading are unrelated." }, { - "left": "issue:44462", - "right": "issue:44779", + "left": "issue:44279", + "right": "issue:45405", "accept": false, - "reason": "Different tokenizer regressions affecting different mechanisms and models." + "reason": "A generic transformers dependency complaint and a specific unreleased PEFT minimum version bump are not duplicates." }, { "left": "issue:43299", "right": "issue:43854", "accept": false, - "reason": "Different model-specific load failures with no shared concrete code-path." + "reason": "Different model families and different failure modes; no shared code path is evident." }, { "left": "issue:43295", "right": "issue:44779", "accept": false, - "reason": "Processor/tokenizer API regression vs Deepseek tokenization correctness regression." + "reason": "Both are tokenizer regressions, but they affect different models and report different incorrect behaviors." 
}, { - "left": "issue:38175", - "right": "issue:44617", + "left": "issue:44462", + "right": "issue:44779", "accept": false, - "reason": "Unrelated: zero probabilities from SigLIP2 vs CUDA OOM in Sam3Video." + "reason": "Deepseek tokenizer correctness regression is not the same as AutoTokenizer ignoring repository tokenizer.json." } ] }, @@ -9159,11 +9032,10 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ + "issue:38175", "issue:39692", - "issue:41628", - "issue:41950", "issue:42175", "issue:42994", "issue:43065", @@ -9180,6 +9052,7 @@ "issue:43673", "issue:43723", "issue:43761", + "issue:43784", "issue:43824", "issue:43881", "issue:43901", @@ -9187,7 +9060,6 @@ "issue:43994", "issue:44016", "issue:44162", - "issue:44220", "issue:44246", "issue:44261", "issue:44263", @@ -9196,7 +9068,6 @@ "issue:44297", "issue:44466", "issue:44521", - "issue:44589", "issue:44617", "issue:44623", "issue:44671", @@ -9204,245 +9075,239 @@ "issue:44749", "issue:44779", "issue:44898", + "issue:44987", "issue:45250", "issue:45276", "issue:45290", - "issue:45362", - "issue:45397" + "issue:45397", + "issue:45542" ], "soft_pairs": [ + "issue:38175|issue:44617", "issue:39692|issue:43901", "issue:43650|issue:45290", "issue:44016|issue:44291", - "issue:42994|issue:43317", - "issue:43441|issue:44683", "issue:44466|issue:45276", + "issue:43441|issue:44683", "issue:43450|issue:44279", + "issue:42994|issue:43317", "issue:43065|issue:44898", - "issue:43262|issue:44521", "issue:43723|issue:44162", + "issue:43262|issue:44521", "issue:44261|issue:44263", "issue:44623|issue:45397", "issue:43122|issue:43650", "issue:44623|issue:44749", + "issue:43784|issue:45542", "issue:44671|issue:44779", "issue:43673|issue:43761", + "issue:44987|issue:45542", "issue:43901|issue:43994", - "issue:42175|issue:43976", "issue:44297|issue:45397", + "issue:42175|issue:43976", "issue:43475|issue:44016", 
"issue:43824|issue:43976", - "issue:43295|issue:44617", - "issue:44589|issue:45362", "issue:43901|issue:44246", - "issue:43525|issue:43881", "issue:43352|issue:45250", - "issue:43450|issue:44617", - "issue:41950|issue:44220", - "issue:41628|issue:43976" + "issue:43525|issue:43881", + "issue:43295|issue:44617" ], - "prepared_review_unit_hash": "a00266bb8023ffcbceb22fe3fc83a83d4cab8184ea632fef9ea45aae42d358e2", + "prepared_review_unit_hash": "34618173c6223d8165cb271fc63780ab31b2be3e6568b867fcdde7c6efadd74d", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { - "node_count": 48, - "item_count": 48, - "soft_pair_count": 29, - "serialized_chars": 35993, - "estimated_input_tokens": 8999, - "estimated_eval_tokens": 18254 + "node_count": 47, + "item_count": 47, + "soft_pair_count": 28, + "serialized_chars": 35249, + "estimated_input_tokens": 8813, + "estimated_eval_tokens": 17882 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The items are mostly unrelated single-issue reports across docs, tokenization, loading, processors, quantization, and model-specific regressions. I do not see a coherent duplicate cluster, so there is no good canonical issue here.", - "confidence": 0.95, - "canonical_issue_reason": null, + "summary": "All soft pairs are rejected: they share broad keywords or subsystems, but each pair points to a different concrete bug, regression, or docs issue. 
The most representative issue in this set is the SigLIP2 output-quality report, which is the clearest self-contained user-facing bug.", + "confidence": 0.86, + "canonical_issue_reason": "issue:43994 is the clearest, most self-contained bug report: it names the model, the wrong behavior, and the affected entry points (AutoModel and pipeline).", "canonical_pr_reason": null, - "best_issue_reason": null, + "best_issue_reason": "issue:43994 is the strongest standalone issue in this set because it gives a concrete end-user symptom and a broad repro surface, making it the best representative issue.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:38175", + "right": "issue:44617", + "accept": false, + "reason": "SigLIP2 zero-probability outputs vs SAM3 video CUDA OOM are unrelated failures." + }, { "left": "issue:39692", "right": "issue:43901", "accept": false, - "reason": "Both are documentation-related, but they describe different problems in different docs areas: SigLIP2 example errors vs. TextClassificationPipeline return_all_scores docs mismatch." + "reason": "Both mention docs/examples, but one is a SigLIP2 example bug and the other is a pipeline-docs behavior mismatch." }, { "left": "issue:43650", "right": "issue:45290", "accept": false, - "reason": "Unrelated topics: a vague data request vs. apply_chat_template crashing on assistant tool-call messages with no content." + "reason": "Generic placeholder issue vs apply_chat_template crash on tool-call assistant messages; no shared underlying bug." }, { "left": "issue:44016", "right": "issue:44291", "accept": false, - "reason": "Notebook syntax error and init_empty_weights TypeError are different failure modes with no shared code-path evidence." + "reason": "Notebook syntax error is unrelated to the init_empty_weights TypeError regression." 
}, { - "left": "issue:42994", - "right": "issue:43317", + "left": "issue:44466", + "right": "issue:45276", "accept": false, - "reason": "Both involve quantization/loading, but one is about saving quantized models and the other about dequantized model loading with device_map offload." + "reason": "Both touch weights/serialization-adjacent behavior, but one is inconsistent lm_head serialization and the other is resize_token_embeddings not propagating." }, { "left": "issue:43441", "right": "issue:44683", "accept": false, - "reason": "FlashAttention/flex_attention issues are different layers and different failures; not the same concrete bug." + "reason": "Both involve Flash Attention, but one is model support gating and the other is a compiled flex_attention failure on newer torch." }, { - "left": "issue:44466", - "right": "issue:45276", + "left": "issue:43450", + "right": "issue:44279", "accept": false, - "reason": "lm_head serialization inconsistency and resize_token_embeddings behavior in gemma4 are distinct model-state bugs." + "reason": "Video processor batching shape bug vs dependency issue; different layers and symptoms." }, { - "left": "issue:43450", - "right": "issue:44279", + "left": "issue:42994", + "right": "issue:43317", "accept": false, - "reason": "Video processor shape bug and a generic dependency issue are not the same underlying issue." + "reason": "Quantized model saving failure and dequantized offload loading failure are different code paths." }, { "left": "issue:43065", "right": "issue:44898", "accept": false, - "reason": "SAM3 pixel decoder dummy conv2d and Perceiver interpolation failure affect different models and code paths." + "reason": "Dummy Conv2d in Sam3PixelDecoder is unrelated to Perceiver interpolation behavior." 
}, { - "left": "issue:43262", - "right": "issue:44521", + "left": "issue:43723", + "right": "issue:44162", "accept": false, - "reason": "Audio chat-template sampling-rate default and multimodal assistant_masks being all-zero are separate processor/template bugs." + "reason": "Tokenizer loading regression vs ESM2 model breakage; not the same underlying issue." }, { - "left": "issue:43723", - "right": "issue:44162", + "left": "issue:43262", + "right": "issue:44521", "accept": false, - "reason": "Tokenizer import/loading failure and ESM2 breakage are unrelated model-specific regressions." + "reason": "Audio chat-template sampling-rate default and multimodal assistant_mask all-zero bug are different apply_chat_template problems." }, { "left": "issue:44261", "right": "issue:44263", "accept": false, - "reason": "Different low-level bugs: missing rms_norm_eps precision issue vs. torch.split return-value handling in a MoE indexer." + "reason": "MLA layernorm precision issue and GlmMoeDsaIndexer torch.split return-value issue are unrelated." }, { "left": "issue:44623", "right": "issue:45397", "accept": false, - "reason": "processor.save_pretrained missing files and gemma-4 zero3 from_pretrained failure are distinct serialization/loading problems." + "reason": "processor.save_pretrained missing files is distinct from gemma-4 zero3 from_pretrained failure." }, { "left": "issue:43122", "right": "issue:43650", "accept": false, - "reason": "Tokenization regression and a vague data request have no meaningful overlap." + "reason": "Tokenizer behavior regression is unrelated to the placeholder 'ADD THE DATA' issue." }, { "left": "issue:44623", "right": "issue:44749", "accept": false, - "reason": "Saving processor files and slower filtering during data processing are unrelated." + "reason": "Save-pretrained file omission vs Chinese-reported performance slowdown after upgrade are different bugs." 
+ }, + { + "left": "issue:43784", + "right": "issue:45542", + "accept": false, + "reason": "Missing nn import in sentence-transformers is unrelated to TensorBoard/TensorFlow backend installation behavior." }, { "left": "issue:44671", "right": "issue:44779", "accept": false, - "reason": "CamemBERT MLM prediction regression and Deepseek tokenizer regression are separate model/tokenizer issues." + "reason": "CamemBERT masked-LM prediction regression and Deepseek tokenizer regression are different model/tokenizer bugs." }, { "left": "issue:43673", "right": "issue:43761", "accept": false, - "reason": "Chunked prefill cache regression and CLIP hidden_states handling are different Generation/vision bugs." + "reason": "Generation cache missing in chunked_prefill vs CLIP hidden_states output regression are separate code paths." }, { - "left": "issue:43901", - "right": "issue:43994", + "left": "issue:44987", + "right": "issue:45542", "accept": false, - "reason": "Docs mismatch for text classification and SigLIP2 bad outputs with AutoModel/pipeline are not the same issue." + "reason": "Model loading failure on transformers>=5.1.0 is unrelated to missing TensorFlow backend after pip install '.[torch]'." }, { - "left": "issue:42175", - "right": "issue:43976", + "left": "issue:43901", + "right": "issue:43994", "accept": false, - "reason": "Package-extra backend omission and Python-version compatibility failure are unrelated installation/environment issues." + "reason": "Docs mention of return_all_scores is not the same as SigLIP2 producing nonsensical outputs." }, { "left": "issue:44297", "right": "issue:45397", "accept": false, - "reason": "tokenizer.save_pretrained metadata mismatch and gemma-4 zero3 loading failure are different tokenizer/model-loading bugs." - }, - { - "left": "issue:43475", - "right": "issue:44016", - "accept": false, - "reason": "SAM3 video encoder attribute error and a notebook syntax error do not share a code path." 
+ "reason": "Tokenizer save metadata mismatch and gemma-4 zero3 loading failure do not describe the same bug." }, { - "left": "issue:43824", + "left": "issue:42175", "right": "issue:43976", "accept": false, - "reason": "ImportError for Qwen2.5 VL class and a Python version incompatibility report are separate release/import issues." + "reason": "Missing TensorFlow in the torch extra and Python-version compatibility failure are different dependency issues." }, { - "left": "issue:43295", - "right": "issue:44617", + "left": "issue:43475", + "right": "issue:44016", "accept": false, - "reason": "Processor/tokenizer regression in v4.57.5 and SAM3 video CUDA OOM are unrelated." + "reason": "SAM3 video encoder attribute error is unrelated to a notebook syntax error." }, { - "left": "issue:44589", - "right": "issue:45362", + "left": "issue:43824", + "right": "issue:43976", "accept": false, - "reason": "Float8 storage lookup failure and Qwen3.5-35B chat crash are different runtime errors." + "reason": "Missing Qwen2_5_VL import and Python version support regression are unrelated." }, { "left": "issue:43901", "right": "issue:44246", "accept": false, - "reason": "Pipeline docs mismatch and import taking long sometimes are unrelated." - }, - { - "left": "issue:43525", - "right": "issue:43881", - "accept": false, - "reason": "Missing pad_token_id on Llama4Config and glm-4v-9b loading failure are model-specific and not the same bug." + "reason": "Pipeline docs mismatch vs import being slow sometimes are not the same problem." }, { "left": "issue:43352", "right": "issue:45250", "accept": false, - "reason": "Specific model unsupported by Flash Attention 2 and a generic Flash Attention topic are not one concrete fix." - }, - { - "left": "issue:43450", - "right": "issue:44617", - "accept": false, - "reason": "Incorrect batched video processor shapes and SAM3 video OOM are different problems." 
+ "reason": "Specific model lacking Flash Attention 2.0 support is not the same as the generic Flash Attention 2.0 issue/request." }, { - "left": "issue:41950", - "right": "issue:44220", + "left": "issue:43525", + "right": "issue:43881", "accept": false, - "reason": "Video-classification pipeline image-processor lookup and _torch_extract_fbank_features are different pipeline/audio bugs." + "reason": "Llama4Config missing pad_token_id and glm-4v-9b loading failed are different attribute/loading failures." }, { - "left": "issue:41628", - "right": "issue:43976", + "left": "issue:43295", + "right": "issue:44617", "accept": false, - "reason": "Missing AutoImageProcessor import and Python 3.9/3.10 compatibility are unrelated import/environment issues." + "reason": "Processor.tokenizer regression with image passing is unrelated to SAM3 video CUDA OOM." } ] }, @@ -9464,28 +9329,27 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:36246", + "issue:41628", "issue:41950", "issue:42491", "issue:42757", - "issue:42886", - "issue:43065", "issue:43066", - "issue:43257", "issue:43299", - "issue:43408", "issue:43441", + "issue:43450", "issue:43526", "issue:43531", "issue:43540", "issue:43596", "issue:43673", - "issue:43901", + "issue:43976", "issue:43994", "issue:44162", "issue:44188", + "issue:44220", "issue:44242", "issue:44246", "issue:44295", @@ -9496,31 +9360,37 @@ "issue:44485", "issue:44496", "issue:44561", + "issue:44589", + "issue:44617", + "issue:44661", "issue:44749", "issue:44779", "issue:44857", "issue:44871", "issue:44877", "issue:44987", - "issue:44995", "issue:45003", - "issue:45137", "issue:45200", - "issue:45229", "issue:45276", "issue:45292", "issue:45356", "issue:45362", "issue:45375", "issue:45397", - "issue:45459" + "issue:45459", + "issue:45542" ], "soft_pairs": [ + "issue:44589|issue:45362", + "issue:43450|issue:44617", 
"issue:44188|issue:44336", + "issue:41628|issue:43976", + "issue:41950|issue:44220", "issue:44295|issue:45397", "issue:42491|issue:43299", "issue:44246|issue:44496", "issue:44336|issue:44373", + "issue:44661|issue:45542", "issue:44462|issue:45356", "issue:43526|issue:45003", "issue:43994|issue:45276", @@ -9529,225 +9399,213 @@ "issue:36246|issue:44351", "issue:43673|issue:44561", "issue:44336|issue:44485", - "issue:36246|issue:43596", "issue:44162|issue:45200", - "issue:44246|issue:44871", "issue:44242|issue:44857", - "issue:44779|issue:45292", - "issue:44987|issue:45459", + "issue:44246|issue:44871", "issue:44295|issue:44749", - "issue:43441|issue:43540", + "issue:44779|issue:45292", "issue:44877|issue:45375", - "issue:41950|issue:43596", - "issue:43066|issue:44779", - "issue:43901|issue:45003", - "issue:42886|issue:45229", - "issue:44995|issue:45200", - "issue:43257|issue:45137", - "issue:43065|issue:43408" - ], - "prepared_review_unit_hash": "1e3382228b6fcd369d23412c8f4fecee82b617cb403bb91bbf527d3adb36e836", + "issue:36246|issue:43596", + "issue:43441|issue:43540", + "issue:44987|issue:45459", + "issue:43066|issue:44779" + ], + "prepared_review_unit_hash": "c2ba531fa00f668233b267b3f6d2f1ffb8696230ac230bd6865849558a52c2a4", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 29, - "serialized_chars": 36188, - "estimated_input_tokens": 9047, - "estimated_eval_tokens": 18350 + "soft_pair_count": 28, + "serialized_chars": 35863, + "estimated_input_tokens": 8966, + "estimated_eval_tokens": 18188 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, 
"split": true, "analyst_result": { - "summary": "This is a heterogeneous set of Transformers issues covering import regressions, tokenizer/config mismatches, model-specific bugs, and distributed-training failures. None of the proposed soft pairs look like true duplicates; they usually share only a broad subsystem or version context.", - "confidence": 0.46, - "canonical_issue_reason": "If forced to pick one issue as an anchor, issue 44561 is the broadest root-cause style regression: a v5 API removal breaking many trust_remote_code models. It is more central than the model-specific or symptom-only reports in this set.", + "summary": "The cluster is highly heterogeneous: most pairs are only broad Transformers v5 regressions or entirely unrelated model/pipeline bugs, so no soft edge looks like the same concrete issue. If forced to pick a representative, issue 41628 is the broadest user-facing import-regression report.", + "confidence": 0.27, + "canonical_issue_reason": "Issue 41628 is the cleanest, most generic top-level API breakage report in the set (a direct import failure from `transformers`), so it is the least-wrong representative of the cluster\u2019s loose v5 import/regression theme.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 44561 is the best global representative because it describes a concrete upstream compatibility break with clear version-scope and a plausible shared fix path. The rest of the batch is too fragmented to use as a single canonical duplicate target.", + "best_issue_reason": "41628 is the most broadly applicable and user-visible failure mode among these issues, making it the best canonical issue candidate despite the overall heterogeneity.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:44589", + "right": "issue:45362", + "accept": false, + "reason": "Different failures: a storage/type import/runtime error versus a Qwen chat crash. Same ecosystem, not the same bug." 
+ }, + { + "left": "issue:43450", + "right": "issue:44617", + "accept": false, + "reason": "One is a batched video processor shape bug; the other is CUDA OOM in Sam3Video. Not the same underlying issue." + }, { "left": "issue:44188", "right": "issue:44336", "accept": false, - "reason": "torch.compile attention-kernel divergence and ANSI logging output are unrelated bugs." + "reason": "Diverging attention kernels under `torch.compile` is unrelated to ANSI code output from loading_report." + }, + { + "left": "issue:41628", + "right": "issue:43976", + "accept": false, + "reason": "ImportError for `AutoImageProcessor` and a Python version compatibility report are different problem classes." + }, + { + "left": "issue:41950", + "right": "issue:44220", + "accept": false, + "reason": "Video pipeline choosing image processors and `_torch_extract_fbank_features()` failures are unrelated code paths." }, { "left": "issue:44295", "right": "issue:45397", "accept": false, - "reason": "A buffer access error for position_ids is not the same underlying problem as Gemma-4 zero3 from_pretrained failure." + "reason": "Buffer access on `position_ids` and Zero3 `from_pretrained` breakage are different bugs." }, { "left": "issue:42491", "right": "issue:43299", "accept": false, - "reason": "Both involve Qwen3/MoE, but one is a LoRA compatibility break on hf4.x and the other is a Qwen3VL MoE load regression on v5; different concrete failures." + "reason": "Both involve Qwen3 MoE, but one is HF4.x LoRA compatibility and the other is v5 loading breakage for Qwen3VL MoE; not mergeable as one bug." }, { "left": "issue:44246", "right": "issue:44496", "accept": false, - "reason": "Import slowness and unrecognized model/config loading are different issues." + "reason": "Slow import and unrecognized model/config loading are not the same underlying failure." 
}, { "left": "issue:44336", "right": "issue:44373", "accept": false, - "reason": "ANSI codes in loading_report is unrelated to a wrong docstring for position_ids." + "reason": "Terminal ANSI escape output is unrelated to a wrong docstring for `position_ids`." + }, + { + "left": "issue:44661", + "right": "issue:45542", + "accept": false, + "reason": "Tokenizer mapping-name handling and missing TensorFlow backend installation are unrelated." }, { "left": "issue:44462", "right": "issue:45356", "accept": false, - "reason": "Tokenizer.json being ignored and Kimi-K2.5 codec/regression handling are both tokenizer-related but not the same bug." + "reason": "Both are tokenizer regressions, but they affect different models and different failure modes; too broad to treat as one bug." }, { "left": "issue:43526", "right": "issue:45003", "accept": false, - "reason": "A BeitImageProcessorFast label-reduction bug is unrelated to unsafe sys.modules access in modeling_utils." + "reason": "BeitImageProcessorFast label reduction and unsafe `sys.modules` access are unrelated." }, { "left": "issue:43994", "right": "issue:45276", "accept": false, - "reason": "Siglip2 bad outputs and Gemma-4 resize_token_embeddings propagation are different code paths and symptoms." + "reason": "Nonsensical model outputs and resize-token-embedding propagation are different issues." }, { "left": "issue:43531", "right": "issue:45362", "accept": false, - "reason": "Qwen3-MoE sliding_window behavior and Qwen3.5-35B chat crashes are distinct model bugs." + "reason": "Sliding-window behavior in Qwen3-MoE is unrelated to a Qwen3.5-35B chat crash." }, { "left": "issue:36246", "right": "issue:42757", "accept": false, - "reason": "Both are import errors, but they involve different missing symbols from different packages and different root causes." + "reason": "Both are import errors, but for different missing symbols and different root causes." 
}, { "left": "issue:36246", "right": "issue:44351", "accept": false, - "reason": "Missing Qwen2_5_VLImageProcessor and missing HybridCache exports are separate API breakages." + "reason": "Missing `Qwen2_5_VLImageProcessor` and missing `HybridCache` are separate export regressions." }, { "left": "issue:43673", "right": "issue:44561", "accept": false, - "reason": "Chunked-prefill cache issues and is_torch_fx_available removal are unrelated regressions." + "reason": "Chunked-prefill cache behavior and removal of `is_torch_fx_available` break different code paths." }, { "left": "issue:44336", "right": "issue:44485", "accept": false, - "reason": "Terminal ANSI formatting and GLM-5 RoPE implementation are not the same bug." - }, - { - "left": "issue:36246", - "right": "issue:43596", - "accept": false, - "reason": "An import error for a model class is unrelated to a DeepSpeed ZeRO3 IndexError in BertModel." + "reason": "ANSI output noise and GLM-5 RoPE implementation are unrelated." }, { "left": "issue:44162", "right": "issue:45200", "accept": false, - "reason": "ESM2 breakage and Gemma-4 mm_token_type_ids defaults are different model-specific problems." - }, - { - "left": "issue:44246", - "right": "issue:44871", - "accept": false, - "reason": "Import performance and eos_token_id inconsistency are unrelated." + "reason": "ESM2 breakage and Gemma 4 `mm_token_type_ids` defaults are different model-specific bugs." }, { "left": "issue:44242", "right": "issue:44857", "accept": false, - "reason": "Missing load balancing loss and fp16 CUDA crash in LwDetrImageLoss do not share the same underlying fix." + "reason": "Missing load-balancing loss and float16 AMP crash are unrelated training/runtime failures." }, { - "left": "issue:44779", - "right": "issue:45292", - "accept": false, - "reason": "DeepSeek tokenizer regression and resize_token_embeddings not updating output embeddings are different subsystems." 
- }, - { - "left": "issue:44987", - "right": "issue:45459", + "left": "issue:44246", + "right": "issue:44871", "accept": false, - "reason": "Model loading failure on v5.1.0 and protobuf exception masking are different tokenizer/loading bugs." + "reason": "Import slowness and Gemma eos-token-id inconsistency are not the same bug." }, { "left": "issue:44295", "right": "issue:44749", "accept": false, - "reason": "A position_ids buffer issue is not the same as a performance regression after upgrading Transformers." + "reason": "`position_ids` buffer access and data-filtering slowdown after upgrade are unrelated." }, { - "left": "issue:43441", - "right": "issue:43540", + "left": "issue:44779", + "right": "issue:45292", "accept": false, - "reason": "Ministral-3 FlashAttention failure and Qwen3OmniMoe video-input ValueError are different model-path bugs." + "reason": "Tokenizer regression and `resize_token_embeddings` not updating output embeddings are different code paths." }, { "left": "issue:44877", "right": "issue:45375", "accept": false, - "reason": "Strict config rejection for granite_speech and missing deepstack_visual_indexes in Qwen3_5MoeVisionConfig are distinct config-schema problems." + "reason": "Strict config rejection for granite_speech and a silently dropped Qwen config field are similar only at a high level, not the same concrete defect." }, { - "left": "issue:41950", + "left": "issue:36246", "right": "issue:43596", "accept": false, - "reason": "Video-classification pipeline processor lookup and ZeRO3/BertModel deque errors are unrelated." - }, - { - "left": "issue:43066", - "right": "issue:44779", - "accept": false, - "reason": "Both mention tokenizer behavior in v5, but the decoder-type bug and DeepSeek incorrect results are not clearly the same code-path problem." - }, - { - "left": "issue:43901", - "right": "issue:45003", - "accept": false, - "reason": "A docs mismatch for return_all_scores is unrelated to unsafe sys.modules access." 
- }, - { - "left": "issue:42886", - "right": "issue:45229", - "accept": false, - "reason": "Offline tokenizer cache loading and Gemma4 multi-GPU OOM are different failure modes." + "reason": "An import error and a DeepSpeed ZeRO3/BertModel index error are unrelated." }, { - "left": "issue:44995", - "right": "issue:45200", + "left": "issue:43441", + "right": "issue:43540", "accept": false, - "reason": "A stale indexer cache crash in GlmMoeDsa is unrelated to Gemma-4 text-only mm_token_type_ids defaults." + "reason": "Ministral flash-attention failure and Qwen3OmniMoe video-input ValueError are different bugs." }, { - "left": "issue:43257", - "right": "issue:45137", + "left": "issue:44987", + "right": "issue:45459", "accept": false, - "reason": "Qwen3 MoE weight conversion under accelerate+deepspeed and empty-deque ZeRO3 errors are both distributed-training related, but not the same bug." + "reason": "Loading a specific model and masking tokenizer decode errors when protobuf is absent are unrelated." }, { - "left": "issue:43065", - "right": "issue:43408", + "left": "issue:43066", + "right": "issue:44779", "accept": false, - "reason": "Dummy Conv2d in Sam3PixelDecoder and sam3_video/sam3_tracker type mismatch are separate SAM3 issues." + "reason": "Both are tokenizer regressions in v5, but they concern different models and different decoder/codec issues, so they should not be merged." 
} ] }, @@ -9769,27 +9627,29 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ - "issue:28282", - "issue:30990", + "issue:41628", + "issue:41950", "issue:42673", + "issue:42886", + "issue:43065", "issue:43232", + "issue:43257", "issue:43262", "issue:43295", "issue:43381", + "issue:43408", "issue:43441", "issue:43475", "issue:43540", "issue:43606", - "issue:43643", "issue:43646", "issue:43653", "issue:43673", - "issue:43701", "issue:43704", + "issue:43901", "issue:43931", - "issue:44038", "issue:44060", "issue:44077", "issue:44162", @@ -9806,20 +9666,24 @@ "issue:44869", "issue:44933", "issue:44977", + "issue:44995", "issue:45003", - "issue:45081", - "issue:45103", - "issue:45230", + "issue:45137", + "issue:45200", + "issue:45229", "issue:45278", "issue:45292", - "issue:45310", "issue:45335", "issue:45356", - "issue:45381", - "issue:45397", "issue:45459" ], "soft_pairs": [ + "issue:43901|issue:45003", + "issue:41628|issue:41950", + "issue:42886|issue:45229", + "issue:44995|issue:45200", + "issue:43257|issue:45137", + "issue:43065|issue:43408", "issue:43232|issue:44336", "issue:44496|issue:45003", "issue:43262|issue:44246", @@ -9830,248 +9694,221 @@ "issue:43653|issue:44162", "issue:44368|issue:44977", "issue:44933|issue:45459", - "issue:43673|issue:44734", "issue:43441|issue:43606", + "issue:43673|issue:44734", "issue:44869|issue:45356", "issue:43232|issue:43673", "issue:43931|issue:44368", "issue:44297|issue:44623", - "issue:43673|issue:45278", "issue:43646|issue:44162", + "issue:43673|issue:45278", "issue:43381|issue:45292", "issue:43381|issue:45335", "issue:42673|issue:43704", - "issue:28282|issue:30990", - "issue:44361|issue:44749", - "issue:44077|issue:45003", - "issue:44749|issue:45310", - "issue:45230|issue:45397", - "issue:44038|issue:44623", - "issue:43475|issue:45381", - "issue:43475|issue:43643", - "issue:43701|issue:44749", - 
"issue:45081|issue:45103" + "issue:44361|issue:44749" ], - "prepared_review_unit_hash": "59eba8f2e19dce1bdfbf4988a7e3ab8112317d25b6bbc87f5b85c9126c50f57a", + "prepared_review_unit_hash": "ae9788d5886e100cfb50f20a3865eadf72b0c49c1e740f1075b739b94d0932d9", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 47, "item_count": 47, - "soft_pair_count": 31, - "serialized_chars": 36506, - "estimated_input_tokens": 9127, - "estimated_eval_tokens": 18510 + "soft_pair_count": 28, + "serialized_chars": 35530, + "estimated_input_tokens": 8883, + "estimated_eval_tokens": 18022 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This set is mostly a grab bag of unrelated issues; the only clear duplicate pair is the Qwen3ForCausalLM VRAM leak report. I would anchor the cluster on the earlier, more-discussed issue and reject the rest of the soft matches as different bugs.", - "confidence": 0.92, - "canonical_issue_reason": "issue:42673 is the clearer canonical issue because it is the earlier report of the Qwen3ForCausalLM VRAM leak in multiple dataloader threads and has more discussion context; issue:43704 is an exact duplicate of the same bug.", + "summary": "This cluster is mostly a loose topical grouping, not a real duplicate set. 
The only clear duplicate pair is the Qwen3ForCausalLM VRAM leak report; the rest differ by model, API surface, or failure mode.", + "confidence": 0.89, + "canonical_issue_reason": "issue:42673 is the original, more detailed report of the Qwen3ForCausalLM VRAM leak; issue:43704 repeats the same title/symptoms and reads as the duplicate.", "canonical_pr_reason": null, - "best_issue_reason": "issue:42673 best represents the only true duplicate pair in this cluster and has the richer original report.", + "best_issue_reason": "issue:42673 is the clearest canonical issue in the cluster because it is the true duplicate anchor and has the fuller report.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:43901", + "right": "issue:45003", + "accept": false, + "reason": "Docs mismatch for text classification vs unsafe sys.modules access in modeling_utils; unrelated bugs." + }, + { + "left": "issue:41628", + "right": "issue:41950", + "accept": false, + "reason": "Both mention image processors, but one is an import error and the other is a video pipeline lookup bug; different failure modes." + }, + { + "left": "issue:42886", + "right": "issue:45229", + "accept": false, + "reason": "Offline cache loading for tokenizers is unrelated to Gemma4 multi-GPU CUDA OOM." + }, + { + "left": "issue:44995", + "right": "issue:45200", + "accept": false, + "reason": "GlmMoeDsa stale indexer cache on second forward is not the same as Gemma4 mm_token_type_ids defaults." + }, + { + "left": "issue:43257", + "right": "issue:45137", + "accept": false, + "reason": "Both involve DeepSpeed, but one is MOE weight conversion and the other is a ZeRO3 deque error; different code paths." + }, + { + "left": "issue:43065", + "right": "issue:43408", + "accept": false, + "reason": "SAM3 dummy Conv2d vs sam3_video/sam3_tracker config warning are distinct model issues." 
+ }, { "left": "issue:43232", "right": "issue:44336", "accept": false, - "reason": "Different bugs: generation kwargs after sync_gpus vs ANSI codes emitted by loading_report." + "reason": "Generation kwargs after sync_gpus is unrelated to ANSI output in loading_report." }, { "left": "issue:44496", "right": "issue:45003", "accept": false, - "reason": "Different areas and symptoms: missing model_type on trust_remote_code config vs unsafe sys.modules access in modeling_utils." + "reason": "Unrecognized model/config loading error is unrelated to sys.modules access in modeling_utils." }, { "left": "issue:43262", "right": "issue:44246", "accept": false, - "reason": "Unrelated: audio chat template sample rate handling vs import-time slowdown." + "reason": "Audio processor sampling-rate default is unrelated to slow import timing." }, { "left": "issue:43295", "right": "issue:45292", "accept": false, - "reason": "Different code paths: processor/tokenizer regression vs resize_token_embeddings not updating output embeddings." + "reason": "Custom processor.tokenizer regression is not the same bug as resize_token_embeddings not updating output embeddings." }, { "left": "issue:44060", "right": "issue:44077", "accept": false, - "reason": "Different issues: incorrect tied-weights warning vs patchtsmixer post_init policy." + "reason": "Tied-weights warning bug and patchtsmixer post_init deprecation are unrelated." }, { "left": "issue:43475", "right": "issue:43540", "accept": false, - "reason": "Both are video-related but they describe different model failures and different underlying bugs." + "reason": "SAM 3 Video missing attribute vs Qwen3OmniMoe video ValueError are different model failures." }, { "left": "issue:44466", "right": "issue:45335", "accept": false, - "reason": "Both concern embedding/weight tying, but one is device-dependent serialization and the other is resize_token_embeddings not propagating to decoder.embed_tokens." 
+ "reason": "Serialization inconsistency of tied weights is different from resize_token_embeddings not affecting decoder.embed_tokens." }, { "left": "issue:43653", "right": "issue:44162", "accept": false, - "reason": "Different models and symptoms: BigBird tokenizer special-token registration vs ESM2 breakage." + "reason": "BigBirdTokenizer special-token registration bug is unrelated to ESM2 breakage." }, { "left": "issue:44368", "right": "issue:44977", "accept": false, - "reason": "Different bugs: tie_word_embeddings warning noise vs flash-attention generation failure." + "reason": "Tie_word_embeddings warning during LoRA fine-tuning is not the same as flash-attention generation failure." }, { "left": "issue:44933", "right": "issue:45459", "accept": false, - "reason": "Different tokenizer/import problems: nonexistent image_utils import vs protobuf decode error masking." + "reason": "Missing import from image_utils and protobuf decode-error masking are different import/exception issues." }, { - "left": "issue:43673", - "right": "issue:44734", + "left": "issue:43441", + "right": "issue:43606", "accept": false, - "reason": "Both are generation/cache issues, but they affect different code paths and concrete failures." + "reason": "FlashAttention failure in Ministral-3 is unrelated to Bark-small CPU offload device mismatch." }, { - "left": "issue:43441", - "right": "issue:43606", + "left": "issue:43673", + "right": "issue:44734", "accept": false, - "reason": "Unrelated model/backend failures: FlashAttention on Ministral-3 vs CPU offload device mismatch on bark-small." + "reason": "Missing GenerationMixin cache in chunked_prefill and server KV-cache tensor indexing are both cache-related but not the same concrete bug." }, { "left": "issue:44869", "right": "issue:45356", "accept": false, - "reason": "Different tokenizer regressions: Whisper timestamp decode crash vs Kimi-K2.5 codec/fix_mistral_regex regression." 
+ "reason": "Whisper trailing replacement-character crash is unrelated to Kimi-K2.5 codec handling." }, { "left": "issue:43232", "right": "issue:43673", "accept": false, - "reason": "Both involve generation internals, but sync_gpus/_update_model_kwargs and chunked_prefill cache missing are distinct bugs." + "reason": "Generation kwargs sync_gpus issue and missing cache in chunked_prefill are separate generation bugs." }, { "left": "issue:43931", "right": "issue:44368", "accept": false, - "reason": "Different issues: weight-shape mismatch loading Qwen3-VL-30B vs tied-weights warning behavior." + "reason": "Qwen3-VL weight-shape mismatch is unrelated to tie_word_embeddings warnings." }, { "left": "issue:44297", "right": "issue:44623", "accept": false, - "reason": "Both are save_pretrained bugs, but tokenizer_class mismatch and processor missing files are separate failures." + "reason": "Tokenizer save_pretrained class mismatch and processor save_pretrained missing files are different serialization defects." }, { - "left": "issue:43673", - "right": "issue:45278", + "left": "issue:43646", + "right": "issue:44162", "accept": false, - "reason": "Too broad: one specific cache bug vs a general report of multiple import errors after upgrade." + "reason": "Generic custom model initialization breakage does not match the ESM2-specific failure." }, { - "left": "issue:43646", - "right": "issue:44162", + "left": "issue:43673", + "right": "issue:45278", "accept": false, - "reason": "Different regressions: custom model initialization breakage vs ESM2-specific breakage." + "reason": "Generation cache bug is not the same as broad import errors after version upgrade." }, { "left": "issue:43381", "right": "issue:45292", "accept": false, - "reason": "No overlap in underlying bug: eval-mode gradient checkpointing vs embedding resizing." + "reason": "Gradient checkpointing in eval mode is unrelated to resize_token_embeddings behavior." 
}, { "left": "issue:43381", "right": "issue:45335", "accept": false, - "reason": "No overlap in underlying bug: eval-mode gradient checkpointing vs t5gemma embedding resize propagation." + "reason": "Gradient checkpointing in eval mode is unrelated to t5gemma embedding resizing." }, { "left": "issue:42673", "right": "issue:43704", "accept": true, - "reason": "Exact duplicate title and same underlying bug: Qwen3ForCausalLM VRAM leak with multiple dataloader threads." - }, - { - "left": "issue:28282", - "right": "issue:30990", - "accept": false, - "reason": "Different symptoms: missing PyTorch import error vs Sentence Transformers hanging on load." + "reason": "Same Qwen3ForCausalLM VRAM leak, same title, same symptom, and the later issue is a duplicate report." }, { "left": "issue:44361", "right": "issue:44749", "accept": false, - "reason": "Different bugs: MLukeTokenizer AttributeError vs performance slowdown after upgrading Transformers." - }, - { - "left": "issue:44077", - "right": "issue:45003", - "accept": false, - "reason": "Unrelated: patchtsmixer post_init policy vs unsafe sys.modules access." - }, - { - "left": "issue:44749", - "right": "issue:45310", - "accept": false, - "reason": "Different issues: global performance slowdown vs Qwen3.5 MoE from_pretrained error." - }, - { - "left": "issue:45230", - "right": "issue:45397", - "accept": false, - "reason": "Insufficient evidence of sameness; generic bug report versus gemma-4 zero3 from_pretrained failure." - }, - { - "left": "issue:44038", - "right": "issue:44623", - "accept": false, - "reason": "Different model/configuration failures: Qwen3-VL-Moe bug vs processor.save_pretrained missing files." - }, - { - "left": "issue:43475", - "right": "issue:45381", - "accept": false, - "reason": "Both are multimodal/video-related, but they involve different models and different tensor/attribute problems." 
- }, - { - "left": "issue:43475", - "right": "issue:43643", - "accept": false, - "reason": "Different areas: SAM3 vision encoder attribute error vs AutoConfig trust_remote_code missing fields." - }, - { - "left": "issue:43701", - "right": "issue:44749", - "accept": false, - "reason": "Different bugs: checkpoint resume key mismatch vs performance regression after upgrade." - }, - { - "left": "issue:45081", - "right": "issue:45103", - "accept": false, - "reason": "Different AttributeErrors in unrelated subsystems: tokenizer regex patching vs auto_docstring annotation handling." + "reason": "MLukeTokenizer AttributeError and Chinese import/filtering slowdown are unrelated." } ] }, "evaluator_result": { "accept": true, - "feedback": "The canonical pair selection is well grounded: issue:42673 and issue:43704 are exact title duplicates, and the rest of the cluster is treated conservatively as non-duplicates. The soft-edge rejections are appropriately cautious and do not overclaim sameness." + "feedback": "The summary is well grounded in the packet. The canonical duplicate choice for issue:42673 vs issue:43704 is supported by identical titles and the fuller earlier report, and the soft-edge verdicts stay conservative overall. I don\u2019t see any overstated evidence that would require rejection." 
}, "evaluator_used": true, "retried": false, @@ -10090,310 +9927,325 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ + "issue:28282", "issue:30990", "issue:35141", "issue:36010", "issue:41628", - "issue:42175", "issue:42222", "issue:42371", "issue:42831", "issue:42898", "issue:42994", "issue:43232", - "issue:43278", "issue:43377", "issue:43475", "issue:43479", + "issue:43482", "issue:43525", + "issue:43643", "issue:43673", - "issue:43784", - "issue:43824", + "issue:43701", "issue:43873", - "issue:43976", "issue:44038", + "issue:44077", "issue:44079", "issue:44164", "issue:44188", "issue:44222", "issue:44242", "issue:44351", - "issue:44373", "issue:44568", "issue:44623", "issue:44661", - "issue:44734", + "issue:44749", "issue:44792", "issue:44869", "issue:44933", "issue:44945", - "issue:44964", + "issue:45003", + "issue:45081", "issue:45092", "issue:45103", - "issue:45137", "issue:45200", + "issue:45230", "issue:45310", "issue:45362", "issue:45375", + "issue:45381", "issue:45397", "issue:45478" ], "soft_pairs": [ + "issue:28282|issue:30990", + "issue:44077|issue:45003", + "issue:44749|issue:45310", + "issue:45230|issue:45397", + "issue:44038|issue:44623", + "issue:42994|issue:43482", + "issue:43475|issue:45381", + "issue:43475|issue:43643", + "issue:43701|issue:44749", + "issue:45081|issue:45103", "issue:42371|issue:45092", "issue:45375|issue:45478", "issue:41628|issue:44933", + "issue:45310|issue:45375", "issue:42994|issue:43525", "issue:43475|issue:44661", - "issue:45310|issue:45375", - "issue:42222|issue:45200", "issue:43873|issue:44945", + "issue:42222|issue:45200", "issue:42831|issue:44222", "issue:45362|issue:45375", "issue:45310|issue:45397", - "issue:41628|issue:44351", "issue:43232|issue:44188", + "issue:41628|issue:44351", "issue:44222|issue:45310", "issue:42994|issue:44164", "issue:43232|issue:44792", "issue:44038|issue:45310", - 
"issue:36010|issue:41628", "issue:30990|issue:35141", - "issue:43377|issue:44242", "issue:43479|issue:44079", + "issue:43377|issue:44242", + "issue:36010|issue:41628", "issue:43673|issue:44188", "issue:42898|issue:44568", - "issue:43479|issue:44869", - "issue:44964|issue:45137", - "issue:42175|issue:44933", - "issue:43976|issue:44933", - "issue:43784|issue:43824", - "issue:43278|issue:45137", - "issue:44623|issue:45310", - "issue:44188|issue:44373", - "issue:44734|issue:45103" + "issue:43479|issue:44869" ], - "prepared_review_unit_hash": "3e75fdb3c44d0655980a2faa593b19df3f0f07d43b34f74b87c3cf122b3e7650", + "prepared_review_unit_hash": "2e1580dc417ca862b9b6712231ac31fa5693f1a23d2fe69909c16c95c79e8d50", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { - "node_count": 47, - "item_count": 47, - "soft_pair_count": 32, - "serialized_chars": 36335, - "estimated_input_tokens": 9084, - "estimated_eval_tokens": 18424 + "node_count": 48, + "item_count": 48, + "soft_pair_count": 34, + "serialized_chars": 37538, + "estimated_input_tokens": 9385, + "estimated_eval_tokens": 19026 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is mostly heterogeneous and the provided soft edges are not true duplicates. 
The only clearly repeated concrete bug theme in the set is the Qwen3.5-MoE `from_pretrained` regression, which should be the representative issue, but it is not one of the soft-edge pairs shown.", - "confidence": 0.83, - "canonical_issue_reason": "Issue 45310 is the best representative of the concrete Qwen3.5-MoE `from_pretrained` regression: it is specific, externally reproducible, and the earlier report of that failure mode. Related later reports appear to be the same underlying problem.", + "summary": "This cluster is not a duplicate set: it mixes many unrelated import, config, generation, tokenizer, quantization, and model-specific regressions. No soft pair is close enough to merge as the same underlying bug/change.", + "confidence": 0.96, + "canonical_issue_reason": "No single issue is a safe canonical duplicate because the items span distinct failure modes and different code paths.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 45310 is the strongest canonical candidate because it names the exact failing code path and model family, making it the most useful anchor for deduping nearby reports.", + "best_issue_reason": "issue:45310 is the most narrowly scoped, concrete loader regression in the set, so it is the best standalone representative, but it does not subsume the others.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:28282", + "right": "issue:30990", + "accept": false, + "reason": "PyTorch-missing import error vs Sentence Transformers hanging during load; different symptoms and code paths." + }, + { + "left": "issue:44077", + "right": "issue:45003", + "accept": false, + "reason": "Optional post_init policy change vs unsafe sys.modules access; unrelated bugs." + }, + { + "left": "issue:44749", + "right": "issue:45310", + "accept": false, + "reason": "Data filtering slowdown after upgrade vs Qwen3.5 from_pretrained failure; not the same issue." 
+ }, + { + "left": "issue:45230", + "right": "issue:45397", + "accept": false, + "reason": "Generic bug report vs Gemma-4 zero3 from_pretrained failure; no shared concrete defect." + }, + { + "left": "issue:44038", + "right": "issue:44623", + "accept": false, + "reason": "Qwen3-VL-Moe loading bug vs processor.save_pretrained missing files; different subsystems." + }, + { + "left": "issue:42994", + "right": "issue:43482", + "accept": false, + "reason": "Quantized model saving failure vs Qwen2.5-GGUF loading failure; unrelated." + }, + { + "left": "issue:43475", + "right": "issue:45381", + "accept": false, + "reason": "SAM 3 video attribute error vs qwen2.5-vl video position-id bug; different models and paths." + }, + { + "left": "issue:43475", + "right": "issue:43643", + "accept": false, + "reason": "SAM 3 VisionEncoder output bug vs AutoConfig trust_remote_code field loss; unrelated." + }, + { + "left": "issue:43701", + "right": "issue:44749", + "accept": false, + "reason": "resume_from_checkpoint key mismatch vs performance regression in data filtering; not the same bug." + }, + { + "left": "issue:45081", + "right": "issue:45103", + "accept": false, + "reason": "Mistral tokenizer patch crash vs auto_docstring annotation crash; unrelated code paths." + }, { "left": "issue:42371", "right": "issue:45092", "accept": false, - "reason": "Different bugs: TF32 behavior settings vs a meta-init loading incompatibility for old InternVL2 checkpoints." + "reason": "TF32 settings warning vs remote-code meta-init incompatibility; different problems." }, { "left": "issue:45375", "right": "issue:45478", "accept": false, - "reason": "Related Qwen3.5-MoE area, but not the same concrete failure: missing config field vs a `from_pretrained` error." + "reason": "Missing vision-config field in strict handling vs Qwen3.5 Moe from_pretrained error; related family but not the same defect." 
}, { "left": "issue:41628", "right": "issue:44933", "accept": false, - "reason": "Both are import-related, but they concern different missing symbols and likely different regressions." + "reason": "AutoImageProcessor export error vs missing image_utils import; similar theme, different symbols and fixes." + }, + { + "left": "issue:45310", + "right": "issue:45375", + "accept": false, + "reason": "Both concern Qwen3.5-family models, but one is a from_pretrained loader failure and the other is a missing config field; not mergeable as one bug." }, { "left": "issue:42994", "right": "issue:43525", "accept": false, - "reason": "Saving quantized models is a different failure mode from `Llama4Config` missing `pad_token_id`." + "reason": "Quantized save failure vs Llama4Config missing pad_token_id; unrelated." }, { "left": "issue:43475", "right": "issue:44661", "accept": false, - "reason": "SAM3 vision encoder attribute error vs tokenizer-mapping failure in model registration; unrelated code paths." + "reason": "SAM 3 attribute error vs add-new-model-like/tokenizer mapping failure; no shared code path." }, { - "left": "issue:45310", - "right": "issue:45375", + "left": "issue:43873", + "right": "issue:44945", "accept": false, - "reason": "Both mention Qwen3.5 MoE, but one is a `from_pretrained` error and the other is a missing config field; not the same bug." + "reason": "Quantization offloading issue vs pipeline-parallel output mismatch; different runtime behaviors." }, { "left": "issue:42222", "right": "issue:45200", "accept": false, - "reason": "Different models and symptoms: vitpose breakage vs Gemma 4 missing default `mm_token_type_ids`." - }, - { - "left": "issue:43873", - "right": "issue:44945", - "accept": false, - "reason": "Quantization/offloading behavior is not the same as incorrect LLM output under pipeline parallelism." + "reason": "vitpose model breakage vs Gemma 4 token-type defaulting; unrelated model families." 
}, { "left": "issue:42831", "right": "issue:44222", "accept": false, - "reason": "FineGrainedFP8 accuracy regression and FP8 `save_pretrained`/MoE issues are distinct." + "reason": "FP8 accuracy regression vs FP8 save_pretrained moe; different failure modes." }, { "left": "issue:45362", "right": "issue:45375", "accept": false, - "reason": "One is a chat/runtime crash for Qwen3.5-35B; the other is a config field omission for Qwen3.5-Moe vision." + "reason": "Qwen3.5-35B chat crash vs missing deepstack_visual_indexes field; not the same bug." }, { "left": "issue:45310", "right": "issue:45397", "accept": false, - "reason": "Both involve model loading, but they are different models and different failure causes." + "reason": "Qwen3.5 Moe from_pretrained error vs Gemma-4 zero3 from_pretrained issue; unrelated models." }, { - "left": "issue:41628", - "right": "issue:44351", + "left": "issue:43232", + "right": "issue:44188", "accept": false, - "reason": "Import errors, but for different APIs (`AutoImageProcessor` vs `HybridCache`), so not the same underlying bug." + "reason": "Generation kwargs update bug vs torch.compile attention-kernel divergence; different code paths." }, { - "left": "issue:43232", - "right": "issue:44188", + "left": "issue:41628", + "right": "issue:44351", "accept": false, - "reason": "Generation kwargs handling after `sync_gpus` is unrelated to attention-kernel divergence under `torch.compile`." + "reason": "AutoImageProcessor import error vs HybridCache import error; both imports, but not the same missing symbol or fix." }, { "left": "issue:44222", "right": "issue:45310", "accept": false, - "reason": "FP8 MoE saving and Qwen3.5-MoE loading are different code paths and symptoms." + "reason": "FP8 save_pretrained moe vs Qwen3.5 Moe from_pretrained error; unrelated." }, { "left": "issue:42994", "right": "issue:44164", "accept": false, - "reason": "Quantized model saving failure is not the same as `extra_state` handling in save/from_pretrained." 
+ "reason": "Quantized saving failure vs extra_state save/from_pretrained handling; different persistence bugs." }, { "left": "issue:43232", "right": "issue:44792", "accept": false, - "reason": "Generation cache update bug vs failed janus test case; no shared concrete failure." + "reason": "Generation sync_gpus kwargs bug vs janus test failure; no shared underlying defect." }, { "left": "issue:44038", "right": "issue:45310", "accept": false, - "reason": "Qwen3/VL-Moe issue and Qwen3.5-MoE loading error are different regressions." + "reason": "Qwen3-VL-Moe bug vs Qwen3.5 Moe from_pretrained error; same broad family, but not the same concrete failure." }, { - "left": "issue:36010", - "right": "issue:41628", + "left": "issue:30990", + "right": "issue:35141", "accept": false, - "reason": "Both are import errors, but they refer to different missing imports and different package breakages." + "reason": "Loading stall vs embedding resizing/post_init reinitialization; unrelated." }, { - "left": "issue:30990", - "right": "issue:35141", + "left": "issue:43479", + "right": "issue:44079", "accept": false, - "reason": "Sentence Transformers loading hang and embedding reinit on resize are unrelated." + "reason": "Phi4Multimodal config defaults vs ModelOutput key assignment; different object semantics." }, { "left": "issue:43377", "right": "issue:44242", "accept": false, - "reason": "MIMI padding-mask mismatch vs load-balancing loss omission; different bugs." + "reason": "MIMI batched-vs-single padding-mask bug vs MoE load-balancing loss omission; unrelated." }, { - "left": "issue:43479", - "right": "issue:44079", + "left": "issue:36010", + "right": "issue:41628", "accept": false, - "reason": "Different model config/state bugs: default multimodal config initialization vs `ModelOutput` key assignment." + "reason": "GenerationMixin import missing vs AutoImageProcessor import missing; both are import regressions, but they concern different exports and fixes." 
}, { "left": "issue:43673", "right": "issue:44188", "accept": false, - "reason": "Missing `GenerationMixin` cache in chunked prefill and attention-kernel branching divergence are separate generation/runtime issues." + "reason": "GenerationMixin cache missing in chunked prefill vs torch.compile branching divergence; not the same code-path problem." }, { "left": "issue:42898", "right": "issue:44568", "accept": false, - "reason": "Tokenizer cleanup behavior changes and missing BOS/EOS addition are different tokenizer regressions." + "reason": "v5 clean_up_tokenization_spaces behavior change vs missing BOS/EOS when add_special_tokens=True; different tokenizer regressions." }, { "left": "issue:43479", "right": "issue:44869", "accept": false, - "reason": "Multimodal config initialization and Whisper word-timestamp decode crash are unrelated." - }, - { - "left": "issue:44964", - "right": "issue:45137", - "accept": false, - "reason": "Phi-4 multimodal loading failure is not the same as a DeepSpeed ZeRO3 deque underflow." - }, - { - "left": "issue:42175", - "right": "issue:44933", - "accept": false, - "reason": "Backend extras installation and nonexistent image_utils import are unrelated packaging/import problems." - }, - { - "left": "issue:43976", - "right": "issue:44933", - "accept": false, - "reason": "Python version compatibility for Transformers 5.1.0 is unrelated to the missing image_utils import." - }, - { - "left": "issue:43784", - "right": "issue:43824", - "accept": false, - "reason": "Different missing imports in sentence-transformers/latest transformers; not the same symbol or code path." - }, - { - "left": "issue:43278", - "right": "issue:45137", - "accept": false, - "reason": "Embedding dtype drift during evaluate vs ZeRO3 deque failure are unrelated." - }, - { - "left": "issue:44623", - "right": "issue:45310", - "accept": false, - "reason": "Missing files in `processor.save_pretrained(...)` is a serialization bug, not the Qwen3.5-MoE load regression." 
- }, - { - "left": "issue:44188", - "right": "issue:44373", - "accept": false, - "reason": "Attention kernel divergence under `torch.compile` is unrelated to a wrong docstring for `position_ids`." - }, - { - "left": "issue:44734", - "right": "issue:45103", - "accept": false, - "reason": "Serve KV-cache continuation indexing crash is unrelated to the auto-docstring crash with future annotations." + "reason": "Config initialization bug vs Whisper word timestamp decode crash; unrelated." } ] }, @@ -10415,27 +10267,30 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:22355", "issue:28282", - "issue:30990", - "issue:38175", "issue:41628", + "issue:42175", "issue:42757", "issue:42831", "issue:42898", + "issue:43278", "issue:43295", "issue:43479", "issue:43644", "issue:43723", + "issue:43784", + "issue:43824", "issue:43874", + "issue:43976", "issue:44060", "issue:44077", + "issue:44188", "issue:44206", "issue:44222", "issue:44242", - "issue:44261", "issue:44263", "issue:44295", "issue:44297", @@ -10445,15 +10300,14 @@ "issue:44448", "issue:44483", "issue:44484", - "issue:44485", "issue:44514", "issue:44568", "issue:44623", + "issue:44734", "issue:44779", - "issue:44871", - "issue:44913", + "issue:44933", + "issue:44964", "issue:44977", - "issue:44991", "issue:45092", "issue:45103", "issue:45125", @@ -10461,12 +10315,19 @@ "issue:45276", "issue:45310", "issue:45356", - "issue:45357", "issue:45406", "issue:45459" ], "soft_pairs": [ + "issue:44964|issue:45137", + "issue:43976|issue:44933", + "issue:42175|issue:44933", + "issue:43784|issue:43824", + "issue:43278|issue:45137", + "issue:44623|issue:45310", + "issue:44188|issue:44373", "issue:44060|issue:45125", + "issue:44734|issue:45103", "issue:42898|issue:45356", "issue:43723|issue:44568", "issue:45137|issue:45276", @@ -10476,273 +10337,262 @@ "issue:42898|issue:44448", "issue:41628|issue:45459", 
"issue:44222|issue:44297", + "issue:43479|issue:43644", "issue:42898|issue:43295", "issue:44442|issue:45459", - "issue:43479|issue:43644", "issue:44222|issue:44623", - "issue:44206|issue:45103", "issue:42831|issue:44623", + "issue:44206|issue:45103", "issue:22355|issue:28282", - "issue:41628|issue:42757", "issue:44297|issue:45310", + "issue:41628|issue:42757", "issue:44483|issue:45406", "issue:44222|issue:44484", "issue:44263|issue:44360", "issue:43723|issue:44977", "issue:44263|issue:45092", "issue:44514|issue:45103", - "issue:44295|issue:44623", - "issue:43479|issue:44913", - "issue:43723|issue:44871", - "issue:43723|issue:44991", - "issue:44261|issue:45092", - "issue:30990|issue:38175", - "issue:43723|issue:44442", - "issue:44373|issue:44485", - "issue:45092|issue:45357" + "issue:44295|issue:44623" ], - "prepared_review_unit_hash": "a32047fe00b5e44992835cc56dc3f556d0a18f3bdb962a695d8df0c97fbd8b7b", + "prepared_review_unit_hash": "44cde44f61b66aba35f1964e9338ffb340a6e9acfd71528fb171ec472ba92471", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { - "node_count": 47, - "item_count": 47, + "node_count": 48, + "item_count": 48, "soft_pair_count": 34, - "serialized_chars": 37227, - "estimated_input_tokens": 9307, - "estimated_eval_tokens": 18870 + "serialized_chars": 37829, + "estimated_input_tokens": 9458, + "estimated_eval_tokens": 19172 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly a heterogeneous mix of unrelated Transformers issues. 
The only clear duplicate-style subcluster is the AutoTokenizer loading failures; most other soft pairs are same subsystem or same version but different underlying bugs and should stay separate.", - "confidence": 0.79, - "canonical_issue_reason": "issue:43723 is the best umbrella issue for the tokenizer-loading regression subcluster: it is generic, v5-specific, and closest to the model-specific AutoTokenizer failures among the candidates.", + "summary": "This cluster is mostly a loose semantic neighborhood of unrelated Transformers issues, not true duplicates. The shared wording (tokenizer, save_pretrained, import, v5 regression, multimodal) is superficial and usually points to different code paths or different models/components.", + "confidence": 0.96, + "canonical_issue_reason": "issue:42898 is the broadest, most general regression in the set and has the clearest cross-cutting impact; it is also more central than the highly model-specific or packaging-only reports.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43723 best represents the strongest duplicate candidate set and is the broadest, most reusable issue for the AutoTokenizer.from_pretrained failures.", + "best_issue_reason": "issue:42898 is the best representative issue for the cluster because it describes a concrete Transformers v5 behavior regression with wider applicability than the other narrowly scoped bugs.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:44964", + "right": "issue:45137", + "accept": false, + "reason": "Different problems: Phi-4 multimodal loading vs a DeepSpeed ZeRO3 deque/indexing failure." + }, + { + "left": "issue:43976", + "right": "issue:44933", + "accept": false, + "reason": "Python version/backend support issue vs an import name missing from image_utils; unrelated." 
+ }, + { + "left": "issue:42175", + "right": "issue:44933", + "accept": false, + "reason": "Packaging/backend dependency omission vs missing image_utils import; not the same bug." + }, + { + "left": "issue:43784", + "right": "issue:43824", + "accept": false, + "reason": "Sentence-transformers import NameError vs missing Qwen2.5-VL class export; different failure modes." + }, + { + "left": "issue:43278", + "right": "issue:45137", + "accept": false, + "reason": "Training/eval dtype mismatch vs DeepSpeed ZeRO3 empty deque error; unrelated code paths." + }, + { + "left": "issue:44623", + "right": "issue:45310", + "accept": false, + "reason": "Processor save_pretrained file emission bug vs Qwen3.5 MoE from_pretrained regression." + }, + { + "left": "issue:44188", + "right": "issue:44373", + "accept": false, + "reason": "Attention kernel divergence under torch.compile vs a docstring issue; not a duplicate." + }, { "left": "issue:44060", "right": "issue:45125", "accept": false, - "reason": "Both are Qwen3 MoE-related, but one is a tied-weights warning and the other is a missing _tp_plan attribute. Different bugs and fixes." + "reason": "Incorrect tied-weights warning vs missing _tp_plan for tensor parallelism; separate model bugs." + }, + { + "left": "issue:44734", + "right": "issue:45103", + "accept": false, + "reason": "Serving KV-cache indexing crash vs auto_docstring crash from future annotations; different subsystems." }, { "left": "issue:42898", "right": "issue:45356", "accept": false, - "reason": "Both involve tokenizer regressions, but one is clean_up_tokenization_spaces behavior and the other is Kimi-K2.5 codec/warning handling. Not the same underlying issue." + "reason": "Tokenizer cleanup behavior change vs Kimi codec handling / warning regression; not the same underlying bug." 
}, { "left": "issue:43723", "right": "issue:44568", "accept": false, - "reason": "Both are tokenizer-related, but one is a load failure and the other is add_special_tokens not adding BOS/EOS. Different code paths." + "reason": "AutoTokenizer loading failure vs BOS/EOS not being added; both tokenizer-related but distinct." }, { "left": "issue:45137", "right": "issue:45276", "accept": false, - "reason": "DeepSpeed ZeRO3 deque errors and gemma4 resize_token_embeddings behavior are unrelated bugs." + "reason": "ZeRO3 deque/index failure vs resize_token_embeddings not updating Gemma4 embeddings; unrelated." }, { "left": "issue:42898", "right": "issue:44779", "accept": false, - "reason": "Both are tokenizer regressions in v5, but the reported failure modes differ substantially and likely need different fixes." + "reason": "clean_up_tokenization_spaces regression vs Deepseek tokenizer output regression; different tokenizer bugs." }, { "left": "issue:43874", "right": "issue:45092", "accept": false, - "reason": "Both are multimodal/model compatibility issues, but one is a missing image-patch method and the other is meta-init incompatibility. Not the same bug." + "reason": "Missing image-patch method vs remote-code/meta-init incompatibility; different multimodal issues." }, { "left": "issue:44077", "right": "issue:44242", "accept": false, - "reason": "Different MoE/TSMixer behaviors: optional post_init vs missing load-balancing loss when output_router_logits=False." + "reason": "patchtsmixer post_init API restriction vs MoE load-balancing loss omission; unrelated." }, { "left": "issue:42898", "right": "issue:44448", "accept": false, - "reason": "Both mention v4/v5 tokenizer/output differences, but one is a spacing cleanup regression and the other is Pegasus output drift. Different underlying issues." + "reason": "Tokenization cleanup behavior change vs Pegasus output mismatch; not the same defect." 
}, { "left": "issue:41628", "right": "issue:45459", "accept": false, - "reason": "An AutoImageProcessor import failure and a protobuf-related tokenizer error-hiding issue are unrelated." + "reason": "Missing AutoImageProcessor import vs protobuf-related tokenizer error masking; different import paths." }, { "left": "issue:44222", "right": "issue:44297", "accept": false, - "reason": "FP8 save_pretrained behavior and tokenizer_class mismatch in tokenizer_config.json are different save paths and different failures." + "reason": "FP8 save_pretrained/MoE issue vs tokenizer_class metadata mismatch; unrelated." + }, + { + "left": "issue:43479", + "right": "issue:43644", + "accept": false, + "reason": "Phi4MultimodalConfig default initialization bug vs non-persistent buffer junk in v5; different layers of the stack." }, { "left": "issue:42898", "right": "issue:43295", "accept": false, - "reason": "One is a tokenizer spacing regression; the other is a processor.tokenizer / image-passing regression. Different concrete bugs." + "reason": "Tokenization cleanup regression vs processor.tokenizer / image passing regression; separate processor changes." }, { "left": "issue:44442", "right": "issue:45459", "accept": false, - "reason": "FastSpeech2ConformerTokenizer loading and protobuf error masking are different tokenizer issues." + "reason": "FastSpeech2ConformerTokenizer loading failure vs hidden tokenizer errors when protobuf is absent; not a duplicate." }, { - "left": "issue:43479", - "right": "issue:43644", + "left": "issue:44222", + "right": "issue:44623", "accept": false, - "reason": "Phi4MultimodalConfig default initialization and non-persistent buffer junk are unrelated configuration/state bugs." + "reason": "FP8 save_pretrained/MoE bug vs processor.save_pretrained missing files; different save paths and objects." 
}, { - "left": "issue:44222", + "left": "issue:42831", "right": "issue:44623", "accept": false, - "reason": "Both mention save_pretrained, but one is FP8 MOE serialization and the other is missing processor files. Different artifacts and fixes." + "reason": "FineGrainedFP8 accuracy problem vs processor.save_pretrained file omission; unrelated." }, { "left": "issue:44206", "right": "issue:45103", "accept": false, - "reason": "Feature-extractor arg validation and auto_docstring crashes from future annotations are unrelated." - }, - { - "left": "issue:42831", - "right": "issue:44623", - "accept": false, - "reason": "FineGrainedFP8 accuracy issues and processor.save_pretrained missing files are different bugs." + "reason": "Unsupported center argument in feature extractor vs auto_docstring annotations crash; different failures." }, { "left": "issue:22355", "right": "issue:28282", "accept": false, - "reason": "Both are import errors, but one is missing transformers.onnx and the other is missing PyTorch. Different dependencies and failure modes." + "reason": "Missing transformers.onnx module import vs AutoModel requiring PyTorch; not the same import error." }, { - "left": "issue:41628", - "right": "issue:42757", + "left": "issue:44297", + "right": "issue:45310", "accept": false, - "reason": "Importing AutoImageProcessor and importing is_offline_mode from huggingface_hub are unrelated import problems." + "reason": "Tokenizer config metadata mismatch vs Qwen3.5 MoE from_pretrained error; separate issues." }, { - "left": "issue:44297", - "right": "issue:45310", + "left": "issue:41628", + "right": "issue:42757", "accept": false, - "reason": "Tokenizer save_pretrained class metadata mismatch and Qwen3.5 MoE from_pretrained error are different issues." + "reason": "AutoImageProcessor import failure vs is_offline_mode import failure from huggingface_hub; unrelated imports." 
}, { "left": "issue:44483", "right": "issue:45406", "accept": false, - "reason": "A chat-completions API request rejection and a Gemma4Processor missing _tokenizer crash are unrelated." + "reason": "chat/completions request handling vs Gemma4Processor missing _tokenizer; different serve failures." }, { "left": "issue:44222", "right": "issue:44484", "accept": false, - "reason": "FP8 save_pretrained behavior and the 50GB default max_shard_size question are not the same bug." + "reason": "FP8 save_pretrained/MoE regression vs max_shard_size default question; not a duplicate." }, { "left": "issue:44263", "right": "issue:44360", "accept": false, - "reason": "Both reference the DSA indexer, but one is torch.split return handling and the other is a missing ReLU. Different code changes." + "reason": "torch.split return values issue vs DSA indexer lacking ReLU; same area, but different concrete bug." }, { - "left": "issue:43723", - "right": "issue:44977", + "left": "issue:44295", + "right": "issue:45459", "accept": false, - "reason": "Tokenizer loading failures and Qwen3.5 flash-attention generation issues are unrelated." + "reason": "position_ids buffer read error vs protobuf import masking tokenizer errors; unrelated." }, { "left": "issue:44263", "right": "issue:45092", "accept": false, - "reason": "DSA indexer behavior and InternVL2 meta initialization compatibility are unrelated." + "reason": "GlmMoeDsaIndexer split behavior vs InternVL2 meta-initialization incompatibility; different components." }, { "left": "issue:44514", "right": "issue:45103", "accept": false, - "reason": "Qwen2_5_VLProcessor chat-template batching and docstring parsing with future annotations are unrelated." + "reason": "Batched apply_chat_template padding bug vs auto_docstring future-annotations crash; unrelated." }, { "left": "issue:44295", "right": "issue:44623", "accept": false, - "reason": "Buffer registration/read errors and processor.save_pretrained missing files are different problems." 
- }, - { - "left": "issue:43479", - "right": "issue:44913", - "accept": false, - "reason": "Phi4MultimodalConfig default config initialization and GPTNeoX rotary_pct reload behavior are unrelated config bugs." - }, - { - "left": "issue:43723", - "right": "issue:44871", - "accept": false, - "reason": "AutoTokenizer load failure and eos_token_id configuration inconsistency are different tokenizer issues." - }, - { - "left": "issue:43723", - "right": "issue:44991", - "accept": true, - "reason": "Both describe AutoTokenizer.from_pretrained failing to load a tokenizer in v5, so they plausibly share the same loading-regression code path." - }, - { - "left": "issue:44261", - "right": "issue:45092", - "accept": false, - "reason": "MLA layernorm epsilon precision and InternVL2 meta-init incompatibility are unrelated." - }, - { - "left": "issue:30990", - "right": "issue:38175", - "accept": false, - "reason": "Sentence Transformers loading hangs and SIGLIP2 zero-probability outputs are different failures." - }, - { - "left": "issue:43723", - "right": "issue:44442", - "accept": true, - "reason": "Both are AutoTokenizer.from_pretrained load failures in v5 and look like the same tokenizer-loading regression affecting different checkpoints." - }, - { - "left": "issue:44373", - "right": "issue:44485", - "accept": false, - "reason": "A docstring issue for position_ids and GLM-5 RoPE implementation are unrelated." - }, - { - "left": "issue:45092", - "right": "issue:45357", - "accept": false, - "reason": "InternVL2 meta-init incompatibility and Qwen3.5 save_pretrained visual-key serialization are different multimodal bugs." + "reason": "buffer registration/read error vs missing processor save files; different bugs." } ] }, - "evaluator_result": { - "accept": true, - "feedback": "The clustering summary is grounded in the packet and stays conservative. 
The AutoTokenizer loading issues are the only clearly supported duplicate-style subcluster, and the accepted soft edges are limited to closely related tokenizer-loading failures with matching v5 regression framing. The remaining pair rejections are appropriately cautious given the distinct titles and likely different failure modes." - }, - "evaluator_used": true, + "evaluator_result": null, + "evaluator_used": false, "retried": false, - "accepted_nontrivial_soft_edge": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -10757,12 +10607,11 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ - "issue:29127", "issue:30990", "issue:33357", - "issue:36296", + "issue:38175", "issue:39401", "issue:41628", "issue:42371", @@ -10772,324 +10621,282 @@ "issue:42907", "issue:42994", "issue:43066", - "issue:43116", "issue:43122", "issue:43232", "issue:43408", - "issue:43452", "issue:43475", + "issue:43479", "issue:43576", + "issue:43723", "issue:43784", "issue:43824", "issue:43937", "issue:43976", - "issue:43992", "issue:44038", "issue:44117", "issue:44261", "issue:44263", "issue:44295", "issue:44297", - "issue:44360", "issue:44373", + "issue:44442", "issue:44485", "issue:44488", "issue:44514", "issue:44521", - "issue:44625", "issue:44704", "issue:44743", - "issue:44779", - "issue:44829", + "issue:44871", + "issue:44913", "issue:44945", - "issue:45200", + "issue:44991", + "issue:45092", "issue:45245", "issue:45278", "issue:45290", - "issue:45310" + "issue:45310", + "issue:45357" ], "soft_pairs": [ + "issue:43479|issue:44913", + "issue:43723|issue:44871", + "issue:44373|issue:44485", + "issue:44261|issue:45092", + "issue:43723|issue:44991", + "issue:30990|issue:38175", + "issue:43723|issue:44442", + "issue:45092|issue:45357", "issue:42886|issue:42994", - "issue:41628|issue:43824", "issue:42831|issue:45310", - 
"issue:43576|issue:43784", + "issue:41628|issue:43824", "issue:42831|issue:44038", + "issue:43576|issue:43784", "issue:43122|issue:44521", "issue:42757|issue:45278", "issue:44263|issue:44373", - "issue:42886|issue:43066", "issue:44117|issue:44704", + "issue:42886|issue:43066", "issue:43122|issue:44514", "issue:30990|issue:33357", "issue:44704|issue:45290", "issue:43232|issue:45245", - "issue:42907|issue:44945", "issue:42371|issue:44263", + "issue:42907|issue:44945", "issue:43232|issue:44743", "issue:44295|issue:44297", "issue:43976|issue:44945", "issue:39401|issue:44488", "issue:42907|issue:43937", - "issue:43408|issue:43475", - "issue:44625|issue:44829", - "issue:29127|issue:30990", - "issue:41628|issue:45278", - "issue:44360|issue:44485", - "issue:43116|issue:45200", - "issue:43992|issue:44704", - "issue:39401|issue:44521", - "issue:43122|issue:44779", - "issue:44038|issue:44297", - "issue:44261|issue:44373", - "issue:42371|issue:42831", - "issue:44038|issue:44295", - "issue:36296|issue:43116", - "issue:42907|issue:43452" + "issue:43408|issue:43475" ], - "prepared_review_unit_hash": "14fdfd0c25cfba1ad6786fdb6f7adb07c2c40b6c02283c0c626fa2831046cac5", + "prepared_review_unit_hash": "09652ddf11c18f0946e8e7e087db6ed28985595bd08192c20c6f5ab5045f217e", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { - "node_count": 48, - "item_count": 48, - "soft_pair_count": 36, - "serialized_chars": 37990, - "estimated_input_tokens": 9498, - "estimated_eval_tokens": 19252 + "node_count": 47, + "item_count": 47, + "soft_pair_count": 30, + "serialized_chars": 35658, + "estimated_input_tokens": 8915, + "estimated_eval_tokens": 18086 }, "overflow_policy": 
"truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is highly heterogeneous: most items are unrelated bug reports spanning import errors, tokenizer behavior, generation, multimodal chat templates, and model-specific issues. There is no strong duplicate core; at best, one broad import-regression report can serve as a loose representative.", - "confidence": 0.95, - "canonical_issue_reason": "No true duplicate center exists. If one issue must represent the cluster, 45278 is the broadest umbrella because it captures the general post-upgrade import-regression theme seen in a few nearby reports, though the overall set is still too mixed to treat as one bug.", + "summary": "This cluster is mostly a loose theme of Transformers v5 regressions, but the paired items point to different concrete bugs, models, or API surfaces. I would not merge any of the soft-edge pairs as duplicates.", + "confidence": 0.93, + "canonical_issue_reason": "Issue 43723 is the best representative issue for the cluster because it captures the broad tokenizer-loading regression theme seen in several nearby reports, even though the overall cluster is not a true duplicate set.", "canonical_pr_reason": null, - "best_issue_reason": "45278 is the least-bad representative only because it is the most general and could subsume some import-related complaints; otherwise the cluster does not form a coherent issue duplicate set.", + "best_issue_reason": "43723 is the most generally representative issue title in this set for v5 loading regressions, especially around tokenizer / from_pretrained behavior.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:42886", - "right": "issue:42994", + "left": "issue:43479", + "right": "issue:44913", "accept": false, - "reason": "Different failures: offline tokenizer cache loading vs quantized model saving." 
+ "reason": "Different bugs: default sub-config initialization vs rotary_pct not persisting on reload." }, { - "left": "issue:41628", - "right": "issue:43824", + "left": "issue:43723", + "right": "issue:44871", + "accept": false, + "reason": "Both involve config/loading behavior, but one is a tokenizer-loading regression and the other is an eos_token_id inconsistency in Gemma-3." + }, + { + "left": "issue:44373", + "right": "issue:44485", + "accept": false, + "reason": "Docstring fix for position_ids is unrelated to GLM-5 RoPE implementation behavior." + }, + { + "left": "issue:44261", + "right": "issue:45092", + "accept": false, + "reason": "Different model families and code paths: MLA layernorm epsilon precision issue vs InternVL2 meta-init compatibility." + }, + { + "left": "issue:43723", + "right": "issue:44991", + "accept": false, + "reason": "Both are tokenizer load failures, but the reports describe different models and likely different root causes." + }, + { + "left": "issue:30990", + "right": "issue:38175", + "accept": false, + "reason": "Sentence Transformers loading hang and SIGLIP2 zero probabilities are unrelated." + }, + { + "left": "issue:43723", + "right": "issue:44442", + "accept": false, + "reason": "AutoTokenizer failing for a specific tokenizer is not the same as the broader tokenizer-loading regression in 43723." + }, + { + "left": "issue:45092", + "right": "issue:45357", + "accept": false, + "reason": "Both are Qwen-related, but one is meta initialization incompatibility and the other is save_pretrained writing wrong visual encoder keys." + }, + { + "left": "issue:42886", + "right": "issue:42994", "accept": false, - "reason": "Both are import errors, but they concern different missing symbols and different model APIs." + "reason": "Offline cache tokenizer loading and quantized model saving are different operations and failure modes." 
}, { "left": "issue:42831", "right": "issue:45310", "accept": false, - "reason": "Accuracy regression in FP8 vs a Qwen3.5 MoE from_pretrained error are different bugs." + "reason": "FineGrainedFP8 accuracy regression is not the same bug as Qwen3.5 from_pretrained loading failure." }, { - "left": "issue:43576", - "right": "issue:43784", + "left": "issue:41628", + "right": "issue:43824", "accept": false, - "reason": "Broken env command and sentence-transformers import NameError are unrelated code paths." + "reason": "Different missing imports for different symbols; same broad API-surface area, but not the same underlying bug." }, { "left": "issue:42831", "right": "issue:44038", "accept": false, - "reason": "FP8 accuracy issue is not the same as a Qwen3-VL-Moe loading problem." + "reason": "These are different Qwen/transformers issues with different symptoms and likely different fixes." + }, + { + "left": "issue:43576", + "right": "issue:43784", + "accept": false, + "reason": "transformers env command and sentence-transformers import NameError are unrelated." }, { "left": "issue:43122", "right": "issue:44521", "accept": false, - "reason": "Tokenizer offset-mapping differences and multimodal assistant-mask generation are different behaviors." + "reason": "Tokenization differences across versions and all-zero assistant masks are distinct behaviors in different code paths." }, { "left": "issue:42757", "right": "issue:45278", "accept": false, - "reason": "The latter is a broad import-regression report, but it is not clearly the same concrete missing import as this one." + "reason": "A missing huggingface_hub symbol and a broad set of import errors after upgrade are not the same concrete issue." }, { "left": "issue:44263", "right": "issue:44373", "accept": false, - "reason": "torch.split return behavior in an indexer is unrelated to a position_ids docstring problem." 
+ "reason": "torch.split return handling in GlmMoeDsaIndexer is unrelated to a wrong docstring for position_ids." }, { - "left": "issue:42886", - "right": "issue:43066", + "left": "issue:44117", + "right": "issue:44704", "accept": false, - "reason": "Offline cache loading failure is different from tokenizer decoder-type mismatch in v5." + "reason": "TOKENIZER_MAPPING_NAMES returning None and AutoProcessor kwargs forwarding are different loading bugs." }, { - "left": "issue:44117", - "right": "issue:44704", + "left": "issue:42886", + "right": "issue:43066", "accept": false, - "reason": "A tokenizer mapping None bug and missing kwargs in AutoProcessor are different from different call sites." + "reason": "Offline tokenizer cache loading is unrelated to wrong tokenizer decoder type in v5." }, { "left": "issue:43122", "right": "issue:44514", "accept": false, - "reason": "General tokenization differences are not the same as a batched apply_chat_template crash." + "reason": "General tokenization changes are not the same as batched chat template crash with padding=False." }, { "left": "issue:30990", "right": "issue:33357", "accept": false, - "reason": "Sentence-transformers loading hangs and a MacOS bus error are not the same underlying bug." + "reason": "Sentence Transformers loading hang and MacOS bus error on CLIP model are unrelated failures." }, { "left": "issue:44704", "right": "issue:45290", "accept": false, - "reason": "Missing kwargs forwarding is unrelated to assistant-message tool-call crashes in apply_chat_template." + "reason": "AutoProcessor kwargs forwarding and chat_template crash with tool-call messages affect different code paths." }, { "left": "issue:43232", "right": "issue:45245", "accept": false, - "reason": "Generation kwargs update bug and a category-limit runtime error are different issues." + "reason": "Generation kwargs update logic and category-cardinality runtime error are unrelated." 
}, { - "left": "issue:42907", - "right": "issue:44945", + "left": "issue:42371", + "right": "issue:44263", "accept": false, - "reason": "Saving dequantized models and incorrect pipeline-parallel outputs are different code paths." + "reason": "TF32 settings guidance and GlmMoeDsaIndexer split behavior are unrelated." }, { - "left": "issue:42371", - "right": "issue:44263", + "left": "issue:42907", + "right": "issue:44945", "accept": false, - "reason": "TF32 settings guidance is unrelated to the DSA indexer torch.split issue." + "reason": "Saving dequantized models and incorrect output under pipeline parallelism are different bugs." }, { "left": "issue:43232", "right": "issue:44743", "accept": false, - "reason": "Both touch generation/cache behavior, but one is about sync_gpus kwargs and the other about recurrent state reset; not the same bug." + "reason": "Both touch generation/caching, but one is a sync_gpus kwargs issue and the other is recurrent state reset in modular_qwen3_5.py." }, { "left": "issue:44295", "right": "issue:44297", "accept": false, - "reason": "Reading position_ids as a buffer and tokenizer_class mismatch in saved config are unrelated." + "reason": "Reading position_ids after buffer registration and tokenizer_class mismatch on save_pretrained are unrelated." }, { "left": "issue:43976", "right": "issue:44945", "accept": false, - "reason": "Python version compatibility and pipeline-parallel output corruption are different problems." + "reason": "Python version compatibility and pipeline-parallel output correctness are not the same issue." }, { "left": "issue:39401", "right": "issue:44488", "accept": false, - "reason": "Qwen3 tokenizer offset mapping and failure to load cjvt/sleng-bert are separate tokenizer issues." + "reason": "Offset_mapping bug in Qwen3 tokenizer and failure to load a specific model checkpoint are not the same underlying problem." 
}, { "left": "issue:42907", "right": "issue:43937", "accept": false, - "reason": "Dequantized model saving and invalid GenerationConfig for GLM-5 are unrelated." + "reason": "Quantized model saving and invalid GenerationConfig for GLM-5 are unrelated." }, { "left": "issue:43408", "right": "issue:43475", "accept": false, - "reason": "A config-type mismatch warning and a missing fpn_position_embeddings attribute are different SAM 3 Video bugs." - }, - { - "left": "issue:44625", - "right": "issue:44829", - "accept": false, - "reason": "num_labels propagation and flash_attention_3 training degeneration are unrelated." - }, - { - "left": "issue:29127", - "right": "issue:30990", - "accept": false, - "reason": "LayoutLMv3 error messaging and Sentence Transformers loading hang are unrelated." - }, - { - "left": "issue:41628", - "right": "issue:45278", - "accept": false, - "reason": "Both mention imports, but one is a specific missing symbol and the other is only a broad report of many import errors." - }, - { - "left": "issue:44360", - "right": "issue:44485", - "accept": false, - "reason": "A missing ReLU in DSA indexer and GLM-5 RoPE implementation are unrelated." - }, - { - "left": "issue:43116", - "right": "issue:45200", - "accept": false, - "reason": "Multi-label classification example behavior and Gemma 4 token-type defaults are different bugs." - }, - { - "left": "issue:43992", - "right": "issue:44704", - "accept": false, - "reason": "UMT5Encoder missing embed_tokens weights is unrelated to AutoProcessor kwargs forwarding." - }, - { - "left": "issue:39401", - "right": "issue:44521", - "accept": false, - "reason": "Tokenizer offset mapping and multimodal assistant masks are distinct failures." - }, - { - "left": "issue:43122", - "right": "issue:44779", - "accept": false, - "reason": "Version-dependent DeepSeek tokenizer output and Qwen tokenization differences are model-specific but not the same bug." 
- }, - { - "left": "issue:44038", - "right": "issue:44297", - "accept": false, - "reason": "Qwen3-VL-Moe loading issues and tokenizer save config mismatch are unrelated." - }, - { - "left": "issue:44261", - "right": "issue:44373", - "accept": false, - "reason": "MLA layer norm epsilon precision mismatch and a wrong docstring are not the same issue." - }, - { - "left": "issue:42371", - "right": "issue:42831", - "accept": false, - "reason": "TF32 API guidance and FineGrainedFP8 accuracy regression are unrelated." - }, - { - "left": "issue:44038", - "right": "issue:44295", - "accept": false, - "reason": "Qwen3-VL-Moe loading bugs and position_ids buffer access are unrelated." - }, - { - "left": "issue:36296", - "right": "issue:43116", - "accept": false, - "reason": "Tensor-parallel training bug and example-script multi-label output bug are different failure modes." - }, - { - "left": "issue:42907", - "right": "issue:43452", - "accept": false, - "reason": "Quantized model saving and gguf_file loading breaks are different loading/saving paths." + "reason": "Model-type warning on load and missing fpn_position_embeddings are different SAM 3 issues." 
} ] }, @@ -11111,318 +10918,297 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:22355", "issue:29127", - "issue:33453", + "issue:30990", + "issue:36296", "issue:38617", "issue:39401", + "issue:41628", "issue:42222", "issue:42371", "issue:42673", "issue:42757", + "issue:42831", "issue:42907", "issue:43064", "issue:43116", "issue:43122", "issue:43278", "issue:43441", - "issue:43450", - "issue:43493", + "issue:43452", "issue:43550", "issue:43673", "issue:43784", "issue:43824", - "issue:43825", - "issue:43827", - "issue:43874", - "issue:43906", - "issue:43931", "issue:43937", - "issue:43976", - "issue:43986", + "issue:43992", "issue:44038", - "issue:44075", "issue:44222", - "issue:44291", + "issue:44261", + "issue:44295", + "issue:44297", "issue:44351", + "issue:44360", + "issue:44373", "issue:44485", + "issue:44521", + "issue:44625", + "issue:44704", "issue:44779", + "issue:44829", "issue:44912", - "issue:44933", - "issue:44936", "issue:44945", "issue:44977", - "issue:45071", "issue:45072", - "issue:45083", + "issue:45200", "issue:45216", "issue:45278", "issue:45446", "issue:45478" ], "soft_pairs": [ + "issue:44625|issue:44829", + "issue:29127|issue:30990", + "issue:41628|issue:45278", + "issue:44360|issue:44485", + "issue:43116|issue:45200", + "issue:39401|issue:44521", + "issue:44038|issue:44297", + "issue:43122|issue:44779", + "issue:44261|issue:44373", + "issue:42371|issue:42831", + "issue:44038|issue:44295", + "issue:42907|issue:43452", + "issue:43992|issue:44704", + "issue:36296|issue:43116", "issue:44038|issue:44222", - "issue:43824|issue:44351", "issue:43122|issue:44977", - "issue:45216|issue:45478", "issue:43278|issue:44945", - "issue:42757|issue:44351", + "issue:45216|issue:45478", + "issue:43824|issue:44351", "issue:42907|issue:44222", + "issue:42757|issue:44351", "issue:42907|issue:44912", "issue:42222|issue:43441", 
"issue:43784|issue:45446", "issue:42673|issue:45072", "issue:43064|issue:43673", - "issue:43784|issue:44933", - "issue:38617|issue:45278", "issue:43550|issue:43784", - "issue:43937|issue:44485", + "issue:38617|issue:45278", "issue:22355|issue:29127", - "issue:43116|issue:43493", - "issue:42371|issue:43874", - "issue:44075|issue:44936", - "issue:44222|issue:45478", - "issue:43116|issue:43825", - "issue:44038|issue:45478", - "issue:43976|issue:45446", - "issue:39401|issue:44779", - "issue:43931|issue:44945", - "issue:33453|issue:39401", - "issue:45083|issue:45446", - "issue:43937|issue:44977", - "issue:42673|issue:43906", - "issue:43827|issue:45446", - "issue:44291|issue:45071", - "issue:43450|issue:43986" + "issue:43937|issue:44485" ], - "prepared_review_unit_hash": "dd3adb78f27fe6e2c5f0c7b266b785ea27c681b4f64a9ffc2f61303ffb617d41", + "prepared_review_unit_hash": "02fef8d29295dea7d9c150fa3e082e69e8aaee1c808fe7307d971b1d6117ea8d", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 33, - "serialized_chars": 37172, - "estimated_input_tokens": 9293, - "estimated_eval_tokens": 18842 + "soft_pair_count": 30, + "serialized_chars": 36239, + "estimated_input_tokens": 9060, + "estimated_eval_tokens": 18376 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This set is highly heterogeneous: most pairs are only loosely related by broad areas like imports, tokenizers, or Qwen/GLM model families, but not the same concrete bug. 
I found no soft-edge pair I\u2019d confidently merge as duplicates.", - "confidence": 0.34, - "canonical_issue_reason": "issue:45278 is the broadest umbrella in the set ('many import errors after update from 4.57.0 to 5.5.0') and is the best fit as a representative issue among otherwise mostly unrelated regressions.", + "summary": "No true duplicate pairs stand out here; this is a noisy mix of unrelated Transformers bugs. I selected the broadest umbrella issue as the best representative of the cluster.", + "confidence": 0.62, + "canonical_issue_reason": "Issue 45278 is the broadest umbrella in the set, covering multiple import errors after upgrading to 5.5.0, so it is the closest representative issue for this noisy cluster.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45278 is the most general and recent issue, with wording broad enough to cover a subset of the import-regression reports; the other issues are mostly narrower, model-specific, or symptom-specific.", + "best_issue_reason": "Issue 45278 is the most general and cluster-representative report; it can subsume several of the import-error style items better than the more specific bugs.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:44038", - "right": "issue:44222", - "accept": false, - "reason": "Both mention MoE models, but one is a Qwen3-VL-Moe flash-attention/generation bug and the other is an FP8 save_pretrained issue; different code paths and symptoms." - }, - { - "left": "issue:43824", - "right": "issue:44351", - "accept": false, - "reason": "Both are import errors, but they are for different missing symbols in different modules, so they do not look like the same underlying bug." - }, - { - "left": "issue:43122", - "right": "issue:44977", - "accept": false, - "reason": "Tokenizer regression vs flash-attention generation failure; different models, APIs, and failure modes." 
- }, - { - "left": "issue:45216", - "right": "issue:45478", + "left": "issue:44625", + "right": "issue:44829", "accept": false, - "reason": "Same model family/version window, but one is a save_pretrained checkpoint regression and the other is a from_pretrained load error; related, but not clearly the same concrete bug." + "reason": "Different model/task bugs: num_labels propagation vs flash-attention training collapse." }, { - "left": "issue:43278", - "right": "issue:44945", + "left": "issue:29127", + "right": "issue:30990", "accept": false, - "reason": "A dtype mismatch in train/eval versus an incorrect output under pipeline parallelism; too different to be the same bug." + "reason": "Different subsystems and symptoms: layoutlmv3 error messaging vs sentence-transformers loading hang." }, { - "left": "issue:42757", - "right": "issue:44351", + "left": "issue:41628", + "right": "issue:45278", "accept": false, - "reason": "Both are import failures, but from different packages and symbols; no clear shared root cause." + "reason": "Both are import-related, but one is a specific missing symbol and the other is a broad multi-import regression; not the same concrete bug." }, { - "left": "issue:42907", - "right": "issue:44222", + "left": "issue:44360", + "right": "issue:44485", "accept": false, - "reason": "Both involve saving quantized models, but they concern different quantization schemes and different model families." + "reason": "Unrelated topics: DSA indexer/ReLU vs GLM-5 RoPE implementation." }, { - "left": "issue:42907", - "right": "issue:44912", + "left": "issue:43116", + "right": "issue:45200", "accept": false, - "reason": "Saving a dequantized model and loading a model with MXFP4 are distinct behaviors and likely different bugs." + "reason": "Different functionality: example-script multi-label empty results vs Gemma 4 multimodal token-id defaults." 
}, { - "left": "issue:42222", - "right": "issue:43441", + "left": "issue:39401", + "right": "issue:44521", "accept": false, - "reason": "VitPose model breakage and Ministral flash-attention failure are unrelated model-specific regressions." + "reason": "Different tokenizer bugs: offset_mapping for Qwen3 vs multimodal assistant_masks in apply_chat_template." }, { - "left": "issue:43784", - "right": "issue:45446", + "left": "issue:44038", + "right": "issue:44297", "accept": false, - "reason": "Importing sentence-transformers fails on a missing name, while the other is a bad PyTorch version check for a different import path." + "reason": "One is a Qwen3-VL-Moe model issue; the other is tokenizer save metadata mismatch. Different bugs." }, { - "left": "issue:42673", - "right": "issue:45072", + "left": "issue:43122", + "right": "issue:44779", "accept": false, - "reason": "VRAM leak in Qwen3ForCausalLM multi-thread use versus dtype mismatches in unrelated models; not the same problem." + "reason": "Both mention tokenization regressions, but they affect different models and likely different code paths." }, { - "left": "issue:43064", - "right": "issue:43673", + "left": "issue:44261", + "right": "issue:44373", "accept": false, - "reason": "Training optimizer-state corruption with FSDP/PEFT versus generation cache missing during chunked prefill are separate subsystems." + "reason": "Precision/config bug vs a docstring issue; not the same underlying change." }, { - "left": "issue:43784", - "right": "issue:44933", + "left": "issue:42371", + "right": "issue:42831", "accept": false, - "reason": "Different missing imports in different helpers; no evidence of a shared underlying regression." + "reason": "TF32 API guidance and FineGrainedFP8 accuracy are unrelated." 
}, { - "left": "issue:38617", - "right": "issue:45278", + "left": "issue:44038", + "right": "issue:44295", "accept": false, - "reason": "45278 is a broad umbrella report, but 38617 is one specific missing import; this is not enough to treat them as the same bug." + "reason": "Different failures: Qwen3-VL-Moe compatibility vs position_ids buffer access." }, { - "left": "issue:43550", - "right": "issue:43784", + "left": "issue:42907", + "right": "issue:43452", "accept": false, - "reason": "torch.compile/SDPA failure versus a NameError on import; unrelated failures." + "reason": "Both involve save/load workflows, but one is dequantized MoE saving and the other is gguf_file loading with Auto* APIs." }, { - "left": "issue:43937", - "right": "issue:44485", + "left": "issue:43992", + "right": "issue:44704", "accept": false, - "reason": "Both mention GLM-5, but one is a generation config validation error and the other is a RoPE implementation discussion; not the same code-path." + "reason": "Different call sites and bugs: missing embed_tokens.weight in UMT5 vs kwargs not forwarded in AutoProcessor." }, { - "left": "issue:22355", - "right": "issue:29127", + "left": "issue:36296", + "right": "issue:43116", "accept": false, - "reason": "A missing module import and a layoutlmv3 box-validation error are unrelated." + "reason": "Tensor-parallel training bug is unrelated to the multi-label classification example issue." }, { - "left": "issue:43116", - "right": "issue:43493", + "left": "issue:44038", + "right": "issue:44222", "accept": false, - "reason": "A multi-label classification example bug and a SigLIP2 implementation discrepancy are unrelated." + "reason": "Both touch MoE/FP8 areas, but one is a general Qwen3-VL-Moe issue and the other is an FP8 save_pretrained bug; not clearly the same bug." 
}, { - "left": "issue:42371", - "right": "issue:43874", + "left": "issue:43122", + "right": "issue:44977", "accept": false, - "reason": "TF32 API guidance and a GLM46V image processor AttributeError are different topics." + "reason": "Both are tokenizer regressions in v5, but they concern different models and do not look like one concrete fix." }, { - "left": "issue:44075", - "right": "issue:44936", + "left": "issue:43278", + "right": "issue:44945", "accept": false, - "reason": "Unused SGD args in optimizer setup versus evaluate() failing after train() are different bugs." + "reason": "Embedding dtype mismatch during evaluate is unrelated to incorrect outputs under pipeline parallelism." }, { - "left": "issue:44222", + "left": "issue:45216", "right": "issue:45478", "accept": false, - "reason": "Both involve Qwen3.5 MoE and save/load, but the titles point to different failure modes and not clearly the same concrete defect." + "reason": "Same model family, but one is a save_pretrained regression and the other is a from_pretrained load error; different code paths." }, { - "left": "issue:43116", - "right": "issue:43825", + "left": "issue:43824", + "right": "issue:44351", "accept": false, - "reason": "Multi-label classification output bug versus pipeline() translation messaging after v5 removals; unrelated." + "reason": "Two separate import errors for different symbols; broad API breakage, but not the same underlying bug." }, { - "left": "issue:44038", - "right": "issue:45478", + "left": "issue:42907", + "right": "issue:44222", "accept": false, - "reason": "Both are Qwen3/Qwen3.5 MoE-related, but one is a flash-attention generation bug and the other is a loading error; too different." + "reason": "Both are save_pretrained-related MoE/quantization reports, but the model and failure modes differ too much to treat as duplicates." 
}, { - "left": "issue:43976", - "right": "issue:45446", + "left": "issue:42757", + "right": "issue:44351", "accept": false, - "reason": "Python-version support breakage and an AuxRequest version-check bug are not the same issue." + "reason": "Different missing imports from different packages/symbols." }, { - "left": "issue:39401", - "right": "issue:44779", + "left": "issue:42907", + "right": "issue:44912", "accept": false, - "reason": "Both are tokenizer correctness issues, but they affect different tokenizers and different regressions." + "reason": "Save/dequantize issues versus MXFP4 load fallback are different problems." }, { - "left": "issue:43931", - "right": "issue:44945", + "left": "issue:42222", + "right": "issue:43441", "accept": false, - "reason": "Model shape-mismatch loading error versus incorrect output under pipeline parallelism; separate problems." + "reason": "Vitpose model breakage is unrelated to Ministral-3 FlashAttention failures." }, { - "left": "issue:33453", - "right": "issue:39401", + "left": "issue:43784", + "right": "issue:45446", "accept": false, - "reason": "A generic tokenizer-loading regression and a Qwen3 offset_mapping bug are not clearly the same underlying defect." + "reason": "Importing sentence-transformers NameError is unrelated to the flex_attention version-check bug." }, { - "left": "issue:45083", - "right": "issue:45446", + "left": "issue:42673", + "right": "issue:45072", "accept": false, - "reason": "A Qwen3 Omni helper-function bug and an unrelated flex_attention version-check issue." + "reason": "VRAM leak in Qwen3ForCausalLM threads is unrelated to dtype mismatches in inference." }, { - "left": "issue:43937", - "right": "issue:44977", + "left": "issue:43064", + "right": "issue:43673", "accept": false, - "reason": "GLM-5 generation-config invalidity and Qwen3.5 flash-attention output problems are different model-specific bugs." 
+ "reason": "Different training/runtime areas: FSDP2 optimizer state corruption vs generation cache missing in chunked prefill." }, { - "left": "issue:42673", - "right": "issue:43906", + "left": "issue:43550", + "right": "issue:43784", "accept": false, - "reason": "A VRAM leak and an isolated reproduction of another issue are not enough to conclude duplication." + "reason": "torch.compile/SDPA model failure is unrelated to the sentence-transformers import NameError." }, { - "left": "issue:43827", - "right": "issue:45446", + "left": "issue:38617", + "right": "issue:45278", "accept": false, - "reason": "Docs still referencing pipeline() and a PyTorch version-check bug are unrelated." + "reason": "Both are import errors, but one is a specific missing internal symbol and the other is a broad upgrade regression; not a duplicate pair." }, { - "left": "issue:44291", - "right": "issue:45071", + "left": "issue:22355", + "right": "issue:29127", "accept": false, - "reason": "An init_empty_weights argument error and a PretrainedConfig type-check regression are different load-time issues." + "reason": "No overlap in bug area: ONNX import failure vs LayoutLMv3 error-message clarity." }, { - "left": "issue:43450", - "right": "issue:43986", + "left": "issue:43937", + "right": "issue:44485", "accept": false, - "reason": "A batched video-processor shape bug and a missing-torchvision import crash are different problems." + "reason": "GLM-5 generation config validation and GLM-5 RoPE implementation are related only by model name, not the same bug." 
} ] }, @@ -11444,311 +11230,296 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ + "issue:33453", "issue:38617", "issue:39401", "issue:41628", - "issue:42175", "issue:42222", "issue:42371", - "issue:42617", "issue:42673", - "issue:42994", - "issue:43012", - "issue:43066", - "issue:43097", - "issue:43122", + "issue:43116", "issue:43408", "issue:43421", - "issue:43441", - "issue:43452", + "issue:43450", + "issue:43493", "issue:43577", "issue:43582", - "issue:43673", "issue:43742", + "issue:43825", + "issue:43827", "issue:43874", + "issue:43906", + "issue:43931", "issue:43937", + "issue:43976", "issue:43986", + "issue:44038", + "issue:44075", "issue:44117", "issue:44220", + "issue:44222", "issue:44261", - "issue:44263", + "issue:44291", "issue:44410", - "issue:44483", "issue:44484", - "issue:44485", - "issue:44493", "issue:44496", + "issue:44779", "issue:44829", - "issue:44933", - "issue:45003", + "issue:44936", + "issue:44945", + "issue:44977", "issue:45071", "issue:45081", + "issue:45083", "issue:45216", - "issue:45278", "issue:45310", - "issue:45341", "issue:45405", "issue:45446", - "issue:45459", - "issue:45464", - "issue:45478" + "issue:45478", + "issue:45542" ], "soft_pairs": [ + "issue:43116|issue:43493", + "issue:42371|issue:43874", + "issue:44075|issue:44936", + "issue:44222|issue:45478", + "issue:43116|issue:43825", + "issue:44038|issue:45478", + "issue:43976|issue:45446", + "issue:39401|issue:44779", + "issue:43931|issue:44945", + "issue:33453|issue:39401", + "issue:45083|issue:45446", + "issue:43937|issue:44977", + "issue:43827|issue:45446", + "issue:42673|issue:43906", + "issue:44291|issue:45071", + "issue:43450|issue:43986", "issue:45405|issue:45446", - "issue:44484|issue:45310", "issue:44829|issue:45216", + "issue:44484|issue:45310", "issue:39401|issue:43421", "issue:38617|issue:41628", "issue:44117|issue:45081", 
"issue:42673|issue:43582", "issue:44410|issue:45310", "issue:43408|issue:43742", - "issue:38617|issue:44220", "issue:42673|issue:43577", - "issue:43937|issue:44496", + "issue:42222|issue:45542", + "issue:38617|issue:44220", "issue:42371|issue:44261", - "issue:42175|issue:42222", - "issue:44483|issue:45464", - "issue:42617|issue:45341", - "issue:44933|issue:45278", - "issue:44484|issue:45478", - "issue:43066|issue:43122", - "issue:43673|issue:45341", - "issue:43097|issue:44493", - "issue:42222|issue:45459", - "issue:44410|issue:45478", - "issue:43986|issue:44484", - "issue:43441|issue:44829", - "issue:43874|issue:44261", - "issue:43012|issue:43408", - "issue:42222|issue:42994", - "issue:44263|issue:44485", - "issue:41628|issue:45071", - "issue:45003|issue:45341", - "issue:43097|issue:43452" + "issue:43937|issue:44496" ], - "prepared_review_unit_hash": "fb7ed03007c318f3a89a1333ceb7c44436eba1347066e46caf17f70979935023", + "prepared_review_unit_hash": "c4d9d1f92af415baa20bd048b2ecb62fce42c9b599670e7a70da04b83a226089", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { - "node_count": 48, - "item_count": 48, - "soft_pair_count": 32, - "serialized_chars": 37000, - "estimated_input_tokens": 9250, - "estimated_eval_tokens": 18756 + "node_count": 47, + "item_count": 47, + "soft_pair_count": 30, + "serialized_chars": 36030, + "estimated_input_tokens": 9008, + "estimated_eval_tokens": 18272 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This set is highly heterogeneous: most items are unrelated import, tokenizer, model-loading, backend, or API issues. 
There is no strong single duplicate cluster here; at best, issue 45278 is a loose umbrella for import regressions, but it does not explain the rest.", - "confidence": 0.24, - "canonical_issue_reason": "Issue 45278 is the broadest umbrella candidate because it reports widespread import errors after an upgrade, but the cluster is too mixed for a true canonical issue.", + "summary": "This cluster is mostly a false-positive grouping: the items share broad themes like tokenizers, Qwen, loading, or v5 regressions, but the concrete bugs and code paths are different. No soft edge looks safe to merge, and there are no PRs in the set.", + "confidence": 0.83, + "canonical_issue_reason": "No single canonical issue emerges; the set is a loose mix of unrelated bugs, so any canonical choice would be arbitrary.", "canonical_pr_reason": null, - "best_issue_reason": "45278 is the closest thing to a cluster anchor due to its broad import-regression framing and open status, though it only fits a small subset of the items.", + "best_issue_reason": "If forced, issue:45216 is the closest thing to a representative item because it is a concrete version-bound regression report, but it does not unify the cluster.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:45405", - "right": "issue:45446", + "left": "issue:43116", + "right": "issue:43493", "accept": false, - "reason": "Different problems: PEFT version pinning vs a PyTorch-version guard bug in flex_attention." + "reason": "Different bugs and models: example script multi-label output vs SigLIP2 implementation discrepancy." }, { - "left": "issue:44484", - "right": "issue:45310", + "left": "issue:42371", + "right": "issue:43874", "accept": false, - "reason": "Unrelated: save_pretrained shard size question vs Qwen3.5 MoE from_pretrained failure." + "reason": "Unrelated topics: TF32 settings vs a missing GLM46V image-processor method." 
}, { - "left": "issue:44829", - "right": "issue:45216", + "left": "issue:44075", + "right": "issue:44936", "accept": false, - "reason": "Different code paths and symptoms: flash_attention_3 training degeneration vs a save_pretrained regression." + "reason": "Different failure modes: optimizer args ignored vs trainer.evaluate() failing after train()." }, { - "left": "issue:39401", - "right": "issue:43421", + "left": "issue:44222", + "right": "issue:45478", "accept": false, - "reason": "Both tokenizer-related, but one is offset_mapping on Qwen3 and the other is runtime post-processor updates; not the same bug." + "reason": "Both mention MoE, but one is FP8 save_pretrained and the other is a from_pretrained loading error; not the same concrete bug." }, { - "left": "issue:38617", - "right": "issue:41628", + "left": "issue:43116", + "right": "issue:43825", "accept": false, - "reason": "Both are import errors, but they involve different missing symbols and likely different root causes." + "reason": "Example-script classification bug vs pipeline deprecation/error-message issue; unrelated paths." }, { - "left": "issue:44117", - "right": "issue:45081", + "left": "issue:44038", + "right": "issue:45478", "accept": false, - "reason": "Tokenizer loading failures, but one is a mapping-None assumption and the other is a Mistral regex patch crash." + "reason": "Same family name only; the reported symptoms and likely code paths are different." }, { - "left": "issue:42673", - "right": "issue:43582", + "left": "issue:43976", + "right": "issue:45446", "accept": false, - "reason": "Different failures: VRAM leak in dataloader threads vs Apple Silicon TypeError in allocator warmup." + "reason": "Python version compatibility issue vs a PyTorch version check bug; unrelated." 
}, { - "left": "issue:44410", - "right": "issue:45310", + "left": "issue:39401", + "right": "issue:44779", "accept": false, - "reason": "Different model families and errors: missing qwen3next projections vs Qwen3.5 MoE load failure." + "reason": "Both tokenizer issues, but different models and distinct symptoms (offset mapping vs incorrect results)." }, { - "left": "issue:43408", - "right": "issue:43742", + "left": "issue:43931", + "right": "issue:44945", "accept": false, - "reason": "Both are model loading issues, but the specific models and failure modes are unrelated." + "reason": "Model weight-shape mismatch vs pipeline parallelism producing wrong outputs; different problems." }, { - "left": "issue:38617", - "right": "issue:44220", + "left": "issue:33453", + "right": "issue:39401", "accept": false, - "reason": "Different import/symbol issue vs a feature-extraction function bug." + "reason": "Generic tokenizer loading regression vs Qwen3 offset_mapping bug; not the same underlying defect." }, { - "left": "issue:42673", - "right": "issue:43577", + "left": "issue:45083", + "right": "issue:45446", "accept": false, - "reason": "VRAM leak in multithreaded loading is unrelated to Blip2 dtype propagation." + "reason": "Feature-extraction helper behavior vs a flex_attention import/version check; unrelated." }, { "left": "issue:43937", - "right": "issue:44496", - "accept": false, - "reason": "Both involve model/config validation, but one is GLM-5 GenerationConfig and the other is an unrecognized model/config.json issue." - }, - { - "left": "issue:42371", - "right": "issue:44261", + "right": "issue:44977", "accept": false, - "reason": "TF32 settings warning vs MLA q_a_layernorm precision/config issue are different changes." + "reason": "GenerationConfig validation error vs flash-attention generation malfunction; different code paths." 
}, { - "left": "issue:42175", - "right": "issue:42222", + "left": "issue:43827", + "right": "issue:45446", "accept": false, - "reason": "Backend packaging/install issue vs vitpose model breakage; not the same underlying bug." + "reason": "Docs still mentioning pipeline() vs a runtime import/version check bug; unrelated." }, { - "left": "issue:44483", - "right": "issue:45464", + "left": "issue:42673", + "right": "issue:43906", "accept": false, - "reason": "General chat/completions request rejection vs Qwen3.5 streaming inference failure; different concrete failures." + "reason": "VRAM leak in multithreaded inference vs an isolated reproduction of a different issue; no evidence they are the same bug." }, { - "left": "issue:42617", - "right": "issue:45341", + "left": "issue:44291", + "right": "issue:45071", "accept": false, - "reason": "Unrelated: 3d_parallel.py execution problem vs a testing_utils bug." + "reason": "init_empty_weights/_is_hf_initialized TypeError vs PretrainedConfig type-checking breakage; different failures." }, { - "left": "issue:44933", - "right": "issue:45278", + "left": "issue:43450", + "right": "issue:43986", "accept": false, - "reason": "45278 is broad import regressions, but 44933 is a specific missing import; not enough evidence they are the same defect." + "reason": "Batched video processor shape bug vs crash from missing torchvision during AutoProcessor loading; unrelated." }, { - "left": "issue:44484", - "right": "issue:45478", + "left": "issue:45405", + "right": "issue:45446", "accept": false, - "reason": "Completely different topics: shard sizing vs Qwen3.5 MoE from_pretrained regression." + "reason": "Released-version bump concern vs PyTorch import/version check bug; unrelated." 
}, { - "left": "issue:43066", - "right": "issue:43122", + "left": "issue:44829", + "right": "issue:45216", "accept": false, - "reason": "Both tokenizer behavior changes in v5, but one is decoder type and the other is tokenization output drift; not the same bug." + "reason": "Training degeneracy under flash_attention_3 vs a save_pretrained checkpoint regression; different behaviors." }, { - "left": "issue:43673", - "right": "issue:45341", + "left": "issue:44484", + "right": "issue:45310", "accept": false, - "reason": "Generation cache/chunked_prefill regression is unrelated to a testing utility bug." + "reason": "save_pretrained shard-size question vs Qwen3.5 MoE from_pretrained error; unrelated." }, { - "left": "issue:43097", - "right": "issue:44493", + "left": "issue:39401", + "right": "issue:43421", "accept": false, - "reason": "Removed tie_embeddings API vs position-id unexpected key warnings are different issues." + "reason": "Qwen3 offset mapping bug vs runtime post-processor update behavior; related area only, not same bug." }, { - "left": "issue:42222", - "right": "issue:45459", + "left": "issue:38617", + "right": "issue:41628", "accept": false, - "reason": "Vitpose model breakage is unrelated to tokenizer error masking when protobuf is absent." + "reason": "Different import errors with different missing symbols and likely different fixes." }, { - "left": "issue:44410", - "right": "issue:45478", + "left": "issue:44117", + "right": "issue:45081", "accept": false, - "reason": "Same broad Qwen area, but different model variant and distinct failure modes." + "reason": "Tokenizer registry None handling vs Mistral regex patch crash; different root causes." }, { - "left": "issue:43986", - "right": "issue:44484", + "left": "issue:42673", + "right": "issue:43582", "accept": false, - "reason": "AutoProcessor without torchvision and shard-size discussion are unrelated." + "reason": "VRAM leak in model use vs Apple Silicon TypeError in warmup helper; unrelated." 
}, { - "left": "issue:43441", - "right": "issue:44829", + "left": "issue:44410", + "right": "issue:45310", "accept": false, - "reason": "Ministral FlashAttention failure and flash_attention_3 training degeneration are not the same code-path problem." + "reason": "Missing projections in qwen3next vs Qwen3.5 MoE loading error; not the same defect." }, { - "left": "issue:43874", - "right": "issue:44261", + "left": "issue:43408", + "right": "issue:43742", "accept": false, - "reason": "Missing image patch-count method vs MLA layernorm epsilon precision issue are unrelated." + "reason": "SAM3 tracker/video model warning vs MobileLLM loading key error; unrelated." }, { - "left": "issue:43012", - "right": "issue:43408", + "left": "issue:42673", + "right": "issue:43577", "accept": false, - "reason": "bfloat16 compilation warning is unrelated to a model-type mismatch warning." + "reason": "VRAM leak vs incorrect dtype propagation in Blip2 loading; different bugs." }, { "left": "issue:42222", - "right": "issue:42994", + "right": "issue:45542", "accept": false, - "reason": "Vitpose model breakage and quantized model saving failure are different bugs." + "reason": "VitPose model breakage vs undefined tf backend error; no connection." }, { - "left": "issue:44263", - "right": "issue:44485", - "accept": false, - "reason": "torch.split return-value issue is unrelated to GLM-5 RoPE implementation." - }, - { - "left": "issue:41628", - "right": "issue:45071", + "left": "issue:38617", + "right": "issue:44220", "accept": false, - "reason": "Both involve imports/config typing, but the actual failures are different and not a shared concrete bug." + "reason": "ImportError for a config symbol vs an audio feature-extraction helper issue; unrelated." }, { - "left": "issue:45003", - "right": "issue:45341", + "left": "issue:42371", + "right": "issue:44261", "accept": false, - "reason": "Unsafe sys.modules access and a testing_utils bug are unrelated." 
+ "reason": "TF32 behavior settings vs missing rms_norm_eps in MLA layernorm; different subsystems." }, { - "left": "issue:43097", - "right": "issue:43452", + "left": "issue:43937", + "right": "issue:44496", "accept": false, - "reason": "Tie-embeddings API removal and gguf_file tokenizer/model loading failures are different regressions." + "reason": "Invalid generation config vs unrecognized model/config loading failure; not the same bug." } ] }, @@ -11770,325 +11541,332 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:38617", + "issue:41628", + "issue:42175", + "issue:42222", + "issue:42617", "issue:42757", - "issue:42886", "issue:43012", "issue:43064", "issue:43066", "issue:43097", + "issue:43122", "issue:43335", - "issue:43352", - "issue:43381", "issue:43408", - "issue:43502", + "issue:43441", + "issue:43452", "issue:43531", - "issue:43550", - "issue:43582", - "issue:43825", - "issue:43873", + "issue:43673", "issue:43874", - "issue:43901", - "issue:43906", "issue:43931", "issue:43976", "issue:43986", - "issue:43992", "issue:44038", - "issue:44062", "issue:44117", "issue:44261", "issue:44263", "issue:44315", - "issue:44351", "issue:44368", - "issue:44380", "issue:44410", - "issue:44464", + "issue:44483", "issue:44484", "issue:44485", - "issue:44655", + "issue:44493", "issue:44704", "issue:44829", + "issue:44933", "issue:44987", + "issue:45003", "issue:45071", "issue:45103", "issue:45245", - "issue:45290", + "issue:45278", "issue:45341", "issue:45372", - "issue:45375" + "issue:45375", + "issue:45459", + "issue:45464", + "issue:45478", + "issue:45542" ], "soft_pairs": [ - "issue:44117|issue:44987", + "issue:44483|issue:45464", + "issue:42175|issue:42222", + "issue:42617|issue:45341", + "issue:44933|issue:45278", + "issue:44484|issue:45478", + "issue:43066|issue:43122", + "issue:43673|issue:45341", + "issue:43097|issue:44493", + 
"issue:42222|issue:45459", + "issue:44410|issue:45478", + "issue:43874|issue:44261", + "issue:43441|issue:44829", + "issue:43986|issue:44484", + "issue:43012|issue:43408", + "issue:44263|issue:44485", + "issue:41628|issue:45071", + "issue:45071|issue:45542", + "issue:45003|issue:45341", + "issue:43097|issue:43452", "issue:43986|issue:45341", + "issue:44117|issue:44987", "issue:43931|issue:44410", "issue:43335|issue:45071", "issue:43408|issue:44368", "issue:43931|issue:44038", - "issue:43986|issue:45372", - "issue:44829|issue:45245", "issue:43874|issue:44263", + "issue:44829|issue:45245", + "issue:43986|issue:45372", "issue:43976|issue:45071", - "issue:44464|issue:45071", "issue:43531|issue:45341", "issue:43064|issue:44315", "issue:38617|issue:42757", "issue:44261|issue:44485", "issue:44410|issue:45375", - "issue:44704|issue:45103", - "issue:44062|issue:44117", - "issue:43502|issue:43992", - "issue:43986|issue:44117", - "issue:38617|issue:44351", - "issue:43582|issue:45341", - "issue:44038|issue:44410", - "issue:43097|issue:43873", - "issue:43992|issue:44484", - "issue:43986|issue:45290", - "issue:43066|issue:43906", - "issue:38617|issue:43097", - "issue:43825|issue:44655", - "issue:43352|issue:44380", - "issue:43097|issue:43901", - "issue:43012|issue:43550", - "issue:43012|issue:44368", - "issue:42886|issue:43381" + "issue:44704|issue:45103" ], - "prepared_review_unit_hash": "f7035d976bd0d823f6901e2d3160f384b6f5441330457bad00a45a4d32ca62c1", + "prepared_review_unit_hash": "7407466d1c0568032398e29de58392d79961549310deaeff42c48d9fb4813de7", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 
34, - "serialized_chars": 37887, - "estimated_input_tokens": 9472, - "estimated_eval_tokens": 19200 + "soft_pair_count": 35, + "serialized_chars": 38050, + "estimated_input_tokens": 9513, + "estimated_eval_tokens": 19282 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is not a true duplicate set: the items span unrelated import errors, model/config bugs, loading/offline issues, training problems, and documentation mismatches. I would not merge any of the soft-edge pairs.", - "confidence": 0.98, - "canonical_issue_reason": "No clear canonical issue: the items do not describe the same underlying bug or change, so selecting one representative would be misleading.", + "summary": "The cluster is mostly a set of similarly worded but distinct regressions/import failures. I would not merge any of the soft-edge pairs; the best triage anchor is the broad import-regression report, not a true duplicate set.", + "confidence": 0.89, + "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "No issue in this set is a good global representative because the cluster is heterogeneous and not centered on one defect.", + "best_issue_reason": "issue:45278 is the broadest, most umbrella-style report of upgrade-related import failures, so it is the best triage anchor, but it does not duplicate the other issues.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:44117", - "right": "issue:44987", + "left": "issue:44483", + "right": "issue:45464", "accept": false, - "reason": "Different failures: one is TOKENIZER_MAPPING_NAMES/from_pretrained handling, the other is loading a specific model on transformers>=5.1.0." + "reason": "Both mention chat/completions failures, but one is a generic v5.3 API break and the other is Qwen3.5-0.8B streaming behavior; not the same bug." 
}, { - "left": "issue:43986", - "right": "issue:45341", + "left": "issue:42175", + "right": "issue:42222", "accept": false, - "reason": "Unrelated areas: video-model loading without torchvision vs a bug in testing_utils.py." + "reason": "TensorFlow backend packaging vs broken VitPose model files; unrelated problems." }, { - "left": "issue:43931", - "right": "issue:44410", + "left": "issue:42617", + "right": "issue:45341", "accept": false, - "reason": "Different model-loading problems for different architectures and symptoms; not the same code path." + "reason": "A script execution failure and a testing utility bug are unrelated." }, { - "left": "issue:43335", - "right": "issue:45071", + "left": "issue:44933", + "right": "issue:45278", "accept": false, - "reason": "SwitchTransformers sparse-layer config bug is unrelated to PretrainedConfig type checking." + "reason": "A specific missing import from image_utils is not the same as a broad post-upgrade import-error report." }, { - "left": "issue:43408", - "right": "issue:44368", + "left": "issue:44484", + "right": "issue:45478", "accept": false, - "reason": "Different model families and warnings: sam3 tracker mismatch vs tie_word_embeddings warning in Qwen3.5 fine-tuning." + "reason": "Save/sharding behavior and Qwen3.5-Moe loading errors are different code paths." }, { - "left": "issue:43931", - "right": "issue:44038", + "left": "issue:43066", + "right": "issue:43122", "accept": false, - "reason": "Both involve model loading, but for different models and distinct failures; not one underlying bug." + "reason": "Both are tokenizer regressions in v5, but one is decoder-type metadata and the other is changed tokenization output; too different to merge." }, { - "left": "issue:43986", - "right": "issue:45372", + "left": "issue:43673", + "right": "issue:45341", "accept": false, - "reason": "Different missing-dependency/import issues in separate processor paths." 
+ "reason": "Generation cache behavior and a testing utility bug are unrelated." }, { - "left": "issue:44829", - "right": "issue:45245", + "left": "issue:43097", + "right": "issue:44493", "accept": false, - "reason": "FlashAttention 3 training degeneration is unrelated to the categorical limit RuntimeError." + "reason": "Removed config field vs unexpected position-id keys are different symptoms and code paths." }, { - "left": "issue:43874", - "right": "issue:44263", + "left": "issue:42222", + "right": "issue:45459", "accept": false, - "reason": "Both mention GLM, but one is an image-processor method missing and the other is a torch.split return-value issue; different bugs." + "reason": "VitPose model breakage and protobuf-related tokenizer error masking are unrelated." }, { - "left": "issue:43976", - "right": "issue:45071", + "left": "issue:44410", + "right": "issue:45478", "accept": false, - "reason": "Python version compatibility issue is unrelated to config type-checking regressions." + "reason": "Different Qwen model families and different failure modes." }, { - "left": "issue:44464", - "right": "issue:45071", + "left": "issue:43874", + "right": "issue:44261", "accept": false, - "reason": "Chunked generation with compiled forward is a different runtime path than PretrainedConfig type checking." + "reason": "Image-token counting bug in GLM46V vs RMS-epsilon precision issue in MLA are distinct." }, { - "left": "issue:43531", - "right": "issue:45341", + "left": "issue:43441", + "right": "issue:44829", "accept": false, - "reason": "Qwen3-MoE sliding_window behavior is unrelated to a testing_utils bug." + "reason": "Both involve flash attention, but one is a model-loading failure and the other is degenerate training; not the same concrete bug." 
}, { - "left": "issue:43064", - "right": "issue:44315", + "left": "issue:43986", + "right": "issue:44484", "accept": false, - "reason": "Trainer/FSDP2 optimizer-state bug is unrelated to Liger Kernel not being applied with model_init." + "reason": "AutoProcessor/torchvision crash is unrelated to max_shard_size behavior." }, { - "left": "issue:38617", - "right": "issue:42757", + "left": "issue:43012", + "right": "issue:43408", "accept": false, - "reason": "Both are import errors, but they are for different missing symbols from different modules and different root causes." + "reason": "A numerical warning during bfloat16 compilation is unrelated to a model-type mismatch warning." }, { - "left": "issue:44261", + "left": "issue:44263", "right": "issue:44485", "accept": false, - "reason": "MLA q_a_layernorm precision/config issue is not the same as GLM-5 RoPE implementation differences." + "reason": "Torch.split indexing bug and RoPE implementation concern are separate GLM issues." }, { - "left": "issue:44410", - "right": "issue:45375", + "left": "issue:41628", + "right": "issue:45071", "accept": false, - "reason": "Qwen3next missing attention projections is unrelated to Qwen3_5MoeVisionConfig missing a config field." + "reason": "Missing AutoImageProcessor import and PretrainedConfig type-checking regression are unrelated." }, { - "left": "issue:44704", - "right": "issue:45103", + "left": "issue:45071", + "right": "issue:45542", "accept": false, - "reason": "AutoProcessor kwargs forwarding bug is unrelated to auto_docstring crashing on future annotations." + "reason": "Type-checking regression and missing TensorFlow backend wiring are unrelated." }, { - "left": "issue:44062", - "right": "issue:44117", + "left": "issue:45003", + "right": "issue:45341", "accept": false, - "reason": "AddedToken duplicate-special-arg error and tokenizer mapping None-handling are distinct tokenizer bugs." 
+ "reason": "Unsafe sys.modules access and a testing_utils bug do not describe the same fault." }, { - "left": "issue:43502", - "right": "issue:43992", + "left": "issue:43097", + "right": "issue:43452", "accept": false, - "reason": "local_files_only causing network requests is unrelated to UMT5Encoder missing embed_tokens.weight." + "reason": "Removed tie_embeddings behavior and gguf_file loading failures are different issues." }, { "left": "issue:43986", - "right": "issue:44117", + "right": "issue:45341", "accept": false, - "reason": "Video-model processor loading without torchvision is unrelated to tokenizer mapping assumptions." + "reason": "Video-model processor crash and testing_utils bug are unrelated." }, { - "left": "issue:38617", - "right": "issue:44351", + "left": "issue:44117", + "right": "issue:44987", "accept": false, - "reason": "Different missing imports from different packages; no shared underlying defect." + "reason": "Tokenizer mapping returning None may contribute to loading failures, but the model-specific load error is not clearly the same bug." }, { - "left": "issue:43582", - "right": "issue:45341", + "left": "issue:43931", + "right": "issue:44410", "accept": false, - "reason": "AppleSilicon TypeError in caching_allocator_warmup is unrelated to testing_utils.py." + "reason": "Weight-shape mismatch for Qwen3-VL vs missing projections in Qwen3Next are different model/code-path problems." }, { - "left": "issue:44038", - "right": "issue:44410", + "left": "issue:43335", + "right": "issue:45071", "accept": false, - "reason": "Qwen3-VL-Moe transformer-v5 bug and Qwen3next missing projections are separate model-specific issues." + "reason": "SwitchTransformers sparse-layer construction and PretrainedConfig type checking are unrelated." 
}, { - "left": "issue:43097", - "right": "issue:43873", + "left": "issue:43408", + "right": "issue:44368", "accept": false, - "reason": "Removed tie_embeddings_and_encoder_decoder warning/docs issue is unrelated to quantization offloading behavior." + "reason": "Model-type warning and tie_word_embeddings warning are different warnings with different causes." }, { - "left": "issue:43992", - "right": "issue:44484", + "left": "issue:43931", + "right": "issue:44038", "accept": false, - "reason": "Missing embedding weight on load is unrelated to max_shard_size default behavior." + "reason": "Both are Qwen3-VL related, but one is a shape mismatch and the other is a transformers-version/MoE compatibility issue." }, { - "left": "issue:43986", - "right": "issue:45290", + "left": "issue:43874", + "right": "issue:44263", "accept": false, - "reason": "Video processor dependency crash is unrelated to apply_chat_template tool-call handling." + "reason": "Different GLM bugs: missing image-patch helper vs torch.split return handling." }, { - "left": "issue:43066", - "right": "issue:43906", + "left": "issue:44829", + "right": "issue:45245", "accept": false, - "reason": "Tokenizer decoder type regression and isolated reproduction of another issue are not the same bug." + "reason": "FlashAttention training degradation and category-cardinality limit are unrelated." }, { - "left": "issue:38617", - "right": "issue:43097", + "left": "issue:43986", + "right": "issue:45372", "accept": false, - "reason": "ImportError for layer_type_validation and a tie_embeddings deprecation/removal complaint are unrelated." + "reason": "Processor loading without torchvision and mistral_common import failure are unrelated dependency issues." }, { - "left": "issue:43825", - "right": "issue:44655", + "left": "issue:43976", + "right": "issue:45071", "accept": false, - "reason": "Pipeline translation-task messaging is unrelated to saving Pipeline objects." 
+ "reason": "Python-version incompatibility and PretrainedConfig type checking are different regressions." }, { - "left": "issue:43352", - "right": "issue:44380", + "left": "issue:43531", + "right": "issue:45341", "accept": false, - "reason": "Nemotron FlashAttention 2 support error is unrelated to GPT2 attention scaling being ignored." + "reason": "Qwen3-MoE sliding-window behavior and a testing utility bug are unrelated." }, { - "left": "issue:43097", - "right": "issue:43901", + "left": "issue:43064", + "right": "issue:44315", "accept": false, - "reason": "Tie-embeddings deprecation/removal is unrelated to TextClassificationPipeline docs and return_all_scores behavior." + "reason": "Distributed optimizer-state corruption and Liger Kernel application timing are different training issues." }, { - "left": "issue:43012", - "right": "issue:43550", + "left": "issue:38617", + "right": "issue:42757", "accept": false, - "reason": "bfloat16 precision warning and Bamba torch.compile/SDPA failure are different runtime issues." + "reason": "Both are import errors, but they are missing different symbols from different packages." }, { - "left": "issue:43012", - "right": "issue:44368", + "left": "issue:44261", + "right": "issue:44485", "accept": false, - "reason": "PyTorch precision warning is unrelated to a tie_word_embeddings warning during LoRA fine-tuning." + "reason": "RMS-epsilon precision bug and GLM-5 RoPE implementation concern are separate issues." }, { - "left": "issue:42886", - "right": "issue:43381", + "left": "issue:44410", + "right": "issue:45375", "accept": false, - "reason": "Offline tokenizer cache failure is unrelated to gradient checkpointing in eval mode." + "reason": "Missing projections in Qwen3Next and a missing vision-config field are unrelated." + }, + { + "left": "issue:44704", + "right": "issue:45103", + "accept": false, + "reason": "Passing kwargs to cached_file and auto_docstring annotation handling are different helper bugs." 
} ] }, @@ -12110,338 +11888,346 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ - "issue:36010", "issue:36683", "issue:38617", "issue:42757", + "issue:42886", "issue:42907", - "issue:42994", "issue:43012", "issue:43064", "issue:43066", + "issue:43097", "issue:43208", "issue:43299", "issue:43352", - "issue:43475", + "issue:43381", "issue:43502", "issue:43525", "issue:43526", "issue:43550", "issue:43582", "issue:43618", - "issue:43756", "issue:43761", "issue:43824", + "issue:43825", "issue:43867", + "issue:43873", "issue:43901", + "issue:43906", "issue:43937", "issue:43976", + "issue:43986", + "issue:43992", "issue:44038", - "issue:44077", + "issue:44062", "issue:44117", - "issue:44188", - "issue:44220", "issue:44230", - "issue:44263", + "issue:44351", + "issue:44368", "issue:44380", + "issue:44410", "issue:44483", - "issue:44485", - "issue:44492", + "issue:44484", "issue:44514", "issue:44655", "issue:44683", - "issue:44792", - "issue:44938", - "issue:45216", "issue:45290", + "issue:45341", "issue:45381", - "issue:45440", - "issue:45479" + "issue:45479", + "issue:45538" ], "soft_pairs": [ + "issue:44062|issue:44117", + "issue:43582|issue:45341", + "issue:38617|issue:44351", + "issue:43986|issue:44117", + "issue:44038|issue:44410", + "issue:43502|issue:43992", + "issue:43097|issue:43873", + "issue:43992|issue:44484", + "issue:43986|issue:45290", + "issue:43937|issue:45538", + "issue:43066|issue:43906", + "issue:38617|issue:43097", + "issue:43825|issue:44655", + "issue:43352|issue:44380", + "issue:43097|issue:43901", + "issue:43012|issue:43550", + "issue:43012|issue:44368", + "issue:42886|issue:43381", "issue:43208|issue:43550", - "issue:43937|issue:44792", "issue:42907|issue:44655", - "issue:43525|issue:44117", "issue:44380|issue:45479", + "issue:43525|issue:44117", "issue:43352|issue:43976", "issue:43064|issue:44380", "issue:43208|issue:43526", 
"issue:44483|issue:44514", "issue:43618|issue:44380", - "issue:36683|issue:43582", "issue:43618|issue:43761", + "issue:36683|issue:43582", "issue:43012|issue:44683", "issue:44483|issue:45290", "issue:42757|issue:43824", - "issue:38617|issue:43502", "issue:44038|issue:44230", "issue:44483|issue:45381", + "issue:38617|issue:43502", "issue:43299|issue:43867", - "issue:43901|issue:44655", - "issue:42994|issue:44655", - "issue:36683|issue:43475", - "issue:43756|issue:44485", - "issue:43867|issue:45216", - "issue:44220|issue:44655", - "issue:43066|issue:45381", - "issue:43352|issue:43937", - "issue:44077|issue:44380", - "issue:36683|issue:44077", - "issue:44263|issue:45440", - "issue:43064|issue:44492", - "issue:43976|issue:44938", - "issue:36683|issue:43525", - "issue:36010|issue:42757", - "issue:44188|issue:44380", - "issue:43618|issue:45216" + "issue:43901|issue:44655" ], - "prepared_review_unit_hash": "225e25c1406ea44e5b61d832d62390332d7cbffb11981167f649fab53e07c43e", + "prepared_review_unit_hash": "f7a729ad872cee7bf1c3403eab50404b1bf78cd5b8254cc2770efcabd9faa567", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { - "node_count": 47, - "item_count": 47, - "soft_pair_count": 36, - "serialized_chars": 37558, - "estimated_input_tokens": 9390, - "estimated_eval_tokens": 19036 + "node_count": 48, + "item_count": 48, + "soft_pair_count": 37, + "serialized_chars": 38528, + "estimated_input_tokens": 9632, + "estimated_eval_tokens": 19520 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is highly heterogeneous: mostly unrelated Transformers 
import/config regressions, model-specific bugs, and save/load/runtime issues. The soft-similarity pairs do not look like true duplicates of the same underlying bug or change.", - "confidence": 0.96, - "canonical_issue_reason": "Issue 44483 is the broadest user-facing regression in the set, but the cluster as a whole is too diverse to have a strong true canonical issue.", + "summary": "This set does not form a single duplicate cluster; the items span unrelated import errors, model-specific regressions, tokenizer/pipeline issues, and training/runtime bugs. The soft-similarity links are mostly superficial topic overlaps, not the same underlying defect.", + "confidence": 0.97, + "canonical_issue_reason": "No clear canonical issue: the issues are heterogeneous and do not share one concrete bug or fix path.", "canonical_pr_reason": null, - "best_issue_reason": "If a representative must be picked, 44483 is the least model-specific and most general public API failure; otherwise, there is no good global canonical issue here.", + "best_issue_reason": "No single issue is a good global representative because the cluster is not cohesive enough to anchor on one bug.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:43208", - "right": "issue:43550", + "left": "issue:44062", + "right": "issue:44117", "accept": false, - "reason": "Different model families and different failure modes; one is multiple xLSTM training bugs, the other is a Bamba SDPA/torch.compile issue." + "reason": "Different failures: AddedToken keyword collision vs tokenizer mapping returning None during from_pretrained." }, { - "left": "issue:43937", - "right": "issue:44792", + "left": "issue:43582", + "right": "issue:45341", "accept": false, - "reason": "Unrelated problems: invalid GLM-5 generation config vs a janus image-generation test failure." + "reason": "Unrelated: Apple Silicon caching allocator TypeError vs a testing_utils bug." 
}, { - "left": "issue:42907", - "right": "issue:44655", + "left": "issue:38617", + "right": "issue:44351", "accept": false, - "reason": "Both involve saving, but one is dequantized Ministral/Devstral models and the other is Pipeline.save_pretrained; different code paths." + "reason": "Both are import errors, but for different missing symbols and different call paths." }, { - "left": "issue:43525", + "left": "issue:43986", "right": "issue:44117", "accept": false, - "reason": "A missing config attribute on Llama4 is unrelated to tokenizer mapping returning None in from_pretrained." + "reason": "Different areas: AutoProcessor/video-model crash without torchvision vs tokenizer mapping assumptions." }, { - "left": "issue:44380", - "right": "issue:45479", + "left": "issue:44038", + "right": "issue:44410", "accept": false, - "reason": "Different subsystems and symptoms: GPT-2 attention scaling vs a sequence-classification zero-loss bug." + "reason": "Both involve Qwen-family models, but they describe different model loading/projection issues." }, { - "left": "issue:43352", - "right": "issue:43976", + "left": "issue:43502", + "right": "issue:43992", "accept": false, - "reason": "Nemotron Flash Attention support is unrelated to a Python-version compatibility issue." + "reason": "Different symptoms: unintended API calls with local_files_only vs missing embed_tokens.weight in UMT5Encoder loading." }, { - "left": "issue:43064", - "right": "issue:44380", + "left": "issue:43097", + "right": "issue:43873", "accept": false, - "reason": "FSDP2/PEFT optimizer-state corruption is not the same bug as GPT-2 attention scaling being ignored." + "reason": "Tie-embeddings API removal and quantized offloading behavior are unrelated." }, { - "left": "issue:43208", - "right": "issue:43526", + "left": "issue:43992", + "right": "issue:44484", "accept": false, - "reason": "xLSTM training bugs and BEiT image processor label reduction are different components and different defects." 
+ "reason": "Model weight loading bug vs shard-size question for save_pretrained; no shared code-path defect." }, { - "left": "issue:44483", - "right": "issue:44514", + "left": "issue:43986", + "right": "issue:45290", "accept": false, - "reason": "Both touch chat APIs, but one is endpoint request rejection and the other is template tokenization on batched input; not the same bug." + "reason": "Both touch chat/processor flows, but one is missing torchvision and the other is a tool-call templating crash." }, { - "left": "issue:43618", - "right": "issue:44380", + "left": "issue:43937", + "right": "issue:45538", "accept": false, - "reason": "CLIPOutput attentions missing is unrelated to GPT-2 attention scaling under SDPA/FlashAttention." + "reason": "GenerationConfig validation for GLM-5 is unrelated to CLIPTokenizer max length." }, { - "left": "issue:36683", - "right": "issue:43582", + "left": "issue:43066", + "right": "issue:43906", "accept": false, - "reason": "Gemma3Config missing vocab_size is unrelated to an Apple Silicon TypeError in caching_allocator_warmup." + "reason": "Wrong tokenizer decoder type is not the same as an isolated reproduction of a different issue." }, { - "left": "issue:43618", - "right": "issue:43761", + "left": "issue:38617", + "right": "issue:43097", "accept": false, - "reason": "Both are CLIP-related, but one loses attentions in CLIPOutput while the other returns hidden_states=None in CLIPVisionModel.forward; different regressions." + "reason": "Different deprecations/import surface: missing configuration_utils symbol vs removed tie_embeddings_and_encoder_decoder." }, { - "left": "issue:43012", - "right": "issue:44683", + "left": "issue:43825", + "right": "issue:44655", "accept": false, - "reason": "A bfloat16 precision warning is not the same as a compiled flex_attention failure on torch >= 2.9." + "reason": "Pipeline docs/message issue vs inability to save Pipeline objects; different behavior and fix." 
}, { - "left": "issue:44483", - "right": "issue:45290", + "left": "issue:43352", + "right": "issue:44380", "accept": false, - "reason": "The chat-completions request rejection and apply_chat_template crashing on tool-call messages are different failure points." + "reason": "FlashAttention support error for a Nemotron model is unrelated to GPT2 attention scaling under SDPA/FlashAttention." }, { - "left": "issue:42757", - "right": "issue:43824", + "left": "issue:43097", + "right": "issue:43901", "accept": false, - "reason": "Both are import errors, but for different missing symbols from different packages." + "reason": "Removed config API vs stale TextClassificationPipeline docs; not the same bug." }, { - "left": "issue:38617", - "right": "issue:43502", + "left": "issue:43012", + "right": "issue:43550", "accept": false, - "reason": "A missing configuration_utils import is unrelated to local_files_only still making API requests." + "reason": "Different warnings/errors: bf16 compile warning vs Bamba torch.compile SDPA failure." }, { - "left": "issue:44038", - "right": "issue:44230", + "left": "issue:43012", + "right": "issue:44368", "accept": false, - "reason": "One is a Qwen3-VL-Moe loading bug; the other is fp8 inference support for Qwen3-VL/Qwen3.5 MoE." + "reason": "Both mention warnings, but one is a PyTorch precision warning and the other is a tie_word_embeddings config warning." }, { - "left": "issue:44483", - "right": "issue:45381", + "left": "issue:42886", + "right": "issue:43381", "accept": false, - "reason": "General chat API rejection is not the same as incorrect vision_position_ids in Qwen2.5-VL video input." + "reason": "Offline cache loading and gradient checkpointing in eval mode are unrelated." }, { - "left": "issue:43299", - "right": "issue:43867", + "left": "issue:43208", + "right": "issue:43550", "accept": false, - "reason": "Qwen3-VL-Moe loading breakage is different from a state_dict sorting load error." 
+ "reason": "xLSTM training bugs and Bamba SDPA compile failure affect different models and code paths." }, { - "left": "issue:43901", + "left": "issue:42907", "right": "issue:44655", "accept": false, - "reason": "Pipeline save_pretrained docs/behavior and an actual pipeline serialization failure are different issues." + "reason": "Saving dequantized models and saving Pipeline objects are different persistence problems." }, { - "left": "issue:42994", - "right": "issue:44655", + "left": "issue:44380", + "right": "issue:45479", "accept": false, - "reason": "Quantized model saving failure is broader/different from saving Pipeline objects." + "reason": "Attention scaling backend bug is unrelated to sequence-classification zero-loss behavior." }, { - "left": "issue:36683", - "right": "issue:43475", + "left": "issue:43525", + "right": "issue:44117", "accept": false, - "reason": "Gemma3Config vocab_size and Sam3VisionEncoderOutput missing fpn_position_embeddings are unrelated attribute errors." + "reason": "Missing Llama4Config attribute and tokenizer mapping None are different configuration/loading defects." }, { - "left": "issue:43756", - "right": "issue:44485", + "left": "issue:43352", + "right": "issue:43976", "accept": false, - "reason": "Smollm3 RoPE-layer mismatch and GLM-5 RoPE implementation concerns are different models and different bugs." + "reason": "FlashAttention support for one model is unrelated to Python version compatibility." }, { - "left": "issue:43867", - "right": "issue:45216", + "left": "issue:43064", + "right": "issue:44380", "accept": false, - "reason": "Loading failure with sorted state_dict is not the same as a Qwen3.5 save_pretrained regression." + "reason": "Distributed optimizer-state corruption and GPT2 attention scaling are unrelated." 
}, { - "left": "issue:44220", - "right": "issue:44655", + "left": "issue:43208", + "right": "issue:43526", "accept": false, - "reason": "An fbank feature extraction bug is unrelated to pipeline serialization." + "reason": "xLSTM training bugs and BeitImageProcessorFast label reduction are unrelated." }, { - "left": "issue:43066", - "right": "issue:45381", + "left": "issue:44483", + "right": "issue:44514", "accept": false, - "reason": "Wrong tokenizer decoder type and wrong vision_position_ids are unrelated regressions." + "reason": "OpenAI-style chat/completions endpoint rejection is not the same as apply_chat_template crashing on batched tool-call input." }, { - "left": "issue:43352", - "right": "issue:43937", + "left": "issue:43618", + "right": "issue:44380", "accept": false, - "reason": "FlashAttention-2 support for Nemotron is unrelated to GLM-5 generation config validation." + "reason": "CLIP attentions regression and GPT2 attention scaling are different model-specific issues." }, { - "left": "issue:44077", - "right": "issue:44380", + "left": "issue:43618", + "right": "issue:43761", "accept": false, - "reason": "patchtsmixer post_init validation and GPT-2 attention scaling are different code paths." + "reason": "Both are CLIP regressions, but one drops attentions and the other drops hidden_states; not enough evidence of one shared bug." }, { "left": "issue:36683", - "right": "issue:44077", + "right": "issue:43582", "accept": false, - "reason": "Gemma3Config missing vocab_size is unrelated to patchtsmixer post_init allowance." + "reason": "Gemma3Config missing vocab_size and Apple Silicon allocator TypeError are unrelated." }, { - "left": "issue:44263", - "right": "issue:45440", + "left": "issue:43012", + "right": "issue:44683", "accept": false, - "reason": "Both mention MoE internals, but the concrete bugs and model implementations differ." + "reason": "bf16 warning vs compiled flex_attention failure are separate backend/precision issues." 
}, { - "left": "issue:43064", - "right": "issue:44492", + "left": "issue:44483", + "right": "issue:45290", "accept": false, - "reason": "Optimizer-state corruption in Trainer/FSDP2 is unrelated to a cache-strategy typo." + "reason": "Different request paths: /v1/chat/completions API handling vs tokenizer/template crash on tool-call messages." }, { - "left": "issue:43976", - "right": "issue:44938", + "left": "issue:42757", + "right": "issue:43824", "accept": false, - "reason": "These are different platform/version compatibility failures: Python 3.9+ vs Python 3.14." + "reason": "Import errors for different missing names from different modules and packages." }, { - "left": "issue:36683", - "right": "issue:43525", + "left": "issue:44038", + "right": "issue:44230", "accept": false, - "reason": "Missing vocab_size on Gemma3Config and missing pad_token_id on Llama4Config are separate model config bugs." + "reason": "Qwen3-VL-Moe loading issue and fp8 support request are not the same defect." }, { - "left": "issue:36010", - "right": "issue:42757", + "left": "issue:44483", + "right": "issue:45381", "accept": false, - "reason": "Import errors for GenerationMixin and is_offline_mode are unrelated symbols and modules." + "reason": "Chat/completions request handling is unrelated to Qwen2.5-VL video vision_position_ids." }, { - "left": "issue:44188", - "right": "issue:44380", + "left": "issue:38617", + "right": "issue:43502", "accept": false, - "reason": "torch.compile branching divergence in attention kernels is not the same as GPT-2 attention scaling being ignored." + "reason": "Missing configuration_utils symbol and unwanted API calls despite local_files_only concern different mechanisms." }, { - "left": "issue:43618", - "right": "issue:45216", + "left": "issue:43299", + "right": "issue:43867", "accept": false, - "reason": "CLIP attentions missing and Qwen3.5 checkpoint corruption are unrelated regressions." 
+ "reason": "Qwen3VL-Moe loading regression and sorted-state_dict load error are different model-loading problems." + }, + { + "left": "issue:43901", + "right": "issue:44655", + "accept": false, + "reason": "Docs mismatch for return_all_scores is unrelated to Pipeline save_pretrained behavior." } ] }, @@ -12463,50 +12249,51 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:28282", "issue:33453", + "issue:36010", "issue:36296", "issue:36683", "issue:41669", - "issue:42548", + "issue:42757", "issue:42890", + "issue:42994", "issue:43010", "issue:43064", - "issue:43065", - "issue:43232", + "issue:43066", "issue:43352", - "issue:43502", + "issue:43475", + "issue:43525", "issue:43575", "issue:43618", - "issue:43644", - "issue:43723", + "issue:43756", "issue:43867", "issue:43931", + "issue:43937", "issue:43976", "issue:43992", - "issue:44062", - "issue:44075", "issue:44077", "issue:44112", - "issue:44222", + "issue:44188", + "issue:44220", "issue:44242", "issue:44246", - "issue:44351", + "issue:44263", "issue:44360", - "issue:44387", + "issue:44380", "issue:44484", - "issue:44556", + "issue:44485", + "issue:44492", + "issue:44655", "issue:44857", "issue:44938", "issue:44964", - "issue:44977", "issue:45071", - "issue:45125", + "issue:45216", "issue:45278", "issue:45310", - "issue:45341", "issue:45357", "issue:45375", "issue:45381", @@ -12514,8 +12301,24 @@ "issue:45478" ], "soft_pairs": [ - "issue:43992|issue:45310", + "issue:43756|issue:44485", + "issue:42994|issue:44655", + "issue:36683|issue:43475", + "issue:43867|issue:45216", + "issue:44220|issue:44655", + "issue:43066|issue:45381", + "issue:43352|issue:43937", + "issue:44077|issue:44380", + "issue:36683|issue:44077", + "issue:44263|issue:45440", + "issue:43064|issue:44492", + "issue:43976|issue:44938", + "issue:36683|issue:43525", + "issue:36010|issue:42757", + "issue:44188|issue:44380", + 
"issue:43618|issue:45216", "issue:36683|issue:44242", + "issue:43992|issue:45310", "issue:43010|issue:45278", "issue:36683|issue:43352", "issue:43352|issue:43931", @@ -12529,261 +12332,270 @@ "issue:44938|issue:45381", "issue:44360|issue:45440", "issue:43618|issue:44077", - "issue:43644|issue:44938", "issue:36296|issue:43575", "issue:43867|issue:44484", "issue:43618|issue:44857", - "issue:28282|issue:33453", "issue:42890|issue:44964", - "issue:43618|issue:44977", - "issue:42890|issue:44112", - "issue:43502|issue:44062", - "issue:43010|issue:44075", - "issue:42890|issue:45341", - "issue:43723|issue:44556", - "issue:44387|issue:44556", - "issue:43010|issue:43232", - "issue:44222|issue:45125", - "issue:43010|issue:43065", - "issue:42548|issue:44351", - "issue:45125|issue:45478" + "issue:28282|issue:33453", + "issue:42890|issue:44112" ], - "prepared_review_unit_hash": "1bc4c5d15383a9a3f7f5aa2643da31044e373394cd7cdf0701db05dcf8c8cac4", + "prepared_review_unit_hash": "aa39ae4710c11f5f1aafc12c005651cef821deefac86a2c357340618d99c93ca", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { - "node_count": 47, - "item_count": 47, - "soft_pair_count": 33, - "serialized_chars": 36388, - "estimated_input_tokens": 9097, - "estimated_eval_tokens": 18450 + "node_count": 48, + "item_count": 48, + "soft_pair_count": 37, + "serialized_chars": 38190, + "estimated_input_tokens": 9548, + "estimated_eval_tokens": 19352 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a grab bag of Transformers v5 regression reports, especially import/load failures and 
model-specific breakages. Most candidate pairs are only loosely related by subsystem and should not be merged; one Qwen3.5 MoE/tensor-parallel pair looks like a real duplicate.", + "summary": "These are mostly unrelated bug reports across model loading, saving, attention, config, and CI. The soft pairs share surface keywords but not the same concrete defect, so none should be merged as duplicates. No PRs were provided.", "confidence": 0.79, - "canonical_issue_reason": "issue:45278 is the broadest and most representative issue in the set: it summarizes multiple import errors after upgrading from 4.57.0 to 5.5.0, which matches the dominant theme of the cluster.", + "canonical_issue_reason": "No single true duplicate target stands out; if one issue must serve as an umbrella, issue:45278 is the broadest open report about widespread import regressions after an upgrade.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45278 is the best single representative issue because it is broad, user-facing, and captures the central upgrade/import regression pattern across the cluster.", + "best_issue_reason": "issue:45278 is the best global issue candidate only as a broad tracker for import failures, but it is not a real canonical duplicate for this set.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:43992", - "right": "issue:45310", + "left": "issue:43756", + "right": "issue:44485", + "accept": false, + "reason": "Different bugs: Smollm3 RoPE-layer mismatch vs a save_pretrained shard-size question." + }, + { + "left": "issue:42994", + "right": "issue:44655", "accept": false, - "reason": "Different failures on different models: UMT5Encoder missing embed_tokens.weight vs Qwen3.5 MoE from_pretrained error." + "reason": "Both involve saving, but one is quantized model saving and the other is Pipeline.save_pretrained; different code paths." 
}, { "left": "issue:36683", - "right": "issue:44242", + "right": "issue:43475", "accept": false, - "reason": "Unrelated bugs: Gemma3Config missing vocab_size vs MoE load-balancing loss not added." + "reason": "Missing config attribute in Gemma3 vs missing output field in SAM 3 Video; different models and failures." }, { - "left": "issue:43010", - "right": "issue:45278", + "left": "issue:43867", + "right": "issue:45216", "accept": false, - "reason": "Cache/update no_grad behavior is unrelated to broad import errors after upgrade." + "reason": "Load-time state_dict ordering error vs a save_pretrained regression; opposite operations and different root causes." }, { - "left": "issue:36683", - "right": "issue:43352", + "left": "issue:44220", + "right": "issue:44655", "accept": false, - "reason": "Different code paths and symptoms: config attribute missing vs Flash Attention support gating." + "reason": "Feature extraction bug vs pipeline saving bug; no shared concrete defect." }, { - "left": "issue:43352", - "right": "issue:43931", + "left": "issue:43066", + "right": "issue:45381", "accept": false, - "reason": "Flash-attention support error is unrelated to Qwen3-VL weight-shape mismatch." + "reason": "Tokenizer decoder type regression vs Qwen2.5-VL video position-id issue; unrelated." }, { - "left": "issue:41669", - "right": "issue:44246", + "left": "issue:43352", + "right": "issue:43937", "accept": false, - "reason": "Both mention import performance, but one is a specific import * refactor and the other is only a generic slow-import report." + "reason": "FlashAttention support check in Nemotron vs invalid GenerationConfig in GLM-5; different validation failures." }, { - "left": "issue:43867", - "right": "issue:45375", + "left": "issue:44077", + "right": "issue:44380", "accept": false, - "reason": "State-dict sorting load error is unrelated to a missing config field being dropped by strict parsing." 
+ "reason": "patchtsmixer post_init policy vs GPT2 attention scaling under SDPA/FlashAttention; different subsystems and bugs." }, { "left": "issue:36683", - "right": "issue:43064", + "right": "issue:44077", "accept": false, - "reason": "Gemma3Config vocab_size bug and FSDP2/PEFT optimizer-state bug are different problems." + "reason": "Different model/config issues: Gemma3 missing vocab_size vs patchtsmixer post_init handling." }, { - "left": "issue:43992", - "right": "issue:45478", + "left": "issue:44263", + "right": "issue:45440", "accept": false, - "reason": "Different model families and failure details; both are load errors, but not the same underlying bug." + "reason": "GlmMoeDsaIndexer torch.split issue vs DeepseekV3MoE implementation divergence; not the same defect." }, { - "left": "issue:44938", - "right": "issue:45071", + "left": "issue:43064", + "right": "issue:44492", "accept": false, - "reason": "Python 3.14 load failure is unrelated to PretrainedConfig type-checking regressions." + "reason": "FSDP2/PEFT optimizer-state bug vs a typo in cache strategies; no overlap in underlying change." }, { - "left": "issue:43867", - "right": "issue:45357", + "left": "issue:43976", + "right": "issue:44938", "accept": false, - "reason": "Sorting-related load failure is not the same as incorrect visual encoder keys in save_pretrained." + "reason": "Both are version-compatibility reports, but one is Python 3.9+ and the other Python 3.14; different compatibility breaks." }, { - "left": "issue:43010", - "right": "issue:43976", + "left": "issue:36683", + "right": "issue:43525", "accept": false, - "reason": "Generation cache update logic is unrelated to the Python-version compatibility issue." + "reason": "Both mention missing config fields, but Gemma3 vocab_size and Llama4 pad_token_id are different models and different attributes." 
}, { - "left": "issue:44938", - "right": "issue:45381", + "left": "issue:36010", + "right": "issue:42757", "accept": false, - "reason": "Python 3.14 import/load failure is unrelated to Qwen2.5-VL video position-id correctness." + "reason": "Both are import errors, but for different symbols from different modules; not the same underlying regression." }, { - "left": "issue:44360", - "right": "issue:45440", + "left": "issue:44188", + "right": "issue:44380", "accept": false, - "reason": "DSA indexer activation bug and DeepSeekV3 implementation divergence are distinct." + "reason": "Attention-related, but one is torch.compile branching divergence and the other is GPT2 scaling ignored under backend dispatch." }, { "left": "issue:43618", - "right": "issue:44077", + "right": "issue:45216", "accept": false, - "reason": "CLIP attentions regression and patchtsmixer post_init policy are unrelated." + "reason": "CLIPOutput attentions missing and Qwen3.5 save_pretrained regression are unrelated bugs." }, { - "left": "issue:43644", - "right": "issue:44938", + "left": "issue:36683", + "right": "issue:44242", "accept": false, - "reason": "Non-persistent buffer initialization issue is unrelated to Python 3.14 loading failures." + "reason": "Gemma3 config attribute error vs load-balancing loss omission; different features and code paths." }, { - "left": "issue:36296", - "right": "issue:43575", + "left": "issue:43992", + "right": "issue:45310", "accept": false, - "reason": "Tensor-parallel training bug and Qwen2-57B tp OOM are related only at a high level, not the same defect." + "reason": "UMT5Encoder missing embed_tokens weight vs Qwen3.5 MoE from_pretrained error; different loading failures." }, { - "left": "issue:43867", - "right": "issue:44484", + "left": "issue:43010", + "right": "issue:45278", "accept": false, - "reason": "Load error from sorted state_dict is unrelated to the default max_shard_size question." 
+ "reason": "Cache update no_grad decoration request vs broad import errors after upgrade; unrelated." }, { - "left": "issue:43618", - "right": "issue:44857", + "left": "issue:36683", + "right": "issue:43352", "accept": false, - "reason": "Missing attentions assignment and AMP/CUDA crash are different bugs." + "reason": "Different model-specific issues: Gemma3 config field missing vs Nemotron FlashAttention support error." }, { - "left": "issue:28282", - "right": "issue:33453", + "left": "issue:43352", + "right": "issue:43931", "accept": false, - "reason": "PyTorch missing dependency error is unrelated to tokenizer loading regression." + "reason": "Unsupported FlashAttention vs weight-shape mismatch during load; distinct bugs." }, { - "left": "issue:42890", - "right": "issue:44964", + "left": "issue:41669", + "right": "issue:44246", "accept": false, - "reason": "A flaky integration test and a model-loading failure are not the same issue." + "reason": "One is about removing import * for startup cost, the other about intermittent import slowness; related topic but not the same defect." }, { - "left": "issue:43618", - "right": "issue:44977", + "left": "issue:43867", + "right": "issue:45375", "accept": false, - "reason": "Attentions-output regression is unrelated to Qwen3.5 flash-attention generation problems." + "reason": "Sorted state_dict loading error vs missing deepstack_visual_indexes being dropped by strict config handling; different root causes." }, { - "left": "issue:42890", - "right": "issue:44112", + "left": "issue:36683", + "right": "issue:43064", "accept": false, - "reason": "Both are test-related, but they concern different tests and different causes." + "reason": "Gemma3 config attribute issue vs FSDP2 optimizer-state corruption on nonzero ranks; unrelated." 
}, { - "left": "issue:43502", - "right": "issue:44062", + "left": "issue:43992", + "right": "issue:45478", + "accept": false, + "reason": "Both are Qwen3.5 loading problems, but they concern different model variants and different failure modes." + }, + { + "left": "issue:44938", + "right": "issue:45071", + "accept": false, + "reason": "Python 3.14 import failure vs PretrainedConfig type-checking regression; not the same bug." + }, + { + "left": "issue:43867", + "right": "issue:45357", "accept": false, - "reason": "Local-files-only network leak and AddedToken argument collision are unrelated." + "reason": "One is load failure from sorted state_dict, the other is save_pretrained writing wrong visual encoder keys." }, { "left": "issue:43010", - "right": "issue:44075", + "right": "issue:43976", "accept": false, - "reason": "Generation cache no_grad behavior is unrelated to SGD argument handling." + "reason": "Cache no_grad API change vs Python version incompatibility; unrelated." }, { - "left": "issue:42890", - "right": "issue:45341", + "left": "issue:44938", + "right": "issue:45381", "accept": false, - "reason": "A flaky model integration test is not clearly the same as a generic testing_utils bug." + "reason": "Python 3.14 load failure and Qwen2.5-VL vision_position_ids bug are separate regressions." }, { - "left": "issue:43723", - "right": "issue:44556", + "left": "issue:44360", + "right": "issue:45440", "accept": false, - "reason": "Tokenizer-loading regression after v5 is not the same as reloading a checkpoint trained on v4.57." + "reason": "DSA indexer missing ReLU vs DeepseekV3MoE divergence from remote implementation; different functionality." }, { - "left": "issue:44387", - "right": "issue:44556", + "left": "issue:43618", + "right": "issue:44077", "accept": false, - "reason": "Int4 CUDA memory regression and checkpoint reload incompatibility are different issues." + "reason": "CLIPOutput attentions regression vs patchtsmixer post_init restriction; unrelated." 
}, { - "left": "issue:43010", - "right": "issue:43232", + "left": "issue:36296", + "right": "issue:43575", "accept": false, - "reason": "Cache update decoration and generation kwarg syncing are distinct generation internals." + "reason": "Both mention tensor parallelism, but one is a training correctness bug and the other is expected OOM for a very large model." }, { - "left": "issue:44222", - "right": "issue:45125", + "left": "issue:43867", + "right": "issue:44484", "accept": false, - "reason": "FP8 save_pretrained handling and missing _tp_plan for tensor parallelism are different failures." + "reason": "Load error when state_dict is sorted vs a question about max_shard_size default; no shared defect." }, { - "left": "issue:43010", - "right": "issue:43065", + "left": "issue:43618", + "right": "issue:44857", "accept": false, - "reason": "Cache no_grad behavior is unrelated to the dummy Conv2d in Sam3PixelDecoder." + "reason": "Missing attentions in CLIPOutput vs float16 AMP crash in LwDetrImageLoss; different areas entirely." }, { - "left": "issue:42548", - "right": "issue:44351", + "left": "issue:42890", + "right": "issue:44964", "accept": false, - "reason": "Both are import errors, but they are about different symbols and likely different root causes." + "reason": "Stale device override CI test vs Phi-4 multimodal load failure; unrelated." }, { - "left": "issue:45125", - "right": "issue:45478", - "accept": true, - "reason": "Both point to the same Qwen3.5 MoE tensor-parallel/from_pretrained regression; the missing _tp_plan is a plausible root cause of the later from_pretrained failure." + "left": "issue:28282", + "right": "issue:33453", + "accept": false, + "reason": "PyTorch-missing ImportError for AutoModel vs tokenizer-loading regression; not the same issue." + }, + { + "left": "issue:42890", + "right": "issue:44112", + "accept": false, + "reason": "Both mention stale device override tests, but the actual failing models and test cases are different." 
} ] }, - "evaluator_result": { - "accept": true, - "feedback": "Grounded overall. The summary stays within the packet: it correctly frames the cluster as mostly heterogeneous v5 regression reports, and issue:45278 is a reasonable broad representative. The soft-edge judgments are conservative, and the single accepted Qwen3.5 MoE/tensor-parallel pair is plausibly the same underlying bug." - }, - "evaluator_used": true, + "evaluator_result": null, + "evaluator_used": false, "retried": false, - "accepted_nontrivial_soft_edge": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -12798,7 +12610,7 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:30990", "issue:31795", @@ -12808,48 +12620,59 @@ "issue:36296", "issue:38175", "issue:38617", - "issue:41628", "issue:42548", "issue:42757", "issue:42831", "issue:42890", "issue:43010", - "issue:43012", + "issue:43065", "issue:43232", "issue:43299", "issue:43502", - "issue:43526", "issue:43576", + "issue:43618", "issue:43673", + "issue:43723", "issue:43824", "issue:43827", "issue:43874", - "issue:43901", + "issue:44062", "issue:44075", "issue:44222", "issue:44230", - "issue:44273", "issue:44315", "issue:44351", "issue:44355", "issue:44360", "issue:44368", "issue:44373", + "issue:44387", "issue:44485", "issue:44492", "issue:44556", "issue:44655", "issue:44749", "issue:44871", - "issue:44908", "issue:44961", - "issue:45003", + "issue:44977", "issue:45071", "issue:45125", + "issue:45341", "issue:45375", - "issue:45468" + "issue:45478" ], "soft_pairs": [ + "issue:43618|issue:44977", + "issue:43502|issue:44062", + "issue:43010|issue:44075", + "issue:42890|issue:45341", + "issue:43723|issue:44556", + "issue:44222|issue:45125", + "issue:44387|issue:44556", + "issue:43010|issue:43065", + "issue:43010|issue:43232", + "issue:42548|issue:44351", + "issue:45125|issue:45478", 
"issue:44355|issue:44749", "issue:45125|issue:45375", "issue:30990|issue:31795", @@ -12862,8 +12685,8 @@ "issue:43299|issue:44556", "issue:43502|issue:44961", "issue:44355|issue:44655", - "issue:44355|issue:45071", "issue:44222|issue:44230", + "issue:44355|issue:45071", "issue:42831|issue:43874", "issue:42890|issue:43232", "issue:33453|issue:34689", @@ -12874,251 +12697,246 @@ "issue:43576|issue:45071", "issue:43673|issue:44492", "issue:38617|issue:43576", - "issue:33357|issue:33453", - "issue:44961|issue:45468", - "issue:41628|issue:42548", - "issue:31795|issue:38617", - "issue:43576|issue:44351", - "issue:44908|issue:44961", - "issue:36296|issue:43901", - "issue:44485|issue:44961", - "issue:44273|issue:45003", - "issue:36296|issue:43526", - "issue:41628|issue:44355", - "issue:36296|issue:43012" + "issue:33357|issue:33453" ], - "prepared_review_unit_hash": "98379cd936f803300d8522bde86da3dc23b3ee622683dd096c7bd5ea2e4da4d6", + "prepared_review_unit_hash": "102bd00af91a52fed393764fcd2a8f11a9e55fd5e987b99f836cd37f3d1f048b", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 36, - "serialized_chars": 37907, - "estimated_input_tokens": 9477, - "estimated_eval_tokens": 19210 + "serialized_chars": 37891, + "estimated_input_tokens": 9473, + "estimated_eval_tokens": 19202 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This is a loose cluster of mostly unrelated Transformers issues, with a few broad v5 loading/import regressions but no strong duplicate pairs. 
The closest umbrella topic is v5-era load/import breakage, not a single concrete bug.", - "confidence": 0.77, - "canonical_issue_reason": "No exact duplicate set emerges. If one issue must anchor the cluster, issue 44556 is the broadest v5 reload-regression report and the best umbrella for the loading/import breakage subgroup.", + "summary": "The cluster is mostly a grab-bag of Transformers v5 regressions and unrelated model/test/docs issues. The only clear common thread is broad loading/import breakage, but none of the soft pairs look like true duplicates.", + "confidence": 0.69, + "canonical_issue_reason": "44556 is the broadest representative of the shared theme: checkpoint reload breaks after upgrading from v4.57 to v5.x. It best serves as an umbrella issue for the loading-regression subset.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 44556 is the most representative of the recurring version-upgrade loading failures in this set; it is broad enough to serve as a cluster anchor even though many other items are unrelated.", + "best_issue_reason": "44556 is the most globally representative issue in this set because it captures the main v5 migration/loading regression pattern without being overly model-specific.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:43618", + "right": "issue:44977", + "accept": false, + "reason": "Different failures: missing CLIPOutput attentions vs Qwen3.5 generation with flash-attention." + }, + { + "left": "issue:43502", + "right": "issue:44062", + "accept": false, + "reason": "Unrelated bugs: offline-mode network requests vs AddedToken constructor argument collision." + }, + { + "left": "issue:43010", + "right": "issue:44075", + "accept": false, + "reason": "Different code paths: cache update no_grad behavior vs SGD optimizer args not being used." 
+ }, + { + "left": "issue:42890", + "right": "issue:45341", + "accept": false, + "reason": "Both are test-related, but one is flaky seeds in SamHQ integration tests and the other is a bug in testing_utils.py." + }, + { + "left": "issue:43723", + "right": "issue:44556", + "accept": false, + "reason": "Both are loading regressions, but one is tokenizer loading and the other is checkpoint reload after v5 upgrade; not the same concrete bug." + }, + { + "left": "issue:44222", + "right": "issue:45125", + "accept": false, + "reason": "Different MoE/FP8 issues: save_pretrained behavior vs missing tensor-parallel plan." + }, + { + "left": "issue:44387", + "right": "issue:44556", + "accept": false, + "reason": "Int4 quantization OOM is a memory/quantization issue, not a checkpoint reload regression." + }, + { + "left": "issue:43010", + "right": "issue:43065", + "accept": false, + "reason": "Cache update semantics and a dummy Conv2d in Sam3PixelDecoder are unrelated." + }, + { + "left": "issue:43010", + "right": "issue:43232", + "accept": false, + "reason": "Both touch generation utilities, but the specific bugs are different: no_grad on cache update vs sync_gpus kwargs handling." + }, + { + "left": "issue:42548", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but for different symbols and modules." + }, + { + "left": "issue:45125", + "right": "issue:45478", + "accept": false, + "reason": "Same model family, but one is missing _tp_plan and the other is a broader from_pretrained failure; not the same bug." + }, { "left": "issue:44355", "right": "issue:44749", "accept": false, - "reason": "Different problems: compiled Python execution errors vs slowdown after upgrading and filtering data." + "reason": "Compiled-file errors and a Chinese-described performance slowdown are unrelated." 
}, { "left": "issue:45125", "right": "issue:45375", "accept": false, - "reason": "Both are Qwen3.5-Moe config issues, but they hit different missing fields and different code paths." + "reason": "Both are Qwen3.5 MoE-related, but one is tensor-parallel metadata and the other is a missing config field dropped by strict validation." }, { "left": "issue:30990", "right": "issue:31795", "accept": false, - "reason": "SentenceTransformers loading hang and a documentation confusion issue are unrelated." + "reason": "Sentence Transformers loading stall and a forward-docstring issue are unrelated." }, { "left": "issue:43502", "right": "issue:44485", "accept": false, - "reason": "Local-files-only network leakage vs GLM-5 RoPE implementation are different bugs." + "reason": "Offline network requests and GLM-5 RoPE implementation are different subsystems and symptoms." }, { "left": "issue:44075", "right": "issue:44871", "accept": false, - "reason": "SGD argument handling and eos_token_id config mismatch are unrelated." + "reason": "Optimizer argument handling and Gemma eos_token_id config mismatch are unrelated." }, { "left": "issue:43010", "right": "issue:44368", "accept": false, - "reason": "Cache update no_grad decoration and tie_word_embeddings warning are different issues." + "reason": "Cache update decoration and tie_word_embeddings warning are different issues." }, { "left": "issue:44315", "right": "issue:44360", "accept": false, - "reason": "Liger Kernel application with model_init is unrelated to the DSA indexer ReLU discussion." + "reason": "Liger Kernel application in model_init and DSA indexer ReLU are unrelated." }, { "left": "issue:42890", "right": "issue:44373", "accept": false, - "reason": "Flaky tests from missing set_seed and a position_ids docstring typo are not the same bug." + "reason": "Test seed flakiness and a wrong docstring are not the same defect." 
}, { "left": "issue:38175", "right": "issue:43502", "accept": false, - "reason": "Unexpected zero probabilities in SigLIP2 is unrelated to local_files_only network requests." + "reason": "Unexpected zero probabilities in SigLIP2 is unrelated to local_files_only network access." }, { "left": "issue:43299", "right": "issue:44556", "accept": false, - "reason": "Both are loading regressions, but they affect different model families and likely different code paths." + "reason": "Both are load regressions, but for different model families and code paths." }, { "left": "issue:43502", "right": "issue:44961", "accept": false, - "reason": "Local-files-only API leakage and a meaningless titled issue are unrelated." + "reason": "Local-files-only network leakage and the unrelated 'racoon' issue do not match." }, { "left": "issue:44355", "right": "issue:44655", "accept": false, - "reason": "Compiled-file execution errors and pipeline save_pretrained are different bugs." + "reason": "Compiled Python-file errors and pipeline save_pretrained support are unrelated." }, { - "left": "issue:44355", - "right": "issue:45071", + "left": "issue:44222", + "right": "issue:44230", "accept": false, - "reason": "Compiled-file execution errors and PretrainedConfig type checking are unrelated." + "reason": "Both mention FP8/MoE, but one is save_pretrained and the other is inference support; different bugs." }, { - "left": "issue:44222", - "right": "issue:44230", + "left": "issue:44355", + "right": "issue:45071", "accept": false, - "reason": "FP8 save_pretrained on MoE and FP8 inference support requests are not the same concrete bug." + "reason": "Compiled-file runtime errors and PretrainedConfig type-checking are unrelated." }, { "left": "issue:42831", "right": "issue:43874", "accept": false, - "reason": "FineGrainedFP8 accuracy and a missing image-processor method are different problems." + "reason": "FineGrainedFP8 accuracy drift and a missing image-processor method are different defects." 
}, { "left": "issue:42890", "right": "issue:43232", "accept": false, - "reason": "Seed-related test instability and generation kwargs handling are unrelated." + "reason": "Flaky tests due to seeding and generation kwargs after sync_gpus are unrelated." }, { "left": "issue:33453", "right": "issue:34689", "accept": false, - "reason": "Both are loading regressions, but they concern different models and symptoms." + "reason": "Both are model-loading regressions, but for different models and different failure modes." }, { "left": "issue:42757", "right": "issue:43576", "accept": false, - "reason": "An import error from huggingface_hub and a broken env command are not the same concrete issue." + "reason": "ImportError for is_offline_mode and a broken transformers env command are unrelated." }, { "left": "issue:33453", "right": "issue:36296", "accept": false, - "reason": "Tokenizer loading regression and tensor-parallel training bug are unrelated." + "reason": "Tokenizer loading regression and tensor-parallel training bug are not the same issue." }, { "left": "issue:43827", "right": "issue:44961", "accept": false, - "reason": "Documentation referencing removed pipeline() and an unrelated issue title have no overlap." + "reason": "Docs still referencing pipeline() and 'racoon' are unrelated." }, { "left": "issue:43576", "right": "issue:43824", "accept": false, - "reason": "Broken env command and missing Qwen2.5-VL export are different import failures." - }, - { - "left": "issue:43576", - "right": "issue:44351", - "accept": false, - "reason": "Broken env command and missing HybridCache export are different failures." + "reason": "Broken env command and missing Qwen2_5_VLForConditionalGeneration import are different failures." }, { "left": "issue:43673", "right": "issue:44492", "accept": false, - "reason": "Chunked-prefill cache missing vs a typo in cache strategies are unrelated." 
+ "reason": "Generation cache missing in chunked_prefill and a cache-strategy typo are not the same bug." }, { "left": "issue:38617", "right": "issue:43576", "accept": false, - "reason": "Different missing imports in different modules; not the same underlying bug." + "reason": "Importing layer_type_validation and the env command failure are unrelated." }, { "left": "issue:33357", "right": "issue:33453", "accept": false, - "reason": "MacOS bus error on a CLIP model and tokenizer loading regression are too different to merge." - }, - { - "left": "issue:44961", - "right": "issue:45468", - "accept": false, - "reason": "Unrelated issue title and Gemma-4 audio positional encoding bug." - }, - { - "left": "issue:41628", - "right": "issue:42548", - "accept": false, - "reason": "Both are import errors, but they concern different exported symbols and likely different root causes." - }, - { - "left": "issue:31795", - "right": "issue:38617", - "accept": false, - "reason": "Documentation confusion and a missing import error are unrelated." - }, - { - "left": "issue:43576", - "right": "issue:44351", - "accept": false, - "reason": "Different missing symbols and no evidence of the same code path." - }, - { - "left": "issue:44908", - "right": "issue:44961", - "accept": false, - "reason": "Scheduler kwargs handling and an unrelated issue title are not duplicates." - }, - { - "left": "issue:36296", - "right": "issue:43901", - "accept": false, - "reason": "Tensor-parallel training bug and documentation about pipeline parameters are unrelated." - }, - { - "left": "issue:44485", - "right": "issue:44961", - "accept": false, - "reason": "GLM-5 RoPE implementation and an unrelated issue title are not the same bug." - }, - { - "left": "issue:44273", - "right": "issue:45003", - "accept": false, - "reason": "Lazy loading problems and unsafe sys.modules access may be adjacent, but there is not enough concrete evidence to treat them as the same bug." 
- }, - { - "left": "issue:36296", - "right": "issue:43012", - "accept": false, - "reason": "Tensor parallel training bug and bf16 compile warning are unrelated." + "reason": "Different regressions: MacOS bus error on CLIP vs tokenizer loading regression." } ] }, @@ -13140,59 +12958,71 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:31795", "issue:33453", - "issue:34689", - "issue:37428", + "issue:36296", + "issue:38617", + "issue:41628", "issue:41669", "issue:42371", + "issue:42548", "issue:42890", + "issue:43012", "issue:43352", "issue:43408", - "issue:43502", - "issue:43519", + "issue:43526", "issue:43576", - "issue:43606", "issue:43673", - "issue:43704", - "issue:43723", - "issue:43827", + "issue:43901", "issue:43976", "issue:44038", "issue:44077", "issue:44246", "issue:44261", - "issue:44263", "issue:44273", "issue:44295", "issue:44297", "issue:44315", "issue:44351", + "issue:44355", "issue:44360", "issue:44393", "issue:44485", "issue:44492", "issue:44556", "issue:44623", - "issue:44655", "issue:44704", "issue:44829", "issue:44861", "issue:44908", "issue:44945", + "issue:44961", + "issue:45003", "issue:45071", "issue:45092", "issue:45125", "issue:45230", - "issue:45310", "issue:45341", "issue:45468", - "issue:45478" + "issue:45478", + "issue:45542" ], "soft_pairs": [ + "issue:44961|issue:45468", + "issue:31795|issue:38617", + "issue:43576|issue:44351", + "issue:44908|issue:44961", + "issue:41628|issue:42548", + "issue:36296|issue:43901", + "issue:44485|issue:44961", + "issue:36296|issue:43526", + "issue:36296|issue:43012", + "issue:44273|issue:45003", + "issue:41628|issue:44355", "issue:44261|issue:44360", + "issue:44273|issue:45542", "issue:44623|issue:45230", "issue:43576|issue:44556", "issue:41669|issue:43673", @@ -13217,367 +13047,246 @@ "issue:44315|issue:44829", "issue:44393|issue:45478", "issue:44077|issue:44861", - 
"issue:31795|issue:33453", - "issue:43827|issue:44908", - "issue:41669|issue:44908", - "issue:43723|issue:44393", - "issue:44655|issue:44861", - "issue:43576|issue:44273", - "issue:44360|issue:45468", - "issue:45230|issue:45310", - "issue:44273|issue:44908", - "issue:41669|issue:43704", - "issue:43519|issue:44485", - "issue:43519|issue:44908", - "issue:31795|issue:34689", - "issue:31795|issue:37428", - "issue:44315|issue:45092", - "issue:44273|issue:45071", - "issue:43704|issue:44485", - "issue:44485|issue:45468", - "issue:42371|issue:43704", - "issue:43502|issue:43519", - "issue:43519|issue:44623", - "issue:43519|issue:43704", - "issue:44263|issue:45468", - "issue:43606|issue:45468", - "issue:44261|issue:45468", - "issue:42890|issue:43704" + "issue:31795|issue:33453" ], - "prepared_review_unit_hash": "29de62433a586a9a9e696284bee6ba78242b251b0d8d97d00d6763e590afca6d", + "prepared_review_unit_hash": "ae419bdddcfe4001e061f812d8f51e72be5ffe7a7fd5aa86252487dc5e24613b", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 51, - "serialized_chars": 42379, - "estimated_input_tokens": 10595, - "estimated_eval_tokens": 21446 + "soft_pair_count": 38, + "serialized_chars": 38384, + "estimated_input_tokens": 9596, + "estimated_eval_tokens": 19448 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a grab-bag of unrelated Transformers issues; the only loose common thread is v5-era loading/regression reports. 
Issue 44556 is the closest representative, but none of the soft pairs look like true duplicates.", - "confidence": 0.71, - "canonical_issue_reason": "44556 is the most central, concrete v5 regression report in the set: a clear checkpoint reload failure after upgrading, which best represents the broad loading-breakage thread seen here.", + "summary": "This cluster is highly heterogeneous: it mixes import/export regressions, lazy-loading/performance problems, model-specific bugs, docs issues, and unrelated training/runtime errors. None of the soft pairs look like the same concrete bug or change.", + "confidence": 0.91, + "canonical_issue_reason": "No single issue is representative; the items do not form one duplicate set and span unrelated subsystems and failure modes.", "canonical_pr_reason": null, - "best_issue_reason": "44556 is the best umbrella issue among these because it is actionable, user-facing, and broadly aligned with the other v5 loading-related reports, even though the set is not a tight duplicate cluster.", + "best_issue_reason": "No issue is a good global canonical for this cluster because the themes are too diverse and the pairwise similarities are mostly superficial.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:44261", - "right": "issue:44360", + "left": "issue:44961", + "right": "issue:45468", "accept": false, - "reason": "Different model components and failure modes: RMS norm precision/config vs DSA ReLU behavior." + "reason": "Unrelated titles: a nonsensical issue title vs a Gemma-4 audio positional-encoding bug." }, { - "left": "issue:44623", - "right": "issue:45230", + "left": "issue:31795", + "right": "issue:38617", "accept": false, - "reason": "45230 is too generic to establish the same save_pretrained file-loss bug." + "reason": "Documentation confusion about forward args vs an import error for a missing symbol." 
}, { "left": "issue:43576", - "right": "issue:44556", + "right": "issue:44351", "accept": false, - "reason": "Both mention v5 breakage, but one is an env command issue and the other is checkpoint reload incompatibility." + "reason": "Both involve v5 imports, but one is a broken env command and the other is a missing cache class export." }, { - "left": "issue:41669", - "right": "issue:43673", + "left": "issue:44908", + "right": "issue:44961", "accept": false, - "reason": "Import-time slowdown from star imports is unrelated to missing GenerationMixin cache in chunked prefill." + "reason": "Completely unrelated: scheduler kwargs vs an unrelated placeholder issue title." }, { - "left": "issue:44393", - "right": "issue:45071", + "left": "issue:41628", + "right": "issue:42548", "accept": false, - "reason": "Qwen3-VL bbox output bug is unrelated to PretrainedConfig type checking." + "reason": "Both are import errors, but for different top-level symbols and likely different missing exports." }, { - "left": "issue:44038", - "right": "issue:45125", + "left": "issue:36296", + "right": "issue:43901", "accept": false, - "reason": "Same model family, but different bugs: general Qwen3-VL-Moe breakage vs missing tensor-parallel plan." + "reason": "Tensor-parallel training bug vs pipeline docs mismatch; different subsystem and failure mode." }, { - "left": "issue:44556", - "right": "issue:45092", + "left": "issue:44485", + "right": "issue:44961", "accept": false, - "reason": "Both are upgrade/loading regressions, but they affect different model paths and different initialization mechanisms." + "reason": "RoPE implementation discussion is unrelated to the placeholder issue." }, { - "left": "issue:42371", - "right": "issue:44492", + "left": "issue:36296", + "right": "issue:43526", "accept": false, - "reason": "TF32 API behavior and a cache-strategy typo are unrelated." + "reason": "Tensor parallelism bug vs BeitImageProcessorFast label-reduction bug." 
}, { - "left": "issue:42890", - "right": "issue:44908", - "accept": false, - "reason": "A flaky integration test and an inverse_sqrt scheduler kwargs bug are unrelated." - }, - { - "left": "issue:44704", - "right": "issue:44908", - "accept": false, - "reason": "Processor kwarg forwarding and scheduler kwargs handling are different code paths." - }, - { - "left": "issue:44246", - "right": "issue:44273", - "accept": false, - "reason": "Both are vaguely import/lazy-loading related, but not clearly the same underlying defect." - }, - { - "left": "issue:43408", - "right": "issue:44315", - "accept": false, - "reason": "Model-type warning and Liger Kernel application are unrelated." - }, - { - "left": "issue:44297", - "right": "issue:45230", + "left": "issue:36296", + "right": "issue:43012", "accept": false, - "reason": "Tokenizer save metadata mismatch is not supported by the generic bug report." + "reason": "Training parallelism bug vs bfloat16 compilation warning; no shared code-path." }, { - "left": "issue:43576", - "right": "issue:44908", + "left": "issue:44273", + "right": "issue:45003", "accept": false, - "reason": "Env command regression and scheduler kwarg propagation are unrelated." + "reason": "Both mention loading/import behavior, but one is a vague lazy-loading failure and the other is a specific sys.modules access bug; not clearly the same defect." }, { - "left": "issue:43352", - "right": "issue:44393", + "left": "issue:41628", + "right": "issue:44355", "accept": false, - "reason": "Flash Attention support gating and Qwen3-VL bbox output issues are unrelated." + "reason": "Importing a symbol from transformers vs errors in compiled Python files; unrelated." }, { - "left": "issue:44908", - "right": "issue:45071", + "left": "issue:44261", + "right": "issue:44360", "accept": false, - "reason": "Scheduler kwargs bug and PretrainedConfig type checking are unrelated." + "reason": "MLA RMS norm epsilon precision issue vs a DSA indexer ReLU omission." 
}, { - "left": "issue:43976", - "right": "issue:44393", + "left": "issue:44273", + "right": "issue:45542", "accept": false, - "reason": "Python version compatibility and Qwen3-VL bounding-box output are unrelated." + "reason": "Lazy-loading problem vs undefined TF backend when only tensorboard is installed." }, { - "left": "issue:44295", + "left": "issue:44623", "right": "issue:45230", "accept": false, - "reason": "Position_ids buffer access error is not the same as a generic bug report." - }, - { - "left": "issue:41669", - "right": "issue:44351", - "accept": false, - "reason": "Import-star performance regression and HybridCache import failure are different issues." - }, - { - "left": "issue:41669", - "right": "issue:45341", - "accept": false, - "reason": "Import-time blowup and a testing_utils bug are unrelated." + "reason": "Processor save_pretrained missing files vs a generic bug report with no concrete overlap." }, { - "left": "issue:44492", - "right": "issue:44945", + "left": "issue:43576", + "right": "issue:44556", "accept": false, - "reason": "Cache-strategy typo and pipeline-parallelism output corruption are unrelated." + "reason": "v5 env-command breakage vs checkpoint reload incompatibility; similar release era, but different concrete failures." }, { "left": "issue:41669", - "right": "issue:44492", - "accept": false, - "reason": "Import-star usage and cache strategy naming are not the same bug." - }, - { - "left": "issue:44315", - "right": "issue:44829", + "right": "issue:43673", "accept": false, - "reason": "Model_init not applying Liger Kernel is unrelated to flash_attention_3 training collapse." + "reason": "Import-time slowdown from model imports vs missing GenerationMixin cache in chunked prefill." }, { "left": "issue:44393", - "right": "issue:45478", + "right": "issue:45071", "accept": false, - "reason": "Different Qwen issues with different failure modes and code paths." 
+ "reason": "Qwen3-VL bbox output issue vs PretrainedConfig type-checking regression; different code paths." }, { - "left": "issue:44077", - "right": "issue:44861", + "left": "issue:44038", + "right": "issue:45125", "accept": false, - "reason": "Optional post_init policy and tied-weights key handling are different problems." + "reason": "Both mention Qwen3-Moe, but one is a general v5/Qwen3-VL-Moe bug and the other is a missing tensor-parallel plan for Qwen3_5Moe." }, { - "left": "issue:31795", - "right": "issue:33453", + "left": "issue:44556", + "right": "issue:45092", "accept": false, - "reason": "Documentation confusion is unrelated to tokenizer loading regression." + "reason": "Both are checkpoint/loading compatibility issues, but one is generic version upgrade reload failure and the other is remote-code/meta-init incompatibility; too broad to call duplicates." }, { - "left": "issue:43827", - "right": "issue:44908", + "left": "issue:42371", + "right": "issue:44492", "accept": false, - "reason": "Pipeline docs references and inverse_sqrt scheduler kwargs are unrelated." + "reason": "TF32 API guidance vs a typo in cache strategies." }, { - "left": "issue:41669", + "left": "issue:42890", "right": "issue:44908", "accept": false, - "reason": "Import performance regression and scheduler configuration bug are unrelated." - }, - { - "left": "issue:43723", - "right": "issue:44393", - "accept": false, - "reason": "Tokenizer loading in v5 and Qwen3-VL bbox output are unrelated." + "reason": "Test flakiness from missing set_seed vs scheduler kwargs handling." }, { - "left": "issue:44655", - "right": "issue:44861", + "left": "issue:44704", + "right": "issue:44908", "accept": false, - "reason": "Saving Pipeline objects and tied-weight key crashes are unrelated." + "reason": "AutoProcessor cached_file kwargs bug vs scheduler kwargs bug; different APIs and behavior." 
}, { - "left": "issue:43576", + "left": "issue:44246", "right": "issue:44273", "accept": false, - "reason": "Env command breakage and lazy-loading failure are not the same issue." - }, - { - "left": "issue:44360", - "right": "issue:45468", - "accept": false, - "reason": "DSA ReLU discussion and Gemma4AudioRelPositionalEncoding are unrelated." - }, - { - "left": "issue:45230", - "right": "issue:45310", - "accept": false, - "reason": "Generic bug report does not establish the same Qwen3.5-Moe from_pretrained regression." + "reason": "Import performance complaint vs lazy-loading failure; related theme, not the same bug." }, { - "left": "issue:44273", - "right": "issue:44908", + "left": "issue:43408", + "right": "issue:44315", "accept": false, - "reason": "Lazy loading and scheduler kwargs are unrelated." + "reason": "Model-type warning in SAM3 vs Liger Kernel not applied with model_init." }, { - "left": "issue:41669", - "right": "issue:43704", + "left": "issue:44297", + "right": "issue:45230", "accept": false, - "reason": "Import-time regression and VRAM leak in dataloader threads are unrelated." + "reason": "Tokenizer save_pretrained mismatch vs generic bug report." }, { - "left": "issue:43519", - "right": "issue:44485", + "left": "issue:43352", + "right": "issue:44393", "accept": false, - "reason": "Timestamp calculation in Qwen3VL is unrelated to GLM-5 RoPE implementation." + "reason": "Flash Attention 2 unsupported in Nemotron vs Qwen3-VL bbox hallucination/error." }, { - "left": "issue:31795", - "right": "issue:34689", + "left": "issue:43976", + "right": "issue:44393", "accept": false, - "reason": "Documentation confusion and Llama 3.2 model loading failure are unrelated." + "reason": "Python version compatibility issue vs a vision-language output bug." }, { - "left": "issue:31795", - "right": "issue:37428", + "left": "issue:44295", + "right": "issue:45230", "accept": false, - "reason": "Documentation issue and flash attention import error are unrelated." 
+ "reason": "position_ids buffer access error vs generic bug report." }, { "left": "issue:44315", - "right": "issue:45092", - "accept": false, - "reason": "Both touch model creation/loading, but they are different bugs with different root causes." - }, - { - "left": "issue:44273", - "right": "issue:45071", - "accept": false, - "reason": "Lazy loading issue and PretrainedConfig type checking are unrelated." - }, - { - "left": "issue:43704", - "right": "issue:44485", - "accept": false, - "reason": "VRAM leak in threaded dataloading and GLM-5 RoPE bug are unrelated." - }, - { - "left": "issue:42371", - "right": "issue:43704", - "accept": false, - "reason": "TF32 behavior and VRAM leak are unrelated." - }, - { - "left": "issue:43502", - "right": "issue:43519", + "right": "issue:44829", "accept": false, - "reason": "Offline-loading API requests and timestamp math in Qwen3VL are unrelated." + "reason": "Liger Kernel application problem vs degenerate training with flash_attention_3; distinct training issues." }, { - "left": "issue:31795", - "right": "issue:34689", + "left": "issue:44351", + "right": "issue:44861", "accept": false, - "reason": "Documentation confusion and model loading regression are unrelated." + "reason": "Missing HybridCache export vs tied-weights key AttributeError." }, { - "left": "issue:31795", - "right": "issue:37428", + "left": "issue:44360", + "right": "issue:45003", "accept": false, - "reason": "Documentation confusion and flash-attention import failure are unrelated." + "reason": "DSA indexer implementation issue vs sys.modules access in modeling_utils." }, { - "left": "issue:44315", - "right": "issue:45092", + "left": "issue:44945", + "right": "issue:44961", "accept": false, - "reason": "Model_init/Liger kernel behavior is unrelated to remote-code checkpoint meta-init incompatibility." + "reason": "Pipeline parallelism output corruption vs placeholder issue title." 
}, { - "left": "issue:44273", + "left": "issue:45003", "right": "issue:45071", "accept": false, - "reason": "Lazy loading and type checking are unrelated." - }, - { - "left": "issue:43704", - "right": "issue:44485", - "accept": false, - "reason": "Different subsystems and failure modes." + "reason": "Unsafe sys.modules access vs PretrainedConfig typing regression." }, { - "left": "issue:44485", - "right": "issue:45468", - "accept": false, - "reason": "GLM-5 RoPE implementation and Gemma-4 audio positional encoding are unrelated." - }, - { - "left": "issue:42371", - "right": "issue:43704", + "left": "issue:41669", + "right": "issue:44492", "accept": false, - "reason": "TF32 controls and VRAM leakage are unrelated." + "reason": "Import-star performance regression vs a typo in cache strategy docs/code." }, { - "left": "issue:43502", - "right": "issue:43519", + "left": "issue:31795", + "right": "issue:33453", "accept": false, - "reason": "Local-files-only network calls and timestamp calculation are unrelated." + "reason": "Docs confusion in model.forward vs tokenizer loading regression; no shared bug." 
} ] }, @@ -13599,32 +13308,79 @@ "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", - "cluster_id": "cluster-15354-362", + "cluster_id": "cluster-15354-364", "nodes": [ "issue:31515", "issue:31795", "issue:33453", + "issue:34689", "issue:36296", "issue:36683", + "issue:37428", "issue:38617", + "issue:41669", + "issue:42371", + "issue:42890", "issue:42913", "issue:43066", "issue:43298", + "issue:43502", + "issue:43519", "issue:43525", "issue:43572", + "issue:43576", + "issue:43606", "issue:43618", + "issue:43704", + "issue:43723", + "issue:43827", "issue:44230", + "issue:44261", + "issue:44263", + "issue:44273", + "issue:44315", + "issue:44360", + "issue:44393", + "issue:44485", "issue:44492", "issue:44556", "issue:44623", + "issue:44655", "issue:44861", "issue:44908", "issue:44998", + "issue:45092", + "issue:45230", "issue:45290", "issue:45310", - "issue:45341" + "issue:45341", + "issue:45468" ], "soft_pairs": [ + "issue:43827|issue:44908", + "issue:41669|issue:44908", + "issue:43723|issue:44393", + "issue:44655|issue:44861", + "issue:43576|issue:44273", + "issue:44360|issue:45468", + "issue:45230|issue:45310", + "issue:44273|issue:44908", + "issue:41669|issue:43704", + "issue:43519|issue:44485", + "issue:43519|issue:44908", + "issue:31795|issue:34689", + "issue:31795|issue:37428", + "issue:44315|issue:45092", + "issue:43704|issue:44485", + "issue:44485|issue:45468", + "issue:42371|issue:43704", + "issue:43502|issue:43519", + "issue:43519|issue:44623", + "issue:43519|issue:43704", + "issue:44263|issue:45468", + "issue:44261|issue:45468", + "issue:43606|issue:45468", + "issue:42890|issue:43704", "issue:42913|issue:43066", "issue:44908|issue:44998", "issue:44861|issue:45290", @@ -13643,136 +13399,268 @@ "issue:31515|issue:36683", "issue:43298|issue:44230" ], - "prepared_review_unit_hash": "7f84563d201133250993ddd8526ff070b7e178d53fc9c47800acfb5e55252eb8", + "prepared_review_unit_hash": 
"91b8a4b00e517b7f2d11a8fcd7dd8383ff091f008d7a742f64cbed4a58c7a950", "component_budget": { - "node_count": 362, - "item_count": 362, - "soft_pair_count": 1352, - "serialized_chars": 697653, - "estimated_input_tokens": 174414, - "estimated_eval_tokens": 349084 + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 }, "budget": { - "node_count": 22, - "item_count": 22, - "soft_pair_count": 17, - "serialized_chars": 17371, - "estimated_input_tokens": 4343, - "estimated_eval_tokens": 8942 + "node_count": 45, + "item_count": 45, + "soft_pair_count": 41, + "serialized_chars": 37507, + "estimated_input_tokens": 9377, + "estimated_eval_tokens": 19010 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is heterogeneous and mostly not duplicate-worthy: it mixes checkpoint/tokenizer loading regressions, config attribute errors, scheduler bugs, chat-template crashes, docs, and unrelated issues. No soft pair looks like the same concrete bug.", - "confidence": 0.78, - "canonical_issue_reason": null, + "summary": "This cluster is mostly a noisy mix of Transformers v5 loading/tokenizer/config regressions and several unrelated model-specific bugs. There is no strong exact-duplicate pair; the best anchor is a broad checkpoint reload compatibility issue, but confidence is low-moderate.", + "confidence": 0.43, + "canonical_issue_reason": "Issue 44556 is the broadest, clearest representative of the main cluster theme: checkpoint reload incompatibility after upgrading to v5. 
It is more general than the model-specific loading reports and closer to a canonical tracker issue.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 44556 is the clearest, most concrete representative of the loading/checkpoint-regression subset: it describes a reproducible failure reloading v4.57 checkpoints after upgrading to v5.2/v5.3. It is the best anchor if this cluster is grouped around load-time regressions.", + "best_issue_reason": "Issue 44556 is the best single issue to anchor this set because it captures the overarching v4-to-v5 loading regression theme that recurs across several nearby reports, even though the cluster itself is heterogeneous.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:43827", + "right": "issue:44908", + "accept": false, + "reason": "Different subsystems: pipeline-removal docs issue vs scheduler keyword handling." + }, + { + "left": "issue:41669", + "right": "issue:44908", + "accept": false, + "reason": "Import-star performance/cleanup issue is unrelated to inverse_sqrt scheduler kwargs." + }, + { + "left": "issue:43723", + "right": "issue:44393", + "accept": false, + "reason": "Tokenizer loading regression and Qwen3-VL bbox output bug are different code paths." + }, + { + "left": "issue:44655", + "right": "issue:44861", + "accept": false, + "reason": "Saving Pipeline objects and tied-weight-key crash are separate save-path bugs." + }, + { + "left": "issue:43576", + "right": "issue:44273", + "accept": false, + "reason": "Broken env command and lazy loading failure are different features and failure modes." + }, + { + "left": "issue:44360", + "right": "issue:45468", + "accept": false, + "reason": "Unrelated model internals: DSA indexer ReLU vs Gemma audio positional encoding." + }, + { + "left": "issue:45230", + "right": "issue:45310", + "accept": false, + "reason": "Generic bug report vs concrete Qwen3.5 MoE from_pretrained error; not the same bug." 
+ }, + { + "left": "issue:44273", + "right": "issue:44908", + "accept": false, + "reason": "Lazy loading problem is unrelated to lr_scheduler_kwargs handling." + }, + { + "left": "issue:41669", + "right": "issue:43704", + "accept": false, + "reason": "Model import cleanup and VRAM leak in dataloader threads are unrelated." + }, + { + "left": "issue:43519", + "right": "issue:44485", + "accept": false, + "reason": "Timestamp calculation in Qwen3VL and GLM-5 RoPE implementation are different model-specific bugs." + }, + { + "left": "issue:43519", + "right": "issue:44908", + "accept": false, + "reason": "Timestamp calculation and scheduler kwargs are unrelated." + }, + { + "left": "issue:31795", + "right": "issue:34689", + "accept": false, + "reason": "Doc confusion in forward args does not match a model-loading regression." + }, + { + "left": "issue:31795", + "right": "issue:37428", + "accept": false, + "reason": "Documentation issue and flash-attention import error are unrelated." + }, + { + "left": "issue:44315", + "right": "issue:45092", + "accept": false, + "reason": "Both involve model creation/loading, but Liger kernel application and remote-code meta-init incompatibility are distinct code paths." + }, + { + "left": "issue:43704", + "right": "issue:44485", + "accept": false, + "reason": "VRAM leak in multi-threaded inference is unrelated to RoPE implementation." + }, + { + "left": "issue:44485", + "right": "issue:45468", + "accept": false, + "reason": "Both mention positional-style internals, but they are different models and different bugs." + }, + { + "left": "issue:42371", + "right": "issue:43704", + "accept": false, + "reason": "TF32 API guidance and VRAM leak are unrelated." + }, + { + "left": "issue:43502", + "right": "issue:43519", + "accept": false, + "reason": "Local-files-only network leakage and timestamp math are unrelated." 
+ }, + { + "left": "issue:43519", + "right": "issue:44623", + "accept": false, + "reason": "Timestamp calculation bug and processor.save_pretrained missing files are different features." + }, + { + "left": "issue:43519", + "right": "issue:43704", + "accept": false, + "reason": "Different model bugs: timestamp math vs VRAM leak." + }, + { + "left": "issue:44263", + "right": "issue:45468", + "accept": false, + "reason": "torch.split return handling and Gemma audio encoding are unrelated." + }, + { + "left": "issue:44261", + "right": "issue:45468", + "accept": false, + "reason": "Missing rms_norm_eps in MLA and Gemma audio positional encoding are different implementation bugs." + }, + { + "left": "issue:43606", + "right": "issue:45468", + "accept": false, + "reason": "CPU offload device mismatch is unrelated to Gemma audio positional encoding." + }, + { + "left": "issue:42890", + "right": "issue:43704", + "accept": false, + "reason": "Flaky integration test seeding and VRAM leak are unrelated." + }, { "left": "issue:42913", "right": "issue:43066", "accept": false, - "reason": "Both are tokenizer v5 behavior regressions, but one is a broad behavior difference and the other is a specific decoder-type mismatch; they do not read like the same concrete bug." + "reason": "Both are tokenizer-related v5 reports, but they describe different symptoms and likely different fixes." }, { "left": "issue:44908", "right": "issue:44998", "accept": false, - "reason": "Scheduler LR kwargs bug versus an unrelated 'Unemployment' issue; no substantive overlap." + "reason": "Scheduler kwargs bug and unrelated placeholder issue." }, { "left": "issue:44861", "right": "issue:45290", "accept": false, - "reason": "Different code paths: tied-weights key handling crash versus chat template tokenization with tool calls." + "reason": "Tied-weight-key crash and chat template tool-call crash are distinct code paths." 
}, { "left": "issue:44998", "right": "issue:45341", "accept": false, - "reason": "No relation; one is unrelated/spam-like, the other is a testing_utils bug." + "reason": "Placeholder unemployment issue is unrelated to testing_utils." }, { "left": "issue:44492", "right": "issue:44998", "accept": false, - "reason": "A typo in cache strategy docs versus an unrelated issue title." + "reason": "Cache-strategy typo and unrelated placeholder issue." }, { "left": "issue:43618", "right": "issue:44998", "accept": false, - "reason": "CLIP output attentions regression is unrelated to the other item." + "reason": "CLIPOutput attentions regression is unrelated to the placeholder issue." }, { "left": "issue:44556", "right": "issue:44998", "accept": false, - "reason": "Checkpoint reload regression versus an unrelated issue; not the same bug." - }, - { - "left": "issue:43525", - "right": "issue:44861", - "accept": false, - "reason": "Missing pad_token_id in Llama4Config is unrelated to _get_tied_weight_keys crashing on a list." + "reason": "Checkpoint reload regression and unrelated placeholder issue." }, { - "left": "issue:43572", - "right": "issue:44861", + "left": "issue:44230", + "right": "issue:44623", "accept": false, - "reason": "StableLmConfig pad_token_idx regression and tied-weight key crash are different problems." + "reason": "FP8 inference support request and processor.save_pretrained missing files are unrelated." }, { "left": "issue:31515", "right": "issue:38617", "accept": false, - "reason": "Checkpoint loading slowness and an ImportError from configuration_utils are different failure modes and code paths." + "reason": "Checkpoint loading slowness and missing layer_type_validation import are different failures." }, { "left": "issue:44230", - "right": "issue:44623", + "right": "issue:45310", "accept": false, - "reason": "FP8 audio/VL inference support versus processor.save_pretrained missing files; not the same underlying bug." 
+ "reason": "FP8 support request is unrelated to Qwen3.5 MoE from_pretrained error." }, { "left": "issue:31515", "right": "issue:36296", "accept": false, - "reason": "Slow from_pretrained checkpoint loading is unrelated to tensor-parallel training." + "reason": "Slow from_pretrained loading and tensor-parallel training bug are unrelated." }, { "left": "issue:31515", "right": "issue:31795", "accept": false, - "reason": "Loading performance issue versus documentation confusion in model.forward." - }, - { - "left": "issue:44230", - "right": "issue:45310", - "accept": false, - "reason": "Both mention Qwen and from_pretrained/inference, but one is FP8 support and the other is a concrete from_pretrained error; too different to merge as one bug." + "reason": "Checkpoint load performance and forward-argument docs confusion are unrelated." }, { "left": "issue:31515", "right": "issue:33453", "accept": false, - "reason": "Checkpoint loading slowness and tokenizer loading regression are adjacent but distinct bugs." + "reason": "Slow loading and tokenizer-loading regression are different symptoms, not the same bug." }, { "left": "issue:31515", "right": "issue:36683", "accept": false, - "reason": "Loading slowdown versus Gemma3Config missing vocab_size; different root causes and symptoms." + "reason": "Slow checkpoint loading and Gemma3Config missing vocab_size are unrelated." }, { "left": "issue:43298", "right": "issue:44230", "accept": false, - "reason": "Continuous batching audio-model support and FP8 Qwen inference support are separate feature/support issues, not the same bug." + "reason": "Audio-model batching support and FP8 inference support are separate requests." 
} ] }, @@ -13802,12 +13690,12 @@ "soft_pairs": [ "issue:26350|issue:33260" ], - "prepared_review_unit_hash": "b261b980791df93c5240915135806a4ed4cac2be50ace80618b515c280a7e73d", + "prepared_review_unit_hash": "614238314873a4bb7aa5a575268b45ebd5ff33c459a8a435a18facfb0a911fd9", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 1590, + "serialized_chars": 1589, "estimated_input_tokens": 398, "estimated_eval_tokens": 1052 }, @@ -13815,7 +13703,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 1590, + "serialized_chars": 1589, "estimated_input_tokens": 398, "estimated_eval_tokens": 1052 }, @@ -13824,18 +13712,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are two separate community contribution requests for different features: one for Flash Attention 2 support and one for GGUF support. They share a similar template, but not the same underlying change or bug.", - "confidence": 0.96, - "canonical_issue_reason": "No canonical issue: the two issues request different capabilities in different code paths.", + "summary": "These are both community-contribution issues, but they target different features (Flash Attention 2 vs GGUF) and different code paths, so they should not be deduplicated.", + "confidence": 0.97, + "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "Neither issue is a duplicate of the other; both should remain separate feature-tracking issues.", + "best_issue_reason": "No single issue is a canonical representative here; the two issues are unrelated feature requests despite similar wording.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:26350", "right": "issue:33260", "accept": false, - "reason": "Same broad template, but different requested features and likely different implementation areas; not the same underlying change." 
+ "reason": "Different requested features with different underlying implementations; the shared phrasing about supporting more architectures is too generic to imply the same bug/change." } ] }, @@ -13872,12 +13760,12 @@ "issue:43834|issue:44365", "issue:44365|issue:44370" ], - "prepared_review_unit_hash": "a197ca558079d48b1d79a161254bffa075baa3dd91ec0d4bbb112254b7ebe100", + "prepared_review_unit_hash": "adc76524be98365e6a2c4a601a5dc3947fc7c5ea63d96672bffec0a5377b18cf", "component_budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 4454, + "serialized_chars": 4455, "estimated_input_tokens": 1114, "estimated_eval_tokens": 2484 }, @@ -13885,7 +13773,7 @@ "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 4454, + "serialized_chars": 4455, "estimated_input_tokens": 1114, "estimated_eval_tokens": 2484 }, @@ -13894,55 +13782,58 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "This cluster is mostly about docs translation requests, but only one issue is concrete (Turkish). The others use a generic placeholder title and are not clearly the same underlying request, so I would not merge them as duplicates based on the available evidence.", - "confidence": 0.67, - "canonical_issue_reason": "issue:27088 is the only specific, non-templated request ([i18n-TR] Translating docs to Turkish). It is the clearest representative of this cluster from the available metadata.", + "summary": "The cluster is dominated by near-duplicate i18n translation-request issues. 
The three placeholder-titled issues look effectively identical; the Turkish-specific issue is related thematically but not the same concrete request.", + "confidence": 0.91, + "canonical_issue_reason": "Issue #43834 is the earliest of the near-identical placeholder issues, with the same title/body pattern as the others, so it is the cleanest representative of the duplicate set.", "canonical_pr_reason": null, - "best_issue_reason": "issue:27088 is the most actionable and concrete issue; the others look like generic/template placeholders rather than a clearly identified duplicate request.", + "best_issue_reason": "#43834 is the best global issue candidate because it most clearly represents the repeated template issue and is earlier than the other near-identical copies.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:27088", "right": "issue:44370", "accept": false, - "reason": "Both are translation-request issues, but one is specifically Turkish while the other is only a generic placeholder title; not enough evidence they are the same request." + "reason": "Turkish-specific translation request vs a generic placeholder translation issue; same theme, but not the same concrete request." }, { "left": "issue:27088", "right": "issue:44365", "accept": false, - "reason": "The titles are not concrete matches: Turkish vs a placeholder language template. Similar topic, but not clearly the same underlying issue." + "reason": "Different concrete target language/request: Turkish versus a generic placeholder template, so this is not a true duplicate." }, { "left": "issue:27088", "right": "issue:43834", "accept": false, - "reason": "This is only a broad similarity in the translation-docs theme; the specific language differs/ is unspecified, so I would not treat them as duplicates." + "reason": "The Turkish issue is more specific than the generic placeholder issue; they match on broad topic only, not the same underlying request." 
}, { "left": "issue:43834", "right": "issue:44370", - "accept": false, - "reason": "Both appear to be generic template issues, but there is no concrete language or target to show they are the same underlying request." + "accept": true, + "reason": "These two look like the same template issue: same placeholder title and near-identical body, with no evidence of a meaningful difference." }, { "left": "issue:43834", "right": "issue:44365", - "accept": false, - "reason": "Identical placeholder titles alone are too weak to confirm the same bug/change; could easily be separate translation requests." + "accept": true, + "reason": "Near-identical placeholder translation issues with no visible substantive distinction." }, { "left": "issue:44365", "right": "issue:44370", - "accept": false, - "reason": "Same placeholder pattern, but no concrete language or unique target is available, so duplicate status is not established." + "accept": true, + "reason": "Same placeholder title/body pattern and extremely high similarity indicate the same underlying issue." } ] }, - "evaluator_result": null, - "evaluator_used": false, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and the soft-edge verdicts are conservative. The three placeholder-titled issues are supported as near-identical by very high similarity scores and matching titles, and selecting the earliest as canonical is reasonable. The Turkish-specific issue is treated as related but not identical, which is appropriately cautious given the visible title difference and lack of direct evidence tying it to the same concrete request." 
+ }, + "evaluator_used": true, "retried": false, - "accepted_nontrivial_soft_edge": false, + "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, @@ -14000,7 +13891,7 @@ "issue:43837|issue:45027", "issue:43837|issue:45177" ], - "prepared_review_unit_hash": "0d89d398e91be4ec8a841f1d6677a9b28705ebce581c6a14ae11d8391df49c35", + "prepared_review_unit_hash": "d17088d4f578a396881eee9dc4a2740d788586f17c38678bff8400e5529ffa78", "component_budget": { "node_count": 13, "item_count": 13, @@ -14022,162 +13913,162 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "This cluster is a collection of model-request issues across different families and modalities (BERT variants, Qwen audio, OCR, vision, sparse transformers, etc.). They read as related by topic only, not as true duplicates, so I rejected all soft edges.", - "confidence": 0.33, - "canonical_issue_reason": "issue #37015 is the most generic and active model-add request in the set, so it is the best single issue to stand in for the cluster if one must be chosen.", + "summary": "These are not duplicates; they are separate model-request issues spanning different model families and even different modalities (text, audio, OCR, vision).", + "confidence": 0.98, + "canonical_issue_reason": "No single issue cleanly represents the whole cluster as a duplicate set. 
If one must be chosen as the broadest umbrella, issue 42738 is the most generic, but it still only covers a subset of the requests.", "canonical_pr_reason": null, - "best_issue_reason": "#37015 is a straightforward, broadly framed model addition request with the highest discussion activity among the issues, making it the most representative candidate here.", + "best_issue_reason": "Issue 42738 is the broadest and least specific title ('BERT-like models with RoPE'), so it is the closest to an umbrella representative, though the cluster is too heterogeneous for true deduplication.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:36817", "right": "issue:41211", "accept": false, - "reason": "EuroBert config addition and DEIMv2 are different model families with no shared code-path or concrete change." + "reason": "Different model requests: EuroBert vs DEIMv2; same broad area, but not the same underlying change." }, { "left": "issue:36817", "right": "issue:43671", "accept": false, - "reason": "A BERT config request and Qwen3-TTS support target unrelated model types and tasks." + "reason": "EuroBert config support vs Qwen3-TTS support are unrelated models and modalities." }, { "left": "issue:37015", "right": "issue:43837", "accept": false, - "reason": "NeoBERT and Qwen3-ASR are unrelated model requests; same broad 'add model' theme is not enough." + "reason": "NeoBERT and Qwen3-ASR are different model additions with different code paths." }, { "left": "issue:37015", "right": "issue:44121", "accept": false, - "reason": "NeoBERT and the weight-sparse transformer request describe different architectures and changes." + "reason": "NeoBERT vs OpenAI weight-sparse transformer are distinct architectures and requests." }, { "left": "issue:37015", "right": "issue:43909", "accept": false, - "reason": "NeoBERT and LFM2.5 Audio are different model families and modalities." + "reason": "NeoBERT and LFM2.5 Audio 1.5B are not the same model or feature." 
}, { "left": "issue:37015", "right": "issue:42503", "accept": false, - "reason": "Both are BERT-flavored model requests, but they point to different concrete model families and additions." + "reason": "NeoBERT and ModernVBERT are separate model requests; related theme, not duplicate." }, { "left": "issue:37015", "right": "issue:45027", "accept": false, - "reason": "NeoBERT and Voxtral-4B-TTS-2603 are unrelated models with different integration paths." + "reason": "NeoBERT vs Voxtral TTS support are different models and different functionality." }, { "left": "issue:41211", "right": "issue:43671", "accept": false, - "reason": "DEIMv2 and Qwen3-TTS are unrelated requests with no evidence of the same underlying fix." + "reason": "DEIMv2 and Qwen3-TTS are unrelated model additions." }, { "left": "issue:42503", "right": "issue:43837", "accept": false, - "reason": "ModernVBERT models and Qwen3-ASR are different model-support requests, not the same bug or change." + "reason": "ModernVBERT models and Qwen3-ASR support are distinct requests, not one concrete fix." }, { "left": "issue:42503", "right": "issue:43671", "accept": false, - "reason": "ModernVBERT models and Qwen3-TTS concern different model families and modalities." + "reason": "ModernVBERT vs Qwen3-TTS spans different model families and modality." }, { "left": "issue:42503", "right": "issue:42738", "accept": false, - "reason": "These are both BERT-like requests, but they are for different specific model sets and not one mergeable change." + "reason": "Both mention BERT-like ideas, but ModernVBERT models and generic RoPE BERT-like support are not the same request." }, { "left": "issue:42503", "right": "issue:45027", "accept": false, - "reason": "ModernVBERT models and Voxtral TTS support are unrelated additions." + "reason": "ModernVBERT models and Voxtral TTS support are unrelated." 
}, { "left": "issue:42738", "right": "issue:45027", "accept": false, - "reason": "BERT-like RoPE support and Voxtral TTS are entirely different model classes and code paths." + "reason": "BERT-like RoPE models vs Voxtral TTS support are different classes of model changes." }, { "left": "issue:43671", "right": "issue:43837", "accept": false, - "reason": "Qwen3-TTS and Qwen3-ASR are different tasks; similar naming is not enough to merge them." + "reason": "Both are Qwen3 proposals, but TTS and ASR are different modalities and code paths." }, { "left": "issue:43671", "right": "issue:45027", "accept": false, - "reason": "Both mention TTS, but they request support for different models and likely different integration work." + "reason": "Qwen3-TTS and Voxtral TTS are different model integrations, not one duplicate request." }, { "left": "issue:43671", "right": "issue:45177", "accept": false, - "reason": "Qwen3-TTS and DeepSeek-OCR2 are unrelated modalities and model families." + "reason": "Audio TTS support and DeepSeek-OCR2 are unrelated model requests." }, { "left": "issue:43671", "right": "issue:44121", "accept": false, - "reason": "Qwen3-TTS support and a sparse-transformer request are unrelated changes." + "reason": "Qwen3-TTS and weight-sparse transformer support are different changes." }, { "left": "issue:43671", "right": "issue:43909", "accept": false, - "reason": "Qwen3-TTS and LFM2.5 Audio are separate audio-model requests, not the same concrete change." + "reason": "Qwen3-TTS and LFM2.5 Audio are different audio models, not the same bug/change." }, { "left": "issue:43837", "right": "issue:43909", "accept": false, - "reason": "Qwen3-ASR and LFM2.5 Audio are different audio model requests with no shared underlying bug." + "reason": "Qwen3-ASR and LFM2.5 Audio are different speech-model requests." }, { "left": "issue:43837", "right": "issue:45322", "accept": false, - "reason": "Qwen3-ASR support and EUPE are unrelated model requests." 
+ "reason": "Qwen3-ASR support and EUPE encoder support are unrelated model additions." }, { "left": "issue:43837", "right": "issue:44121", "accept": false, - "reason": "Qwen3-ASR support and weight-sparse transformer support are different changes." + "reason": "ASR support and weight-sparse transformer support are not the same code-path problem." }, { "left": "issue:43909", "right": "issue:44205", "accept": false, - "reason": "LFM2.5 Audio and SAM3-LiteText are unrelated model additions." + "reason": "LFM2.5 Audio and SAM3-LiteText are different model families and modalities." }, { "left": "issue:44121", "right": "issue:45322", "accept": false, - "reason": "Weight-sparse transformer support and EUPE are unrelated requests with different implementation needs." + "reason": "Weight-sparse transformer support and EUPE encoder support are separate architecture requests." }, { "left": "issue:43837", "right": "issue:45027", "accept": false, - "reason": "Qwen3-ASR and Voxtral TTS are different models and different tasks; not the same change." + "reason": "Qwen3-ASR and Voxtral-4B-TTS-2603 are different speech-model integrations." }, { "left": "issue:43837", "right": "issue:45177", "accept": false, - "reason": "Qwen3-ASR and DeepSeek-OCR2 are unrelated model support requests." + "reason": "ASR support and DeepSeek-OCR2 are unrelated model requests." 
} ] }, @@ -14218,82 +14109,82 @@ "issue:43668|issue:44031", "issue:43668|issue:43813" ], - "prepared_review_unit_hash": "28bf702e79ed9fe3ed45f271cbda6075d21af087e7d7aace827a6e3c5c2e7e29", + "prepared_review_unit_hash": "d01e8031a2f96939f16383ae9fec1210926b19760a394a383cfb608fb6d94ffe", "component_budget": { "node_count": 6, "item_count": 6, "soft_pair_count": 8, - "serialized_chars": 6293, - "estimated_input_tokens": 1574, - "estimated_eval_tokens": 3404 + "serialized_chars": 6291, + "estimated_input_tokens": 1573, + "estimated_eval_tokens": 3402 }, "budget": { "node_count": 6, "item_count": 6, "soft_pair_count": 8, - "serialized_chars": 6293, - "estimated_input_tokens": 1574, - "estimated_eval_tokens": 3404 + "serialized_chars": 6291, + "estimated_input_tokens": 1573, + "estimated_eval_tokens": 3402 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These issues are not duplicates: they span unrelated bugs in model normalization, loss handling, config typing, a typo, and a tokenizer warning. The soft similarities are too broad to justify merging any pair.", + "summary": "These issues are only superficially related (mostly around configs, norms, losses, or tokenizer warnings), but they describe different bugs/questions in different code paths. 
I would not merge any of the soft-edge pairs as duplicates.", "confidence": 0.93, - "canonical_issue_reason": "Issue 42936 is the most concrete runtime bug report in the set and the best single anchor for the cluster, though the overall group is heterogeneous.", + "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "42936 is the strongest representative because it describes an actionable behavioral defect, while the others are mostly unrelated type/typo/API issues.", + "best_issue_reason": "None of these is a clear canonical duplicate target; if one must be chosen as the most concrete user-facing bug report, issue 44031 stands out because it describes a specific regression affecting all tokenizers after a version change.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:37934", "right": "issue:43668", "accept": false, - "reason": "Both mention normalization-related terms, but one is about Llama4TextL2Norm behavior and the other is only a ModernBERT config type hint; not the same bug." + "reason": "One asks whether Llama4TextL2Norm should actually be RMSNorm; the other reports an incorrect type hint for ModernBERTConfig.norm_eps. Different models and different defect types." }, { "left": "issue:42936", "right": "issue:43240", "accept": false, - "reason": "Both concern loss-related code paths, but one is a specific Mask2former ignore_value bug and the other is a generic kwargs-passing issue; too different to merge." + "reason": "Mask2Former ignore_value not being used is a model-specific loss-parameter bug; the other is a broader kwargs-forwarding issue in loss calculation. Related area, but not the same bug." }, { "left": "issue:42936", "right": "issue:44031", "accept": false, - "reason": "Mask2former loss handling and tokenizer regex warnings are unrelated subsystems and failure modes." + "reason": "Mask2Former loss handling and tokenizer regex warnings are unrelated code paths and symptoms." 
}, { "left": "issue:42936", "right": "issue:43813", "accept": false, - "reason": "A missing use of ignore_value is unrelated to a typo in a quantization attribute name." + "reason": "Mask2Former ignore_value behavior and a typo in a quantization attribute name are completely different issues." }, { "left": "issue:42936", "right": "issue:43668", "accept": false, - "reason": "Loss/ignore_value behavior and a config type-hint correction are different kinds of fixes in different code paths." + "reason": "Different model/components and different defect classes: unused ignore_value vs an incorrect type hint." }, { "left": "issue:43240", "right": "issue:43668", "accept": false, - "reason": "Passing kwargs to a loss function and correcting a type hint do not describe the same underlying defect." + "reason": "A generic kwargs-loss-forwarding bug is not the same as a ModernBERTConfig type annotation error." }, { "left": "issue:43668", "right": "issue:44031", "accept": false, - "reason": "A ModernBERT config annotation issue is not the same as a tokenizer regex warning regression." + "reason": "ModernBERTConfig norm_eps typing and tokenizer regex warnings affect different subsystems with different root causes." }, { "left": "issue:43668", "right": "issue:43813", "accept": false, - "reason": "These are both small metadata/typo-like reports, but they refer to different symbols and unrelated fixes." + "reason": "An incorrect type hint and a typo in a quantization operation name are unrelated." 
} ] }, @@ -14323,7 +14214,7 @@ "soft_pairs": [ "issue:41084|issue:43037" ], - "prepared_review_unit_hash": "a94916a1580e3bffce2688878597e7e941fa642afdf7ac74d4722483a3624e5f", + "prepared_review_unit_hash": "6b41eb3638e1b4e28bb5458b24ab4af0d1c99e63e0b298d439579ad33d321e1b", "component_budget": { "node_count": 2, "item_count": 2, @@ -14345,7 +14236,7 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These two issues are not duplicates: one asks for a specific block decoding change, while the other requests DeepSeek v3.2 support. They refer to different features and likely different implementation work.", + "summary": "These two issues are not duplicates: one is a generic request about block decoding, while the other is a model-specific request for DeepSeek v3.2 support.", "confidence": 0.96, "canonical_issue_reason": null, "canonical_pr_reason": null, @@ -14356,7 +14247,7 @@ "left": "issue:41084", "right": "issue:43037", "accept": false, - "reason": "Low textual overlap and different scopes: block decoding vs model/version support. No clear evidence they describe the same underlying problem or change." + "reason": "Different underlying topics: decoding behavior vs. support for a specific model/version. No indication they refer to the same bug or change." } ] }, @@ -14386,7 +14277,7 @@ "soft_pairs": [ "issue:43598|issue:43600" ], - "prepared_review_unit_hash": "55ec00d000ce6ce2ebdaad80a89f2d8f0316c8b22708dde5239428f73360ee86", + "prepared_review_unit_hash": "dc9e5582b81ded1c85adc936663f831ea11c5a88f8e424f5f149599cf7b596e7", "component_budget": { "node_count": 2, "item_count": 2, @@ -14408,18 +14299,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These two issues are both about Trainer customization, but they describe different problems: one is about `remove_unused_column` behavior, the other about FLOPS estimation for custom trainers. 
They should not be clustered as duplicates.", - "confidence": 0.93, + "summary": "These two issues are superficially related through Trainer/custom trainer behavior, but they describe different underlying problems: one is about `remove_unused_column` customizability, the other about FLOPS estimation for custom trainers. They should not be treated as duplicates.", + "confidence": 0.92, "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "No single canonical issue fits: the reports concern different Trainer code paths and different fixes, despite overlapping in the custom-trainer theme.", + "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43598", "right": "issue:43600", "accept": false, - "reason": "Different underlying issues: one targets `remove_unused_column` customizability, the other targets FLOPS estimation for custom trainers. Shared theme is too broad to treat as the same bug." + "reason": "Different bug reports with different goals: one asks to revisit `remove_unused_column` handling, the other asks to account for custom trainers in FLOPS estimation. Shared Trainer context is too broad to merge them as the same issue." 
} ] }, @@ -14449,12 +14340,12 @@ "soft_pairs": [ "issue:43684|issue:44405" ], - "prepared_review_unit_hash": "48ff572869f39e019d37b15c06d55c31dabd56b559a7769ea59d88c7ad5803ba", + "prepared_review_unit_hash": "6ab6852a2579e0cdbdbd428acca630119553107b42cbaf7989979c93280a98fc", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 1612, + "serialized_chars": 1611, "estimated_input_tokens": 403, "estimated_eval_tokens": 1062 }, @@ -14462,7 +14353,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 1612, + "serialized_chars": 1611, "estimated_input_tokens": 403, "estimated_eval_tokens": 1062 }, @@ -14471,18 +14362,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are related only at a very broad \u201cadd model support\u201d level, but they target different Qwen variants and different auto-model paths, so they are not duplicates.", - "confidence": 0.93, - "canonical_issue_reason": "No single canonical issue for the pair; #43684 is about registering Qwen3-Omni with AutoModel/AutoModelForConditionalGeneration, while #44405 is about adding AutoModelForSequenceClassification support for Qwen3.5.", + "summary": "These are related only by the Qwen naming family, but they request different capabilities: one adds Qwen3-Omni registration for AutoModel/AutoModelForConditionalGeneration, the other asks for AutoModelForSequenceClassification support for Qwen3.5. 
They do not look like the same underlying change.", + "confidence": 0.97, + "canonical_issue_reason": "No canonical issue: the issues target different model variants and different auto classes, so they are separate feature requests.", "canonical_pr_reason": null, - "best_issue_reason": "#43684 is the broader, more general issue and is the better representative of the cluster, but it should not subsume #44405 as a duplicate.", + "best_issue_reason": "Issue 43684 is the broader model-registration request and is the closest representative of the cluster, but it is not a duplicate of 44405.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43684", "right": "issue:44405", "accept": false, - "reason": "Different concrete requests: Qwen3-Omni registration for base/conditional-generation auto classes vs Qwen3.5 sequence-classification support. Same family/subsystem, but not the same underlying bug or change." + "reason": "Different Qwen variants and different target heads (conditional generation vs sequence classification); not the same underlying bug/change." 
} ] }, @@ -14519,12 +14410,12 @@ "issue:43845|issue:43859", "issue:43846|issue:43859" ], - "prepared_review_unit_hash": "7afcc1a56f2d36832f40c7c6eadf7d424c27e22ea6eb32118667003734597aad", + "prepared_review_unit_hash": "98aee86260290997919fd829372c8126987b9bca726e983070a6802d80bf190e", "component_budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 4266, + "serialized_chars": 4265, "estimated_input_tokens": 1067, "estimated_eval_tokens": 2390 }, @@ -14532,7 +14423,7 @@ "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 4266, + "serialized_chars": 4265, "estimated_input_tokens": 1067, "estimated_eval_tokens": 2390 }, @@ -14541,58 +14432,55 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Issues 43845, 43846, and 43859 all appear to be the same short \"huggingface\" report and are likely duplicates; 43835(\"Fett\") looks unrelated.", - "confidence": 0.95, - "canonical_issue_reason": "43845 is the earliest of the near-identical \"huggingface\" issues and has the same sparse shape as 43846, making it the cleanest representative of that duplicate set.", + "summary": "Three issues share the same very vague title (\u201chuggingface \u201d), but there isn\u2019t enough concrete content to confirm they are the same underlying bug. The lone outlier (\u201cFett\u201d) looks unrelated. 
No PRs are present.", + "confidence": 0.78, + "canonical_issue_reason": "issue:43845 is the best canonical issue by default: it is one of the duplicate-looking \u201chuggingface \u201d reports, is the earliest among them, and has a fuller body than 43859.", "canonical_pr_reason": null, - "best_issue_reason": "43845 is the best issue candidate because it is the earliest and most representative of the repeated \"huggingface\" reports; 43835 is unrelated.", + "best_issue_reason": "issue:43845 is the best cluster representative because it is the earliest of the near-identical huggingface-titled issues and appears at least as informative as the others.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43835", "right": "issue:43845", "accept": false, - "reason": "Different titles and no evidence of the same underlying bug; 43835 does not match the repeated \"huggingface\" reports." + "reason": "Different titles (\u201cFett\u201d vs \u201chuggingface \u201d) and no shared targets/files or other concrete evidence of the same bug." }, { "left": "issue:43835", "right": "issue:43846", "accept": false, - "reason": "Different titles and no shared problem description; no sign these are the same issue." + "reason": "Different titles and no substantive body/detail evidence tying them to the same underlying issue." }, { "left": "issue:43835", "right": "issue:43859", "accept": false, - "reason": "\"Fett\" is unrelated to the repeated \"huggingface\" issue cluster." + "reason": "Different titles and no concrete overlap beyond generic soft similarity." }, { "left": "issue:43845", "right": "issue:43846", - "accept": true, - "reason": "Same title, very similar sparse bodies, and created minutes apart; they look like duplicate reports of the same issue." + "accept": false, + "reason": "Titles match, but the available metadata is too thin to prove they describe the same specific bug rather than two separate vague reports." 
}, { "left": "issue:43845", "right": "issue:43859", - "accept": true, - "reason": "Same title and same apparent topic; 43859 is a later variant of the same \"huggingface\" report." + "accept": false, + "reason": "Same broad title, but no explicit issue target, filename, or detailed description to confirm a shared underlying problem." }, { "left": "issue:43846", "right": "issue:43859", - "accept": true, - "reason": "Same title and topic, with no distinguishing evidence; likely the same underlying issue." + "accept": false, + "reason": "Same broad title, but the cluster evidence is insufficient to safely merge them as duplicates of one concrete issue." } ] }, - "evaluator_result": { - "accept": true, - "feedback": "The summary is grounded in the packet: 43845 is the earliest of the three identically titled \"huggingface\" issues, and 43835 is clearly distinct by title. The duplicate judgments among 43845/43846/43859 are conservative enough given the matching titles, close timestamps, and soft similarity scores." 
- }, - "evaluator_used": true, + "evaluator_result": null, + "evaluator_used": false, "retried": false, - "accepted_nontrivial_soft_edge": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -14626,7 +14514,7 @@ "issue:44572|issue:44937", "issue:44573|issue:44937" ], - "prepared_review_unit_hash": "999fa09b2885dae5a49785968ea56b8726072ad45297f9cd1277b0c844cb7de4", + "prepared_review_unit_hash": "6d883e2697d2ef0dd0c36fd597ac8d3c143ba2fb4c2368c8bbb87e57341f76c3", "component_budget": { "node_count": 5, "item_count": 5, @@ -14648,66 +14536,66 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "This cluster appears to be low-signal noise/spam rather than real bug reports, so I would not deduplicate any of the issues on the available evidence.", - "confidence": 0.93, - "canonical_issue_reason": "No issue has enough substantive content to serve as a canonical duplicate target; the titles are either spam or generic and the bodies provide no actionable detail.", + "summary": "This cluster is mostly low-information/spam issues with no clear shared bug or feature request; they do not look like duplicates of one another.", + "confidence": 0.97, + "canonical_issue_reason": "Issue #44371 is a reasonable anchor only because it is one of the earliest low-information spam tickets in the set; there is no substantive duplicate signal tying the issues together.", "canonical_pr_reason": null, - "best_issue_reason": "None of the issues is a good global representative because there is no clear underlying bug or request to normalize around.", + "best_issue_reason": "No issue here is a strong canonical duplicate target; #44371 is the least-bad representative by chronology, but the cluster does not contain a meaningful underlying issue.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44371", "right": "issue:44572", "accept": false, - "reason": "Identical spam titles are not enough to conclude the 
same underlying issue; there is no body or technical evidence." + "reason": "Both are spam-like placeholders, but there is no evidence they describe the same underlying bug or change." }, { "left": "issue:44371", "right": "issue:44573", "accept": false, - "reason": "Identical spam titles alone do not establish a duplicate bug/change, and there is no supporting context." + "reason": "Same as above: identical low-quality titles are not enough to treat them as duplicates of a concrete issue." }, { "left": "issue:44371", "right": "issue:44418", "accept": false, - "reason": "One is spam and the other is a generic documentation suggestion; no evidence they describe the same underlying problem." + "reason": "One is spam-like and the other is a generic documentation suggestion; they are not the same issue." }, { "left": "issue:44371", "right": "issue:44937", "accept": false, - "reason": "The titles are unrelated and there is no shared technical context to justify deduplication." + "reason": "The titles are unrelated; one is spam and the other is a strange named-item suggestion, not a duplicate bug report." }, { "left": "issue:44418", "right": "issue:44572", "accept": false, - "reason": "Generic documentation request vs. spam title; no shared concrete issue." + "reason": "A documentation enhancement request and a spam placeholder do not indicate the same underlying problem." }, { "left": "issue:44418", "right": "issue:44573", "accept": false, - "reason": "Generic documentation request vs. spam title; insufficient evidence of the same underlying change or bug." + "reason": "Generic documentation request versus spam placeholder; no shared concrete issue." }, { "left": "issue:44572", "right": "issue:44573", "accept": false, - "reason": "Both are spam titles, but identical wording alone is not enough to safely merge as duplicates." + "reason": "Both are spam-like placeholders, but there is no concrete shared bug/change to merge them on." 
}, { "left": "issue:44572", "right": "issue:44937", "accept": false, - "reason": "Spam title vs. unrelated named-title issue; no evidence of the same underlying report." + "reason": "Unrelated titles and no shared issue target or context." }, { "left": "issue:44573", "right": "issue:44937", "accept": false, - "reason": "Spam title vs. unrelated named-title issue; no concrete shared bug/change is visible." + "reason": "Unrelated titles and no shared issue target or context." } ] }, @@ -14737,7 +14625,7 @@ "soft_pairs": [ "issue:45145|issue:45151" ], - "prepared_review_unit_hash": "9176096565fd6a897352caffd746363ffd945e056d2d14e5203389758fef8804", + "prepared_review_unit_hash": "a39b788154ed82dba4a68484e50f7f6fb4df5a6478ec99a494a29eb832b866a7", "component_budget": { "node_count": 2, "item_count": 2, @@ -14759,24 +14647,24 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "The two closed issues have the same title and similar body lengths, so they are highly similar, but the packet does not show filenames, explicit targets, or body content to confirm they are the same underlying report. 
The evidence supports cautious clustering, not a strong duplicate claim.", - "confidence": 0.79, - "canonical_issue_reason": "Issue #45145 is a reasonable canonical representative because it matches #45151 on title and has slightly more inbound reference evidence, but the duplicate call should remain conservative given the lack of content-level proof.", + "summary": "The two closed issues are highly similar and likely refer to the same proposed energy-optimization idea, but the evidence is limited to title/body similarity with no explicit linkage, so the relationship should be treated as unresolved rather than a firm duplicate.", + "confidence": 0.87, + "canonical_issue_reason": "#45145 is the better canonical issue because it has the same title and slightly stronger linkage signal (one inbound reference), making it the more established thread.", "canonical_pr_reason": null, - "best_issue_reason": "#45145 is the best issue representative in this cluster because it is one of the two matching issues and has inbound references, while no PRs are present.", + "best_issue_reason": "#45145 is the best global issue representative for the same reason: it appears to be the more established of two near-identical issue threads.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:45145", "right": "issue:45151", "accept": false, - "reason": "Same title and similar size indicate high similarity, but there is no direct evidence of identical content, targets, or filenames. That is not enough to confidently treat them as the same underlying issue." + "reason": "The issues have identical titles and similar bodies, but the packet only provides soft_similarity evidence, no explicit cross-links, and deterministic_accept is false. That supports a likely duplicate, not a confident duplicate classification, so this edge should remain unresolved or be treated as low-confidence duplicate-likely." 
} ] }, "evaluator_result": { "accept": false, - "feedback": "The packet only supports a conservative claim of high similarity: the titles are identical and body lengths are close, with no filenames or explicit targets. The analyst overstates the evidence by calling the issues \"near-identical\"/\"indistinguishable\" and implying matching content, which is not directly shown in the metadata. The soft-edge accept is therefore too strong for the evidence provided." + "feedback": "The canonical issue choice is reasonably grounded, but the soft-edge verdict is too strong for the evidence in the packet. The pair only has soft_similarity support, no explicit linkage, and deterministic_accept is false, so calling them a \"clear duplicate\" / \"almost certainly duplicates\" overstates confidence. Please downgrade to a more conservative duplicate-likelihood assessment or leave the edge unresolved." }, "evaluator_used": true, "retried": true, @@ -14890,178 +14778,172 @@ "pull_request:43823|pull_request:45489", "pull_request:43251|pull_request:43254" ], - "prepared_review_unit_hash": "b9ffa8bd9fa4e25cc596cd7c64bacd7cbba0482fa0860859c0583a57f4ed60ba", + "prepared_review_unit_hash": "483dbb8c46708baf8cb704092db786ffbce9f3fca891548ec90f9a7ef6dc3f5f", "component_budget": { "node_count": 13, "item_count": 13, "soft_pair_count": 23, - "serialized_chars": 37549, - "estimated_input_tokens": 9388, - "estimated_eval_tokens": 19032 + "serialized_chars": 37543, + "estimated_input_tokens": 9386, + "estimated_eval_tokens": 19028 }, "budget": { "node_count": 13, "item_count": 13, "soft_pair_count": 23, - "serialized_chars": 37549, - "estimated_input_tokens": 9388, - "estimated_eval_tokens": 19032 + "serialized_chars": 37543, + "estimated_input_tokens": 9386, + "estimated_eval_tokens": 19028 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Mostly unrelated pull requests with one genuine overlap: two 
fixes for `fixed_cross_entropy` in `loss_utils.py` that both expand kwargs passed to `nn.functional.cross_entropy`. All other soft-similarity pairs are different features, models, or tests and should stay separate.", - "confidence": 0.92, + "summary": "The cluster is mostly a mix of unrelated PRs spanning model additions, docs, tests, and refactors. The only clear duplicate pair is the two `fixed_cross_entropy` PRs targeting the same issue and code path; the rest should stay separate.", + "confidence": 0.97, "canonical_issue_reason": null, - "canonical_pr_reason": "`pull_request:43251` is the cleanest representative of the only true overlap: it directly fixes `fixed_cross_entropy` and has an explicit issue link, while the paired PR makes the same core change with a slightly different API shape.", + "canonical_pr_reason": "PR 43251 is the best canonical choice for the `fixed_cross_entropy` fix: it explicitly targets issue 43240, updates the same loss helper as 43254, and has the more complete/intentional implementation and discussion. The other items are unrelated to each other.", "best_issue_reason": null, - "best_pr_reason": "`pull_request:43251` is the best overall fit because it addresses a concrete bug in a single code path, has the explicit issue target, and is the strongest duplicate candidate against `pull_request:43254`.", + "best_pr_reason": "PR 43251 is the strongest overall PR in the set because it is a focused, well-scoped fix with an explicit issue link and clear behavior change. Most other PRs are unrelated feature or refactor work, or test/doc-only changes.", "soft_edge_verdicts": [ { "left": "pull_request:39895", "right": "pull_request:43424", "accept": false, - "reason": "Different domains: new VideoPrism model/docs vs. an executorch export test." + "reason": "One adds VideoPrism support; the other adds an Executorch dynamic-shape test. Different models and different code paths." 
}, { "left": "pull_request:42668", "right": "pull_request:43823", "accept": false, - "reason": "Processor robustness for existing audio model vs. adding a new MobileLLM model." + "reason": "Processor robustness changes for existing models versus adding a new MobileLLM model. Not the same bug or change." }, { "left": "pull_request:43251", "right": "pull_request:43823", "accept": false, - "reason": "Same repo area only at a very broad level; one is a loss-function fix, the other is a new model implementation." + "reason": "Loss helper kwargs fix versus MobileLLM model creation. Unrelated." }, { "left": "pull_request:43251", "right": "pull_request:44827", "accept": false, - "reason": "Loss utility kwargs fix is unrelated to Mistral4 test/integration changes." + "reason": "`fixed_cross_entropy` kwargs support is unrelated to Mistral4 test/model refactoring." }, { "left": "pull_request:43424", "right": "pull_request:44827", "accept": false, - "reason": "Executorch dynamic-shape test is unrelated to Mistral4 test fixes." + "reason": "Executorch dynamic-shape export tests and Mistral4 fixes are different features in different subsystems." }, { "left": "pull_request:43424", "right": "pull_request:45435", "accept": false, - "reason": "Torch export dynamic-shape test and Whisper tokenization bug fix are different code paths." + "reason": "Dynamic-shape export testing is unrelated to Whisper tokenizer bounds checking." }, { "left": "pull_request:43424", "right": "pull_request:43823", "accept": false, - "reason": "Exportability test vs. a new model addition; no shared underlying bug." + "reason": "Different model/export areas with no shared underlying bug." }, { "left": "pull_request:43823", "right": "pull_request:45454", "accept": false, - "reason": "MobileLLM model addition is unrelated to Gemma4 text-only training behavior." + "reason": "MobileLLM model addition versus Gemma4 training behavior fix; no shared code-path problem." 
}, { "left": "pull_request:43823", "right": "pull_request:45435", "accept": false, - "reason": "New model addition vs. Whisper tokenizer indexing fix." + "reason": "MobileLLM model creation and Whisper tokenizer indexing are unrelated." }, { "left": "pull_request:43823", "right": "pull_request:43995", "accept": false, - "reason": "New MobileLLM model vs. Falcon output-interface refactor; not the same change." + "reason": "New model addition versus Falcon output-interface refactor; different changes." }, { "left": "pull_request:43995", "right": "pull_request:45435", "accept": false, - "reason": "Falcon output-collection refactor is unrelated to Whisper tokenization." + "reason": "Falcon output-capture refactor is not the same issue as Whisper tokenization bounds handling." }, { "left": "pull_request:43995", "right": "pull_request:45213", "accept": false, - "reason": "Falcon model refactor and model-creation skill are unrelated." + "reason": "Falcon refactor and model-creation skill content are unrelated." }, { "left": "pull_request:43995", "right": "pull_request:44827", "accept": false, - "reason": "Falcon output-interface refactor is not the same bug as Mistral4 test fixes." + "reason": "Falcon output-interface refactor and Mistral4 test fixes are different code paths." }, { "left": "pull_request:44827", "right": "pull_request:45489", "accept": false, - "reason": "Mistral4 tests vs. Gemma3n/Gemma4 cache-sharing alignment are different model families and problems." + "reason": "Mistral4 fixes and Gemma3n/Gemma4 cache-sharing alignment are different model families and bugs." }, { "left": "pull_request:44827", "right": "pull_request:45170", "accept": false, - "reason": "Mistral4 test fix is unrelated to the layrnorm\u2192layernorm renaming sweep." + "reason": "Mistral4 test fixes do not match the CLIP-like `layrnorm` rename/correction PR." 
}, { "left": "pull_request:43823", "right": "pull_request:44827", "accept": false, - "reason": "New MobileLLM model and Mistral4 test fixes do not share a concrete underlying bug." + "reason": "MobileLLM model addition and Mistral4 fixes are unrelated." }, { "left": "pull_request:45213", "right": "pull_request:45435", "accept": false, - "reason": "Model-creation skill changes are unrelated to Whisper tokenizer bounds checking." + "reason": "A new-model skill PR and a Whisper tokenizer bugfix are not the same underlying change." }, { "left": "pull_request:44827", "right": "pull_request:45213", "accept": false, - "reason": "Mistral4 test fixes and the model-creation skill are unrelated; shared auto-model file is incidental." - }, - { - "left": "pull_request:43823", - "right": "pull_request:45213", - "accept": false, - "reason": "New model addition vs. workflow/skill change; no duplicate relationship." + "reason": "Mistral4 bugfixes and a model-creation skill are unrelated." }, { "left": "pull_request:44827", "right": "pull_request:45435", "accept": false, - "reason": "Mistral4 test adjustments and Whisper tokenizer fix address different code paths." + "reason": "Mistral4 test fixes and Whisper tokenizer indexing fixes are unrelated." }, { "left": "pull_request:45454", "right": "pull_request:45489", "accept": false, - "reason": "Both are Gemma-related, but one targets text-only training and the other cache-sharing alignment; not the same concrete fix." + "reason": "Both touch Gemma4-related files, but one is about training text-only samples and the other is about cache-sharing alignment; they fix different concrete problems." }, { "left": "pull_request:43823", "right": "pull_request:45489", "accept": false, - "reason": "MobileLLM model addition and Gemma cache-sharing changes are unrelated." + "reason": "MobileLLM addition and Gemma3n/Gemma4 cache-sharing alignment are unrelated." 
}, { "left": "pull_request:43251", "right": "pull_request:43254", "accept": true, - "reason": "Both modify `fixed_cross_entropy` in `loss_utils.py` to forward supported kwargs to `nn.functional.cross_entropy`; same underlying bug and mergeable into one PR." + "reason": "Both PRs fix the same `fixed_cross_entropy` helper to support additional kwargs/weight/label_smoothing for issue 43240, and they modify the same function on the same code path." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is grounded in the packet and stays conservative. The only accepted overlap, `pull_request:43251`/`pull_request:43254`, is supported by the shared file, shared issue target, and near-identical `fixed_cross_entropy` changes. The remaining soft-edge verdicts are appropriately rejected and do not overstate duplicate evidence." + "feedback": "The summary is grounded in the packet and stays conservative. The only clearly mergeable duplicate pair is 43251/43254, which both target the same `fixed_cross_entropy` helper and the same issue. The other pair rejections are consistent with the listed titles/files and do not overclaim shared bugs or mergeability." 
}, "evaluator_used": true, "retried": false, @@ -15088,7 +14970,7 @@ "soft_pairs": [ "pull_request:40546|pull_request:42781" ], - "prepared_review_unit_hash": "e494025f6923f2d95fd2ad3b8107b7b06e40a51a2684b7e37e5dd3a8fd0ea760", + "prepared_review_unit_hash": "7d5f44780d748776aece7817aedfcd212a3ef371ed53752c634e79f5fc8bc80b", "component_budget": { "node_count": 2, "item_count": 2, @@ -15110,18 +14992,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Two PRs in the same VibeVoice area, but they are not duplicates: one implements the base VibeVoice model, the other adds a realtime variant and additional tokenizer/feature/generation plumbing.", - "confidence": 0.87, + "summary": "Both PRs are about VibeVoice, but they are not the same change: one is the base model integration and the other is a later realtime extension with additional tokenizer/generation work. They overlap heavily in shared model scaffolding, but the concrete goals differ enough that they should not be merged as duplicates.", + "confidence": 0.84, "canonical_issue_reason": null, - "canonical_pr_reason": "PR #40546 is the more canonical base implementation: it is broader, non-draft, and appears to establish the main VibeVoice integration. PR #42781 is a separate follow-up for VibeVoice Realtime with extra components.", + "canonical_pr_reason": "PR #42781 is the broader, newer VibeVoice-related PR and appears to extend the base implementation with realtime support and the acoustic tokenizer, so it is the better representative of the cluster.", "best_issue_reason": null, - "best_pr_reason": "PR #40546 is the best representative PR for the cluster because it is the original full VibeVoice implementation and has substantially more review activity. 
PR #42781 is a distinct realtime extension rather than the same change.", + "best_pr_reason": "PR #42781 is the most complete and current VibeVoice PR in the set, covering the broader feature set beyond the initial implementation.", "soft_edge_verdicts": [ { "left": "pull_request:40546", "right": "pull_request:42781", "accept": false, - "reason": "They share the VibeVoice subsystem and some files, but #40546 implements the base model while #42781 adds a realtime variant plus extra tokenizer/feature and generation changes. These are related follow-ups, not the same concrete fix/change." + "reason": "40546 implements core VibeVoice support, while 42781 adds VibeVoice Realtime plus acoustic tokenizer and generation changes. They share the same model family and some files, but they are different concrete changes and not safely mergeable as one PR." } ] }, @@ -15151,7 +15033,7 @@ "soft_pairs": [ "pull_request:40755|pull_request:41224" ], - "prepared_review_unit_hash": "65b99922a053f1e44e62b91242090e2d0144fcef41eb5dd8520c3e310bf40617", + "prepared_review_unit_hash": "ac8c1b7a26caaa5d1d6cb181c5fd7df7885f8086155ce24b695016620f15f8d3", "component_budget": { "node_count": 2, "item_count": 2, @@ -15173,18 +15055,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are two unrelated open pull requests in different model families: TimesFM covariate forecasting vs. DINOv3 image classification support. They should not be clustered as duplicates.", - "confidence": 0.99, + "summary": "The two pull requests are unrelated: one adds TimesFM covariate-based forecasting, while the other adds DINOv3 image-classification support. 
They do not appear to fix the same bug or implement the same change.", + "confidence": 0.98, "canonical_issue_reason": null, - "canonical_pr_reason": null, + "canonical_pr_reason": "No canonical PR: the PRs target different model families, different code paths, and different features.", "best_issue_reason": null, - "best_pr_reason": null, + "best_pr_reason": "PR 41224 is the better standalone representative because it is a narrower, self-contained feature addition with a single clear model capability.", "soft_edge_verdicts": [ { "left": "pull_request:40755", "right": "pull_request:41224", "accept": false, - "reason": "Different codepaths, different models, and different user-facing changes; no evidence they fix the same bug or implement the same change." + "reason": "Different models and different functionality: TimesFM covariate forecasting versus DINOv3 image classification. They are not the same underlying change and should not be merged as duplicates." } ] }, @@ -15280,12 +15162,12 @@ "soft_pairs": [ "pull_request:41797|pull_request:45186" ], - "prepared_review_unit_hash": "a4c50bc535a6bb425760bc5552ef89b59af568fce0ac101bb482a6af8133afe9", + "prepared_review_unit_hash": "a03af63a220b332b5d92c9a22f4904217612bf2f7168cc89b8c71fd7603b6c74", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 6723, + "serialized_chars": 6722, "estimated_input_tokens": 1681, "estimated_eval_tokens": 3618 }, @@ -15293,7 +15175,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 6723, + "serialized_chars": 6722, "estimated_input_tokens": 1681, "estimated_eval_tokens": 3618 }, @@ -15302,18 +15184,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These two PRs are both new model integrations, but for different models and different code paths: DeepSeek OCR vs Isaac. 
They share boilerplate registration/docs changes, not the same underlying change, so they should not be deduplicated.", - "confidence": 0.97, + "summary": "These are two separate model-addition PRs (DeepSeek OCR vs Isaac) that share only generic registration/docs scaffolding, not the same underlying change.", + "confidence": 0.98, "canonical_issue_reason": null, - "canonical_pr_reason": "No true canonical duplicate here: 41797 adds DeepSeek OCR support, while 45186 adds Isaac support. The overlap is mostly framework plumbing and documentation, not the same feature or bug fix.", + "canonical_pr_reason": "No canonical PR: the diffs implement different models with different configs, processors, tests, and docs, so they are not duplicates.", "best_issue_reason": null, - "best_pr_reason": "If forced to pick one cluster anchor, 45186 is the cleaner representative because it is a newer, self-contained Isaac model addition with model-specific tests; however it is not a duplicate of 41797.", + "best_pr_reason": "Neither PR is a global duplicate representative; each is a distinct model implementation and should be triaged independently.", "soft_edge_verdicts": [ { "left": "pull_request:41797", "right": "pull_request:45186", "accept": false, - "reason": "Different model integrations and different concrete functionality. Shared auto-registration/docs/util files are generic scaffolding and do not indicate the same underlying change." + "reason": "Different model families and code paths (DeepSeek OCR vs Isaac); overlap is limited to shared Transformers registration/doc plumbing, which is not enough to treat them as the same change." 
} ] }, @@ -15343,12 +15225,12 @@ "soft_pairs": [ "pull_request:42166|pull_request:45426" ], - "prepared_review_unit_hash": "0374cab0d5106cf96ae00285746eea8ddfcd2c92328246ef1392cfd9a7afa44a", + "prepared_review_unit_hash": "4e67818c4df0b90c59cfcccedacdc9b6215e7976cc4b08f75f1d6af750cad2d5", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 5407, + "serialized_chars": 5408, "estimated_input_tokens": 1352, "estimated_eval_tokens": 2960 }, @@ -15356,7 +15238,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 5407, + "serialized_chars": 5408, "estimated_input_tokens": 1352, "estimated_eval_tokens": 2960 }, @@ -15365,18 +15247,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are two unrelated model-addition PRs: one adds InternVLFlash, the other adds AXK1. They share only common registration/auto-discovery files, which is boilerplate and not evidence of the same fix or feature.", + "summary": "Both items are pull requests adding different model integrations: InternVLFlash vs AXK1. They share only common registration/boilerplate files, not the same underlying change.", "confidence": 0.98, "canonical_issue_reason": null, - "canonical_pr_reason": null, + "canonical_pr_reason": "No canonical PR: these are unrelated feature additions for different models, so they should not be clustered as duplicates.", "best_issue_reason": null, - "best_pr_reason": null, + "best_pr_reason": "Neither PR is a good global representative of the other; if forced to pick a cluster anchor, PR #42166 has more review activity, but it is not a duplicate of #45426.", "soft_edge_verdicts": [ { "left": "pull_request:42166", "right": "pull_request:45426", "accept": false, - "reason": "Different model integrations with distinct configuration/model files and unrelated code paths; shared auto-registration edits are expected boilerplate and do not indicate duplicate PRs." 
+ "reason": "Reject: they add different models and different codepaths (InternVLFlash vs AXK1). Shared auto-registration files are generic boilerplate, not evidence of the same change." } ] }, @@ -15413,70 +15295,70 @@ "pull_request:43291|pull_request:44815", "pull_request:43665|pull_request:44815" ], - "prepared_review_unit_hash": "881717b6e9206c58b8df2496983604617e9a71538f1ac27f6bef18ad27425560", + "prepared_review_unit_hash": "128e96617927b4e100cff7e108ba48b6b7e4ecd9f70126e4ab65c49174dce506", "component_budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 10167, - "estimated_input_tokens": 2542, - "estimated_eval_tokens": 5340 + "serialized_chars": 10170, + "estimated_input_tokens": 2543, + "estimated_eval_tokens": 5342 }, "budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 10167, - "estimated_input_tokens": 2542, - "estimated_eval_tokens": 5340 + "serialized_chars": 10170, + "estimated_input_tokens": 2543, + "estimated_eval_tokens": 5342 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "This cluster is heterogeneous: one Whisper ASR pipeline fix, one Whisper tokenizer/test change, one unrelated CLIP/ViT test edit, and one FP8/dequant loading fix. They do not look like the same underlying bug or change, so no soft edges should be accepted.", - "confidence": 0.96, + "summary": "The four PRs are not duplicates of one another. Two are Whisper-related, but they fix different areas (ASR pipeline language return vs tokenizer/test behavior). 
The CLIP/ViT test PR and the FP8 dequant PR are unrelated to the others.", + "confidence": 0.95, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 44815 is the most substantive, coherent code fix in the set, with real changes in model loading and FP8 integration; the other PRs are unrelated Whisper or test-only edits.", + "canonical_pr_reason": "PR 42227 is the cleanest representative: it has an explicit issue target, a focused code-path fix in the Whisper ASR pipeline, and matching test coverage.", "best_issue_reason": null, - "best_pr_reason": "PR 44815 is the strongest standalone PR to keep as the representative artifact because it fixes a concrete runtime/code-path issue across multiple implementation files, unlike the others which are narrower or unrelated.", + "best_pr_reason": "PR 42227 is the strongest standalone PR in the set because it is a concrete bug fix with a clear user-facing behavior change and direct issue linkage.", "soft_edge_verdicts": [ { "left": "pull_request:42227", "right": "pull_request:43291", "accept": false, - "reason": "Both mention Whisper, but 42227 fixes ASR pipeline return_language plumbing while 43291 changes tokenizer decode/timestamp handling and tests; different code paths and bugs." + "reason": "Both are Whisper-related, but they fix different problems in different code paths: ASR pipeline return-language plumbing vs tokenizer decode/test behavior." }, { "left": "pull_request:42227", "right": "pull_request:43665", "accept": false, - "reason": "Whisper ASR pipeline fix vs unrelated CLIP/ViT test edits; no shared underlying bug." + "reason": "Unrelated areas: Whisper ASR pipeline vs CLIP/ViT test-file changes. No shared underlying bug or mergeable combined fix." }, { "left": "pull_request:42227", "right": "pull_request:44815", "accept": false, - "reason": "Whisper pipeline change and FP8/dequant model-loading fix are unrelated subsystems and failure modes." 
+ "reason": "Different subsystems and bugs: Whisper return-language handling vs FP8 dequant/loading logic. Not the same change." }, { "left": "pull_request:43291", "right": "pull_request:43665", "accept": false, - "reason": "Whisper tokenizer/test change is not the same as the CLIP/ViT test-only edit." + "reason": "Whisper tokenizer/test work does not match the CLIP/ViT test edits; these are separate and not mergeable as one fix." }, { "left": "pull_request:43291", "right": "pull_request:44815", "accept": false, - "reason": "Tokenizer decode/timestamp behavior and FP8/dequant loading are unrelated concrete issues." + "reason": "No common concrete bug or code path: Whisper decode behavior vs FP8 quantization/loading. Unrelated PRs." }, { "left": "pull_request:43665", "right": "pull_request:44815", "accept": false, - "reason": "CLIP/ViT test assertions do not match the FP8/dequant code-path fix; this is not a duplicate change." + "reason": "CLIP/ViT test changes are unrelated to the FP8 dequant fix; they do not address the same issue." } ] }, @@ -15506,7 +15388,7 @@ "soft_pairs": [ "pull_request:42975|pull_request:42976" ], - "prepared_review_unit_hash": "8449fa42b7d20b49badec6fcee7c6bf184b080e5afc21d3657761911ab957f7b", + "prepared_review_unit_hash": "458ead408e6ff51ed837d143544655806020a3b67970cdb8757b0856add250ec", "component_budget": { "node_count": 2, "item_count": 2, @@ -15528,18 +15410,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Both items are PRs that touch GitHub Actions workflows, but they are not the same change: PR 42975 broadly upgrades multiple GitHub Actions for Node 24 compatibility, while PR 42976 narrowly bumps docker/build-push-action versions. The overlap is only in subsystem and workflow maintenance, not in the concrete fix.", - "confidence": 0.92, + "summary": "Two maintenance PRs for GitHub Actions upgrades, but they target different action families and change different workflows. 
They overlap in broad CI infrastructure, not in the same concrete bug or mergeable change set.", + "confidence": 0.89, "canonical_issue_reason": null, - "canonical_pr_reason": null, + "canonical_pr_reason": "PR 42975 is the better representative because it has the clearer, more specific goal (Node 24 compatibility) and covers a broader set of workflow action-version bumps.", "best_issue_reason": null, - "best_pr_reason": "PR 42975 is the broader workflow-maintenance umbrella, but it still is not a duplicate of PR 42976; 42976 is a separate docker-action version bump with a different concrete purpose.", + "best_pr_reason": "PR 42975 is the most suitable canonical PR for the cluster: its title and diff make the maintenance intent explicit, while the other PR is a separate dependency bump focused on docker/build-push-action and related workflow updates.", "soft_edge_verdicts": [ { "left": "pull_request:42975", "right": "pull_request:42976", "accept": false, - "reason": "They both update GitHub Actions workflows, but one targets Node 24 compatibility across many actions and the other specifically upgrades docker/build-push-action. Different actions, different scope, and not plausibly one merged fix for the same underlying issue." + "reason": "Do not merge as duplicates: 42975 updates checkout/cache/upload-artifact versions for Node 24 compatibility, while 42976 bumps docker/build-push-action and other workflow actions to latest versions. They are related maintenance changes but not the same concrete change or code-path fix." 
} ] }, @@ -15590,12 +15472,12 @@ "pull_request:45283|pull_request:45532", "pull_request:45490|pull_request:45532" ], - "prepared_review_unit_hash": "40f905a3ebbbc119b4390707a827ebe3c1b72612aad93de2b6c1f3b04325a613", + "prepared_review_unit_hash": "3495eeacad70709a7e9b98b84524250dae72090dbbf87f752acf60563196a188", "component_budget": { "node_count": 10, "item_count": 10, "soft_pair_count": 14, - "serialized_chars": 32088, + "serialized_chars": 32085, "estimated_input_tokens": 8022, "estimated_eval_tokens": 16300 }, @@ -15603,7 +15485,7 @@ "node_count": 10, "item_count": 10, "soft_pair_count": 14, - "serialized_chars": 32088, + "serialized_chars": 32085, "estimated_input_tokens": 8022, "estimated_eval_tokens": 16300 }, @@ -15612,96 +15494,96 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "No duplicate cluster emerges here: the PRs span unrelated model additions and infrastructure changes, with only broad filename overlap from Hugging Face model scaffolding. I would not merge any of these into a single canonical PR.", - "confidence": 0.93, + "summary": "This cluster is mostly a set of unrelated model-addition and infrastructure PRs that share boilerplate docs/auto-registry edits. 
I found no true duplicate pairs worth merging.", + "confidence": 0.96, "canonical_issue_reason": null, - "canonical_pr_reason": "No canonical PR: the items are distinct changes (different models/features and one distributed-loading PR), so there is no single underlying change to deduplicate around.", + "canonical_pr_reason": "No single canonical PR fits the cluster: the substantive changes span different models and unrelated infra work, and the overlaps are mostly shared documentation/registry boilerplate.", "best_issue_reason": null, - "best_pr_reason": "No best PR for duplicate purposes; each PR is a separate model-support or infra enhancement rather than an obvious duplicate of another.", + "best_pr_reason": "45532 is the closest thing to a representative only because it has the strongest soft link and active discussion, but it still does not duplicate the rest of the cluster.", "soft_edge_verdicts": [ { "left": "pull_request:42978", "right": "pull_request:43448", "accept": false, - "reason": "Different model additions (ViT NEPA vs Molmo); shared docs/auto-registration files are expected boilerplate, not the same change." + "reason": "Different model additions: ViT NEPA vs Molmo. Shared docs/auto-registry files are boilerplate, not the same change." }, { "left": "pull_request:42978", "right": "pull_request:43451", "accept": false, - "reason": "Different model families and code paths; both are new model support PRs, but not the same underlying feature." + "reason": "Different model additions: ViT NEPA vs Molmo2. No shared concrete code-path or bug fix." }, { "left": "pull_request:42978", "right": "pull_request:45490", "accept": false, - "reason": "ViT NEPA and CTSM are unrelated model additions with no shared concrete bug or feature." + "reason": "Different model additions: ViT NEPA vs CTSM. Overlap is limited to generic docs/auto-registration edits." 
}, { "left": "pull_request:43448", "right": "pull_request:43451", "accept": false, - "reason": "Molmo and Molmo2 are separate model-support PRs for different architectures/versioned families." + "reason": "Molmo and Molmo2 are distinct model implementations with different files and behavior; not the same underlying change." }, { "left": "pull_request:43448", "right": "pull_request:45490", "accept": false, - "reason": "Molmo vs CTSM are unrelated model additions; overlap is only in common registration/docs files." + "reason": "Molmo vs CTSM are unrelated model additions; shared boilerplate files do not indicate duplication." }, { "left": "pull_request:43448", "right": "pull_request:45532", "accept": false, - "reason": "Molmo and SLANet are different model implementations, not one duplicate change." + "reason": "Molmo vs SLANet are different model support PRs; they only share registry/docs plumbing." }, { "left": "pull_request:43451", "right": "pull_request:45490", "accept": false, - "reason": "Molmo2 and CTSM are distinct model-support additions with different targets and code paths." + "reason": "Molmo2 and CTSM are separate model additions with different implementations and targets." }, { "left": "pull_request:43451", "right": "pull_request:45532", "accept": false, - "reason": "Molmo2 and SLANet are unrelated model additions despite shared boilerplate files." + "reason": "Molmo2 vs SLANet are unrelated model support changes despite some shared auto-mapping boilerplate." }, { "left": "pull_request:43838", "right": "pull_request:45283", "accept": false, - "reason": "Qwen3-ASR support and Qwen3.5 GGUF loading support are different feature areas and code paths." + "reason": "Qwen3-ASR model support and Qwen3.5 GGUF loading support are different changes: one adds a model, the other adds loader mapping/quantization support." 
}, { "left": "pull_request:43973", "right": "pull_request:45283", "accept": false, - "reason": "Lfm2Audio model support and Qwen3.5 GGUF loading are not the same underlying bug/change." + "reason": "Lfm2 Audio model support vs Qwen3.5 GGUF loading support are different concrete code paths and features." }, { "left": "pull_request:44601", "right": "pull_request:45532", "accept": false, - "reason": "Pipeline-parallel loading support is infrastructure work; SLANet is a model addition, so they are not duplicates." + "reason": "Pipeline-parallel loading infrastructure and SLANet model support are unrelated; shared files are incidental registry/util updates." }, { "left": "pull_request:45157", "right": "pull_request:45532", "accept": false, - "reason": "PrismML Bonsai GGUF handling and SLANet model support are unrelated changes." + "reason": "PrismML GGUF dequantization support vs SLANet model support are different feature areas and code paths." }, { "left": "pull_request:45283", "right": "pull_request:45532", "accept": false, - "reason": "Qwen3.5 GGUF loading support and SLANet model support do not share the same concrete code-path problem." + "reason": "Qwen3.5 GGUF loading support and SLANet model support do not fix the same underlying problem." }, { "left": "pull_request:45490", "right": "pull_request:45532", "accept": false, - "reason": "CTSM and SLANet are separate model-support PRs; deterministic similarity is only due to shared registration/docs scaffolding." + "reason": "CTSM and SLANet are distinct model-addition PRs; the shared files are generic docs/auto-registration boilerplate, not a duplicate fix." 
} ] }, @@ -15734,7 +15616,7 @@ "pull_request:43488|pull_request:45476", "pull_request:43532|pull_request:45476" ], - "prepared_review_unit_hash": "bcd5d30cd3aa82e937712fbf0d1efc8dae7fb50e5ab22cd54f51bd32eb2d68ef", + "prepared_review_unit_hash": "42f5c34ba74a7caf1287e059191967d4327d2221323a541a38d492f4270b4d21", "component_budget": { "node_count": 3, "item_count": 3, @@ -15756,30 +15638,30 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Three unrelated synthetic PRs were grouped only because they all touch the same vit test file. Their actual changes are different: setup/versioning, workflow/CI plumbing, and CLIP/tokenizer code. No duplicate pair is acceptable.", - "confidence": 0.97, + "summary": "These are three unrelated throwaway PRs that all share the same \"[don't merge]\"/test-noise pattern, but they change different code paths: packaging/version checks, CI workflow plumbing, and a PR-CI caller plus tokenizer/test junk. The shared vit test file looks incidental rather than a common underlying fix.", + "confidence": 0.89, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 43532 is the closest representative because it has the broadest workflow/test surface and the most discussion, but it is not a duplicate of the others.", + "canonical_pr_reason": "PR 43532 is the most coherent representative of the cluster: it has a single CI/workflow-oriented theme and more substantial, connected edits than the others.", "best_issue_reason": null, - "best_pr_reason": "PR 43532 is the best single representative for the cluster, but only as a placeholder; the cluster does not contain a true duplicate PR set.", + "best_pr_reason": "PR 43532 is the best representative because its changes are most internally consistent around CI workflow behavior, whereas the other two are narrower or more obviously synthetic checks.", "soft_edge_verdicts": [ { "left": "pull_request:43488", "right": "pull_request:43532", "accept": false, - "reason": 
"Different changes: packaging/version/dependency tweaks vs workflow/testing-utils edits. The shared vit test file is incidental and not the same underlying bug/change." + "reason": "Different underlying changes: 43488 is about setup/dependency/version formatting, while 43532 rewires self-comment CI and related utilities. Shared vit test edits look incidental." }, { "left": "pull_request:43488", "right": "pull_request:45476", "accept": false, - "reason": "Different code paths: setup/versioning and deps vs CI workflow caller plus CLIP tokenizer/test edits. Shared test-file overlap is too weak to merge." + "reason": "43488 targets packaging/version/dependency metadata; 45476 adds a PR-CI workflow caller and unrelated tokenizer/test edits. Not the same concrete bug or change." }, { "left": "pull_request:43532", "right": "pull_request:45476", "accept": false, - "reason": "Both touch workflows/tests, but they fix different things: self-comment CI/testing utilities vs PR CI caller and CLIP tokenization. Not the same underlying change." + "reason": "Both touch CI-adjacent files, but they address different mechanisms: self-comment CI versus PR CI caller setup. The overlap in tests is not enough to treat them as one change." 
} ] }, @@ -15809,12 +15691,12 @@ "soft_pairs": [ "pull_request:43612|pull_request:43613" ], - "prepared_review_unit_hash": "d35e4a2c734e1af26a4de80c365b28dc17274afab8a3c89cbad9f4e36dd39906", + "prepared_review_unit_hash": "06899421bbc6a9f495591f0a33989aac0c0d17bd8e1ce2002360b382f478d260", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 6082, + "serialized_chars": 6083, "estimated_input_tokens": 1521, "estimated_eval_tokens": 3298 }, @@ -15822,7 +15704,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 6082, + "serialized_chars": 6083, "estimated_input_tokens": 1521, "estimated_eval_tokens": 3298 }, @@ -15831,18 +15713,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Two separate PRs add different segmentation pipelines: one for Promptable Concept Segmentation and one for Promptable Visual Segmentation. They share some docs/registry scaffolding, but the underlying features, model support, and test coverage are distinct.", + "summary": "Both pull requests add new, similarly named segmentation pipelines and share a doc/registry scaffolding pattern, but they implement different features and code paths: one is for Promptable Concept Segmentation, the other for Promptable Visual Segmentation. 
They are related, not duplicates.", "confidence": 0.95, "canonical_issue_reason": null, - "canonical_pr_reason": "No canonical PR: these are distinct feature additions for different segmentation tasks, not the same code-path fix or change.", + "canonical_pr_reason": "If a representative PR must be chosen, #43613 is the broader one: it adds the visual segmentation pipeline plus more model/test integrations and touches a wider set of supporting files.", "best_issue_reason": null, - "best_pr_reason": "If forced to pick a representative, PR 43613 is slightly broader in model coverage, but it is still a separate feature from PR 43612 rather than a duplicate.", + "best_pr_reason": "#43613 is the better cluster representative because it is broader and more complete, but it should not be treated as a duplicate of #43612 since it targets a different pipeline and model path.", "soft_edge_verdicts": [ { "left": "pull_request:43612", "right": "pull_request:43613", "accept": false, - "reason": "Different pipelines and target APIs/models: concept segmentation vs visual segmentation. Shared documentation and registration files are incidental; the PRs are not the same underlying change and should not be merged as one." + "reason": "Different underlying feature sets and code paths: concept segmentation vs visual segmentation. Shared docs/registry files and similar pipeline scaffolding are not enough to merge these as duplicates." 
} ] }, @@ -15872,7 +15754,7 @@ "soft_pairs": [ "pull_request:43651|pull_request:43663" ], - "prepared_review_unit_hash": "bc1200357a606d5be63d8d32c91d1c58868e45ad790cd3207a2d05556f1bf388", + "prepared_review_unit_hash": "003548d77de3a1260dd8af1fdb16080c8ae8bd408f6713e4a0f1695bc16bf598", "component_budget": { "node_count": 2, "item_count": 2, @@ -15894,18 +15776,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Two open PRs touch the same file but implement unrelated Trainer extension points: one changes gradient-accumulation loss scaling, the other refactors signature-column inspection for column filtering.", - "confidence": 0.95, + "summary": "Two separate Trainer extensions in the same file: one adds a hook for gradient-accumulation loss scaling, the other adds a hook for signature-column filtering. They touch different code paths and are not duplicates.", + "confidence": 0.97, "canonical_issue_reason": null, - "canonical_pr_reason": null, + "canonical_pr_reason": "PR #43663 is the cleaner, self-contained API extension and is the better representative of this cluster if one must be chosen.", "best_issue_reason": null, - "best_pr_reason": null, + "best_pr_reason": "PR #43663 is the most suitable standalone change: it refactors column-signature handling into an overrideable method without altering training behavior.", "soft_edge_verdicts": [ { "left": "pull_request:43651", "right": "pull_request:43663", "accept": false, - "reason": "Different underlying behaviors and code paths in Trainer; one is about loss normalization during training_step, the other about input signature column selection. They are not the same fix and would not plausibly be merged into one PR." + "reason": "Both modify trainer.py, but they address unrelated concerns: gradient-accumulation loss scaling vs. dataloader column filtering. They do not fix the same concrete bug or change the same behavior." 
} ] }, @@ -15937,7 +15819,7 @@ "pull_request:43656|pull_request:43842", "pull_request:43836|pull_request:43842" ], - "prepared_review_unit_hash": "9354cb97907d3ace0498f8bac09a4869d55eecb278e33fb71fd5e980385a8dec", + "prepared_review_unit_hash": "079e389db8d8793a091a9ed93cc581121bb98faf128b46629530ee8e281698b9", "component_budget": { "node_count": 3, "item_count": 3, @@ -15959,30 +15841,30 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "All three PRs target the same runtime annotation/TypeAdapter NameError in `src/transformers/cli/serve.py`. They look like overlapping fixes for the same bug, with 43842 being the most focused patch.", - "confidence": 0.82, + "summary": "All three PRs point at the same serve.py TypeAdapter runtime-annotation crash, with 43842 being the most direct and narrowly scoped fix.", + "confidence": 0.73, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 43842 is the most focused and directly aligned fix: it explicitly addresses the TypeAdapter NameError when pydantic is absent, keeps the change narrowly scoped, and has the clearest issue linkage.", + "canonical_pr_reason": "PR 43842 most directly fixes the reported NameError by keeping TypeAdapter out of runtime evaluation when pydantic is absent, with a minimal targeted diff and matching issue linkage.", "best_issue_reason": null, - "best_pr_reason": "PR 43842 is the best representative because it fixes the exact reported failure with minimal scope, while 43656 is broader annotation cleanup and 43836 appears less focused.", + "best_pr_reason": "PR 43842 is the best standalone fix: it addresses the concrete crash path in serve.py with the least extra churn and the clearest bug-specific intent.", "soft_edge_verdicts": [ { "left": "pull_request:43656", "right": "pull_request:43842", "accept": true, - "reason": "Both fix the same underlying TypeAdapter/runtime-annotation NameError in the CLI serve path and could plausibly be combined into one patch." 
+ "reason": "Both appear to fix the same serve.py TypeAdapter NameError in the CLI, using alternative annotation/runtime-import handling for the same crash path." }, { "left": "pull_request:43836", "right": "pull_request:43842", "accept": true, - "reason": "Both are targeted at the same `serve.py` TypeAdapter NameError; the titles and diff snippets point to the same concrete bug rather than just a broad subsystem overlap." + "reason": "Same file, same TypeAdapter NameError, and both change the import/annotation strategy to avoid evaluating TypeAdapter at runtime when pydantic is absent." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is grounded in the packet and stays mostly conservative. All three PRs touch `src/transformers/cli/serve.py` and the diffs support a shared TypeAdapter/annotation NameError theme. The soft-edge acceptances are also plausible: 43836 and 43842 clearly overlap on the same issue target, and 43656 looks like a broader but mergeable variant of the same fix. No overstatement that would warrant rejection." + "feedback": "The summary is grounded in the packet: all three PRs target src/transformers/cli/serve.py and the TypeAdapter NameError/runtime-annotation issue, with 43842 clearly the narrowest fix. The soft-edge verdicts are conservative enough, and the pairs look mergeable as alternative fixes for the same bug." 
}, "evaluator_used": true, "retried": false, @@ -16012,12 +15894,12 @@ "pull_request:43757|pull_request:45500", "pull_request:43757|pull_request:45506" ], - "prepared_review_unit_hash": "8ca49c58cb211002432f9b511e8c7ecf5aba711904626c7c648666625440feb6", + "prepared_review_unit_hash": "89e25286137d69b5eece3fecc1d687959f9116531fdbbe39cef479e20daed9bf", "component_budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 3, - "serialized_chars": 7755, + "serialized_chars": 7756, "estimated_input_tokens": 1939, "estimated_eval_tokens": 4134 }, @@ -16025,7 +15907,7 @@ "node_count": 3, "item_count": 3, "soft_pair_count": 3, - "serialized_chars": 7755, + "serialized_chars": 7756, "estimated_input_tokens": 1939, "estimated_eval_tokens": 4134 }, @@ -16034,30 +15916,30 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Two PRs are near-duplicates for full GPT-OSS GGUF loading support; PR 45506 is the more complete/latest implementation. PR 43757 is only a limited fallback patch and should not be merged with the full-support PRs.", + "summary": "All three PRs are about GPT-OSS GGUF loading, but only 45506 looks like the complete end-state implementation. 45500 is a draft follow-up and 43757 is a narrower fallback-only stopgap.", "confidence": 0.95, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 45506 is the best canonical PR: it is the latest, explicitly supersedes 43757, and contains the fuller GPT-OSS GGUF support across loader mapping, utilities, and tests.", + "canonical_pr_reason": "45506 is the strongest canonical PR: it adds the GGUF metadata mapping, loader support, and tests, is not draft, and has review activity. 
45500 is draft and 43757 only provides a fallback.", "best_issue_reason": null, - "best_pr_reason": "PR 45506 is the strongest representative of the cluster because it addresses the full concrete GPT-OSS GGUF loading path rather than just a partial fallback.", + "best_pr_reason": "45506 best represents the full fix for GPT-OSS GGUF support; it is the most complete and mergeable implementation among the three.", "soft_edge_verdicts": [ { "left": "pull_request:45500", "right": "pull_request:45506", "accept": true, - "reason": "Same underlying change: both implement GPT-OSS GGUF loading support for issue #43366, and 45506 is an expanded/latest version of 45500 with the same code path and files." + "reason": "Same underlying GPT-OSS GGUF loading work, same tracking issue, and nearly identical scope; 45506 is the fuller version of 45500." }, { "left": "pull_request:43757", "right": "pull_request:45500", "accept": false, - "reason": "43757 only adds a fallback from hard failure to gpt-neox, while 45500 implements broader full GGUF loading support; they do not look like one mergeable change." + "reason": "Related but not the same concrete change: 43757 only adds a fallback to avoid hard failure, while 45500 implements full GGUF support." }, { "left": "pull_request:43757", "right": "pull_request:45506", "accept": false, - "reason": "43757 is a partial workaround and 45506 is a full support implementation; they target the same issue but not the same concrete change, so they should not be clustered as duplicates." + "reason": "43757 is a narrow compatibility fallback, whereas 45506 is a broader full-support PR; they are not the same fix and would not plausibly merge as one PR." 
} ] }, @@ -16184,8 +16066,8 @@ "pull_request:44066|pull_request:44072", "pull_request:44013|pull_request:44044", "pull_request:44066|pull_request:44086", - "pull_request:44066|pull_request:44071", "pull_request:44018|pull_request:44068", + "pull_request:44066|pull_request:44071", "pull_request:44066|pull_request:44068", "pull_request:44018|pull_request:44066", "pull_request:44019|pull_request:44722", @@ -16199,11 +16081,11 @@ "pull_request:44071|pull_request:44722", "pull_request:44018|pull_request:44086", "pull_request:44018|pull_request:44085", - "pull_request:44071|pull_request:44085", "pull_request:44068|pull_request:44086", + "pull_request:44071|pull_request:44085", "pull_request:44059|pull_request:44086", - "pull_request:44059|pull_request:44722", "pull_request:44085|pull_request:44086", + "pull_request:44059|pull_request:44722", "pull_request:44098|pull_request:44101", "pull_request:44018|pull_request:44071", "pull_request:44017|pull_request:44722", @@ -16211,11 +16093,11 @@ "pull_request:44017|pull_request:44073", "pull_request:44068|pull_request:44071", "pull_request:44071|pull_request:44086", - "pull_request:44059|pull_request:44071", "pull_request:43997|pull_request:44073", "pull_request:44017|pull_request:44071", - "pull_request:44059|pull_request:44068", + "pull_request:44059|pull_request:44071", "pull_request:44073|pull_request:44101", + "pull_request:44059|pull_request:44068", "pull_request:44018|pull_request:44059", "pull_request:44056|pull_request:44101", "pull_request:44073|pull_request:44161", @@ -16249,12 +16131,12 @@ "pull_request:44044|pull_request:44129", "pull_request:44044|pull_request:44154" ], - "prepared_review_unit_hash": "999b2713a6c14de45a4f051910c1e84ac06ea98952763f597a0944734c1e6a20", + "prepared_review_unit_hash": "8e4bbb82040c6f2fadbbd9d411e63ae54af34b8876ae39d8893f031653b99f67", "component_budget": { "node_count": 28, "item_count": 28, "soft_pair_count": 78, - "serialized_chars": 84897, + "serialized_chars": 84898, 
"estimated_input_tokens": 21225, "estimated_eval_tokens": 42706 }, @@ -16271,400 +16153,334 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This packet is broader than a single GPT-J/GPT-Neo duplicate cluster: it contains a small set of accepted bridge pairs around the standardized output-tracing refactor campaign, especially between CVT/FNet, ResNet/SpeechT5, DeBERTa, and VITS, plus the GPT-J/CodeGen lineage. Most other pairs are only theme-level matches and should remain separate.", - "confidence": 0.81, + "summary": "This cluster is mostly a set of model-specific refactors to standardized output tracing, not one single duplicate change. Only the GPTJ/CodeGen variants and the GPT-Neo variants look like true overlaps; the rest share the umbrella issue but not the same code path.", + "confidence": 0.95, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 44129 is the best global representative because it sits at the center of the accepted bridge edges and captures the standardized output-tracing migration pattern that links several otherwise separate model families.", + "canonical_pr_reason": "PR 44722 is the best canonical PR for the GPTJ/CodeGen subcluster: it is the latest and most complete instance of that concrete output-tracing refactor.", "best_issue_reason": null, - "best_pr_reason": "PR 44129 is the strongest single exemplar of the packet's accepted output-tracing refactors, with the highest bridge value across the accepted pairs.", + "best_pr_reason": "44722 is the strongest representative of the shared decorator-based output-tracing migration, with the broadest concrete GPTJ/CodeGen implementation among the overlapping PRs.", "soft_edge_verdicts": [ { "left": "pull_request:43996", "right": "pull_request:44085", "accept": false, - "reason": "Different concrete model code paths; the shared output-tracing theme is too broad to treat as one change." 
+ "reason": "Different model families and code paths; only the umbrella tracking issue matches." }, { "left": "pull_request:43996", "right": "pull_request:44044", "accept": false, - "reason": "Different model families and implementation paths; this is only a thematic similarity." + "reason": "Different model families and code paths; same issue target is too broad to merge." }, { "left": "pull_request:44066", "right": "pull_request:44085", - "accept": false, - "reason": "Both are output-tracing refactors, but this pair does not look like the same mergeable code-path change." + "accept": true, + "reason": "Both are GPTJ output-tracing refactors on the same code path and could plausibly be one PR." }, { "left": "pull_request:44007", "right": "pull_request:44072", "accept": false, - "reason": "Related standardized-output refactors, but they target different vision backbones and edited surfaces." + "reason": "ResNet/RT-DETR ResNet versus EfficientNet; same theme, but not the same concrete change." }, { "left": "pull_request:44072", "right": "pull_request:44722", "accept": false, - "reason": "Different model families and code paths; sharing the tracking issue/theme is not enough." + "reason": "EfficientNet refactor versus GPTJ/CodeGen refactor; unrelated code paths." }, { "left": "pull_request:44066", "right": "pull_request:44072", "accept": false, - "reason": "GPT-J/CodeGen versus EfficientNet-style output handling; too different to be the same underlying change." + "reason": "GPTJ/CodeGen versus EfficientNet; shared output-tracing theme only." }, { "left": "pull_request:44013", "right": "pull_request:44044", "accept": false, - "reason": "MobileNetV2 and DeBERTa are different implementations; this is only the same refactor pattern." + "reason": "MobileNetV2 versus DeBERTa; separate model-specific migrations." 
}, { "left": "pull_request:44066", "right": "pull_request:44086", "accept": false, - "reason": "Different model families and code paths; not the same concrete bug or change." - }, - { - "left": "pull_request:44066", - "right": "pull_request:44071", - "accept": false, - "reason": "GPT-J/CodeGen versus MPT; same broad refactor style, but not the same change." + "reason": "GPTJ/CodeGen versus MGP-STR; not the same underlying bug or change." }, { "left": "pull_request:44018", "right": "pull_request:44068", + "accept": true, + "reason": "Both are GPT-Neo output-tracing refactors for the same model and code path." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44071", "accept": false, - "reason": "The GPT-Neo variants are not close enough in the edited surfaces here to count as one duplicate PR." + "reason": "GPTJ/CodeGen versus MPT; only the shared decorator pattern matches." }, { "left": "pull_request:44066", "right": "pull_request:44068", "accept": false, - "reason": "GPT-J/CodeGen versus GPT-Neo; same theme only, not the same underlying change." + "reason": "GPTJ versus GPT-Neo; similar infrastructure work but different model code paths." }, { "left": "pull_request:44018", "right": "pull_request:44066", "accept": false, - "reason": "GPT-Neo and GPT-J/CodeGen are different code paths in this packet." + "reason": "GPT-Neo versus GPTJ/CodeGen; not the same refactor." }, { "left": "pull_request:44019", "right": "pull_request:44722", "accept": false, - "reason": "ResNet versus GPT-J/CodeGen; too broad a similarity at the refactor-pattern level." + "reason": "ResNet versus GPTJ/CodeGen; same umbrella issue, different implementations." }, { "left": "pull_request:44086", "right": "pull_request:44722", "accept": false, - "reason": "MGP-STR versus GPT-J/CodeGen; different model implementations." + "reason": "MGP-STR versus GPTJ/CodeGen; no shared concrete code path." 
}, { "left": "pull_request:44019", "right": "pull_request:44085", "accept": false, - "reason": "Different model families and concrete edits; same refactor theme only." + "reason": "ResNet versus GPTJ; distinct model changes." }, { "left": "pull_request:44018", "right": "pull_request:44019", "accept": false, - "reason": "GPT-Neo and ResNet are unrelated concrete refactors." + "reason": "GPT-Neo versus ResNet; related pattern, but not a duplicate." }, { "left": "pull_request:44019", "right": "pull_request:44071", "accept": false, - "reason": "ResNet versus MPT; same cleanup style, but not the same code-path problem." + "reason": "ResNet versus MPT; different model-specific output handling." }, { "left": "pull_request:44068", "right": "pull_request:44116", "accept": false, - "reason": "GPT-Neo versus Flaubert/XLM; unrelated implementations." + "reason": "GPT-Neo versus Flaubert/XLM; not the same underlying fix." }, { "left": "pull_request:44068", "right": "pull_request:44722", "accept": false, - "reason": "GPT-Neo versus GPT-J/CodeGen; not the same underlying PR change." + "reason": "GPT-Neo versus GPTJ/CodeGen; same refactor style, different code paths." }, { "left": "pull_request:44018", "right": "pull_request:44116", "accept": false, - "reason": "GPT-Neo versus Flaubert/XLM; different code paths." + "reason": "GPT-Neo versus Flaubert/XLM; separate model migrations." }, { "left": "pull_request:44071", "right": "pull_request:44722", "accept": false, - "reason": "MPT versus GPT-J/CodeGen; same refactor theme only." + "reason": "MPT versus GPTJ/CodeGen; only the output-tracing framework is shared." }, { "left": "pull_request:44018", "right": "pull_request:44086", "accept": false, - "reason": "GPT-Neo versus MGP-STR; different model families and edited surfaces." + "reason": "GPT-Neo versus MGP-STR; not the same concrete change." 
}, { "left": "pull_request:44018", "right": "pull_request:44085", "accept": false, - "reason": "GPT-Neo versus GPT-J/CodeGen; not the same concrete change." - }, - { - "left": "pull_request:44071", - "right": "pull_request:44085", - "accept": false, - "reason": "MPT versus GPT-J/CodeGen; only the output-tracing pattern matches." + "reason": "GPT-Neo versus GPTJ; similar theme, different implementation." }, { "left": "pull_request:44068", "right": "pull_request:44086", "accept": false, - "reason": "GPT-Neo versus MGP-STR; unrelated code paths." + "reason": "GPT-Neo versus MGP-STR; unrelated model code paths." }, { "left": "pull_request:44059", "right": "pull_request:44086", "accept": false, - "reason": "GPT-2 versus MGP-STR; shared output-tracing migration theme is not enough." + "reason": "GPT2 versus MGP-STR; only the umbrella output-tracing work overlaps." }, { - "left": "pull_request:44059", - "right": "pull_request:44722", + "left": "pull_request:44085", + "right": "pull_request:44086", "accept": false, - "reason": "GPT-2 versus GPT-J/CodeGen; different refactors." + "reason": "GPTJ versus MGP-STR; not the same bug or refactor." }, { - "left": "pull_request:44085", - "right": "pull_request:44086", + "left": "pull_request:44059", + "right": "pull_request:44722", "accept": false, - "reason": "GPT-J versus MGP-STR; unrelated model implementations." + "reason": "GPT2 versus GPTJ/CodeGen; distinct model-specific refactors." }, { "left": "pull_request:44098", "right": "pull_request:44101", "accept": false, - "reason": "ViLT versus Flaubert/XLM; different implementations." + "reason": "ViLT versus Flaubert/XLM; different model families." }, { - "left": "pull_request:44018", - "right": "pull_request:44071", + "left": "pull_request:44073", + "right": "pull_request:44101", "accept": false, - "reason": "GPT-Neo versus MPT; not the same underlying change." + "reason": "VisualBert versus Flaubert/XLM; not the same code path." 
}, { "left": "pull_request:44017", "right": "pull_request:44722", "accept": false, - "reason": "SegFormer versus GPT-J/CodeGen; same tracking issue alone does not make them duplicates." + "reason": "Segformer versus GPTJ/CodeGen; separate migrations." }, { "left": "pull_request:44017", "right": "pull_request:44161", "accept": false, - "reason": "SegFormer versus LongT5; different model code paths." + "reason": "Segformer versus LongT5; same pattern, different models." }, { "left": "pull_request:44017", "right": "pull_request:44073", "accept": false, - "reason": "SegFormer versus VisualBert; unrelated concrete refactors." - }, - { - "left": "pull_request:44068", - "right": "pull_request:44071", - "accept": false, - "reason": "GPT-Neo versus MPT; same theme only." + "reason": "Segformer versus VisualBert; unrelated implementations." }, { - "left": "pull_request:44071", - "right": "pull_request:44086", + "left": "pull_request:43997", + "right": "pull_request:44073", "accept": false, - "reason": "MPT versus MGP-STR; different model families." + "reason": "RegNet versus VisualBert; different model-specific code." }, { - "left": "pull_request:44059", + "left": "pull_request:44010", "right": "pull_request:44071", "accept": false, - "reason": "GPT-2 versus MPT; broadly similar cleanup, but not the same change." + "reason": "SqueezeBert versus MPT; not a duplicate change." }, { - "left": "pull_request:43997", + "left": "pull_request:44010", "right": "pull_request:44073", "accept": false, - "reason": "RegNet versus VisualBert; different model families and code paths." - }, - { - "left": "pull_request:44056", - "right": "pull_request:44101", - "accept": false, - "reason": "MPNet versus Flaubert/XLM; same pattern, different implementation." + "reason": "SqueezeBert versus VisualBert; separate output-tracing refactors." 
}, { - "left": "pull_request:44073", - "right": "pull_request:44161", + "left": "pull_request:44010", + "right": "pull_request:44018", "accept": false, - "reason": "VisualBert versus LongT5; not the same change." + "reason": "SqueezeBert versus GPT-Neo; same theme only." }, { - "left": "pull_request:44101", - "right": "pull_request:44161", + "left": "pull_request:43997", + "right": "pull_request:44056", "accept": false, - "reason": "Flaubert/XLM versus LongT5; unrelated concrete edits." + "reason": "RegNet versus MPNet; different models and code paths." }, { - "left": "pull_request:44017", + "left": "pull_request:43997", "right": "pull_request:44018", "accept": false, - "reason": "SegFormer and GPT-Neo share only the global refactor theme." + "reason": "RegNet versus GPT-Neo; not the same underlying fix." }, { "left": "pull_request:43997", - "right": "pull_request:44056", - "accept": false, - "reason": "RegNet versus MPNet; different model families." - }, - { - "left": "pull_request:44071", - "right": "pull_request:44076", + "right": "pull_request:44161", "accept": false, - "reason": "MPT versus ImageGPT; unrelated implementations." + "reason": "RegNet versus LongT5; separate refactors." }, { "left": "pull_request:44074", "right": "pull_request:44101", "accept": false, - "reason": "TextNet versus Flaubert/XLM; different code paths." + "reason": "TextNet versus Flaubert/XLM; different model implementations." }, { "left": "pull_request:44056", "right": "pull_request:44161", "accept": false, - "reason": "MPNet versus LongT5; not a duplicate." - }, - { - "left": "pull_request:43997", - "right": "pull_request:44018", - "accept": false, - "reason": "RegNet versus GPT-Neo; only the refactor pattern overlaps." + "reason": "MPNet versus LongT5; same umbrella issue, different code paths." }, { "left": "pull_request:44074", "right": "pull_request:44161", "accept": false, - "reason": "TextNet versus LongT5; different model implementations." 
- }, - { - "left": "pull_request:44010", - "right": "pull_request:44071", - "accept": false, - "reason": "SqueezeBert versus MPT; same refactor style, not the same code path." - }, - { - "left": "pull_request:44010", - "right": "pull_request:44073", - "accept": false, - "reason": "SqueezeBert versus VisualBert; unrelated concrete changes." - }, - { - "left": "pull_request:44018", - "right": "pull_request:44129", - "accept": false, - "reason": "GPT-Neo versus SpeechT5; different model families and code paths." - }, - { - "left": "pull_request:43997", - "right": "pull_request:44129", - "accept": false, - "reason": "RegNet versus SpeechT5; unrelated implementations." - }, - { - "left": "pull_request:44071", - "right": "pull_request:44129", - "accept": false, - "reason": "MPT versus SpeechT5; same issue family only." - }, - { - "left": "pull_request:43996", - "right": "pull_request:44161", - "accept": false, - "reason": "CVT/FNet versus LongT5; different model code paths." + "reason": "TextNet versus LongT5; not the same concrete change." }, { "left": "pull_request:43996", "right": "pull_request:44017", "accept": false, - "reason": "CVT/FNet versus SegFormer; unrelated implementations." + "reason": "FNet/CVT versus Segformer; unrelated model-specific work." }, { "left": "pull_request:44010", "right": "pull_request:44044", "accept": false, - "reason": "SqueezeBert versus DeBERTa; same refactor theme only." + "reason": "SqueezeBert versus DeBERTa; distinct changes." }, { "left": "pull_request:44044", "right": "pull_request:44074", "accept": false, - "reason": "DeBERTa versus TextNet; different model families." + "reason": "DeBERTa versus TextNet; same pattern, different code paths." }, { "left": "pull_request:44066", "right": "pull_request:44722", "accept": true, - "reason": "Same GPT-J/CodeGen output-tracing refactor family with overlapping file and change intent; these look like iterations of one change." 
+ "reason": "Both cover the same GPTJ/CodeGen decorator migration and could be merged into one PR." }, { "left": "pull_request:44085", "right": "pull_request:44722", "accept": true, - "reason": "Both are GPT-J output-tracing decorator refactors touching the same code path; plausibly one merged change." + "reason": "Both are GPTJ output-tracing refactors with overlapping concrete changes." }, { "left": "pull_request:44085", "right": "pull_request:44129", - "accept": true, - "reason": "Both implement the same standardized output-tracing migration pattern over comparable forward paths." + "accept": false, + "reason": "GPTJ versus SpeechT5; same umbrella issue, not the same bug." }, { "left": "pull_request:43996", "right": "pull_request:44007", - "accept": true, - "reason": "Same standardized output-tracing refactor in the vision model stack; the code changes line up as one concrete migration." + "accept": false, + "reason": "FNet/CVT versus ResNet/RT-DETR ResNet; unrelated model refactors." }, { "left": "pull_request:43996", "right": "pull_request:44129", - "accept": true, - "reason": "Both are the same output-tracing modernization work, and the packet treats them as a direct bridge." + "accept": false, + "reason": "FNet/CVT versus SpeechT5; different code paths." }, { "left": "pull_request:44044", "right": "pull_request:44129", - "accept": true, - "reason": "Both replace the old output plumbing with the standardized capture/tuple handling style in comparable forward paths." + "accept": false, + "reason": "DeBERTa versus SpeechT5; separate model-specific changes." }, { "left": "pull_request:44044", "right": "pull_request:44154", - "accept": true, - "reason": "Both are the same standardized output-tracing collection refactor pattern and appear mergeable as one change." + "accept": false, + "reason": "DeBERTa versus ViTS; not the same underlying refactor." } ] }, "evaluator_result": { - "accept": false, - "feedback": "The writeup is too restrictive for this packet. 
It correctly identifies the GPT-J/CodeGen and GPT-Neo subclusters, but it marks several packet-level deterministic-accept pairs as false by relying only on model-family differences. In particular, the packet marks 43996\u201344007, 43996\u201344129, 44044\u201344129, and 44044\u201344154 as accept, so the claim that everything outside the GPT-J/CodeGen and GPT-Neo groups must stay separate is not well grounded. This makes the soft-edge verdicts insufficiently conservative relative to the packet." + "accept": true, + "feedback": "The summary is grounded in the packet and stays conservative about overlap versus shared umbrella issue. The accepted soft-edge pairs are all within the same model-family refactor and look plausibly mergeable into a single PR for the same bugfix, while the rejected pairs are appropriately cautious about only sharing the broader standardized output-tracing theme." }, "evaluator_used": true, - "retried": true, - "accepted_nontrivial_soft_edge": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, @@ -16698,12 +16514,12 @@ "pull_request:43997|pull_request:44010", "pull_request:44010|pull_request:44129" ], - "prepared_review_unit_hash": "2fd724325b95132e74904e879acbb7ddb041206c408a4dbfeebf881c49d5eaa7", + "prepared_review_unit_hash": "a39be23eb4eeaf89398285982d8c83e5ee8f2f9c5677e4d1af2a76e2e9da9b0b", "component_budget": { "node_count": 28, "item_count": 28, "soft_pair_count": 78, - "serialized_chars": 84897, + "serialized_chars": 84898, "estimated_input_tokens": 21225, "estimated_eval_tokens": 42706 }, @@ -16711,7 +16527,7 @@ "node_count": 8, "item_count": 8, "soft_pair_count": 6, - "serialized_chars": 17125, + "serialized_chars": 17126, "estimated_input_tokens": 4282, "estimated_eval_tokens": 8820 }, @@ -16720,48 +16536,48 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is a family of model-specific PRs that all migrate output tracing to 
the new standardized capture/decorator APIs, but they touch different architectures and code paths. They are related by pattern, not duplicates.", + "summary": "These PRs are all model-specific refactors to standardized output tracing/capture_outputs, but they target different architectures and code paths, so they are not duplicates of one another.", "confidence": 0.91, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 44129 is the most concrete reference point in the set: it has an explicit issue target and a narrowly scoped SpeechT5 output-tracing refactor, making it the clearest representative of the migration pattern.", + "canonical_pr_reason": "PR 44129 is the strongest representative of the cluster: it has an explicit issue link, a clear output-tracing refactor, and enough surrounding changes to show the intended migration pattern.", "best_issue_reason": null, - "best_pr_reason": "PR 44129 is the strongest standalone PR in the cluster because it has explicit linkage, a clear model-specific change, and enough implementation detail to serve as the best representative of this group.", + "best_pr_reason": "PR 44129 is the best single PR to anchor the cluster because it is the most explicitly scoped and documented, while the others are similar migrations in unrelated models.", "soft_edge_verdicts": [ { "left": "pull_request:44024", "right": "pull_request:44076", "accept": false, - "reason": "Same general output-tracing migration theme, but FocalNet and ImageGPT are different model implementations with separate forward paths; not the same underlying change." + "reason": "Same broad refactor theme, but different models (FocalNet vs ImageGPT) and different code paths; not the same underlying change." }, { "left": "pull_request:44073", "right": "pull_request:44074", "accept": false, - "reason": "Both adopt the standardized output-capture pattern, but they modify different models (VisualBert vs TextNet) and distinct code paths, so they are not duplicates." 
+ "reason": "Both migrate output tracing, but one is VisualBert and the other TextNet; they are separate model implementations and cannot be merged as one concrete fix." }, { "left": "pull_request:44056", "right": "pull_request:44073", "accept": false, - "reason": "MPNet and VisualBert are separate architectures; this is the same refactor pattern applied independently, not one concrete fix/change." + "reason": "MPNet and VisualBert are unrelated model code paths; the similarity is only the shared standardized-output refactor pattern." }, { "left": "pull_request:44056", "right": "pull_request:44074", "accept": false, - "reason": "These are independent model-specific refactors in MPNet and TextNet, sharing a theme but not a single underlying bug or patch." + "reason": "MPNet and TextNet touch different architectures and forward paths, so this is not the same underlying bug or change." }, { "left": "pull_request:43997", "right": "pull_request:44010", "accept": false, - "reason": "RegNet and SqueezeBert are unrelated model code paths; the overlap is only the standardized output-tracing migration pattern." + "reason": "RegNet and SqueezeBert are unrelated implementations; these are separate output-tracing migrations, not one shared fix." }, { "left": "pull_request:44010", "right": "pull_request:44129", "accept": false, - "reason": "SqueezeBert and SpeechT5 are different architectures with different forward logic; same broad refactor category, but not mergeable as one PR." + "reason": "SqueezeBert and SpeechT5 both adjust output capture, but they affect different model internals and cannot plausibly be merged into one PR as the same concrete code-path fix." 
} ] }, @@ -16840,7 +16656,7 @@ "pull_request:44028|pull_request:44030", "pull_request:44029|pull_request:44030" ], - "prepared_review_unit_hash": "34fd02f24a564acab2539fb4972259ddc7a284dcb692bf683012606d134f4638", + "prepared_review_unit_hash": "1ed95264fa2191610891d8373501d4dee45e5e4cbed692316150e57dc1dc82b1", "component_budget": { "node_count": 13, "item_count": 13, @@ -16862,246 +16678,246 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "This cluster is a batch of similar-looking but model-specific refactors for output tracing across different architectures. They share the same naming pattern and helper usage, but they do not appear to be the same underlying change or bug fix, so they should not be deduplicated into one PR.", - "confidence": 0.91, + "summary": "These PRs are all similar output-tracing refactors, but they target different model implementations and code paths. They are related by theme, not by the same underlying bug/change, so the soft edges should be rejected. 
No issue is present; one PR can serve as the cluster representative.", + "confidence": 0.84, "canonical_issue_reason": null, - "canonical_pr_reason": "No single canonical PR truly covers the cluster; 44003 is the broadest representative in scope, but it is still only the Mamba-family refactor, not a duplicate of the others.", + "canonical_pr_reason": "PR 44003 is the strongest representative because it is a broader output-tracing refactor that touches two related model files and introduces the new capturing plumbing, making it more central than the single-model variants.", "best_issue_reason": null, - "best_pr_reason": "44003 is the strongest representative because it spans two related files and introduces the shared output-capturing plumbing, making it the closest to the common pattern across the set.", + "best_pr_reason": "PR 44003 best represents the cluster\u2019s common pattern while still being a concrete, nontrivial code change across multiple files.", "soft_edge_verdicts": [ { "left": "pull_request:43998", "right": "pull_request:44002", "accept": false, - "reason": "Same refactor theme, but different model families (`timm_backbone` vs `upernet`) and different code paths." + "reason": "Different model families and files (timm_backbone vs upernet); same refactor theme, not the same code-path bug." }, { "left": "pull_request:43998", "right": "pull_request:44000", "accept": false, - "reason": "Both are output-tracing refactors, but they target different model implementations and are not the same concrete change." + "reason": "Timm backbone and vision-text dual encoder are unrelated implementations; this is only a shared output-tracing pattern." }, { "left": "pull_request:43998", "right": "pull_request:44001", "accept": false, - "reason": "`timm_backbone` and `univnet` are unrelated model code paths; similarity is only at the helper/refactor level." + "reason": "Different model code paths (timm_backbone vs univnet); no evidence of one shared defect." 
}, { "left": "pull_request:43998", "right": "pull_request:44003", "accept": false, - "reason": "Different architectures (`timm_backbone` vs `mamba/falcon_mamba`), so these are separate model-specific refactors." + "reason": "These touch different model stacks (timm_backbone vs mamba/falcon_mamba) and are separate refactors." }, { "left": "pull_request:43998", "right": "pull_request:44029", "accept": false, - "reason": "`timm_backbone` and `rwkv` are different model families; not the same underlying fix." + "reason": "Different model families and mechanics; same refactor motif, not mergeable as one fix." }, { "left": "pull_request:43999", "right": "pull_request:44001", "accept": false, - "reason": "Both refactor output handling, but one is `mobilenet_v1` and the other is `univnet`; distinct implementations." + "reason": "MobileNetV1 and UnivNet are unrelated model paths; the overlap is only in output-tracing cleanup." }, { "left": "pull_request:43999", "right": "pull_request:44002", "accept": false, - "reason": "Different model families (`mobilenet_v1` vs `upernet`); shared pattern is not enough to merge." + "reason": "MobileNetV1 vs UperNet are separate implementations; no shared underlying bug is shown." }, { "left": "pull_request:43999", "right": "pull_request:44025", "accept": false, - "reason": "`mobilenet_v1` and `depth_anything` are separate model-specific refactors, not one code-path issue." + "reason": "Different model families and files; both are refactors, but not the same concrete change." }, { "left": "pull_request:43999", "right": "pull_request:44026", "accept": false, - "reason": "These touch different composite-model code paths (`mobilenet_v1` vs `vision_encoder_decoder`)." + "reason": "MobileNetV1 and VisionEncoderDecoder are unrelated code paths despite similar signature cleanup." 
}, { "left": "pull_request:43999", "right": "pull_request:44027", "accept": false, - "reason": "`mobilenet_v1` and `speech_encoder_decoder` are unrelated model paths; do not dedupe." + "reason": "MobileNetV1 and SpeechEncoderDecoder are separate model stacks; same pattern, different fixes." }, { "left": "pull_request:44000", "right": "pull_request:44001", "accept": false, - "reason": "`vision_text_dual_encoder` and `univnet` share the refactor motif only; the underlying changes are different." + "reason": "Vision-Text Dual Encoder and UnivNet are different model paths; no shared bug or mergeable PR pair." }, { "left": "pull_request:44000", "right": "pull_request:44002", "accept": false, - "reason": "Different model families and different forward signatures; not the same concrete bug/change." + "reason": "Different model families (vision-text dual encoder vs UperNet); only the refactor style matches." }, { "left": "pull_request:44000", "right": "pull_request:44003", "accept": false, - "reason": "`vision_text_dual_encoder` and `mamba/falcon_mamba` are distinct code paths with no mergeable overlap." + "reason": "Vision-text dual encoder and mamba/falcon_mamba are unrelated concrete changes." }, { "left": "pull_request:44000", "right": "pull_request:44025", "accept": false, - "reason": "These are separate model refactors (`vision_text_dual_encoder` vs `depth_anything`)." + "reason": "Different targets and implementations; both are output-tracing refactors, but not the same fix." }, { "left": "pull_request:44001", "right": "pull_request:44002", "accept": false, - "reason": "`univnet` and `upernet` are unrelated; same refactor framework, different changes." + "reason": "UnivNet and UperNet are distinct model paths; no evidence they address one underlying bug." }, { "left": "pull_request:44001", "right": "pull_request:44003", "accept": false, - "reason": "Different model families (`univnet` vs `mamba/falcon_mamba`), so not a duplicate change." 
+ "reason": "UnivNet vs mamba/falcon_mamba are separate changes with only superficial similarity." }, { "left": "pull_request:44001", "right": "pull_request:44025", "accept": false, - "reason": "`univnet` and `depth_anything` are distinct model-specific output-tracing refactors." + "reason": "Different model stacks and changed files; not the same concrete issue." }, { "left": "pull_request:44002", - "right": "pull_request:44003", + "right": "pull_request:44029", "accept": false, - "reason": "`upernet` and `mamba/falcon_mamba` are different architectures and code paths." + "reason": "UperNet and RWKV are unrelated model implementations; shared theme is too broad." }, { "left": "pull_request:44002", - "right": "pull_request:44029", + "right": "pull_request:44003", "accept": false, - "reason": "`upernet` and `rwkv` are unrelated model implementations; not the same fix." + "reason": "Different model code paths and refactor scopes; not plausibly one merged PR." }, { "left": "pull_request:44003", "right": "pull_request:44004", "accept": false, - "reason": "Both are refactors, but `mamba/falcon_mamba` and `codegen` touch different model internals and are not mergeable into one PR." + "reason": "Mamba/falcon_mamba output tracing and CodeGen output tracing are separate model-specific changes." }, { "left": "pull_request:43998", "right": "pull_request:44004", "accept": false, - "reason": "`timm_backbone` and `codegen` are separate model paths with different output-tracing mechanics." + "reason": "Timm backbone and CodeGen are unrelated subsystems; the similarity is only generic refactoring." }, { "left": "pull_request:44001", "right": "pull_request:44004", "accept": false, - "reason": "`univnet` and `codegen` are different models; similarity is only at the abstraction level." + "reason": "UnivNet and CodeGen fix different model code paths, so they should not be merged as duplicates." 
}, { "left": "pull_request:44002", "right": "pull_request:44004", "accept": false, - "reason": "`upernet` and `codegen` are unrelated concrete code paths." + "reason": "UperNet and CodeGen are distinct implementations with no shared concrete defect." }, { "left": "pull_request:44000", "right": "pull_request:44004", "accept": false, - "reason": "`vision_text_dual_encoder` and `codegen` are separate model implementations; not the same underlying change." + "reason": "Vision-text dual encoder vs CodeGen are different model paths; same broad theme is insufficient." }, { "left": "pull_request:44025", "right": "pull_request:44026", "accept": false, - "reason": "`depth_anything` and `vision_encoder_decoder` are different model families and different refactors." + "reason": "Depth Anything and VisionEncoderDecoder are unrelated model families; not the same bug or change." }, { "left": "pull_request:44025", "right": "pull_request:44027", "accept": false, - "reason": "`depth_anything` and `speech_encoder_decoder` are separate code paths; do not merge." + "reason": "Depth Anything and SpeechEncoderDecoder are different code paths; only the output-tracing refactor matches." }, { "left": "pull_request:44025", "right": "pull_request:44028", "accept": false, - "reason": "`depth_anything` and `superpoint` are unrelated model changes." + "reason": "Depth Anything and SuperPoint are separate models; no shared underlying issue is evident." }, { "left": "pull_request:44025", "right": "pull_request:44029", "accept": false, - "reason": "Different architectures (`depth_anything` vs `rwkv`), so not a duplicate." + "reason": "Depth Anything and RWKV are unrelated implementations, so this is not one duplicate change." }, { "left": "pull_request:44025", "right": "pull_request:44030", "accept": false, - "reason": "`depth_anything` and `dpr` are distinct model-specific refactors." + "reason": "Depth Anything and DPR touch different model stacks and should remain separate PRs." 
}, { "left": "pull_request:44026", - "right": "pull_request:44027", + "right": "pull_request:44029", "accept": false, - "reason": "`vision_encoder_decoder` and `speech_encoder_decoder` are separate wrapper models with different implementations." + "reason": "VisionEncoderDecoder and RWKV are different code paths; common output-tracing refactor is too broad." }, { "left": "pull_request:44026", - "right": "pull_request:44028", + "right": "pull_request:44027", "accept": false, - "reason": "`vision_encoder_decoder` and `superpoint` are unrelated model code paths." + "reason": "VisionEncoderDecoder and SpeechEncoderDecoder are separate encoder-decoder variants with different implementations." }, { "left": "pull_request:44026", - "right": "pull_request:44029", + "right": "pull_request:44028", "accept": false, - "reason": "Different model families (`vision_encoder_decoder` vs `rwkv`); not the same concrete change." + "reason": "Different model families (vision encoder-decoder vs SuperPoint), so no same-bug basis." }, { "left": "pull_request:44026", "right": "pull_request:44030", "accept": false, - "reason": "`vision_encoder_decoder` and `dpr` are distinct implementations and should not be deduped." + "reason": "VisionEncoderDecoder and DPR are unrelated model code paths; not mergeable as one fix." }, { "left": "pull_request:44027", "right": "pull_request:44028", "accept": false, - "reason": "`speech_encoder_decoder` and `superpoint` are unrelated model changes." + "reason": "SpeechEncoderDecoder and SuperPoint are unrelated model implementations." }, { "left": "pull_request:44027", "right": "pull_request:44029", "accept": false, - "reason": "`speech_encoder_decoder` and `rwkv` are different code paths with no direct overlap." + "reason": "SpeechEncoderDecoder and RWKV are different concrete code paths; only the refactor style overlaps." 
}, { "left": "pull_request:44027", "right": "pull_request:44030", "accept": false, - "reason": "Different model families (`speech_encoder_decoder` vs `dpr`); same pattern, not same change." + "reason": "SpeechEncoderDecoder and DPR are unrelated targets, so this is not a duplicate change." }, { "left": "pull_request:44028", "right": "pull_request:44029", "accept": false, - "reason": "`superpoint` and `rwkv` are different model implementations." + "reason": "SuperPoint and RWKV address different model logic; same theme does not justify merging." }, { "left": "pull_request:44028", "right": "pull_request:44030", "accept": false, - "reason": "`superpoint` and `dpr` are separate model-specific refactors." + "reason": "SuperPoint and DPR are distinct model code paths with no shared bug." }, { "left": "pull_request:44029", "right": "pull_request:44030", "accept": false, - "reason": "`rwkv` and `dpr` are unrelated concrete code paths; not a duplicate PR." + "reason": "RWKV and DPR are unrelated implementations; these are separate model-specific refactors." 
} ] }, @@ -17134,12 +16950,12 @@ "pull_request:44662|pull_request:45075", "pull_request:45075|pull_request:45401" ], - "prepared_review_unit_hash": "b43c81f7b139017747e9f7843f366957762537843d62813738e9ad83bcadf114", + "prepared_review_unit_hash": "89e02490afbfc5586ddd71e25599998ba6dac42e01bab8a9ce87a708572a10ae", "component_budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 3, - "serialized_chars": 11072, + "serialized_chars": 11071, "estimated_input_tokens": 2768, "estimated_eval_tokens": 5792 }, @@ -17147,7 +16963,7 @@ "node_count": 3, "item_count": 3, "soft_pair_count": 3, - "serialized_chars": 11072, + "serialized_chars": 11071, "estimated_input_tokens": 2768, "estimated_eval_tokens": 5792 }, @@ -17156,30 +16972,30 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "All three PRs are separate model-addition changes, not duplicates: PenguinVL, DeepSeek-OCR-2, and Voxtral TTS each introduce distinct model packages and docs, with only shared registration/boilerplate files in common.", - "confidence": 0.95, + "summary": "These are three separate model-addition pull requests for different architectures (PenguinVL, DeepSeek-OCR-2, Voxtral TTS). They share only boilerplate registry/docs updates, not the same underlying bug or code change, so none should be clustered as duplicates.", + "confidence": 0.97, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, - "best_pr_reason": "PR #45401 is the cleanest representative only in the sense that it is explicitly tied to issue 45027 and has a focused scope, but it is still a distinct Voxtral TTS model addition rather than a duplicate of the others.", + "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "pull_request:44662", "right": "pull_request:45401", "accept": false, - "reason": "Different model families and codepaths: PenguinVL vs Voxtral TTS. Shared auto-registration/docs are boilerplate, not evidence of the same fix." 
+ "reason": "Different model implementations and code paths: PenguinVL vision-language model vs Voxtral TTS speech model. Shared auto-registration/doc scaffolding is generic boilerplate, not the same change." }, { "left": "pull_request:44662", "right": "pull_request:45075", "accept": false, - "reason": "These add different models (PenguinVL vs DeepSeek-OCR-2) with different implementation files and tests; overlap is only in common Transformers integration points." + "reason": "Different targets and functionality: PenguinVL vs DeepSeek-OCR-2. The overlap is limited to standard Transformers integration files, which is insufficient to treat them as one PR." }, { "left": "pull_request:45075", "right": "pull_request:45401", "accept": false, - "reason": "DeepSeek-OCR-2 and Voxtral TTS are unrelated model implementations; they touch similar framework wiring but do not fix the same underlying bug or change." + "reason": "These add unrelated models for OCR vs text-to-speech. They touch similar framework plumbing but do not fix or implement the same concrete feature or bug." } ] }, @@ -17209,7 +17025,7 @@ "soft_pairs": [ "pull_request:44979|pull_request:45363" ], - "prepared_review_unit_hash": "2b7c861e4e1eeb9567ebcf05b8cf4a736329f200217639bb3719a95ea1ba7c08", + "prepared_review_unit_hash": "7302f7c67423962ccec9fef113101cdd411e5b09ed6f79626d8ff25f77b1b004", "component_budget": { "node_count": 2, "item_count": 2, @@ -17231,18 +17047,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These PRs are both about fusion, but they are not the same change: #44979 adds a new general Module Fusion API, while #45363 adds a specific n-to-1 KernelConfig-based fusion path and example. 
Related area, different concrete code paths and not mergeable as one PR.", - "confidence": 0.93, + "summary": "These PRs are related feature work around fusion, but they target different layers and code paths: one introduces a generic Module Fusion API, the other wires KernelConfig-based n-to-1 kernel fusion into model loading and adds an example.", + "confidence": 0.87, "canonical_issue_reason": null, - "canonical_pr_reason": "#44979 is the broader, foundational fusion abstraction: it introduces `module_fusion.py` and tests for a general API rather than one specific kernel-config workflow.", + "canonical_pr_reason": "No single canonical PR: 44979 is the foundational module-fusion API, while 45363 is a separate integration of KernelConfig-driven n-to-1 fusion built on top of that idea.", "best_issue_reason": null, - "best_pr_reason": "#44979 is the best representative of this cluster because it defines the general module-fusion mechanism; #45363 is a narrower implementation specific to `KernelConfig` and a particular fused pair.", + "best_pr_reason": "44979 is the broader foundational abstraction and the cleaner cluster representative; 45363 is a more concrete integration/example change rather than the core API surface.", "soft_edge_verdicts": [ { "left": "pull_request:44979", "right": "pull_request:45363", "accept": false, - "reason": "Different underlying changes: one adds a generic Module Fusion API, the other implements n-to-1 kernel fusion via `KernelConfig`. They share a theme but not the same concrete bug/feature." + "reason": "Related but not duplicates: 44979 adds a new generic Module Fusion API (`src/transformers/module_fusion.py` and tests), while 45363 implements a specific `KernelConfig`-based n-to-1 fusion path across integration/model-loading files. They address different abstractions and code paths." 
} ] }, @@ -17272,7 +17088,7 @@ "soft_pairs": [ "pull_request:45086|pull_request:45317" ], - "prepared_review_unit_hash": "af6b583c118706a28d728b19166608f253c256d6aeb711ff896b2fd4dd67fc47", + "prepared_review_unit_hash": "14470d6c689f3a4471d4ec01fd794d0a50221ba7eb0186a5054faee4d0eaf933", "component_budget": { "node_count": 2, "item_count": 2, @@ -17294,18 +17110,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These two PRs are near-identical fixes for the same AttributeError in `_patch_mistral_regex` / Mistral tokenizer pre-tokenizer handling, with the later PR adding tests and broader coverage.", - "confidence": 0.98, + "summary": "Both pull requests target the same Mistral regex AttributeError in `_patch_mistral_regex` and make the same code fix in `tokenization_utils_tokenizers.py`; the later PR also adds a test, so it is the stronger canonical PR candidate.", + "confidence": 0.97, "canonical_issue_reason": null, - "canonical_pr_reason": "PR #45317 is the stronger canonical PR because it contains the same core fix plus test coverage, making it the more complete implementation of the shared bug fix.", + "canonical_pr_reason": "PR 45317 is the more complete version of the same fix: it applies the same code change as 45086 and also adds test coverage, making it the better canonical PR.", "best_issue_reason": null, - "best_pr_reason": "PR #45317 is the best PR overall since it directly addresses the bug with the same code change and includes validation in tests.", + "best_pr_reason": "PR 45317 is the best representative because it fixes the same concrete bug as 45086 but includes additional validation via tests and has more discussion activity.", "soft_edge_verdicts": [ { "left": "pull_request:45086", "right": "pull_request:45317", "accept": true, - "reason": "Same underlying AttributeError in the same tokenizer code path; the diffs are effectively the same fix and could be merged as one PR." 
+ "reason": "Accept: both PRs address the same AttributeError in `_patch_mistral_regex` with the same underlying code-path fix in the same file, and 45317 is a superset with tests." } ] }, @@ -17335,12 +17151,12 @@ "soft_pairs": [ "pull_request:45332|pull_request:45333" ], - "prepared_review_unit_hash": "2ea8761e97f722ded8015a0337e7ae38bcfa94b6ad39d47476f56add37e682e4", + "prepared_review_unit_hash": "81cd4f0525f766dac1bba01b28a0b2a2db1e228b0e84f248d286c1357244d986", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 5397, + "serialized_chars": 5398, "estimated_input_tokens": 1350, "estimated_eval_tokens": 2956 }, @@ -17348,7 +17164,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 5397, + "serialized_chars": 5398, "estimated_input_tokens": 1350, "estimated_eval_tokens": 2956 }, @@ -17357,18 +17173,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are related but not duplicates: one PR introduces heterogeneous per-layer config support, while the other consumes that API to add heterogeneous model/caching behavior. They look like a foundation-and-follow-up pair, not the same underlying change.", - "confidence": 0.9, + "summary": "Two open PRs add adjacent heterogeneous support: #45333 introduces per-layer config infrastructure, while #45332 adds model/cache handling on top of it. 
They are related, but not duplicates.", + "confidence": 0.87, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 45333 is the more foundational and self-contained change: it adds the per-layer heterogeneous config machinery that the modeling PR builds on.", + "canonical_pr_reason": "Choose #45333 as the canonical PR because it introduces the foundational per-layer heterogeneous config API and helper utilities that the other PR builds on.", "best_issue_reason": null, - "best_pr_reason": "PR 45333 is the best representative of the cluster because it defines the core config abstraction; PR 45332 is a downstream consumer that depends on it and spans broader model/cache behavior.", + "best_pr_reason": "Choose #45332 as the best representative PR because it covers the broader end-to-end heterogeneous model and cache integration, making it the more complete user-facing change.", "soft_edge_verdicts": [ { "left": "pull_request:45332", "right": "pull_request:45333", "accept": false, - "reason": "Reject: these target different layers of the feature. 45333 adds per-layer heterogeneous configuration support, while 45332 adds heterogeneous model/cache handling using that config. They share subsystem files, but they are not the same concrete code-path fix and would not cleanly collapse into one PR." + "reason": "Reject: these are related prerequisite/follow-up PRs, not the same underlying change. #45333 adds config infrastructure; #45332 adds modeling/cache support and broader integration work." 
} ] }, @@ -17398,7 +17214,7 @@ "soft_pairs": [ "pull_request:45415|pull_request:45425" ], - "prepared_review_unit_hash": "9cf3f9a9d88d6d5b2d3886e9b55b1ad7742c03a20648815bd38aa7fefbd94fed", + "prepared_review_unit_hash": "10750f0dc6fa0cb26f7c1ccc490a0f34e97e22ebf74b2d58a691713e74fa429c", "component_budget": { "node_count": 2, "item_count": 2, @@ -17420,18 +17236,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Both items are typing-focused PRs, but they target different levels of change: one is a broad type-checking sweep across many `src/transformers/*.py` files, while the other adds typing helpers/protocols for `modeling_utils` and PEFT integration. They are related, but not the same concrete change.", - "confidence": 0.87, + "summary": "Two open PRs in the same typing-cleanup area, but they are not the same underlying change. PR 45415 is a broad sweep adding type checking across many transformers modules and relaxing type-ignore warnings, while PR 45425 is a narrower typing helper update around `modeling_utils`/PEFT-related protocols and aliases.", + "confidence": 0.86, "canonical_issue_reason": null, - "canonical_pr_reason": "No clear canonical PR: 45415 is a wide typing pass across many files, while 45425 is a narrower typing-API/helper update affecting a different slice of the codebase.", + "canonical_pr_reason": "PR 45415 is the best representative of the cluster because it is the broader, more central typing/type-checking sweep affecting many files and build config.", "best_issue_reason": null, - "best_pr_reason": "PR 45425 is the better representative of the cluster because it is more self-contained and concrete; 45415 is a broad sweep that is less suitable as the single canonical change.", + "best_pr_reason": "PR 45415 is the strongest global representative: it covers the larger cross-cutting type-checking initiative, whereas PR 45425 is a narrower follow-up/helper change.", "soft_edge_verdicts": [ { "left": 
"pull_request:45415", "right": "pull_request:45425", "accept": false, - "reason": "Reject: both are type-checking related, but 45415 is a broad lint/type pass and 45425 introduces typing protocols/aliases for specific modeling/PEFT paths. They do not fix the same concrete bug/change and would not plausibly be merged as one PR." + "reason": "Both are typing-related, but they address different scopes: 45415 is a broad repository-wide type-checking pass, while 45425 adds specific typing aliases/protocols for `modeling_utils`/PEFT. They are not the same concrete fix and would not naturally merge into one PR." } ] }, @@ -17461,40 +17277,40 @@ "soft_pairs": [ "pull_request:45470|pull_request:45487" ], - "prepared_review_unit_hash": "1ce3065828b8b7a05a0ce008641cc060cb85183a46d4f3895ac04bfb3b2d817d", + "prepared_review_unit_hash": "1661d1c946f4c4e43067e21000404becf0f43d59ec1853e950583fc551f1d05b", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 4778, - "estimated_input_tokens": 1195, - "estimated_eval_tokens": 2646 + "serialized_chars": 5068, + "estimated_input_tokens": 1267, + "estimated_eval_tokens": 2790 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 4778, - "estimated_input_tokens": 1195, - "estimated_eval_tokens": 2646 + "serialized_chars": 5068, + "estimated_input_tokens": 1267, + "estimated_eval_tokens": 2790 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These PRs are not duplicates: one only skips a SAM3 Lite text test due to a flash-attn dispatch mismatch, while the other fixes a model-parallel/device placement bug across AltCLIP and ChineseCLIP-related code paths. 
They touch different models and different failure modes.", - "confidence": 0.95, + "summary": "The two pull requests are unrelated: one only skips flash-attn composite-model tests for Sam3LiteText, while the other fixes a real model-parallel/device-placement bug across several text models. They should not be clustered as duplicates.", + "confidence": 0.96, "canonical_issue_reason": null, - "canonical_pr_reason": "PR #45487 is the stronger representative because it contains an actual cross-model code fix, whereas PR #45470 is only a test skip for a single model-specific mismatch.", + "canonical_pr_reason": "PR 45487 is the strongest canonical PR because it contains the substantive code fix (device placement for buffered token type ids and related model-parallel adjustments) across multiple affected models, unlike PR 45470 which only adds test skips.", "best_issue_reason": null, - "best_pr_reason": "PR #45487 is the most substantive and broadly representative change in the cluster; PR #45470 is a narrow test adjustment and not a code-path fix.", + "best_pr_reason": "PR 45487 is the best representative artifact for this cluster since it addresses an actual implementation bug with concrete code changes and broader impact; PR 45470 is just a test skip and not a comparable fix.", "soft_edge_verdicts": [ { "left": "pull_request:45470", "right": "pull_request:45487", "accept": false, - "reason": "Different underlying problems: #45470 skips one SAM3 Lite flash-attn test, while #45487 fixes model-parallel device handling in AltCLIP/ChineseCLIP code. Not mergeable as one PR." + "reason": "Different problems and change types: 45470 skips flash-attn tests for Sam3LiteText due to unsupported flash attention, while 45487 fixes a model-parallel/device mismatch in AltCLIP/ChineseCLIP and related models. They are not the same underlying bug or mergeable change." 
} ] }, diff --git a/analysis/current/manifest.json b/analysis/current/manifest.json index 83567582223a4117da62ac402f88f1e4423373b6..7dec0a15a6d72a70c6b4458797f6fe7a3f193048 100644 --- a/analysis/current/manifest.json +++ b/analysis/current/manifest.json @@ -1,8 +1,8 @@ { - "analysis_id": "hybrid-model-20260421t000044z", + "analysis_id": "hybrid-model-20260421t060039z", "archived_artifacts": { - "hybrid": "snapshots/20260421T000044Z/analysis-runs/hybrid-model-20260421t000044z/analysis-report-hybrid.json", - "hybrid_reviews": "snapshots/20260421T000044Z/analysis-runs/hybrid-model-20260421t000044z/analysis-report-hybrid.llm-reviews.json" + "hybrid": "snapshots/20260421T060039Z/analysis-runs/hybrid-model-20260421t060039z/analysis-report-hybrid.json", + "hybrid_reviews": "snapshots/20260421T060039Z/analysis-runs/hybrid-model-20260421t060039z/analysis-report-hybrid.llm-reviews.json" }, "artifacts": { "hybrid": "analysis/current/analysis-report-hybrid.json", @@ -10,9 +10,9 @@ }, "channel": "canonical", "model": null, - "published_at": "2026-04-21T00:08:12Z", + "published_at": "2026-04-21T06:06:36Z", "repo": "huggingface/transformers", "schema_version": 1, - "snapshot_id": "20260421T000044Z", + "snapshot_id": "20260421T060039Z", "variant": "hybrid" } diff --git a/snapshots/20260421T060039Z/analysis-runs/hybrid-model-20260421t060039z/analysis-report-hybrid.json b/snapshots/20260421T060039Z/analysis-runs/hybrid-model-20260421t060039z/analysis-report-hybrid.json new file mode 100644 index 0000000000000000000000000000000000000000..8e247fe394d9df4eefa2c6509f663af3cf9c1964 --- /dev/null +++ b/snapshots/20260421T060039Z/analysis-runs/hybrid-model-20260421t060039z/analysis-report-hybrid.json @@ -0,0 +1,2104 @@ +{ + "schema_version": "1.0", + "repo": "huggingface/transformers", + "snapshot_id": "20260421T060039Z", + "generated_at": "2026-04-21T06:06:35Z", + "evidence_quality": "full", + "llm_enrichment": true, + "meta_bugs": [ + { + "cluster_id": "cluster-43979-11", + "summary": 
"Cluster of 1 issues and 10 PRs centered on issue #43979.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 43979, + "canonical_pr_number": 44007, + "issue_numbers": [ + 43979 + ], + "pr_numbers": [ + 43996, + 44007, + 44013, + 44044, + 44066, + 44072, + 44085, + 44129, + 44154, + 44722 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 43996, + "right_pr_number": 44007, + "code_similarity": 0.179, + "size_similarity": 0.576, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.429, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44013, + "code_similarity": 0.122, + "size_similarity": 0.318, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.392, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44044, + "code_similarity": 0.245, + "size_similarity": 0.864, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.479, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44066, + "code_similarity": 0.225, + "size_similarity": 0.818, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.408, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44072, + "code_similarity": 0.14, + "size_similarity": 0.303, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.528, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44085, + "code_similarity": 0.216, + "size_similarity": 0.783, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.398, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44129, + "code_similarity": 0.163, 
+ "size_similarity": 0.643, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.229, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44154, + "code_similarity": 0.153, + "size_similarity": 0.535, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.31, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44722, + "code_similarity": 0.225, + "size_similarity": 0.848, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.368, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44013, + "code_similarity": 0.19, + "size_similarity": 0.553, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.531, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44044, + "code_similarity": 0.186, + "size_similarity": 0.667, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.354, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44066, + "code_similarity": 0.188, + "size_similarity": 0.704, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.315, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44072, + "code_similarity": 0.212, + "size_similarity": 0.526, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.708, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44085, + "code_similarity": 0.195, + "size_similarity": 0.735, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.318, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44129, + "code_similarity": 0.103, + "size_similarity": 
0.37, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.191, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44154, + "code_similarity": 0.238, + "size_similarity": 0.93, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.344, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44722, + "code_similarity": 0.178, + "size_similarity": 0.679, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.28, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44044, + "code_similarity": 0.126, + "size_similarity": 0.368, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.351, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44066, + "code_similarity": 0.127, + "size_similarity": 0.389, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.325, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44072, + "code_similarity": 0.29, + "size_similarity": 0.952, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.667, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44085, + "code_similarity": 0.131, + "size_similarity": 0.406, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.329, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44129, + "code_similarity": 0.07, + "size_similarity": 0.205, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.192, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44154, + "code_similarity": 0.177, + "size_similarity": 0.594, + "file_overlap": 
0.0, + "area_overlap": 0.0, + "patch_similarity": 0.389, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44722, + "code_similarity": 0.118, + "size_similarity": 0.375, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.287, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44066, + "code_similarity": 0.25, + "size_similarity": 0.947, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.404, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44072, + "code_similarity": 0.136, + "size_similarity": 0.351, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.442, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44085, + "code_similarity": 0.24, + "size_similarity": 0.906, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.394, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44129, + "code_similarity": 0.147, + "size_similarity": 0.555, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.243, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44154, + "code_similarity": 0.17, + "size_similarity": 0.62, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.306, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44722, + "code_similarity": 0.257, + "size_similarity": 0.982, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.402, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44072, + "code_similarity": 0.133, + "size_similarity": 0.37, + "file_overlap": 0.0, + "area_overlap": 0.0, 
+ "patch_similarity": 0.393, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44085, + "code_similarity": 0.763, + "size_similarity": 0.957, + "file_overlap": 0.5, + "area_overlap": 0.825, + "patch_similarity": 0.887, + "shared_filenames": [ + "src/transformers/models/gptj/modeling_gptj.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/gptj/modeling_gptj.py", + "left_ranges": [ + [ + 33, + 41 + ], + [ + 174, + 181 + ], + [ + 250, + 257 + ], + [ + 398, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 466 + ], + [ + 471, + 488 + ], + [ + 518, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 606 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ], + "right_ranges": [ + [ + 33, + 40 + ], + [ + 173, + 180 + ], + [ + 249, + 256 + ], + [ + 397, + 405 + ], + [ + 408, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 465 + ], + [ + 470, + 489 + ], + [ + 517, + 539 + ], + [ + 553, + 559 + ], + [ + 565, + 574 + ], + [ + 579, + 597 + ], + [ + 600, + 611 + ], + [ + 633, + 639 + ], + [ + 645, + 652 + ], + [ + 657, + 673 + ], + [ + 716, + 728 + ], + [ + 737, + 743 + ], + [ + 748, + 771 + ], + [ + 789, + 794 + ] + ] + } + ] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44129, + "code_similarity": 0.145, + "size_similarity": 0.526, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.263, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44154, + "code_similarity": 0.174, + "size_similarity": 0.654, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.286, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44722, + "code_similarity": 0.907, + "size_similarity": 0.964, + "file_overlap": 1.0, + 
"area_overlap": 0.808, + "patch_similarity": 0.874, + "shared_filenames": [ + "src/transformers/models/codegen/modeling_codegen.py", + "src/transformers/models/gptj/modeling_gptj.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/codegen/modeling_codegen.py", + "left_ranges": [ + [ + 245, + 266 + ] + ], + "right_ranges": [ + [ + 228, + 234 + ] + ] + }, + { + "filename": "src/transformers/models/gptj/modeling_gptj.py", + "left_ranges": [ + [ + 33, + 41 + ], + [ + 174, + 181 + ], + [ + 250, + 257 + ], + [ + 398, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 466 + ], + [ + 471, + 488 + ], + [ + 518, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 606 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ], + "right_ranges": [ + [ + 33, + 42 + ], + [ + 175, + 182 + ], + [ + 251, + 258 + ], + [ + 399, + 420 + ], + [ + 426, + 435 + ], + [ + 460, + 467 + ], + [ + 472, + 489 + ], + [ + 519, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 612 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 663 + ], + [ + 666, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ] + } + ] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44085, + "code_similarity": 0.137, + "size_similarity": 0.387, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.398, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44129, + "code_similarity": 0.074, + "size_similarity": 0.195, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.231, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44154, + "code_similarity": 0.175, + "size_similarity": 0.566, + "file_overlap": 0.0, + 
"area_overlap": 0.0, + "patch_similarity": 0.414, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44722, + "code_similarity": 0.124, + "size_similarity": 0.357, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.347, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44085, + "right_pr_number": 44129, + "code_similarity": 0.141, + "size_similarity": 0.503, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.272, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44085, + "right_pr_number": 44154, + "code_similarity": 0.18, + "size_similarity": 0.684, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.289, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44085, + "right_pr_number": 44722, + "code_similarity": 0.728, + "size_similarity": 0.923, + "file_overlap": 0.5, + "area_overlap": 0.791, + "patch_similarity": 0.78, + "shared_filenames": [ + "src/transformers/models/gptj/modeling_gptj.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/gptj/modeling_gptj.py", + "left_ranges": [ + [ + 33, + 40 + ], + [ + 173, + 180 + ], + [ + 249, + 256 + ], + [ + 397, + 405 + ], + [ + 408, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 465 + ], + [ + 470, + 489 + ], + [ + 517, + 539 + ], + [ + 553, + 559 + ], + [ + 565, + 574 + ], + [ + 579, + 597 + ], + [ + 600, + 611 + ], + [ + 633, + 639 + ], + [ + 645, + 652 + ], + [ + 657, + 673 + ], + [ + 716, + 728 + ], + [ + 737, + 743 + ], + [ + 748, + 771 + ], + [ + 789, + 794 + ] + ], + "right_ranges": [ + [ + 33, + 42 + ], + [ + 175, + 182 + ], + [ + 251, + 258 + ], + [ + 399, + 420 + ], + [ + 426, + 435 + ], + [ + 460, + 467 + ], + [ + 472, + 489 + ], + [ + 519, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 612 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ 
+ 658, + 663 + ], + [ + 666, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ] + } + ] + }, + { + "left_pr_number": 44129, + "right_pr_number": 44154, + "code_similarity": 0.099, + "size_similarity": 0.344, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.199, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44129, + "right_pr_number": 44722, + "code_similarity": 0.146, + "size_similarity": 0.545, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.247, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44154, + "right_pr_number": 44722, + "code_similarity": 0.164, + "size_similarity": 0.631, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.255, + "shared_filenames": [], + "shared_file_areas": [] + } + ] + }, + { + "cluster_id": "cluster-41211-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #41211.", + "status": "open", + "confidence": 0.75, + "canonical_issue_number": 41211, + "canonical_pr_number": 44339, + "issue_numbers": [ + 41211 + ], + "pr_numbers": [ + 41356, + 44339 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target" + ], + "pr_comparisons": [ + { + "left_pr_number": 41356, + "right_pr_number": 44339, + "code_similarity": 0.164, + "size_similarity": 0.096, + "file_overlap": 0.3, + "area_overlap": 0.078, + "patch_similarity": 0.182, + "shared_filenames": [ + "docs/source/en/model_doc/deimv2.md", + "src/transformers/models/deimv2/__init__.py", + "src/transformers/models/deimv2/configuration_deimv2.py", + "src/transformers/models/deimv2/modeling_deimv2.py", + "tests/models/deimv2/__init__.py", + "tests/models/deimv2/test_modeling_deimv2.py" + ], + "shared_file_areas": [ + { + "filename": "docs/source/en/model_doc/deimv2.md", + "left_ranges": [ + [ + 1, + 132 + ] + ], + "right_ranges": [ + [ + 1, + 65 + ] + ] + }, + { + "filename": 
"src/transformers/models/deimv2/__init__.py", + "left_ranges": [ + [ + 1, + 15 + ] + ], + "right_ranges": [ + [ + 1, + 29 + ] + ] + }, + { + "filename": "src/transformers/models/deimv2/configuration_deimv2.py", + "left_ranges": [ + [ + 1, + 74 + ] + ], + "right_ranges": [ + [ + 1, + 266 + ] + ] + }, + { + "filename": "tests/models/deimv2/test_modeling_deimv2.py", + "left_ranges": [ + [ + 1, + 15 + ] + ], + "right_ranges": [ + [ + 1, + 1753 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-43656-4", + "summary": "Cluster of 1 issues and 3 PRs centered on issue #43824.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 43824, + "canonical_pr_number": 43656, + "issue_numbers": [ + 43824 + ], + "pr_numbers": [ + 43656, + 43836, + 43842 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 43656, + "right_pr_number": 43836, + "code_similarity": 0.36, + "size_similarity": 0.176, + "file_overlap": 1.0, + "area_overlap": 0.051, + "patch_similarity": 0.048, + "shared_filenames": [ + "src/transformers/cli/serve.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/cli/serve.py", + "left_ranges": [ + [ + 11, + 18 + ], + [ + 30, + 36 + ], + [ + 315, + 323 + ], + [ + 665, + 671 + ], + [ + 931, + 937 + ], + [ + 1843, + 1849 + ], + [ + 1868, + 1874 + ] + ], + "right_ranges": [ + [ + 11, + 18 + ], + [ + 359, + 429 + ], + [ + 584, + 590 + ], + [ + 1892, + 1910 + ], + [ + 1917, + 1923 + ] + ] + } + ] + }, + { + "left_pr_number": 43656, + "right_pr_number": 43842, + "code_similarity": 0.405, + "size_similarity": 0.5, + "file_overlap": 1.0, + "area_overlap": 0.0, + "patch_similarity": 0.036, + "shared_filenames": [ + "src/transformers/cli/serve.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/cli/serve.py", + "left_ranges": [ + [ + 11, + 18 + ], + [ + 30, + 36 + ], + [ + 315, + 323 + ], + [ + 665, + 671 + ], + [ + 931, + 937 + ], 
+ [ + 1843, + 1849 + ], + [ + 1868, + 1874 + ] + ], + "right_ranges": [ + [ + 54, + 61 + ], + [ + 587, + 593 + ] + ] + } + ] + }, + { + "left_pr_number": 43836, + "right_pr_number": 43842, + "code_similarity": 0.332, + "size_similarity": 0.088, + "file_overlap": 1.0, + "area_overlap": 0.033, + "patch_similarity": 0.017, + "shared_filenames": [ + "src/transformers/cli/serve.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/cli/serve.py", + "left_ranges": [ + [ + 11, + 18 + ], + [ + 359, + 429 + ], + [ + 584, + 590 + ], + [ + 1892, + 1910 + ], + [ + 1917, + 1923 + ] + ], + "right_ranges": [ + [ + 54, + 61 + ], + [ + 587, + 593 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-43366-4", + "summary": "Cluster of 1 issues and 3 PRs centered on issue #43366.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 43366, + "canonical_pr_number": 45506, + "issue_numbers": [ + 43366 + ], + "pr_numbers": [ + 43757, + 45500, + 45506 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 43757, + "right_pr_number": 45500, + "code_similarity": 0.096, + "size_similarity": 0.149, + "file_overlap": 0.2, + "area_overlap": 0.005, + "patch_similarity": 0.031, + "shared_filenames": [ + "src/transformers/modeling_gguf_pytorch_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/modeling_gguf_pytorch_utils.py", + "left_ranges": [ + [ + 462, + 474 + ] + ], + "right_ranges": [ + [ + 171, + 277 + ], + [ + 456, + 462 + ], + [ + 518, + 525 + ], + [ + 620, + 627 + ], + [ + 709, + 753 + ] + ] + } + ] + }, + { + "left_pr_number": 43757, + "right_pr_number": 45506, + "code_similarity": 0.116, + "size_similarity": 0.17, + "file_overlap": 0.25, + "area_overlap": 0.005, + "patch_similarity": 0.034, + "shared_filenames": [ + "src/transformers/modeling_gguf_pytorch_utils.py" + ], + "shared_file_areas": [ + { + "filename": 
"src/transformers/modeling_gguf_pytorch_utils.py", + "left_ranges": [ + [ + 462, + 474 + ] + ], + "right_ranges": [ + [ + 171, + 277 + ], + [ + 456, + 462 + ], + [ + 518, + 525 + ], + [ + 620, + 627 + ], + [ + 709, + 753 + ] + ] + } + ] + }, + { + "left_pr_number": 45500, + "right_pr_number": 45506, + "code_similarity": 0.886, + "size_similarity": 0.877, + "file_overlap": 0.75, + "area_overlap": 0.996, + "patch_similarity": 0.912, + "shared_filenames": [ + "src/transformers/integrations/ggml.py", + "src/transformers/modeling_gguf_pytorch_utils.py", + "tests/quantization/ggml/test_ggml.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/integrations/ggml.py", + "left_ranges": [ + [ + 89, + 109 + ] + ], + "right_ranges": [ + [ + 89, + 109 + ] + ] + }, + { + "filename": "src/transformers/modeling_gguf_pytorch_utils.py", + "left_ranges": [ + [ + 171, + 277 + ], + [ + 456, + 462 + ], + [ + 518, + 525 + ], + [ + 620, + 627 + ], + [ + 709, + 753 + ] + ], + "right_ranges": [ + [ + 171, + 277 + ], + [ + 456, + 462 + ], + [ + 518, + 525 + ], + [ + 620, + 627 + ], + [ + 709, + 753 + ] + ] + }, + { + "filename": "tests/quantization/ggml/test_ggml.py", + "left_ranges": [ + [ + 351, + 358 + ], + [ + 386, + 406 + ] + ], + "right_ranges": [ + [ + 351, + 358 + ], + [ + 386, + 405 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-43240-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #43240.", + "status": "open", + "confidence": 0.75, + "canonical_issue_number": 43240, + "canonical_pr_number": 43251, + "issue_numbers": [ + 43240 + ], + "pr_numbers": [ + 43251, + 43254 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target" + ], + "pr_comparisons": [ + { + "left_pr_number": 43251, + "right_pr_number": 43254, + "code_similarity": 0.794, + "size_similarity": 0.64, + "file_overlap": 1.0, + "area_overlap": 0.667, + "patch_similarity": 0.882, + "shared_filenames": [ + "src/transformers/loss/loss_utils.py" + ], + 
"shared_file_areas": [ + { + "filename": "src/transformers/loss/loss_utils.py", + "left_ranges": [ + [ + 30, + 50 + ] + ], + "right_ranges": [ + [ + 30, + 43 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-45081-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #45081.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 45081, + "canonical_pr_number": 45317, + "issue_numbers": [ + 45081 + ], + "pr_numbers": [ + 45086, + 45317 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 45086, + "right_pr_number": 45317, + "code_similarity": 0.589, + "size_similarity": 0.34, + "file_overlap": 0.5, + "area_overlap": 1.0, + "patch_similarity": 0.136, + "shared_filenames": [ + "src/transformers/tokenization_utils_tokenizers.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/tokenization_utils_tokenizers.py", + "left_ranges": [ + [ + 1360, + 1370 + ], + [ + 1374, + 1380 + ] + ], + "right_ranges": [ + [ + 1360, + 1370 + ], + [ + 1374, + 1380 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-43698-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #43698.", + "status": "open", + "confidence": 0.75, + "canonical_issue_number": 43698, + "canonical_pr_number": 43779, + "issue_numbers": [ + 43698 + ], + "pr_numbers": [ + 43779, + 43816 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target" + ], + "pr_comparisons": [ + { + "left_pr_number": 43779, + "right_pr_number": 43816, + "code_similarity": 0.418, + "size_similarity": 0.538, + "file_overlap": 1.0, + "area_overlap": 0.02, + "patch_similarity": 0.02, + "shared_filenames": [ + "src/transformers/integrations/integration_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/integrations/integration_utils.py", + "left_ranges": [ + [ + 2227, + 2241 + ], + [ + 2303, + 2309 + ] + ], + "right_ranges": [ + [ + 2278, + 2291 
+ ], + [ + 2309, + 2322 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-44018-2", + "summary": "Cluster of 2 related pull requests linked by soft_similarity.", + "status": "open", + "confidence": 0.5, + "canonical_issue_number": null, + "canonical_pr_number": 44068, + "issue_numbers": [], + "pr_numbers": [ + 44018, + 44068 + ], + "evidence_types": [ + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 44018, + "right_pr_number": 44068, + "code_similarity": 0.766, + "size_similarity": 0.939, + "file_overlap": 1.0, + "area_overlap": 0.425, + "patch_similarity": 0.866, + "shared_filenames": [ + "src/transformers/models/gpt_neo/modeling_gpt_neo.py", + "tests/models/gpt_neo/test_modeling_gpt_neo.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/gpt_neo/modeling_gpt_neo.py", + "left_ranges": [ + [ + 26, + 46 + ], + [ + 138, + 143 + ], + [ + 182, + 187 + ], + [ + 283, + 295 + ], + [ + 327, + 341 + ], + [ + 348, + 354 + ], + [ + 360, + 369 + ], + [ + 399, + 406 + ], + [ + 411, + 419 + ], + [ + 428, + 433 + ], + [ + 472, + 492 + ], + [ + 507, + 513 + ], + [ + 519, + 528 + ], + [ + 541, + 559 + ], + [ + 562, + 567 + ], + [ + 595, + 601 + ], + [ + 607, + 614 + ], + [ + 627, + 643 + ], + [ + 685, + 690 + ], + [ + 708, + 714 + ], + [ + 720, + 727 + ], + [ + 740, + 757 + ], + [ + 761, + 766 + ], + [ + 780, + 786 + ], + [ + 791, + 798 + ], + [ + 807, + 822 + ], + [ + 840, + 845 + ] + ], + "right_ranges": [ + [ + 26, + 31 + ], + [ + 34, + 44 + ], + [ + 136, + 141 + ], + [ + 180, + 185 + ], + [ + 281, + 293 + ], + [ + 325, + 339 + ], + [ + 346, + 352 + ], + [ + 358, + 367 + ], + [ + 397, + 404 + ], + [ + 409, + 417 + ], + [ + 426, + 434 + ], + [ + 463, + 483 + ], + [ + 498, + 504 + ], + [ + 510, + 519 + ], + [ + 532, + 550 + ], + [ + 553, + 564 + ], + [ + 586, + 592 + ], + [ + 598, + 605 + ], + [ + 618, + 634 + ], + [ + 676, + 688 + ], + [ + 699, + 705 + ], + [ + 711, + 718 + ], + [ + 731, + 748 + ], + [ + 752, + 762 + ], + [ + 
771, + 777 + ], + [ + 782, + 789 + ], + [ + 798, + 813 + ], + [ + 831, + 836 + ] + ] + }, + { + "filename": "tests/models/gpt_neo/test_modeling_gpt_neo.py", + "left_ranges": [ + [ + 458, + 464 + ] + ], + "right_ranges": [ + [ + 458, + 464 + ] + ] + } + ] + } + ] + } + ], + "duplicate_issues": [], + "duplicate_prs": [ + { + "cluster_id": "cluster-41211-3", + "canonical_pr_number": 44339, + "duplicate_pr_numbers": [ + 41356 + ], + "target_issue_number": 41211, + "reason": "PRs in cluster-41211-3 are treated as duplicates because they converge on issue #41211 with closing_reference, shared_issue_target evidence." + }, + { + "cluster_id": "cluster-43240-3", + "canonical_pr_number": 43251, + "duplicate_pr_numbers": [ + 43254 + ], + "target_issue_number": 43240, + "reason": "PRs in cluster-43240-3 are treated as duplicates because they converge on issue #43240 with closing_reference, shared_issue_target evidence." + }, + { + "cluster_id": "cluster-43366-4", + "canonical_pr_number": 45506, + "duplicate_pr_numbers": [ + 43757, + 45500 + ], + "target_issue_number": 43366, + "reason": "PRs in cluster-43366-4 are treated as duplicates because they converge on issue #43366 with closing_reference, shared_issue_target, soft_similarity evidence." + }, + { + "cluster_id": "cluster-43656-4", + "canonical_pr_number": 43656, + "duplicate_pr_numbers": [ + 43836, + 43842 + ], + "target_issue_number": 43824, + "reason": "PRs in cluster-43656-4 are treated as duplicates because they converge on issue #43824 with closing_reference, shared_issue_target, soft_similarity evidence." + }, + { + "cluster_id": "cluster-43698-3", + "canonical_pr_number": 43779, + "duplicate_pr_numbers": [ + 43816 + ], + "target_issue_number": 43698, + "reason": "PRs in cluster-43698-3 are treated as duplicates because they converge on issue #43698 with closing_reference, shared_issue_target evidence." 
+ }, + { + "cluster_id": "cluster-43979-11", + "canonical_pr_number": 44007, + "duplicate_pr_numbers": [ + 43996, + 44013, + 44044, + 44066, + 44072, + 44085, + 44129, + 44154, + 44722 + ], + "target_issue_number": 43979, + "reason": "PRs in cluster-43979-11 are treated as duplicates because they converge on issue #43979 with closing_reference, shared_issue_target, soft_similarity evidence." + }, + { + "cluster_id": "cluster-44018-2", + "canonical_pr_number": 44068, + "duplicate_pr_numbers": [ + 44018 + ], + "target_issue_number": null, + "reason": "PRs in cluster-44018-2 are treated as duplicates because they share soft_similarity evidence." + }, + { + "cluster_id": "cluster-45081-3", + "canonical_pr_number": 45317, + "duplicate_pr_numbers": [ + 45086 + ], + "target_issue_number": 45081, + "reason": "PRs in cluster-45081-3 are treated as duplicates because they converge on issue #45081 with closing_reference, shared_issue_target, soft_similarity evidence." + } + ], + "best_issue": { + "cluster_id": "cluster-43366-4", + "issue_number": 43366, + "reason": "Issue #43366 is the strongest global issue candidate because it is open, belongs to a cluster with 4 artifacts, and carries 5 discussion comments plus 7 inbound references.", + "score": 157.0 + }, + "best_pr": { + "cluster_id": "cluster-43979-11", + "pr_number": 44007, + "reason": "PR #44007 is the strongest global PR candidate because it is open, belongs to a cluster with 11 artifacts, links to 1 issues, and carries 2 review/discussion events.", + "score": 238.886 + } +} diff --git a/snapshots/20260421T060039Z/analysis-runs/hybrid-model-20260421t060039z/analysis-report-hybrid.llm-reviews.json b/snapshots/20260421T060039Z/analysis-runs/hybrid-model-20260421t060039z/analysis-report-hybrid.llm-reviews.json new file mode 100644 index 0000000000000000000000000000000000000000..77cb05758f7a406dbe0d9db7a56d9cbf7620ce3e --- /dev/null +++ 
b/snapshots/20260421T060039Z/analysis-runs/hybrid-model-20260421t060039z/analysis-report-hybrid.llm-reviews.json @@ -0,0 +1,17325 @@ +{ + "schema_version": "1.0", + "repo": "huggingface/transformers", + "snapshot_id": "20260421T060039Z", + "generated_at": "2026-04-21T06:06:35Z", + "model": "gpt-5.4-mini?service_tier=flex", + "reviews": [ + { + "label": "issue", + "component_index": 1, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-13244-3", + "nodes": [ + "issue:13244", + "issue:15980", + "issue:16618" + ], + "soft_pairs": [ + "issue:13244|issue:16618", + "issue:15980|issue:16618" + ], + "prepared_review_unit_hash": "31dd82c6fb7a9693f3ae7988bd88e9fed476e751189d484c1bbf29009a125ea4", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 2479, + "estimated_input_tokens": 620, + "estimated_eval_tokens": 1496 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 2479, + "estimated_input_tokens": 620, + "estimated_eval_tokens": 1496 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These do not look like duplicates: one is about Tapas tokenization differing from TensorFlow, one is about a bad login/auth error when downloading a private model, and one is a general failure to load bert-base-uncased. 
They touch different code paths and failure modes.", + "confidence": 0.96, + "canonical_issue_reason": "Issue #16618 is the broadest, most general model-loading report, so it is the best single issue to represent the cluster if one must be chosen.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #16618 is the most general and user-facing of the three; the other two are narrower and clearly about different problems.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:13244", + "right": "issue:16618", + "accept": false, + "reason": "Tokenization mismatch for Tapas is a different problem from failing to load bert-base-uncased; they involve different subsystems and symptoms." + }, + { + "left": "issue:15980", + "right": "issue:16618", + "accept": false, + "reason": "Private-model download/auth error is a distinct login/authorization issue, not the same as a generic model-loading failure." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:24643", + "issue:30277", + "issue:34634", + "issue:34928", + "issue:35545", + "issue:36010", + "issue:36331", + "issue:39290", + "issue:41093", + "issue:41720", + "issue:41762", + "issue:42915", + "issue:43039", + "issue:43296", + "issue:43366", + "issue:43404", + "issue:43425", + "issue:43531", + "issue:43541", + "issue:43596", + "issue:43638", + "issue:43716", + "issue:43792", + "issue:43827", + "issue:43828", + "issue:43854", + "issue:43866", + "issue:43901", + "issue:43975", + "issue:44292", + "issue:44322", + "issue:44457", + "issue:44509", + "issue:44512", + 
"issue:44560", + "issue:44661", + "issue:44805", + "issue:44841", + "issue:44863", + "issue:44918", + "issue:45070", + "issue:45081", + "issue:45084", + "issue:45161", + "issue:45237", + "issue:45362", + "issue:45464", + "issue:45507" + ], + "soft_pairs": [ + "issue:44322|issue:45464", + "issue:43716|issue:45237", + "issue:43828|issue:45237", + "issue:45237|issue:45507", + "issue:44292|issue:45237", + "issue:30277|issue:35545", + "issue:35545|issue:42915", + "issue:35545|issue:41720", + "issue:44509|issue:44512", + "issue:35545|issue:36010", + "issue:43638|issue:44805", + "issue:43596|issue:44805", + "issue:44805|issue:45161", + "issue:44805|issue:44918", + "issue:44661|issue:44805", + "issue:30277|issue:43638", + "issue:34634|issue:35545", + "issue:43596|issue:43638", + "issue:43975|issue:44457", + "issue:43039|issue:45161", + "issue:44292|issue:44457", + "issue:34928|issue:44805", + "issue:24643|issue:30277", + "issue:43975|issue:45237", + "issue:43638|issue:45161", + "issue:34928|issue:36331", + "issue:43596|issue:45161", + "issue:43716|issue:45507", + "issue:39290|issue:45081", + "issue:43828|issue:45161", + "issue:43425|issue:44292", + "issue:34928|issue:41720", + "issue:44918|issue:45161", + "issue:43541|issue:43716", + "issue:43828|issue:44292", + "issue:43716|issue:44292", + "issue:44292|issue:44918", + "issue:43039|issue:44918", + "issue:41762|issue:43638", + "issue:43827|issue:44512", + "issue:43716|issue:44560", + "issue:43716|issue:44918", + "issue:43039|issue:43828", + "issue:30277|issue:36331", + "issue:43039|issue:44805", + "issue:34928|issue:36010", + "issue:41762|issue:43596", + "issue:43901|issue:44512", + "issue:39290|issue:41720", + "issue:43039|issue:44560", + "issue:43866|issue:44863", + "issue:43854|issue:43866", + "issue:43425|issue:45070", + "issue:39290|issue:43531", + "issue:41093|issue:43638", + "issue:43404|issue:43866", + "issue:45362|issue:45464", + "issue:43792|issue:45070", + "issue:43541|issue:43828", + "issue:36010|issue:39290", + 
"issue:44841|issue:45084", + "issue:41093|issue:41762", + "issue:43296|issue:43366" + ], + "prepared_review_unit_hash": "538109df4c9a9e0dd7be883c0b25881fc90bb4a5eb48c1b663147c582af79d04", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 63, + "serialized_chars": 46478, + "estimated_input_tokens": 11620, + "estimated_eval_tokens": 23496 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous: many issues share broad themes like model loading, DeepSpeed/ZeRO, or docs cleanup, but they point to different concrete failures. Only one soft pair looks like a true duplicate: the two ZeRO-3/Bert index-out-of-bounds reports.", + "confidence": 0.89, + "canonical_issue_reason": "issue:43596 is the cleanest representative of the ZeRO-3/Bert \"index 0 is out of bounds for dimension 0 with size 0\" bug; issue:43638 appears to be the same failure with a slightly different wording.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43596 is the strongest canonical issue for the only clear duplicate subcluster in this set; it is specific, reproducible, and has a near-identical twin in issue:43638.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44322", + "right": "issue:45464", + "accept": false, + "reason": "Same Qwen3.5 area, but different failure modes: config attribute error vs streaming chat/completions failure." + }, + { + "left": "issue:43716", + "right": "issue:45237", + "accept": false, + "reason": "Different models and different bugs; one is a dtype mismatch in Mistral-3 preprocessing, the other is an AMD GPU runtime issue for GPT-OSS." 
+ }, + { + "left": "issue:43828", + "right": "issue:45237", + "accept": false, + "reason": "Both are runtime failures, but the concrete code paths and models differ: autocast dtype mismatch vs AMD GPU incompatibility." + }, + { + "left": "issue:45237", + "right": "issue:45507", + "accept": false, + "reason": "Different model families and different failure types; not the same underlying change or bug." + }, + { + "left": "issue:44292", + "right": "issue:45237", + "accept": false, + "reason": "Different model-specific failures; NVFP4 runtime error is unrelated to AMD GPU support." + }, + { + "left": "issue:30277", + "right": "issue:35545", + "accept": false, + "reason": "DeepSpeed collective mismatch and ONNX export are unrelated code paths." + }, + { + "left": "issue:35545", + "right": "issue:42915", + "accept": false, + "reason": "Both mention model export/loading issues, but the concrete problems are unrelated." + }, + { + "left": "issue:35545", + "right": "issue:41720", + "accept": false, + "reason": "ONNX export error versus auto device mapping cuda assert; different bugs." + }, + { + "left": "issue:44509", + "right": "issue:44512", + "accept": false, + "reason": "Both are v5 docs cleanups, but they target different removed features and would not merge into one fix." + }, + { + "left": "issue:35545", + "right": "issue:36010", + "accept": false, + "reason": "Model export error and GenerationMixin import failure are unrelated." + }, + { + "left": "issue:43638", + "right": "issue:44805", + "accept": false, + "reason": "Both are shape/index errors, but the models and failure contexts differ." + }, + { + "left": "issue:43596", + "right": "issue:44805", + "accept": false, + "reason": "Different symptom details and different model/setup; not the same concrete bug." + }, + { + "left": "issue:44805", + "right": "issue:45161", + "accept": false, + "reason": "Mask-shape IndexError is unrelated to GPT-OSS tensor-parallel behavior." 
+ }, + { + "left": "issue:44805", + "right": "issue:44918", + "accept": false, + "reason": "Different model families and failure points; not a duplicate." + }, + { + "left": "issue:44661", + "right": "issue:44805", + "accept": false, + "reason": "Tokenizer-mapping guardrail bug is unrelated to the mask/tensor shape error." + }, + { + "left": "issue:30277", + "right": "issue:43638", + "accept": false, + "reason": "DeepSpeed collective mismatch and Bert zero-3 index error are different bugs." + }, + { + "left": "issue:34634", + "right": "issue:35545", + "accept": false, + "reason": "Processor preset handling and ONNX export are unrelated." + }, + { + "left": "issue:43596", + "right": "issue:43638", + "accept": true, + "reason": "Same DeepSpeed ZeRO-3/Bert index-0-out-of-bounds failure; these look like the same underlying bug." + }, + { + "left": "issue:43975", + "right": "issue:44457", + "accept": false, + "reason": "Different problems: detokenization regression versus LoRA merge/save mismatch." + }, + { + "left": "issue:43039", + "right": "issue:45161", + "accept": false, + "reason": "Liger Kernel cross-entropy dispatch and GPT-OSS tensor parallel failure are unrelated." + }, + { + "left": "issue:44292", + "right": "issue:44457", + "accept": false, + "reason": "Different models and different failure modes." + }, + { + "left": "issue:34928", + "right": "issue:44805", + "accept": false, + "reason": "Activation-checkpointing/FSDP tensor-size recomputation is unrelated to the mask shape IndexError." + }, + { + "left": "issue:24643", + "right": "issue:30277", + "accept": false, + "reason": "Both involve DeepSpeed, but the concrete failures differ completely." + }, + { + "left": "issue:43975", + "right": "issue:45237", + "accept": false, + "reason": "Unrelated model/runtime issues." + }, + { + "left": "issue:43638", + "right": "issue:45161", + "accept": false, + "reason": "Bert ZeRO-3 index error and GPT-OSS TP failure are different bugs." 
+ }, + { + "left": "issue:34928", + "right": "issue:36331", + "accept": false, + "reason": "Training/checkpointing tensor mismatch and custom trainer signature mismatch are unrelated." + }, + { + "left": "issue:43596", + "right": "issue:45161", + "accept": false, + "reason": "Different models and different failure classes." + }, + { + "left": "issue:43716", + "right": "issue:45507", + "accept": false, + "reason": "Image-preprocessor dtype mismatch and invalid method call in GraniteMoEHybrid are unrelated." + }, + { + "left": "issue:39290", + "right": "issue:45081", + "accept": false, + "reason": "Gemma3 sliding_window_pattern missing and Mistral tokenizer regex patch crash are different model bugs." + }, + { + "left": "issue:43828", + "right": "issue:45161", + "accept": false, + "reason": "Autocast dtype mismatch is unrelated to GPT-OSS MoE tensor parallel behavior." + }, + { + "left": "issue:43425", + "right": "issue:44292", + "accept": false, + "reason": "Torch version compatibility and Qwen NVFP4 runtime failure are unrelated." + }, + { + "left": "issue:34928", + "right": "issue:41720", + "accept": false, + "reason": "FSDP/activation-checkpointing recomputation issue is unrelated to Qwen auto device mapping." + }, + { + "left": "issue:44918", + "right": "issue:45161", + "accept": false, + "reason": "TRL SFT embedding unpacking failure and GPT-OSS TP failure are different problems." + }, + { + "left": "issue:41762", + "right": "issue:43638", + "accept": false, + "reason": "Both are ZeRO-3 loading errors, but different models and not clearly the same bug." + }, + { + "left": "issue:43827", + "right": "issue:44512", + "accept": false, + "reason": "Both are docs issues, but they refer to different removed commands/features." + }, + { + "left": "issue:43716", + "right": "issue:44560", + "accept": false, + "reason": "Different models and different error types." 
+ }, + { + "left": "issue:43716", + "right": "issue:44918", + "accept": false, + "reason": "Different model families and unrelated failure paths." + }, + { + "left": "issue:43039", + "right": "issue:43828", + "accept": false, + "reason": "Different runtime problems; no shared concrete bug." + }, + { + "left": "issue:30277", + "right": "issue:36331", + "accept": false, + "reason": "DeepSpeed collective mismatch and trainer API signature mismatch are unrelated." + }, + { + "left": "issue:43039", + "right": "issue:44805", + "accept": false, + "reason": "Liger Kernel cross-entropy call and mask-shape IndexError are unrelated." + }, + { + "left": "issue:34928", + "right": "issue:36010", + "accept": false, + "reason": "Training shape-recompute bug and GenerationMixin import error are unrelated." + }, + { + "left": "issue:41762", + "right": "issue:43596", + "accept": false, + "reason": "Similar ZeRO-3 symptoms, but different models and insufficient evidence of the same underlying bug." + }, + { + "left": "issue:43901", + "right": "issue:44512", + "accept": false, + "reason": "Different docs updates: text classification docs versus run command removal." + }, + { + "left": "issue:39290", + "right": "issue:41720", + "accept": false, + "reason": "Gemma3/vLLM config attribute error and Qwen device-mapping cuda assert are different issues." + }, + { + "left": "issue:43039", + "right": "issue:44560", + "accept": false, + "reason": "Liger Kernel cross-entropy behavior and Qwen3-vl video StopIteration are unrelated." + }, + { + "left": "issue:43866", + "right": "issue:44863", + "accept": false, + "reason": "Corrupted checkpoint and NemotronH loading implementation bug are not the same failure." + }, + { + "left": "issue:43854", + "right": "issue:43866", + "accept": false, + "reason": "Unit-test model-loading failure and checkpoint corruption are different problems." 
+ }, + { + "left": "issue:43425", + "right": "issue:45070", + "accept": false, + "reason": "Torch version compatibility and pydantic PretrainedConfig field regression are unrelated." + }, + { + "left": "issue:39290", + "right": "issue:43531", + "accept": false, + "reason": "Both mention sliding_window-like config issues, but they affect different model families and code paths." + }, + { + "left": "issue:41093", + "right": "issue:43638", + "accept": false, + "reason": "Mask/tensor shape mismatch and Bert ZeRO-3 index error are different failures." + }, + { + "left": "issue:43404", + "right": "issue:43866", + "accept": false, + "reason": "Tied-weight bug in Mistral3 and corrupted Ovis2 checkpoint are unrelated." + }, + { + "left": "issue:45362", + "right": "issue:45464", + "accept": false, + "reason": "Both are Qwen3.5 chat-related, but one is a chat-template crash and the other is a streaming API failure." + }, + { + "left": "issue:43792", + "right": "issue:45070", + "accept": false, + "reason": "Whisper runtime failure and pydantic model regression are unrelated." + }, + { + "left": "issue:43541", + "right": "issue:43828", + "accept": false, + "reason": "Different MoE dtype/tracing failures with different models and code paths." + }, + { + "left": "issue:36010", + "right": "issue:39290", + "accept": false, + "reason": "GenerationMixin import failure and Gemma3 sliding_window config regression are unrelated." + }, + { + "left": "issue:44841", + "right": "issue:45084", + "accept": false, + "reason": "Processor failure for Voxtral and template compilation error are unrelated." + }, + { + "left": "issue:41093", + "right": "issue:41762", + "accept": false, + "reason": "Both are IndexErrors, but the model/setup and actual failure context differ." + }, + { + "left": "issue:43296", + "right": "issue:43366", + "accept": false, + "reason": "PaddleOCR-VL vLLM loading failure and GGUF gpt-oss support request are different issues." 
+ } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "Grounded and conservative overall. The duplicate call on issue:43596 / issue:43638 is well supported by the very similar ZeRO-3/Bert index-out-of-bounds reports, and the remaining soft-edge rejections stay cautious without overstating equivalence." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 2, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:36331", + "issue:39186", + "issue:39290", + "issue:39692", + "issue:40990", + "issue:41093", + "issue:41553", + "issue:41762", + "issue:42915", + "issue:43054", + "issue:43366", + "issue:43404", + "issue:43531", + "issue:43541", + "issue:43572", + "issue:43596", + "issue:43644", + "issue:43645", + "issue:43742", + "issue:43828", + "issue:43866", + "issue:43873", + "issue:43883", + "issue:43950", + "issue:43975", + "issue:43994", + "issue:44291", + "issue:44292", + "issue:44360", + "issue:44387", + "issue:44403", + "issue:44492", + "issue:44512", + "issue:44530", + "issue:44534", + "issue:44560", + "issue:44589", + "issue:44661", + "issue:44863", + "issue:44918", + "issue:44928", + "issue:44960", + "issue:45005", + "issue:45070", + "issue:45161", + "issue:45399", + "issue:45464", + "issue:45507" + ], + "soft_pairs": [ + "issue:43572|issue:45070", + "issue:44387|issue:45005", + "issue:43541|issue:45161", + "issue:43828|issue:45464", + "issue:44291|issue:45070", + "issue:43366|issue:45464", + "issue:43531|issue:45070", + "issue:44661|issue:45464", + "issue:39290|issue:43742", + "issue:43054|issue:43994", + "issue:43541|issue:44589", + "issue:41093|issue:43596", + "issue:42915|issue:43541", + 
"issue:40990|issue:44960", + "issue:39186|issue:44918", + "issue:43404|issue:45005", + "issue:43645|issue:45399", + "issue:44530|issue:45005", + "issue:44863|issue:45005", + "issue:43873|issue:45005", + "issue:44360|issue:44512", + "issue:42915|issue:45399", + "issue:44918|issue:45507", + "issue:43975|issue:44292", + "issue:44492|issue:44512", + "issue:43950|issue:44534", + "issue:43644|issue:43950", + "issue:44292|issue:45507", + "issue:43828|issue:44928", + "issue:39692|issue:43054", + "issue:43866|issue:44403", + "issue:41553|issue:43883", + "issue:36331|issue:41762", + "issue:44560|issue:45507" + ], + "prepared_review_unit_hash": "48e9e32137015ae86a38cf6980c8aead85328ce7c25a78c9a89a6869484eb6c8", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 34, + "serialized_chars": 37563, + "estimated_input_tokens": 9391, + "estimated_eval_tokens": 19038 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly unrelated issue candidates from different regressions; only the non-persistent-buffer corruption reports are clear duplicates. 
I selected that regression as the canonical issue set.", + "confidence": 0.92, + "canonical_issue_reason": "Issue 43950 is the most explicit statement of the bug: `from_pretrained()` silently corrupts non-persistent buffers in transformers 5.x, which matches the closely related reports in 43644 and 44534.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43950 is the best representative because it names the exact failing API and the corruption mechanism, making triage and fixing easier than the shorter duplicate titles.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43572", + "right": "issue:45070", + "accept": false, + "reason": "Both involve v5 config-related breakage, but one is a missing `pad_token_idx` field and the other is a pydantic/PretrainedConfig field regression; not the same concrete bug." + }, + { + "left": "issue:44387", + "right": "issue:45005", + "accept": false, + "reason": "Different failures: int4 quantization memory growth vs tied-weights handling for translation models." + }, + { + "left": "issue:43541", + "right": "issue:45161", + "accept": false, + "reason": "Different code paths and symptoms: torch dynamo/grouped_mm tracing failure vs tensor-parallel-only failure on GPT-OSS MoE." + }, + { + "left": "issue:43828", + "right": "issue:45464", + "accept": false, + "reason": "Different model families and failure modes: autocast dtype mismatch vs streaming chat/completions API failure." + }, + { + "left": "issue:44291", + "right": "issue:45070", + "accept": false, + "reason": "`init_empty_weights` unexpected-argument bug is unrelated to the pydantic `PretrainedConfig` field issue." + }, + { + "left": "issue:43366", + "right": "issue:45464", + "accept": false, + "reason": "GGUF architecture support request is not the same as a Qwen3.5 streaming inference bug." 
+ }, + { + "left": "issue:43531", + "right": "issue:45070", + "accept": false, + "reason": "Qwen3-MoE sliding-window behavior is a model-specific runtime issue, not the config-field regression in 45070." + }, + { + "left": "issue:44661", + "right": "issue:45464", + "accept": false, + "reason": "Tokenizer mapping/add-new-model-like failure is unrelated to the Qwen3.5 streaming inference error." + }, + { + "left": "issue:39290", + "right": "issue:43742", + "accept": false, + "reason": "Different models and failures: Gemma3/vLLM missing attribute vs MobileLLM key error." + }, + { + "left": "issue:43054", + "right": "issue:43994", + "accept": false, + "reason": "Both mention SigLIP2, but one is about worse text embeddings while the other is a broader nonsensical AutoModel/pipeline result; the concrete bug is not established as the same." + }, + { + "left": "issue:43541", + "right": "issue:44589", + "accept": false, + "reason": "Grouped_mm tracing failure and missing Float8 storage are distinct runtime errors." + }, + { + "left": "issue:41093", + "right": "issue:43596", + "accept": false, + "reason": "Both are IndexErrors, but they happen in different loading paths and involve different shapes/components." + }, + { + "left": "issue:42915", + "right": "issue:43541", + "accept": false, + "reason": "FP8 config failure on Qwen3-MoE is unrelated to Mixtral dynamo tracing/grouped_mm." + }, + { + "left": "issue:40990", + "right": "issue:44960", + "accept": false, + "reason": "No evidence they share a bug; perplexity regression on GPT-OSS and a generic GLM5 issue are different." + }, + { + "left": "issue:39186", + "right": "issue:44918", + "accept": false, + "reason": "FSDP `'weight' must be 2-D` is unrelated to TRL embedding unpacking." + }, + { + "left": "issue:43404", + "right": "issue:45005", + "accept": false, + "reason": "Both concern tied weights, but the affected models and failure modes differ; this is too broad to treat as the same concrete bug." 
+ }, + { + "left": "issue:43645", + "right": "issue:45399", + "accept": false, + "reason": "Custom model/Jupyter notebook initialization and flash-attn2 fallback logic are separate issues." + }, + { + "left": "issue:44530", + "right": "issue:45005", + "accept": false, + "reason": "PagedAttentionCache linear_attention crash is unrelated to tied-weight regressions." + }, + { + "left": "issue:44863", + "right": "issue:45005", + "accept": false, + "reason": "NemotronH checkpoint loading failure is not the same as translation-model tied-weight issues." + }, + { + "left": "issue:43873", + "right": "issue:45005", + "accept": false, + "reason": "Quantization/offloading behavior and tied-weight handling are different problems." + }, + { + "left": "issue:44360", + "right": "issue:44512", + "accept": false, + "reason": "A DSA indexer design discussion is unrelated to stale docs mentioning a removed command." + }, + { + "left": "issue:42915", + "right": "issue:45399", + "accept": false, + "reason": "FP8 config support and flash-attn2 fallback checks are separate subsystems and failures." + }, + { + "left": "issue:44918", + "right": "issue:45507", + "accept": false, + "reason": "TRL embedding unpacking and GraniteMoEHybrid invalid method calls do not look like the same bug." + }, + { + "left": "issue:43975", + "right": "issue:44292", + "accept": false, + "reason": "Incorrect detokenization and Qwen-3-8B-NVFP4 runtime error are different issues." + }, + { + "left": "issue:44492", + "right": "issue:44512", + "accept": false, + "reason": "A typo in cache-strategy docs is unrelated to removed CLI-command documentation drift." + }, + { + "left": "issue:43950", + "right": "issue:44534", + "accept": true, + "reason": "Both report the same non-persistent-buffer regression in transformers v5, with the latter phrased as the same junk-filled-buffer symptom." 
+ }, + { + "left": "issue:43644", + "right": "issue:43950", + "accept": true, + "reason": "These describe the same v5 regression where non-persistent buffers are corrupted/fill with junk during `from_pretrained()`." + }, + { + "left": "issue:44292", + "right": "issue:45507", + "accept": false, + "reason": "Qwen-3-8B-NVFP4 execution failure and GraniteMoEHybrid invalid-method calls are different model/runtime issues." + }, + { + "left": "issue:43828", + "right": "issue:44928", + "accept": false, + "reason": "Autocast dtype mismatch and RLHF gradient explosion from 3D `position_ids` are different failure mechanisms." + }, + { + "left": "issue:39692", + "right": "issue:43054", + "accept": false, + "reason": "SigLIP2 docs-example errors and SigLIP2 text-embedding quality complaints are related by model family but not clearly the same concrete bug." + }, + { + "left": "issue:43866", + "right": "issue:44403", + "accept": false, + "reason": "A corrupted checkpoint and an unrelated loading-noise issue are not the same underlying defect." + }, + { + "left": "issue:41553", + "right": "issue:43883", + "accept": false, + "reason": "Bad AutoTokenizer error messaging and missing `all_tied_weights_keys` are unrelated." + }, + { + "left": "issue:36331", + "right": "issue:41762", + "accept": false, + "reason": "`compute_loss(num_items_in_batch)` compatibility and DeepSpeed ZeRO-3 loading `IndexError` are different bugs." + }, + { + "left": "issue:44560", + "right": "issue:45507", + "accept": false, + "reason": "Qwen3-vl video StopIteration and GraniteMoEHybrid invalid method calls are separate failures." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "Grounded in the packet. The canonical issue choice is supported by the titles/body themes, and the only accepted soft-edge pairs are the clear non-persistent-buffer regression duplicates (43950/43644/44534). The remaining verdicts stay conservative and do not overclaim sameness." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 3, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:36331", + "issue:38175", + "issue:41093", + "issue:41553", + "issue:41762", + "issue:42491", + "issue:42915", + "issue:42947", + "issue:43054", + "issue:43257", + "issue:43278", + "issue:43404", + "issue:43493", + "issue:43643", + "issue:43742", + "issue:43782", + "issue:43792", + "issue:43824", + "issue:43827", + "issue:43854", + "issue:43856", + "issue:43866", + "issue:43872", + "issue:43881", + "issue:43883", + "issue:43940", + "issue:43950", + "issue:43975", + "issue:44220", + "issue:44291", + "issue:44368", + "issue:44387", + "issue:44403", + "issue:44451", + "issue:44488", + "issue:44509", + "issue:44589", + "issue:44661", + "issue:44863", + "issue:44960", + "issue:44991", + "issue:45020", + "issue:45081", + "issue:45305", + "issue:45313", + "issue:45356", + "issue:45399", + "issue:45520" + ], + "soft_pairs": [ + "issue:43872|issue:44589", + "issue:41553|issue:43881", + "issue:38175|issue:43054", + "issue:42947|issue:44387", + "issue:43054|issue:43493", + "issue:43881|issue:43883", + "issue:44403|issue:44451", + "issue:42947|issue:43856", + "issue:43975|issue:45520", + "issue:43643|issue:45020", + "issue:44403|issue:44488", + "issue:43782|issue:44960", + "issue:42491|issue:44368", + "issue:41762|issue:42915", + "issue:43975|issue:44451", + "issue:45020|issue:45356", + "issue:44661|issue:45020", + "issue:43054|issue:43278", + "issue:43824|issue:43872", + "issue:44991|issue:45399", + "issue:43827|issue:44509", + "issue:43792|issue:44220", + "issue:45081|issue:45399", + "issue:43404|issue:43950", + 
"issue:43257|issue:43866", + "issue:42915|issue:45020", + "issue:43742|issue:45399", + "issue:43742|issue:45020", + "issue:41553|issue:44991", + "issue:43940|issue:45313", + "issue:43872|issue:44291", + "issue:43854|issue:44863", + "issue:43257|issue:45305", + "issue:42915|issue:43872", + "issue:41553|issue:44488", + "issue:36331|issue:41093" + ], + "prepared_review_unit_hash": "cd54162ac02442011d7480b3d90ca64caf687ea69765ee8e33f62b638851dc20", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 36, + "serialized_chars": 38579, + "estimated_input_tokens": 9645, + "estimated_eval_tokens": 19546 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is mostly heterogeneous: many pairs share a subsystem or model family but not the same concrete bug. The clearest duplicate clusters are the stale v5 docs references, the `_is_hf_initialized` loading regression, and the SigLIP2 implementation discrepancy pair.", + "confidence": 0.74, + "canonical_issue_reason": "No single canonical issue spans the whole list; the items split into several unrelated bug reports. 
Among the true duplicate-like pairs, issue 43827 is the cleanest canonical anchor for the docs/pipeline-removal cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43827 is the strongest representative issue in this set because 44509 is essentially the same v5 docs problem with outdated pipeline/task references.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43872", + "right": "issue:44589", + "accept": false, + "reason": "Both are loading-time type errors, but one is an `_is_hf_initialized`/bitsandbytes incompatibility and the other is a float8 storage lookup failure; different bugs." + }, + { + "left": "issue:41553", + "right": "issue:43881", + "accept": false, + "reason": "Different models and different failure modes; one is an AutoTokenizer error message issue, the other is a model loading failure." + }, + { + "left": "issue:38175", + "right": "issue:43054", + "accept": false, + "reason": "Both mention SigLIP2, but one is zero probabilities and the other is lower embedding quality; not enough evidence they are the same concrete defect." + }, + { + "left": "issue:42947", + "right": "issue:44387", + "accept": false, + "reason": "Both are memory-related, but one is LoRA/gradient-checkpointing ineffectiveness and the other is int4 CUDA reserved-memory growth causing OOM." + }, + { + "left": "issue:43054", + "right": "issue:43493", + "accept": true, + "reason": "Both point to the same SigLIP2 HF-vs-JAX implementation discrepancy, with degraded text embeddings as the symptom." + }, + { + "left": "issue:43881", + "right": "issue:43883", + "accept": false, + "reason": "Different models and different issues: glm-4v loading vs a missing `all_tied_weights_keys` attribute in Molmo." + }, + { + "left": "issue:44403", + "right": "issue:44451", + "accept": false, + "reason": "One is about noisy loading warnings, the other is an actual inability to load a specific model." 
+ }, + { + "left": "issue:42947", + "right": "issue:43856", + "accept": false, + "reason": "Both concern memory, but one is checkpointing/LoRA behavior and the other is Qwen3 MoE training memory usage." + }, + { + "left": "issue:43975", + "right": "issue:45520", + "accept": false, + "reason": "Unrelated problems: tokenizer detokenization regression vs Python 3.13 flash_attn import bookkeeping." + }, + { + "left": "issue:43643", + "right": "issue:45020", + "accept": false, + "reason": "Both involve `remote_code`, but one is missing fields from `AutoConfig` and the other is broad breakage in recent versions." + }, + { + "left": "issue:44403", + "right": "issue:44488", + "accept": false, + "reason": "Both are loading-related, but one is a warning/noise report and the other is a specific model load failure." + }, + { + "left": "issue:43782", + "right": "issue:44960", + "accept": false, + "reason": "Different model families and errors; no clear shared code-path bug." + }, + { + "left": "issue:42491", + "right": "issue:44368", + "accept": false, + "reason": "Both mention Qwen LoRA/tie-word-embeddings, but they describe different behaviors and code paths." + }, + { + "left": "issue:41762", + "right": "issue:42915", + "accept": false, + "reason": "Different model-specific DeepSpeed failures; one is a Gemma3 ZeRO-3 load error, the other is Qwen3 MoE plus FP8 config." + }, + { + "left": "issue:43975", + "right": "issue:44451", + "accept": false, + "reason": "Different issues entirely: tokenizer detokenization regression vs model loading failure." + }, + { + "left": "issue:45020", + "right": "issue:45356", + "accept": false, + "reason": "Both involve recent-version regressions, but one is broad `remote_code` breakage and the other is a Kimi-K2.5 tokenizer codec/warning regression." 
+ }, + { + "left": "issue:44661", + "right": "issue:45020", + "accept": false, + "reason": "`add-new-model-like`/tokenizer mapping failure is a different bug from the broader remote_code regressions." + }, + { + "left": "issue:43054", + "right": "issue:43278", + "accept": false, + "reason": "Both mention embeddings, but one is a SigLIP2 quality discrepancy and the other is a dtype change between train and eval." + }, + { + "left": "issue:43824", + "right": "issue:43872", + "accept": false, + "reason": "Different load errors: import failure for Qwen2.5-VL vs bitsandbytes `_is_hf_initialized` incompatibility." + }, + { + "left": "issue:44991", + "right": "issue:45399", + "accept": false, + "reason": "Unrelated: tokenizer loading regression vs flash-attn fallback gating logic." + }, + { + "left": "issue:43827", + "right": "issue:44509", + "accept": true, + "reason": "Same docs bug: stale v5 references to removed text-generation/summarization/translation pipeline tasks." + }, + { + "left": "issue:43792", + "right": "issue:44220", + "accept": false, + "reason": "Likely related to Whisper audio preprocessing, but the reports are too different to confidently call the same concrete bug." + }, + { + "left": "issue:45081", + "right": "issue:45399", + "accept": false, + "reason": "Tokenizer regex crash and flash-attn fallback gating are separate issues." + }, + { + "left": "issue:43404", + "right": "issue:43950", + "accept": false, + "reason": "Different problems: Mistral3 weight tying vs silent corruption of non-persistent buffers." + }, + { + "left": "issue:43257", + "right": "issue:43866", + "accept": false, + "reason": "Different model and failure mode; Qwen3 MoE weight conversion vs a corrupted Ovis2 checkpoint." + }, + { + "left": "issue:43872", + "right": "issue:44291", + "accept": true, + "reason": "Same `_is_hf_initialized` loading regression; one report surfaces through bitsandbytes, the other through `init_empty_weights`." 
+ }, + { + "left": "issue:43854", + "right": "issue:44863", + "accept": false, + "reason": "Different models and different loading failures; not the same bug." + }, + { + "left": "issue:43257", + "right": "issue:45305", + "accept": false, + "reason": "Both involve Qwen/DeepSpeed, but one is weight conversion and the other is gradient averaging with GAS/model_accepts_loss_kwargs." + }, + { + "left": "issue:41553", + "right": "issue:44488", + "accept": false, + "reason": "Different models and different failure modes; one is a bad tokenizer error, the other a model load failure." + }, + { + "left": "issue:43940", + "right": "issue:45313", + "accept": false, + "reason": "Both are DeepSpeed ZeRO-3 load failures, but they affect different Qwen model variants and do not read as the same concrete bug." + }, + { + "left": "issue:43742", + "right": "issue:45399", + "accept": false, + "reason": "KeyError during model loading vs flash-attn fallback gating; unrelated." + }, + { + "left": "issue:43742", + "right": "issue:45020", + "accept": false, + "reason": "Different bugs: a specific MobileLLM key error versus broad remote_code regressions." + }, + { + "left": "issue:41553", + "right": "issue:44488", + "accept": false, + "reason": "Different models and different loading paths; no shared concrete defect." + }, + { + "left": "issue:36331", + "right": "issue:41093", + "accept": false, + "reason": "Trainer loss-signature mismatch and tensor-mask shape mismatch are unrelated." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet, and the accepted soft-edge pairs are conservative enough to look mergeable into single bugfix PRs. The rejected pairs are appropriately cautious about shared subsystems without enough evidence of the same concrete defect." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 4, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:24643", + "issue:30064", + "issue:36010", + "issue:38175", + "issue:39692", + "issue:41093", + "issue:42491", + "issue:42617", + "issue:42915", + "issue:43317", + "issue:43366", + "issue:43531", + "issue:43550", + "issue:43596", + "issue:43638", + "issue:43644", + "issue:43645", + "issue:43646", + "issue:43708", + "issue:43720", + "issue:43824", + "issue:43828", + "issue:43872", + "issue:43950", + "issue:43957", + "issue:43994", + "issue:44075", + "issue:44451", + "issue:44479", + "issue:44488", + "issue:44530", + "issue:44534", + "issue:44560", + "issue:44589", + "issue:44661", + "issue:44756", + "issue:44805", + "issue:44863", + "issue:44877", + "issue:44910", + "issue:44918", + "issue:44960", + "issue:44977", + "issue:45030", + "issue:45313", + "issue:45325", + "issue:45357", + "issue:45538" + ], + "soft_pairs": [ + "issue:41093|issue:44805", + "issue:44479|issue:44560", + "issue:43872|issue:45538", + "issue:44530|issue:44863", + "issue:43366|issue:43828", + "issue:24643|issue:30064", + "issue:43366|issue:43531", + "issue:43550|issue:44910", + "issue:43645|issue:43646", + "issue:43950|issue:45325", + "issue:43366|issue:44589", + "issue:44560|issue:44918", + "issue:43824|issue:43957", + "issue:42915|issue:43957", + "issue:42617|issue:43366", + "issue:38175|issue:43994", + "issue:43646|issue:43950", + "issue:44560|issue:44805", + "issue:36010|issue:42915", + "issue:43638|issue:44661", + "issue:43596|issue:44661", + "issue:44877|issue:45030", + "issue:43644|issue:44534", + "issue:43317|issue:44756", + 
"issue:38175|issue:39692", + "issue:45313|issue:45357", + "issue:42491|issue:43720", + "issue:44960|issue:44977", + "issue:44451|issue:44488", + "issue:43708|issue:44075" + ], + "prepared_review_unit_hash": "82a22d2b9d6bea5bebaa223468763440acac526f44b2909f668d5be4c321fc28", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36645, + "estimated_input_tokens": 9162, + "estimated_eval_tokens": 18580 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous, with several unrelated model-loading/regression reports. Only a few soft pairs look like true duplicates; the strongest ones are the Transformers 5 non-persistent-buffer regression and the custom model initialization regression.", + "confidence": 0.52, + "canonical_issue_reason": "issue:43644 is the clearest broad regression in the set and has an obvious duplicate in issue:44534; it best represents a concrete, actionable underlying bug.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43644 is the best single representative of the duplicate-prone Transformers 5 regression subset, with a clear and general failure mode.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41093", + "right": "issue:44805", + "accept": true, + "reason": "Same IndexError class and the same mask-vs-tensor shape mismatch pattern; these look like the same underlying bug." + }, + { + "left": "issue:44479", + "right": "issue:44560", + "accept": false, + "reason": "Both are Qwen video-related, but the affected models and failure modes differ enough that they do not clearly point to the same concrete bug." 
+ }, + { + "left": "issue:43872", + "right": "issue:45538", + "accept": false, + "reason": "Completely different subsystems: bitsandbytes quantization error vs CLIP tokenizer max length behavior." + }, + { + "left": "issue:44530", + "right": "issue:44863", + "accept": false, + "reason": "Different models and different runtime failures; too broad to treat as the same bug." + }, + { + "left": "issue:43366", + "right": "issue:43828", + "accept": false, + "reason": "GGUF/gpt-oss support is unrelated to the Phi-tiny-MoE autocast dtype mismatch." + }, + { + "left": "issue:24643", + "right": "issue:30064", + "accept": false, + "reason": "DeepSpeed training weight-shape error vs image processor void segmentation map handling are unrelated." + }, + { + "left": "issue:43366", + "right": "issue:43531", + "accept": false, + "reason": "Both mention Qwen-related areas, but one is GGUF/gpt-oss support and the other is a sliding_window issue; not the same bug." + }, + { + "left": "issue:43550", + "right": "issue:44910", + "accept": false, + "reason": "Different model families and different code paths: torch.compile/SDPA on Bamba vs flash-attention position_ids on Qwen3.5." + }, + { + "left": "issue:43645", + "right": "issue:43646", + "accept": true, + "reason": "These describe the same Transformers 5 custom model initialization regression; the notebook mention in 43645 is just a narrower reproduction." + }, + { + "left": "issue:43950", + "right": "issue:45325", + "accept": false, + "reason": "Both are regression-style loading issues, but one is non-persistent buffers in from_pretrained and the other is a Qwen2.5-VL rope/position_ids bug." + }, + { + "left": "issue:43366", + "right": "issue:44589", + "accept": false, + "reason": "GGUF gpt-oss support is unrelated to the Float8 storage lookup failure." 
+ }, + { + "left": "issue:44560", + "right": "issue:44918", + "accept": false, + "reason": "Both involve Qwen3.5 video/input handling, but the observed failures and affected paths are distinct." + }, + { + "left": "issue:43824", + "right": "issue:43957", + "accept": false, + "reason": "Importing a missing class is not the same issue as meta-device loading regressions." + }, + { + "left": "issue:42915", + "right": "issue:43957", + "accept": false, + "reason": "Qwen3Moe FP8 config failure and meta-device loading regressions are different code-path problems." + }, + { + "left": "issue:42617", + "right": "issue:43366", + "accept": false, + "reason": "3d_parallel.py execution failure is unrelated to GGUF gpt-oss support." + }, + { + "left": "issue:38175", + "right": "issue:43994", + "accept": false, + "reason": "Both are SigLIP2-related, but one is zero probabilities while the other is an AutoModel/pipeline loading issue; not clearly the same bug." + }, + { + "left": "issue:43646", + "right": "issue:43950", + "accept": false, + "reason": "Custom model initialization regression is different from non-persistent buffer corruption during from_pretrained." + }, + { + "left": "issue:44560", + "right": "issue:44805", + "accept": false, + "reason": "Both surface as runtime errors, but one is a Qwen video StopIteration issue and the other is a generic mask/tensor shape mismatch." + }, + { + "left": "issue:36010", + "right": "issue:42915", + "accept": false, + "reason": "ImportError for GenerationMixin and Qwen3Moe FP8 failure are unrelated." + }, + { + "left": "issue:43638", + "right": "issue:44661", + "accept": false, + "reason": "Different failures: DeepSpeed zero3 index error vs add-new-model-like tokenizer mapping problem." + }, + { + "left": "issue:43596", + "right": "issue:44661", + "accept": false, + "reason": "Both are initialization/config-related, but the concrete bugs and code paths differ." 
+ }, + { + "left": "issue:44877", + "right": "issue:45030", + "accept": false, + "reason": "Strict config loading for granite_speech and glm4v config validation are separate config-regression issues." + }, + { + "left": "issue:43644", + "right": "issue:44534", + "accept": true, + "reason": "Near-identical reports of the Transformers v5 non-persistent-buffer corruption regression." + }, + { + "left": "issue:43317", + "right": "issue:44756", + "accept": false, + "reason": "device_map/offload loading failure and Strix Halo mmap OOM are different loading/performance issues." + }, + { + "left": "issue:38175", + "right": "issue:39692", + "accept": false, + "reason": "Same model family, but one is wrong outputs and the other is documentation/example errors plus quantization failure." + }, + { + "left": "issue:45313", + "right": "issue:45357", + "accept": false, + "reason": "Both are Qwen3.5 regressions, but one is DeepSpeed ZeRO-3 weight loading and the other is save_pretrained key persistence." + }, + { + "left": "issue:42491", + "right": "issue:43720", + "accept": false, + "reason": "LoRA qwen3_moe compatibility on hf5.x and BitNet packed-weight loading are unrelated." + }, + { + "left": "issue:44960", + "right": "issue:44977", + "accept": false, + "reason": "GLM5 placeholder issue and Qwen3.5 flash-attention generation failure are unrelated." + }, + { + "left": "issue:44451", + "right": "issue:44488", + "accept": false, + "reason": "Both are model-loading complaints, but the affected checkpoints and likely root causes are different; not enough evidence for a duplicate." + }, + { + "left": "issue:43708", + "right": "issue:44075", + "accept": false, + "reason": "Trainer checkpoint resume batch-size math and SGD optimizer args usage are different training bugs." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays conservative about duplicate claims. 
The accepted soft pairs are well-supported by highly similar titles and failure modes, especially the 43644/44534 non-persistent-buffer regression. No pair is overstated, and the cluster-level characterization as mostly heterogeneous is reasonable." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 5, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:36010", + "issue:39404", + "issue:39692", + "issue:41720", + "issue:42491", + "issue:42915", + "issue:42947", + "issue:43257", + "issue:43381", + "issue:43404", + "issue:43425", + "issue:43454", + "issue:43644", + "issue:43645", + "issue:43646", + "issue:43708", + "issue:43828", + "issue:43854", + "issue:43856", + "issue:43873", + "issue:43931", + "issue:43957", + "issue:43994", + "issue:44155", + "issue:44164", + "issue:44291", + "issue:44292", + "issue:44387", + "issue:44402", + "issue:44451", + "issue:44479", + "issue:44488", + "issue:44661", + "issue:44683", + "issue:44743", + "issue:44756", + "issue:44863", + "issue:44877", + "issue:44912", + "issue:44928", + "issue:44960", + "issue:44991", + "issue:45216", + "issue:45276", + "issue:45292", + "issue:45335", + "issue:45362", + "issue:45406" + ], + "soft_pairs": [ + "issue:43854|issue:43931", + "issue:43856|issue:43873", + "issue:42491|issue:45276", + "issue:45362|issue:45406", + "issue:41720|issue:42915", + "issue:39692|issue:43994", + "issue:43257|issue:43854", + "issue:43856|issue:44387", + "issue:44488|issue:44991", + "issue:43644|issue:43645", + "issue:44661|issue:44683", + "issue:42915|issue:44756", + "issue:42491|issue:43931", + "issue:42491|issue:43856", + "issue:43404|issue:43454", + "issue:45276|issue:45335", 
+ "issue:36010|issue:39404", + "issue:44164|issue:44291", + "issue:41720|issue:44155", + "issue:44402|issue:44488", + "issue:43873|issue:44387", + "issue:39692|issue:44960", + "issue:44387|issue:44756", + "issue:43425|issue:43957", + "issue:44292|issue:44912", + "issue:42947|issue:43381", + "issue:43854|issue:45216", + "issue:44291|issue:44756", + "issue:45276|issue:45292", + "issue:44743|issue:44960", + "issue:43381|issue:44928", + "issue:43957|issue:44756", + "issue:44164|issue:44756", + "issue:43646|issue:43708", + "issue:45292|issue:45335", + "issue:44479|issue:44877", + "issue:44402|issue:44451", + "issue:43645|issue:44164", + "issue:43257|issue:44863", + "issue:43425|issue:43828", + "issue:43828|issue:43957" + ], + "prepared_review_unit_hash": "b911ae06f8b97c8d9e384f2c02aca318c3a9d47f25989add48262015a6eb3b9c", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 41, + "serialized_chars": 39810, + "estimated_input_tokens": 9953, + "estimated_eval_tokens": 20162 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most items are unrelated, but there is a small duplicate set around `resize_token_embeddings` not updating model output/decoder embeddings. The generic open issue is the best representative; no PRs are present.", + "confidence": 0.82, + "canonical_issue_reason": "`issue:45292` is the broadest and most general report of the shared bug: `resize_token_embeddings` not affecting output embeddings. 
It cleanly covers the model-specific reports in the accepted soft pairs and is still open.", + "canonical_pr_reason": null, + "best_issue_reason": "`issue:45292` is the best cluster representative because it states the underlying behavior generically, rather than for one specific model family, and matches the duplicate resize-token-embedding reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43854", + "right": "issue:43931", + "accept": false, + "reason": "Both are model-loading failures, but they involve different models and different failure modes (`GLM-4.7-Flash` unit-test loading vs `Qwen3-VL-30B` weight-shape mismatch)." + }, + { + "left": "issue:43856", + "right": "issue:43873", + "accept": false, + "reason": "Both mention memory/quantization, but one is about Qwen3 MoE training memory usage and the other about offloading behavior with quantization; not the same concrete bug." + }, + { + "left": "issue:42491", + "right": "issue:45276", + "accept": false, + "reason": "Different problems: a Qwen3 MoE LoRA compatibility break vs a `resize_token_embeddings` embedding-update bug." + }, + { + "left": "issue:45362", + "right": "issue:45406", + "accept": false, + "reason": "Different regressions in different entry points: `transformers chat` crash for Qwen3.5-35B vs `serve` crash because `Gemma4Processor` lacks `_tokenizer`." + }, + { + "left": "issue:41720", + "right": "issue:42915", + "accept": false, + "reason": "Both concern Qwen3/Qwen3-MoE, but one is a CUDA assert during auto device mapping and the other is a FineGrainedFP8Config failure; different code paths." + }, + { + "left": "issue:39692", + "right": "issue:43994", + "accept": false, + "reason": "Both involve SigLIP2-style usage, but one is a documentation example with model/processor and quantization errors, while the other is nonsensical outputs in AutoModel/pipeline usage." 
+ }, + { + "left": "issue:43257", + "right": "issue:43854", + "accept": false, + "reason": "Different loading failures: Qwen3 MoE weights not converted under accelerate+deepspeed vs a unit-test load failure for GLM-4.7-Flash." + }, + { + "left": "issue:43856", + "right": "issue:44387", + "accept": false, + "reason": "One is Qwen3 MoE training memory usage; the other is increased CUDA reserved memory under int4 quantization. Same theme, but not the same bug." + }, + { + "left": "issue:44488", + "right": "issue:44991", + "accept": false, + "reason": "Both are tokenizer/model loading issues, but they concern different models and different failure causes." + }, + { + "left": "issue:43644", + "right": "issue:43645", + "accept": false, + "reason": "One is about non-persistent buffers being junk-filled; the other is a notebook-specific custom-model initialization regression." + }, + { + "left": "issue:44661", + "right": "issue:44683", + "accept": false, + "reason": "Different features: `add-new-model-like` failing in tokenizer mapping vs compiled flex_attention failing on newer torch." + }, + { + "left": "issue:42915", + "right": "issue:44756", + "accept": false, + "reason": "Different bug classes: Qwen3 MoE FP8 loading failure vs disabling mmap on Strix Halo to avoid OOM." + }, + { + "left": "issue:42491", + "right": "issue:43931", + "accept": false, + "reason": "Different models and different failure surfaces: LoRA incompatibility for Qwen3 MoE vs Qwen3-VL weight-shape mismatch." + }, + { + "left": "issue:42491", + "right": "issue:43856", + "accept": false, + "reason": "Different Qwen3 MoE reports, but one is LoRA compatibility and the other is training memory usage; not the same concrete bug." + }, + { + "left": "issue:43404", + "right": "issue:43454", + "accept": false, + "reason": "Both mention lm_head tying in multimodal models, but they are separate model implementations (Mistral3 vs AyaVision) with different reports." 
+ }, + { + "left": "issue:45276", + "right": "issue:45335", + "accept": true, + "reason": "Both report the same underlying bug: `resize_token_embeddings` does not update model-specific output/decoder embeddings. The model families differ, but the concrete failure is the same." + }, + { + "left": "issue:36010", + "right": "issue:39404", + "accept": false, + "reason": "An import error for `GenerationMixin` is unrelated to Whisper pipeline `return_language` behavior." + }, + { + "left": "issue:44164", + "right": "issue:44291", + "accept": false, + "reason": "Both are loading/saving related, but `extra_state` handling and `init_empty_weights` argument errors are different code paths." + }, + { + "left": "issue:41720", + "right": "issue:44155", + "accept": false, + "reason": "CUDA assert on Qwen3 auto device mapping is unrelated to AudioFlamingo3 batched inference token/embedding leakage." + }, + { + "left": "issue:44402", + "right": "issue:44488", + "accept": false, + "reason": "Tokenizer vocab-size mismatch for one model is not the same as the broader model-loading failure in the other issue." + }, + { + "left": "issue:43873", + "right": "issue:44387", + "accept": false, + "reason": "Both are memory-related, but offloading-with-quantization and int4 reserved-memory inflation are different defects." + }, + { + "left": "issue:39692", + "right": "issue:44960", + "accept": false, + "reason": "SigLIP2 doc/example issues are unrelated to the generic GLM5 issue." + }, + { + "left": "issue:44387", + "right": "issue:44756", + "accept": false, + "reason": "Int4 quantization OOM and disabling mmap on Strix Halo are different memory-management problems." + }, + { + "left": "issue:43425", + "right": "issue:43957", + "accept": false, + "reason": "Torch 2.10 incompatibility is unrelated to meta-device model-loading breakage." 
+ }, + { + "left": "issue:44292", + "right": "issue:44912", + "accept": false, + "reason": "Both are quantization/model-loading issues, but they affect different models and formats (NVFP4 vs MXFP4) with different symptoms." + }, + { + "left": "issue:42947", + "right": "issue:43381", + "accept": false, + "reason": "Both mention gradient checkpointing, but one is ineffective under PEFT LoRA and the other says it cannot be used in eval mode." + }, + { + "left": "issue:43854", + "right": "issue:45216", + "accept": false, + "reason": "Different regressions: GLM-4.7-Flash loading in tests vs Qwen3.5 save_pretrained checkpoint correctness." + }, + { + "left": "issue:44291", + "right": "issue:44756", + "accept": false, + "reason": "`init_empty_weights` argument handling and Strix Halo mmap OOM are unrelated loading/memory issues." + }, + { + "left": "issue:45276", + "right": "issue:45292", + "accept": true, + "reason": "Same bug family: `resize_token_embeddings` fails to propagate to output/decoder embeddings. `issue:45292` is the generic version of the same defect." + }, + { + "left": "issue:44743", + "right": "issue:44960", + "accept": false, + "reason": "A recurrent-state/cache bug in modular_qwen3_5.py is unrelated to the generic GLM5 issue." + }, + { + "left": "issue:43381", + "right": "issue:44928", + "accept": false, + "reason": "Eval-mode gradient checkpointing and RLHF NaN/position_ids issues are different failure modes." + }, + { + "left": "issue:43957", + "right": "issue:44756", + "accept": false, + "reason": "Meta-device loading breakage is unrelated to the mmap/OOM workaround issue." + }, + { + "left": "issue:44164", + "right": "issue:44756", + "accept": false, + "reason": "`extra_state` save/load handling is unrelated to Strix Halo mmap OOM." 
+ }, + { + "left": "issue:43646", + "right": "issue:43708", + "accept": false, + "reason": "Custom model initialization regressions and `resume_from_checkpoint` max_steps miscalculation are different trainer/model lifecycle bugs." + }, + { + "left": "issue:45292", + "right": "issue:45335", + "accept": true, + "reason": "Both describe the same `resize_token_embeddings` regression; `issue:45292` is the generic umbrella report and `issue:45335` is a model-specific instance." + }, + { + "left": "issue:44479", + "right": "issue:44877", + "accept": false, + "reason": "Different model families and different problems: video-input regression for Qwen models vs strict config blocking `granite_speech`." + }, + { + "left": "issue:44402", + "right": "issue:44451", + "accept": false, + "reason": "A tokenizer vocab-size mismatch is not the same as the broader inability to load `vesteinn/ScandiBERT`." + }, + { + "left": "issue:43645", + "right": "issue:44164", + "accept": false, + "reason": "Notebook/custom-model initialization problems are unrelated to `extra_state` save/load failures." + }, + { + "left": "issue:43257", + "right": "issue:44863", + "accept": false, + "reason": "Qwen3 MoE weight conversion under accelerate+deepspeed is unrelated to NemotronH checkpoint loading." + }, + { + "left": "issue:43425", + "right": "issue:43828", + "accept": false, + "reason": "Torch version incompatibility is unrelated to the Phi-tiny-MoE autocast dtype mismatch." + }, + { + "left": "issue:43828", + "right": "issue:43957", + "accept": false, + "reason": "Autocast dtype mismatch and meta-device loading failures are different code paths and symptoms." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet: the only accepted soft edges are the three `resize_token_embeddings` pairs, and `issue:45292` is indeed the broadest open representative of that cluster. 
The rejections are conservative and do not overclaim equivalence across clearly different bug classes. No PRs are present in the packet." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 6, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:32090", + "issue:34567", + "issue:36032", + "issue:39404", + "issue:40444", + "issue:40990", + "issue:42947", + "issue:43299", + "issue:43421", + "issue:43425", + "issue:43452", + "issue:43582", + "issue:43596", + "issue:43611", + "issue:43632", + "issue:43644", + "issue:43645", + "issue:43716", + "issue:43856", + "issue:43883", + "issue:43901", + "issue:43906", + "issue:43931", + "issue:43957", + "issue:43994", + "issue:44079", + "issue:44164", + "issue:44206", + "issue:44220", + "issue:44265", + "issue:44479", + "issue:44488", + "issue:44509", + "issue:44530", + "issue:44589", + "issue:44671", + "issue:44683", + "issue:44756", + "issue:44805", + "issue:44863", + "issue:44898", + "issue:44912", + "issue:44918", + "issue:44987", + "issue:44991", + "issue:45081", + "issue:45084", + "issue:45161" + ], + "soft_pairs": [ + "issue:44589|issue:44756", + "issue:43906|issue:44530", + "issue:43931|issue:44863", + "issue:43611|issue:43645", + "issue:39404|issue:44912", + "issue:43425|issue:43716", + "issue:44530|issue:45084", + "issue:44265|issue:44898", + "issue:43299|issue:43931", + "issue:43901|issue:44509", + "issue:44479|issue:44805", + "issue:43994|issue:44079", + "issue:43856|issue:44756", + "issue:44671|issue:44991", + "issue:40990|issue:42947", + "issue:39404|issue:44671", + "issue:43596|issue:44683", + "issue:44589|issue:45084", + "issue:43425|issue:44918", + 
"issue:36032|issue:43452", + "issue:43582|issue:43632", + "issue:32090|issue:39404", + "issue:44991|issue:45081", + "issue:43957|issue:44164", + "issue:44164|issue:44479", + "issue:40990|issue:43421", + "issue:44488|issue:45081", + "issue:43425|issue:45161", + "issue:43883|issue:44488", + "issue:43611|issue:43644", + "issue:44206|issue:44220", + "issue:42947|issue:44756", + "issue:44488|issue:44987", + "issue:34567|issue:40444", + "issue:43716|issue:45084" + ], + "prepared_review_unit_hash": "bbfd16582678dfdae27099a970c828b67b60ec8491bd8d6a4a68c5fd42e92d7d", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 35, + "serialized_chars": 38185, + "estimated_input_tokens": 9547, + "estimated_eval_tokens": 19350 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is mostly a collection of distinct regression reports across unrelated subsystems. 
I do not see any pair that is safe to merge as the same concrete bug; the closest audio-fbank pair is still too underspecified to treat as a duplicate with confidence.", + "confidence": 0.52, + "canonical_issue_reason": "Issue 43611 is the best representative anchor because it is a clear, concrete v5 model-loading regression and is broader than the very model-specific reports around it.", + "canonical_pr_reason": null, + "best_issue_reason": "43611 is the strongest standalone issue for triage: specific enough to be actionable, but broad enough to represent the larger v5-loading breakage theme in this set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44589", + "right": "issue:44756", + "accept": false, + "reason": "Different failures: float8 storage lookup vs mmap/OOM mitigation on Apple Silicon." + }, + { + "left": "issue:43906", + "right": "issue:44530", + "accept": false, + "reason": "Unrelated domains: isolated reproduction of another issue vs Qwen3.5 PagedAttentionCache crash." + }, + { + "left": "issue:43931", + "right": "issue:44863", + "accept": false, + "reason": "Both are load failures, but for different model families and different symptoms (shape mismatch vs checkpoint loading support)." + }, + { + "left": "issue:43611", + "right": "issue:43645", + "accept": false, + "reason": "Both involve v5 model loading, but one is base_model_prefix handling and the other is custom-model initialization in notebooks; not the same bug." + }, + { + "left": "issue:39404", + "right": "issue:44912", + "accept": false, + "reason": "Whisper pipeline return_language regression is unrelated to MXFP4 quantization fallback." + }, + { + "left": "issue:43425", + "right": "issue:43716", + "accept": false, + "reason": "Torch-version compatibility and image preprocessor dtype mismatch are unrelated issues." 
+ }, + { + "left": "issue:44530", + "right": "issue:45084", + "accept": false, + "reason": "Different code paths: Qwen3.5 cache/group-type failure vs template compilation error." + }, + { + "left": "issue:44265", + "right": "issue:44898", + "accept": false, + "reason": "torch.export/torch_compilable_check failure is unrelated to Perceiver interpolation on non-default image sizes." + }, + { + "left": "issue:43299", + "right": "issue:43931", + "accept": false, + "reason": "Both are Qwen3/VL loading complaints, but one is MoE model loading and the other is checkpoint weight-shape mismatch; not the same concrete bug." + }, + { + "left": "issue:43901", + "right": "issue:44509", + "accept": false, + "reason": "Different documentation regressions: return_all_scores vs removed pipeline task docs." + }, + { + "left": "issue:44479", + "right": "issue:44805", + "accept": false, + "reason": "Video-input regression for Qwen models is unrelated to a masking shape mismatch error." + }, + { + "left": "issue:43994", + "right": "issue:44079", + "accept": false, + "reason": "One is a model/pipeline quality regression, the other is a ModelOutput key-assignment bug; too different." + }, + { + "left": "issue:43856", + "right": "issue:44756", + "accept": false, + "reason": "Qwen3 MoE training memory usage and Strix Halo mmap/OOM are different problems." + }, + { + "left": "issue:44671", + "right": "issue:44991", + "accept": false, + "reason": "CamemBERT masked-LM prediction regression is unrelated to est-roberta tokenizer loading." + }, + { + "left": "issue:40990", + "right": "issue:42947", + "accept": false, + "reason": "High perplexity on a model is unrelated to LoRA gradient-checkpointing ineffectiveness." + }, + { + "left": "issue:39404", + "right": "issue:44671", + "accept": false, + "reason": "Whisper return_language and CamemBERT masked-LM prediction issues do not share the same bug." 
+ }, + { + "left": "issue:43596", + "right": "issue:44683", + "accept": false, + "reason": "DeepSpeed ZeRO-3 init index error is unrelated to compiled flex_attention on newer torch." + }, + { + "left": "issue:44589", + "right": "issue:45084", + "accept": false, + "reason": "Float8 storage lookup failure and template compilation error are unrelated." + }, + { + "left": "issue:43425", + "right": "issue:44918", + "accept": false, + "reason": "Torch 2.10 compatibility and Qwen3.5 embedding unpacking with TRL are different regressions." + }, + { + "left": "issue:36032", + "right": "issue:43452", + "accept": false, + "reason": "Tokenizer special-tokens API conflict is unrelated to gguf_file loading breakage." + }, + { + "left": "issue:43582", + "right": "issue:43632", + "accept": false, + "reason": "Apple Silicon warmup TypeError and _is_hf_initialized flag breakage are different code paths." + }, + { + "left": "issue:32090", + "right": "issue:39404", + "accept": false, + "reason": "Trainer GPU broadcast NoneType error is unrelated to Whisper pipeline return_language behavior." + }, + { + "left": "issue:44991", + "right": "issue:45081", + "accept": false, + "reason": "Both affect tokenizer loading, but they hit different models and different failure modes." + }, + { + "left": "issue:43957", + "right": "issue:44164", + "accept": false, + "reason": "Meta-device loading regression is not the same as extra_state save/from_pretrained handling." + }, + { + "left": "issue:44164", + "right": "issue:44479", + "accept": false, + "reason": "Save/load extra_state handling is unrelated to video-input regression." + }, + { + "left": "issue:40990", + "right": "issue:43421", + "accept": false, + "reason": "Perplexity issue and runtime special-token post-processor update bug are unrelated." + }, + { + "left": "issue:44488", + "right": "issue:45081", + "accept": false, + "reason": "Different tokenizer-loading failures for different checkpoints." 
+ }, + { + "left": "issue:43425", + "right": "issue:45161", + "accept": false, + "reason": "Torch compatibility does not match the GPT-OSS tensor-parallel loading problem." + }, + { + "left": "issue:43883", + "right": "issue:44488", + "accept": false, + "reason": "Missing tied-weights metadata and sleng-bert tokenizer loading are unrelated." + }, + { + "left": "issue:43611", + "right": "issue:43644", + "accept": false, + "reason": "Both are v5 loading regressions, but one is base_model_prefix and the other is non-persistent buffer initialization; not the same bug." + }, + { + "left": "issue:44206", + "right": "issue:44220", + "accept": false, + "reason": "Both mention fbank/audio extraction, but the evidence is too thin to say they are the same concrete regression." + }, + { + "left": "issue:42947", + "right": "issue:44756", + "accept": false, + "reason": "LoRA gradient checkpointing and Strix Halo mmap/OOM are unrelated." + }, + { + "left": "issue:44488", + "right": "issue:44987", + "accept": false, + "reason": "Different model-loading failures with no shared concrete code-path bug." + }, + { + "left": "issue:34567", + "right": "issue:40444", + "accept": false, + "reason": "TrainerState token counting and multi-image iterable dataset finetuning are unrelated." + }, + { + "left": "issue:43716", + "right": "issue:45084", + "accept": false, + "reason": "Image-preprocessor dtype mismatch and non-template-node compilation error are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 7, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:36032", + "issue:39404", + "issue:41553", + "issue:42915", + "issue:43262", + "issue:43296", + "issue:43317", + "issue:43322", + "issue:43377", + "issue:43404", + "issue:43452", + "issue:43482", + "issue:43531", + "issue:43540", + "issue:43541", + "issue:43582", + "issue:43632", + "issue:43643", + "issue:43644", + "issue:43645", + "issue:43646", + "issue:43819", + "issue:43825", + "issue:43883", + "issue:43994", + "issue:44062", + "issue:44220", + "issue:44265", + "issue:44403", + "issue:44451", + "issue:44466", + "issue:44479", + "issue:44530", + "issue:44534", + "issue:44610", + "issue:44683", + "issue:44756", + "issue:44849", + "issue:44987", + "issue:44991", + "issue:45081", + "issue:45084", + "issue:45127", + "issue:45216", + "issue:45325", + "issue:45357", + "issue:45507" + ], + "soft_pairs": [ + "issue:43644|issue:43646", + "issue:43883|issue:44991", + "issue:43643|issue:44530", + "issue:43452|issue:43482", + "issue:44451|issue:44987", + "issue:36032|issue:42915", + "issue:43317|issue:43482", + "issue:43541|issue:45084", + "issue:43262|issue:43377", + "issue:39404|issue:43825", + "issue:42915|issue:44530", + "issue:44849|issue:45325", + "issue:43404|issue:44466", + "issue:36032|issue:43645", + "issue:44987|issue:44991", + "issue:43646|issue:44534", + "issue:45084|issue:45507", + "issue:43994|issue:44610", + "issue:43377|issue:43819", + "issue:43645|issue:44534", + "issue:43322|issue:43540", + "issue:45216|issue:45357", + "issue:44403|issue:45081", + 
"issue:44451|issue:45081", + "issue:43632|issue:44756", + "issue:44220|issue:44683", + "issue:44265|issue:44610", + "issue:41553|issue:43582", + "issue:43404|issue:45127", + "issue:42915|issue:45084", + "issue:44987|issue:45081", + "issue:43296|issue:43531", + "issue:44479|issue:45325", + "issue:44062|issue:44987" + ], + "prepared_review_unit_hash": "1a8adf42b51a7510d31f03d60bea4b54a5fada270f229535353ca0fe27fdcaa5", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 34, + "serialized_chars": 37326, + "estimated_input_tokens": 9332, + "estimated_eval_tokens": 18920 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most pairs are clearly different bugs and should stay separate. The only strong duplicate-looking pair is the Qwen3.5 `save_pretrained` regression pair.", + "confidence": 0.76, + "canonical_issue_reason": "issue:43644 is the clearest, most concrete regression in the set and best serves as the representative issue for the v5 buffer/state corruption theme.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43644 is the most actionable standalone issue here: it states a specific, user-visible regression cleanly and is broad enough to represent the cluster better than the more model-specific follow-ups.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43644", + "right": "issue:43646", + "accept": false, + "reason": "Different failures: non-persistent buffer corruption vs custom model initialization breakage." 
+ }, + { + "left": "issue:43883", + "right": "issue:44991", + "accept": false, + "reason": "Unrelated bugs: missing `all_tied_weights_keys` on one model vs tokenizer loading failure for a different model." + }, + { + "left": "issue:43643", + "right": "issue:44530", + "accept": false, + "reason": "Different code paths: `AutoConfig.from_pretrained` field loss vs PagedAttentionCache crashing on `linear_attention`." + }, + { + "left": "issue:43452", + "right": "issue:43482", + "accept": false, + "reason": "Both involve loading, but one is a generic `gguf_file` API break while the other is a specific Qwen2.5-GGUF load failure; not clearly the same concrete bug." + }, + { + "left": "issue:44451", + "right": "issue:44987", + "accept": false, + "reason": "Different models and likely different loading regressions; not the same underlying issue." + }, + { + "left": "issue:36032", + "right": "issue:42915", + "accept": false, + "reason": "Tokenizer `add_special_tokens` conflict is unrelated to Qwen3Moe FP8 config failure." + }, + { + "left": "issue:43317", + "right": "issue:43482", + "accept": false, + "reason": "Dequantized offload/device-map failure is a different path from GGUF loading failure." + }, + { + "left": "issue:43541", + "right": "issue:45084", + "accept": false, + "reason": "Both mention tracing/compilation, but they are different errors in different code paths and models." + }, + { + "left": "issue:43262", + "right": "issue:43377", + "accept": false, + "reason": "Audio chat-template sampling-rate default bug is unrelated to MIMI batched-vs-single padding-mask behavior." + }, + { + "left": "issue:39404", + "right": "issue:43825", + "accept": false, + "reason": "One is a pipeline `return_language` regression; the other is an incorrect error message about translation support." + }, + { + "left": "issue:42915", + "right": "issue:44530", + "accept": false, + "reason": "Different bugs: FineGrainedFP8Config failure vs PagedAttentionCache crash on `linear_attention`." 
+ }, + { + "left": "issue:44849", + "right": "issue:45325", + "accept": false, + "reason": "Different model behaviors: `output_hidden_states=True` regression vs rope-index scaling for still images." + }, + { + "left": "issue:43404", + "right": "issue:44466", + "accept": false, + "reason": "Untied `lm_head` on one model is not the same as device-dependent serialization of tied weights." + }, + { + "left": "issue:36032", + "right": "issue:43645", + "accept": false, + "reason": "Tokenizer special-token conflict is unrelated to custom model initialization breaking in notebooks." + }, + { + "left": "issue:44987", + "right": "issue:44991", + "accept": false, + "reason": "Different failures on different artifacts: model loading regression vs tokenizer loading regression." + }, + { + "left": "issue:43646", + "right": "issue:44534", + "accept": false, + "reason": "Custom model initialization breakage is unrelated to non-persistent buffers being filled with junk." + }, + { + "left": "issue:45084", + "right": "issue:45507", + "accept": false, + "reason": "Different errors: template compilation failure vs GraniteMoEHybrid calling an invalid method." + }, + { + "left": "issue:43994", + "right": "issue:44610", + "accept": false, + "reason": "Nonsensical model outputs are not the same as a processor/model input-size mismatch." + }, + { + "left": "issue:43377", + "right": "issue:43819", + "accept": false, + "reason": "MIMI padding-mask inconsistency is unrelated to DAC `from_latents` mismatch without STE." + }, + { + "left": "issue:43645", + "right": "issue:44534", + "accept": false, + "reason": "Custom model initialization in notebooks is unrelated to non-persistent buffer junk serialization." + }, + { + "left": "issue:43322", + "right": "issue:43540", + "accept": false, + "reason": "Segmentation fault loading Llava Next is a different bug from Qwen3OmniMoe video-input validation." 
+ }, + { + "left": "issue:45216", + "right": "issue:45357", + "accept": true, + "reason": "Same underlying Qwen3.5 `save_pretrained` regression: both report incorrect saved checkpoints/visual encoder keys after v5 changes." + }, + { + "left": "issue:44403", + "right": "issue:45081", + "accept": false, + "reason": "Unnecessary load-time noise is not the same as a tokenizer crash in `_patch_mistral_regex`." + }, + { + "left": "issue:44451", + "right": "issue:45081", + "accept": false, + "reason": "Different model-loading failure vs Mistral tokenizer regex-patching crash." + }, + { + "left": "issue:43632", + "right": "issue:44756", + "accept": false, + "reason": "Broken `_is_hf_initialized` handling is unrelated to disabling mmap on Strix Halo to avoid OOM." + }, + { + "left": "issue:44220", + "right": "issue:44683", + "accept": false, + "reason": "Feature extraction failure and compiled flex_attention failure are unrelated." + }, + { + "left": "issue:44265", + "right": "issue:44610", + "accept": false, + "reason": "`torch.export`/`torch_compilable_check` failure is unrelated to OmDet-Turbo processor producing the wrong input size." + }, + { + "left": "issue:41553", + "right": "issue:43582", + "accept": false, + "reason": "Bad error messaging for AutoTokenizer loading Voxtral is unrelated to the Apple Silicon `caching_allocator_warmup` TypeError." + }, + { + "left": "issue:43404", + "right": "issue:45127", + "accept": false, + "reason": "Untied `lm_head` on Mistral3 is not the same bug as LoRA merge collapse with extended vocabulary." + }, + { + "left": "issue:44987", + "right": "issue:45081", + "accept": false, + "reason": "Different models and different failures: loading `physical-intelligence/fast` vs Mistral tokenizer regex patch crash." + }, + { + "left": "issue:43296", + "right": "issue:43531", + "accept": false, + "reason": "PaddleOCR-VL load failure and Qwen3-MoE sliding-window issue are separate regressions." 
+ }, + { + "left": "issue:44479", + "right": "issue:45325", + "accept": false, + "reason": "Video-input regression in several Qwen models is unrelated to still-image temporal position scaling in Qwen2.5-VL." + }, + { + "left": "issue:44062", + "right": "issue:44987", + "accept": false, + "reason": "`AddedToken(..., special=...)` constructor conflict is unrelated to the `physical-intelligence/fast` loading regression." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet, and the soft-edge verdicts stay conservative overall. The only accepted duplicate-looking pair, issue:45216 vs issue:45357, is well supported by the titles. The other rejections mostly distinguish issues at the title/code-path level without overclaiming shared root cause." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 8, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:33666", + "issue:35707", + "issue:36032", + "issue:40444", + "issue:40990", + "issue:41720", + "issue:42175", + "issue:42915", + "issue:43296", + "issue:43317", + "issue:43377", + "issue:43421", + "issue:43425", + "issue:43482", + "issue:43525", + "issue:43531", + "issue:43611", + "issue:43632", + "issue:43644", + "issue:43646", + "issue:43742", + "issue:43749", + "issue:43792", + "issue:43818", + "issue:43856", + "issue:44079", + "issue:44265", + "issue:44291", + "issue:44403", + "issue:44534", + "issue:44568", + "issue:44589", + "issue:44683", + "issue:44811", + "issue:44829", + "issue:44877", + "issue:44991", + "issue:45020", + "issue:45072", + "issue:45081", + "issue:45325", + "issue:45357", + "issue:45381", + "issue:45479", 
+ "issue:45491", + "issue:45538", + "issue:45542" + ], + "soft_pairs": [ + "issue:42175|issue:45542", + "issue:40990|issue:44811", + "issue:42915|issue:44291", + "issue:43611|issue:44534", + "issue:43317|issue:43856", + "issue:36032|issue:43742", + "issue:43632|issue:44534", + "issue:40444|issue:45325", + "issue:42915|issue:44589", + "issue:43531|issue:44877", + "issue:43421|issue:44568", + "issue:43818|issue:45357", + "issue:36032|issue:41720", + "issue:44683|issue:45020", + "issue:43482|issue:45538", + "issue:44829|issue:45479", + "issue:33666|issue:40444", + "issue:43425|issue:44403", + "issue:44403|issue:44991", + "issue:44079|issue:44534", + "issue:43792|issue:44589", + "issue:35707|issue:40444", + "issue:43377|issue:45491", + "issue:43482|issue:43525", + "issue:44291|issue:44589", + "issue:43749|issue:44079", + "issue:43644|issue:44079", + "issue:44079|issue:45072", + "issue:43632|issue:45072", + "issue:44265|issue:45072", + "issue:40444|issue:45381", + "issue:45325|issue:45381", + "issue:43531|issue:44291", + "issue:43611|issue:43646", + "issue:41720|issue:45081", + "issue:42175|issue:43296" + ], + "prepared_review_unit_hash": "5037f19fbae2871559be5d330eb59fef7f30d894589e07d9c000d5c797577624", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 36, + "serialized_chars": 37918, + "estimated_input_tokens": 9480, + "estimated_eval_tokens": 19216 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly a mixed cluster of unrelated bugs, with one clear duplicate pair around missing TensorFlow backend support in torch-only installs. 
Most other pairs share broad subsystems or version contexts but not the same concrete defect.", + "confidence": 0.72, + "canonical_issue_reason": "issue:43644 is the clearest representative of the Transformers v5 non-persistent-buffer regression and is the same report as 44534, with earlier discussion and an inbound reference.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43644 is the strongest anchor in this set: it is a concrete, well-scoped regression report and matches the duplicate report 44534 closely.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42175", + "right": "issue:45542", + "accept": true, + "reason": "Both point to the same packaging gap: the torch-only install path leaves the TensorFlow backend unavailable, producing backend/import errors." + }, + { + "left": "issue:40990", + "right": "issue:44811", + "accept": false, + "reason": "Completely different bugs: perplexity evaluation on GPT-OSS vs Whisper batch_decode skipping special tokens." + }, + { + "left": "issue:42915", + "right": "issue:44291", + "accept": false, + "reason": "Both involve Transformers v5 loading paths, but one is a Qwen3MoE FP8 config failure and the other is an init_empty_weights/_is_hf_initialized TypeError; different root causes." + }, + { + "left": "issue:43611", + "right": "issue:44534", + "accept": false, + "reason": "Both are v5 regressions, but one is about base_model_prefix loading and the other about non-persistent buffers being filled with junk." + }, + { + "left": "issue:43317", + "right": "issue:43856", + "accept": false, + "reason": "Different symptoms and code paths: dequantized model offload/device_map loading vs Qwen3 MoE training memory usage." + }, + { + "left": "issue:36032", + "right": "issue:43742", + "accept": false, + "reason": "Unrelated tokenizer load error vs a MobileLLM key error when loading a specific model." 
+ }, + { + "left": "issue:43632", + "right": "issue:44534", + "accept": false, + "reason": "Both are Transformers v5 breakages, but one concerns the _is_hf_initialized flag and the other non-persistent buffer initialization." + }, + { + "left": "issue:40444", + "right": "issue:45325", + "accept": false, + "reason": "Both are Qwen2.5-VL bugs, but one is multi-image iterable dataset finetuning and the other is still-image temporal position_id scaling." + }, + { + "left": "issue:42915", + "right": "issue:44589", + "accept": false, + "reason": "Different failures: Qwen3MoE FP8 config handling vs missing Float8 storage object." + }, + { + "left": "issue:43531", + "right": "issue:44877", + "accept": false, + "reason": "Different model/config issues: Qwen3-MoE sliding_window behavior vs granite_speech strict config loading." + }, + { + "left": "issue:43421", + "right": "issue:44568", + "accept": false, + "reason": "Both involve special tokens, but one is runtime post-processor updates and the other is add_special_tokens not adding BOS/EOS for a specific tokenizer." + }, + { + "left": "issue:43818", + "right": "issue:45357", + "accept": false, + "reason": "Different multimodal model problems: Video-LLaVA weight/temporal-attention issue vs Qwen3.5 save_pretrained key regression." + }, + { + "left": "issue:36032", + "right": "issue:41720", + "accept": false, + "reason": "Tokenizer construction error vs Qwen3 auto device mapping CUDA assert; unrelated bugs." + }, + { + "left": "issue:44683", + "right": "issue:45020", + "accept": false, + "reason": "Different areas: compiled flex_attention on torch>=2.9 vs remote_code model loading regressions." + }, + { + "left": "issue:43482", + "right": "issue:45538", + "accept": false, + "reason": "Different tokenizer/loading issues: Qwen2.5-GGUF on transformers v5 vs CLIPTokenizer model_max_length behavior." 
+ }, + { + "left": "issue:44829", + "right": "issue:45479", + "accept": false, + "reason": "Both are classification-training degeneracy reports, but one is tied to flash_attention_3 and the other to num_labels=1 loss semantics." + }, + { + "left": "issue:33666", + "right": "issue:40444", + "accept": false, + "reason": "Both are Qwen2-VL/Qwen2.5-VL training issues, but they describe different failure modes and data setups." + }, + { + "left": "issue:43425", + "right": "issue:44403", + "accept": false, + "reason": "Unrelated: Torch 2.10 compatibility vs noisy transformer loading messages." + }, + { + "left": "issue:44403", + "right": "issue:44991", + "accept": false, + "reason": "Different bugs: generic loading noise vs tokenizer loading failure for a specific model." + }, + { + "left": "issue:44079", + "right": "issue:44534", + "accept": false, + "reason": "ModelOutput key assignment bug vs non-persistent buffer junk initialization; not the same defect." + }, + { + "left": "issue:43792", + "right": "issue:44589", + "accept": false, + "reason": "Whisper runtime failure vs missing Float8 storage type; unrelated." + }, + { + "left": "issue:35707", + "right": "issue:40444", + "accept": false, + "reason": "Progressive generation with inputs_embeds/past_key_values is unrelated to Qwen2.5-VL finetuning with multiple images." + }, + { + "left": "issue:43377", + "right": "issue:45491", + "accept": false, + "reason": "Both are batching/padding issues, but the affected models and mechanisms differ: MIMI padding-mask support vs Gemma3 sliding-window NaNs." + }, + { + "left": "issue:43482", + "right": "issue:43525", + "accept": false, + "reason": "Different loading regressions in unrelated models: Qwen2.5-GGUF vs Llama4Config pad_token_id." + }, + { + "left": "issue:44291", + "right": "issue:44589", + "accept": false, + "reason": "init_empty_weights/_is_hf_initialized TypeError is unrelated to the Float8 storage lookup failure." 
+ }, + { + "left": "issue:43749", + "right": "issue:44079", + "accept": false, + "reason": "FSDP CPU RAM efficient loading and ModelOutput key assignment are unrelated bugs." + }, + { + "left": "issue:43644", + "right": "issue:44079", + "accept": false, + "reason": "Although both mention Transformers internals, one is about non-persistent buffers being corrupted and the other about ModelOutput key handling." + }, + { + "left": "issue:44079", + "right": "issue:45072", + "accept": false, + "reason": "ModelOutput key assignment bug vs bf16 dtype mismatches in CI; different defects." + }, + { + "left": "issue:43632", + "right": "issue:45072", + "accept": false, + "reason": "_is_hf_initialized flag regression is unrelated to dtype mismatch failures in bf16 inference." + }, + { + "left": "issue:44265", + "right": "issue:45072", + "accept": false, + "reason": "torch.export / torch_compilable_check failure is unrelated to SwitchTransformers/TimmWrapperModel dtype mismatches." + }, + { + "left": "issue:40444", + "right": "issue:45381", + "accept": false, + "reason": "Both are Qwen2.5-VL vision-input bugs, but one is multi-image finetuning and the other is video vision_position_ids." + }, + { + "left": "issue:45325", + "right": "issue:45381", + "accept": false, + "reason": "Closely related subsystem, but one is still-image temporal position scaling and the other is video position_id computation; not the same concrete bug." + }, + { + "left": "issue:43531", + "right": "issue:44291", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior is unrelated to init_empty_weights/_is_hf_initialized argument handling." + }, + { + "left": "issue:43611", + "right": "issue:43646", + "accept": false, + "reason": "Both are Transformers v5 loading regressions, but they describe different failure points: base_model_prefix loading vs custom model initialization." 
+ }, + { + "left": "issue:41720", + "right": "issue:45081", + "accept": false, + "reason": "Different components: Qwen3 auto device mapping CUDA assert vs Mistral tokenizer regex patch crash." + }, + { + "left": "issue:42175", + "right": "issue:43296", + "accept": false, + "reason": "Both involve model/backend setup, but one is missing TensorFlow in a torch install and the other is PaddleOCR-VL load failure in vLLM." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "Grounded overall and conservative. The canonical choice around issue:43644/44534 is well supported by the identical titles and the inbound reference on 43644. The only accepted soft edge, issue:42175 vs issue:45542, is a plausible same-failure packaging/backend-installation match. The remaining rejected pairs mostly distinguish different root causes despite shared subsystems or model families, which is appropriately cautious." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 9, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:34634", + "issue:35707", + "issue:39692", + "issue:40444", + "issue:41720", + "issue:42915", + "issue:43262", + "issue:43296", + "issue:43317", + "issue:43322", + "issue:43377", + "issue:43404", + "issue:43454", + "issue:43482", + "issue:43526", + "issue:43550", + "issue:43606", + "issue:43632", + "issue:43701", + "issue:43720", + "issue:43746", + "issue:43749", + "issue:43792", + "issue:43819", + "issue:43828", + "issue:43873", + "issue:43881", + "issue:43883", + "issue:43957", + "issue:43994", + "issue:44164", + "issue:44186", + "issue:44265", + "issue:44291", + "issue:44387", + "issue:44466", + "issue:44488", + 
"issue:44534", + "issue:44610", + "issue:44617", + "issue:44683", + "issue:44877", + "issue:44898", + "issue:44977", + "issue:44991", + "issue:45072", + "issue:45325", + "issue:45357" + ], + "soft_pairs": [ + "issue:43881|issue:44488", + "issue:43454|issue:44466", + "issue:43994|issue:44898", + "issue:44387|issue:44617", + "issue:43828|issue:44683", + "issue:42915|issue:43482", + "issue:43526|issue:43883", + "issue:43701|issue:45325", + "issue:43792|issue:43957", + "issue:43262|issue:43746", + "issue:43317|issue:43873", + "issue:34634|issue:35707", + "issue:43606|issue:44265", + "issue:43881|issue:44991", + "issue:44186|issue:44265", + "issue:44291|issue:44877", + "issue:43606|issue:43819", + "issue:43632|issue:44164", + "issue:35707|issue:41720", + "issue:43377|issue:44898", + "issue:43454|issue:45072", + "issue:44610|issue:45325", + "issue:43454|issue:43550", + "issue:43377|issue:44610", + "issue:39692|issue:40444", + "issue:43749|issue:44534", + "issue:44466|issue:45357", + "issue:43296|issue:44991", + "issue:43296|issue:44877", + "issue:43792|issue:44683", + "issue:43454|issue:43720", + "issue:41720|issue:43322", + "issue:41720|issue:44977", + "issue:43404|issue:45357" + ], + "prepared_review_unit_hash": "7c276a7e3b7a4d8a8b48aef0c934eba5e2d18bb038ebba88d4d194b4ec238d0b", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 34, + "serialized_chars": 37834, + "estimated_input_tokens": 9459, + "estimated_eval_tokens": 19174 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This is a heterogeneous cluster of mostly unrelated issues with only loose similarity around Transformers v5 loading/serialization regressions. 
I do not see any soft pair that should be merged as a true duplicate; the closest subtheme is tied-weights / model-loading behavior, where issue 43454 is the best representative.", + "confidence": 0.43, + "canonical_issue_reason": "No exact duplicate cluster emerges; issue 43454 is the closest representative because it is the most central concrete bug in the tied-weights/model-loading subtheme.", + "canonical_pr_reason": null, + "best_issue_reason": "43454 is the most actionable anchor issue among the set, but the cluster is too mixed to serve as a broad canonical for all items.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43881", + "right": "issue:44488", + "accept": false, + "reason": "Both are load failures, but they involve different models and no shared concrete failure mode." + }, + { + "left": "issue:43454", + "right": "issue:44466", + "accept": false, + "reason": "Both mention lm_head/tied weights, but one is a generation bug and the other is a device-dependent serialization issue." + }, + { + "left": "issue:43994", + "right": "issue:44898", + "accept": false, + "reason": "Different model families and different bugs; they only share a broad vision-model failure theme." + }, + { + "left": "issue:44387", + "right": "issue:44617", + "accept": false, + "reason": "Both mention OOM, but one is an int4 quantization memory regression and the other is a model-specific CUDA OOM." + }, + { + "left": "issue:43828", + "right": "issue:44683", + "accept": false, + "reason": "Autocast dtype mismatch and compiled flex_attention failure are distinct code-path problems." + }, + { + "left": "issue:42915", + "right": "issue:43482", + "accept": false, + "reason": "Different models and different failure classes: FP8 config failure versus GGUF loading failure." + }, + { + "left": "issue:43526", + "right": "issue:43883", + "accept": false, + "reason": "Label-reduction behavior and missing tied-weights metadata are unrelated bugs." 
+ }, + { + "left": "issue:43701", + "right": "issue:45325", + "accept": false, + "reason": "Checkpoint key mismatch is not the same bug as RoPE position-id scaling." + }, + { + "left": "issue:43792", + "right": "issue:43957", + "accept": false, + "reason": "These are separate loading/runtime failures with different underlying mechanisms." + }, + { + "left": "issue:43262", + "right": "issue:43746", + "accept": false, + "reason": "Audio chat-template sample-rate defaults and PEFT checkpoint loading are unrelated." + }, + { + "left": "issue:43317", + "right": "issue:43873", + "accept": false, + "reason": "Both involve offloading/quantization, but the specific failures and code paths differ." + }, + { + "left": "issue:34634", + "right": "issue:35707", + "accept": false, + "reason": "Voice preset handling and progressive generation with past key values are different issues." + }, + { + "left": "issue:43606", + "right": "issue:44265", + "accept": false, + "reason": "CPU offload device mismatch and torch.export/torch_compilable_check are separate bugs." + }, + { + "left": "issue:43881", + "right": "issue:44991", + "accept": false, + "reason": "One is a model load failure, the other a tokenizer-loading failure for a different model." + }, + { + "left": "issue:44186", + "right": "issue:44265", + "accept": false, + "reason": "Tokenizer padding/truncation crashes and export-time compilers checks are unrelated." + }, + { + "left": "issue:44291", + "right": "issue:44877", + "accept": false, + "reason": "Init-empty-weights argument handling and strict config validation are different failure modes." + }, + { + "left": "issue:43606", + "right": "issue:43819", + "accept": false, + "reason": "Device mismatch on offload and DAC latent/forward mismatch are unrelated." 
+ }, + { + "left": "issue:43632", + "right": "issue:44164", + "accept": false, + "reason": "Both are v5-related loader/save regressions, but one is `_is_hf_initialized` flag handling and the other is extra_state serialization." + }, + { + "left": "issue:35707", + "right": "issue:41720", + "accept": false, + "reason": "Progressive generation with embeds/past-key-values is unrelated to an auto-device-map CUDA assert." + }, + { + "left": "issue:43377", + "right": "issue:44898", + "accept": false, + "reason": "Batched-vs-single encoder mismatch and image-resolution mismatch are different bugs." + }, + { + "left": "issue:43454", + "right": "issue:45072", + "accept": false, + "reason": "Tied-weights/generation failure and bfloat16 dtype mismatch are different problems." + }, + { + "left": "issue:44610", + "right": "issue:45325", + "accept": false, + "reason": "Processor image-size mismatch and temporal position-id scaling are not the same bug." + }, + { + "left": "issue:43454", + "right": "issue:43550", + "accept": false, + "reason": "Tied-weight generation issues are unrelated to the torch.compile SDPA failure." + }, + { + "left": "issue:43377", + "right": "issue:44610", + "accept": false, + "reason": "MIMI batching/padding-mask behavior and OmDet-Turbo input-size mismatch are unrelated." + }, + { + "left": "issue:39692", + "right": "issue:40444", + "accept": false, + "reason": "A documentation example error does not match a finetuning failure on multi-image IterableDataset inputs." + }, + { + "left": "issue:43749", + "right": "issue:44534", + "accept": false, + "reason": "FSDP CPU RAM efficient loading and non-persistent buffer initialization are different regressions." + }, + { + "left": "issue:44466", + "right": "issue:45357", + "accept": false, + "reason": "Both are serialization issues, but one concerns lm_head.weight and the other visual encoder keys in a different model." 
+ }, + { + "left": "issue:43296", + "right": "issue:44991", + "accept": false, + "reason": "PaddleOCR-VL loading in vLLM and tokenizer loading for EMBEDDIA/est-roberta are unrelated." + }, + { + "left": "issue:43792", + "right": "issue:44683", + "accept": false, + "reason": "Whisper load/run failure and compiled flex_attention on torch 2.9 are distinct bugs." + }, + { + "left": "issue:43404", + "right": "issue:45357", + "accept": false, + "reason": "Mistral3 tied-weight serialization and Qwen3.5 visual-encoder-key serialization are different model-specific issues." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 10, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:33357", + "issue:34567", + "issue:35707", + "issue:36010", + "issue:36331", + "issue:40990", + "issue:41720", + "issue:42617", + "issue:42915", + "issue:43316", + "issue:43317", + "issue:43322", + "issue:43388", + "issue:43526", + "issue:43531", + "issue:43540", + "issue:43550", + "issue:43572", + "issue:43577", + "issue:43606", + "issue:43708", + "issue:43720", + "issue:43746", + "issue:43782", + "issue:43873", + "issue:43881", + "issue:44186", + "issue:44292", + "issue:44361", + "issue:44423", + "issue:44479", + "issue:44514", + "issue:44545", + "issue:44610", + "issue:44671", + "issue:44743", + "issue:44756", + "issue:44877", + "issue:44898", + "issue:44913", + "issue:44918", + "issue:44991", + "issue:45059", + "issue:45072", + "issue:45161", + "issue:45406", + "issue:45464" + ], + "soft_pairs": [ + "issue:44423|issue:45406", + "issue:44186|issue:45072", + "issue:34567|issue:40990", + "issue:43526|issue:43881", + 
"issue:43873|issue:44756", + "issue:43577|issue:43708", + "issue:42617|issue:42915", + "issue:43550|issue:43720", + "issue:44610|issue:44898", + "issue:44877|issue:44991", + "issue:44479|issue:44918", + "issue:44186|issue:44610", + "issue:36331|issue:42915", + "issue:36331|issue:42617", + "issue:44671|issue:44743", + "issue:43316|issue:44913", + "issue:43550|issue:43606", + "issue:42915|issue:43317", + "issue:41720|issue:42617", + "issue:44292|issue:44479", + "issue:41720|issue:45161", + "issue:43531|issue:43572", + "issue:43540|issue:43782", + "issue:44186|issue:44361", + "issue:43388|issue:45059", + "issue:41720|issue:43873", + "issue:45406|issue:45464", + "issue:42915|issue:43322", + "issue:36010|issue:42617", + "issue:36331|issue:41720", + "issue:44514|issue:44545", + "issue:33357|issue:35707", + "issue:43606|issue:43720", + "issue:41720|issue:43746" + ], + "prepared_review_unit_hash": "ad5846cd3f9798fe24a4744026f15f426dd6ea94f419aaf2d150b3298e770742", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 34, + "serialized_chars": 37226, + "estimated_input_tokens": 9307, + "estimated_eval_tokens": 18870 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of unrelated issues; only one pair looks like a true duplicate. 
The two Qwen2_5_VLProcessor.apply_chat_template reports describe the same batched-input/padding=False crash.", + "confidence": 0.97, + "canonical_issue_reason": "issue:44545 is the best canonical issue: it states the bug clearly, matches the duplicate pair exactly, and has slightly more supporting references.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44545 is the best overall issue representative for this cluster because it is the only clear duplicate target and has the cleanest title.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44423", + "right": "issue:45406", + "accept": false, + "reason": "Both are serve crashes, but they affect different processors/models and different AttributeErrors, so they are not the same bug." + }, + { + "left": "issue:44186", + "right": "issue:45072", + "accept": false, + "reason": "Tokenizer NER/padding crash vs bfloat16 dtype mismatch in a different model path; unrelated underlying causes." + }, + { + "left": "issue:34567", + "right": "issue:40990", + "accept": false, + "reason": "Trainer token-count tracking and model perplexity are unrelated problems." + }, + { + "left": "issue:43526", + "right": "issue:43881", + "accept": false, + "reason": "BEiT label reduction bug and GLM-4V-9B load failure do not share the same code path." + }, + { + "left": "issue:43873", + "right": "issue:44756", + "accept": false, + "reason": "Both mention memory/offload, but one is quantization offloading behavior and the other is mmap/OOM on Strix Halo; different issues." + }, + { + "left": "issue:43577", + "right": "issue:43708", + "accept": false, + "reason": "Model dtype loading bug vs Trainer checkpoint step calculation; no overlap in the concrete defect." + }, + { + "left": "issue:42617", + "right": "issue:42915", + "accept": false, + "reason": "3d_parallel execution failure and Qwen3-MoE FineGrainedFP8Config failure are separate problems." 
+ }, + { + "left": "issue:43550", + "right": "issue:43720", + "accept": false, + "reason": "torch.compile/SDPA failure in Bamba is unrelated to packed-weight unpacking during accelerate loading in BitNet." + }, + { + "left": "issue:44610", + "right": "issue:44898", + "accept": false, + "reason": "Different models and different image-size/resolution bugs; not the same underlying change." + }, + { + "left": "issue:44877", + "right": "issue:44991", + "accept": false, + "reason": "Strict config loading and tokenizer loading failures are different bugs in different subsystems." + }, + { + "left": "issue:44479", + "right": "issue:44918", + "accept": false, + "reason": "Video-input regression for several Qwen VL models vs TRL SFT embedding unpacking; same family name, different failure mode." + }, + { + "left": "issue:44186", + "right": "issue:44610", + "accept": false, + "reason": "Tokenizer crash vs processor output-size mismatch; unrelated defects." + }, + { + "left": "issue:36331", + "right": "issue:42915", + "accept": false, + "reason": "CustomTrainer argument mismatch and Qwen3-MoE FP8 loading failure are unrelated." + }, + { + "left": "issue:36331", + "right": "issue:42617", + "accept": false, + "reason": "Trainer API breakage is not the same as the 3d_parallel runtime failure." + }, + { + "left": "issue:44671", + "right": "issue:44743", + "accept": false, + "reason": "CamemBERT masked-LM prediction issue and Qwen3.5 recurrent-state reset bug are unrelated." + }, + { + "left": "issue:43316", + "right": "issue:44913", + "accept": false, + "reason": "Both are config-related, but they concern different model configs and different persistence behaviors." + }, + { + "left": "issue:43550", + "right": "issue:43606", + "accept": false, + "reason": "Bamba compile/SDPA failure and Bark CPU-offload device mismatch are different concrete bugs." 
+ }, + { + "left": "issue:42915", + "right": "issue:43317", + "accept": false, + "reason": "Qwen3-MoE FP8 loading and dequantized model CPU/GPU offload loading are distinct issues." + }, + { + "left": "issue:41720", + "right": "issue:42617", + "accept": false, + "reason": "Different models and different failures: auto device mapping assert vs 3d_parallel execution." + }, + { + "left": "issue:44292", + "right": "issue:44479", + "accept": false, + "reason": "NVFP4 runtime failure and Qwen VL video regression are not the same bug." + }, + { + "left": "issue:41720", + "right": "issue:45161", + "accept": false, + "reason": "Both involve parallelism, but the models and failure modes differ; not mergeable as one fix." + }, + { + "left": "issue:43531", + "right": "issue:43572", + "accept": false, + "reason": "Sliding-window behavior in Qwen3-MoE and missing pad_token_idx in StableLmConfig are unrelated." + }, + { + "left": "issue:43540", + "right": "issue:43782", + "accept": false, + "reason": "Qwen3OmniMoe video-processing ValueError and Qwen3VL weight_only load error are different code paths." + }, + { + "left": "issue:44186", + "right": "issue:44361", + "accept": false, + "reason": "Two tokenizer/task crashes, but on different tokenizers and different failure conditions." + }, + { + "left": "issue:43388", + "right": "issue:45059", + "accept": false, + "reason": "Metric label gathering bug and SAM3 text/bbox behavior are not the same underlying defect." + }, + { + "left": "issue:41720", + "right": "issue:43873", + "accept": false, + "reason": "Auto device mapping CUDA assert and quantization offloading behavior are separate problems." + }, + { + "left": "issue:45406", + "right": "issue:45464", + "accept": false, + "reason": "Different serve-time failures on different models/processors and different execution paths." + }, + { + "left": "issue:42915", + "right": "issue:43322", + "accept": false, + "reason": "Qwen3-MoE FP8 loading and Llava Next segfault are unrelated." 
+ }, + { + "left": "issue:36010", + "right": "issue:42617", + "accept": false, + "reason": "GenerationMixin import error and 3d_parallel runtime failure do not match." + }, + { + "left": "issue:36331", + "right": "issue:41720", + "accept": false, + "reason": "Trainer loss signature mismatch is unrelated to Qwen3 auto device-mapping failure." + }, + { + "left": "issue:44514", + "right": "issue:44545", + "accept": true, + "reason": "Same exact Qwen2_5_VLProcessor.apply_chat_template crash on batched input with padding=False; clearly the same bug." + }, + { + "left": "issue:33357", + "right": "issue:35707", + "accept": false, + "reason": "MacOS CLIP bus error and progressive generation with inputs_embeds/past_key_values are unrelated." + }, + { + "left": "issue:43606", + "right": "issue:43720", + "accept": false, + "reason": "CPU-offload device mismatch and BitNet packed-weight unpacking are different issues." + }, + { + "left": "issue:41720", + "right": "issue:43746", + "accept": false, + "reason": "Qwen3 auto device mapping CUDA assert and GraniteSpeech PEFT checkpoint loading are unrelated." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The cluster summary is grounded in the packet and stays conservative. The only accepted duplicate pair, issue:44514 and issue:44545, is well supported by the matching Qwen2_5_VLProcessor.apply_chat_template batched-input/padding=False crash, and the canonical choice is reasonable given the slightly stronger references on issue:44545. The remaining soft-edge rejections are cautious and consistent with the titles." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 11, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:30064", + "issue:34567", + "issue:34634", + "issue:35707", + "issue:39692", + "issue:41720", + "issue:42886", + "issue:43381", + "issue:43421", + "issue:43531", + "issue:43550", + "issue:43596", + "issue:43606", + "issue:43645", + "issue:43653", + "issue:43746", + "issue:43782", + "issue:43844", + "issue:43873", + "issue:43927", + "issue:43994", + "issue:44062", + "issue:44077", + "issue:44186", + "issue:44220", + "issue:44368", + "issue:44451", + "issue:44492", + "issue:44509", + "issue:44589", + "issue:44610", + "issue:44625", + "issue:44671", + "issue:44843", + "issue:44898", + "issue:44918", + "issue:44936", + "issue:44987", + "issue:44991", + "issue:45042", + "issue:45072", + "issue:45081", + "issue:45183", + "issue:45356", + "issue:45357", + "issue:45381", + "issue:45479" + ], + "soft_pairs": [ + "issue:30064|issue:34634", + "issue:43550|issue:43653", + "issue:44368|issue:45357", + "issue:44492|issue:44509", + "issue:43606|issue:43653", + "issue:44625|issue:45479", + "issue:43421|issue:43927", + "issue:44898|issue:45072", + "issue:43782|issue:45381", + "issue:34634|issue:41720", + "issue:34567|issue:35707", + "issue:44077|issue:44625", + "issue:42886|issue:44843", + "issue:44062|issue:44451", + "issue:43381|issue:43844", + "issue:44451|issue:44991", + "issue:43421|issue:43653", + "issue:44918|issue:44936", + "issue:43531|issue:44589", + "issue:44987|issue:45042", + "issue:43994|issue:44671", + "issue:39692|issue:44610", + "issue:43421|issue:44186", + "issue:43596|issue:44220", + "issue:45081|issue:45356", + 
"issue:43531|issue:43645", + "issue:45183|issue:45479", + "issue:43746|issue:45381", + "issue:39692|issue:43873" + ], + "prepared_review_unit_hash": "2c82192f7daa3034568ab3ca3efe3ba761146cf4373b2846c60473f206c91079", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 29, + "serialized_chars": 36079, + "estimated_input_tokens": 9020, + "estimated_eval_tokens": 18296 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous, but there is a small tokenizer/special-token subset. No pair looks like a true duplicate from titles alone, so I would not merge any of the soft edges.", + "confidence": 0.56, + "canonical_issue_reason": "issue:43421 is the most central and actionable issue in the tokenizer/special-token subset: it describes a concrete backend/post-processor update bug with a clear fix path.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43421 is the best representative issue overall because it is a specific, code-path-level bug rather than a model-specific symptom or documentation problem.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:30064", + "right": "issue:34634", + "accept": false, + "reason": "Different components and failures: image processor segmentation maps vs Bark voice_preset handling." + }, + { + "left": "issue:43550", + "right": "issue:43653", + "accept": false, + "reason": "Unrelated bugs: torch.compile/SDPA model execution vs tokenizer special-token registration." 
+ }, + { + "left": "issue:44368", + "right": "issue:45357", + "accept": false, + "reason": "Different problems: Qwen3.5 LoRA tie_word_embeddings warning vs incorrect visual encoder keys on save_pretrained." + }, + { + "left": "issue:44492", + "right": "issue:44509", + "accept": false, + "reason": "Both are docs/wording-related, but they are not the same underlying bug or change." + }, + { + "left": "issue:43606", + "right": "issue:43653", + "accept": false, + "reason": "Device-mismatch offload bug vs BigBirdTokenizer special-token decode bug; different code paths." + }, + { + "left": "issue:44625", + "right": "issue:45479", + "accept": false, + "reason": "Both involve classification config, but one is propagation of num_labels and the other is a zero-loss problem; not the same issue." + }, + { + "left": "issue:43421", + "right": "issue:43927", + "accept": false, + "reason": "Both touch token/config state, but one is runtime post-processor updates and the other is save/load losing custom token IDs." + }, + { + "left": "issue:44898", + "right": "issue:45072", + "accept": false, + "reason": "Different failure classes: image resolution/interpolation vs bfloat16 dtype mismatch." + }, + { + "left": "issue:43782", + "right": "issue:45381", + "accept": false, + "reason": "Unrelated multimodal loading error vs video-position-id regression." + }, + { + "left": "issue:34634", + "right": "issue:41720", + "accept": false, + "reason": "Bark voice_preset bug is unrelated to Qwen3 auto device-mapping CUDA assert." + }, + { + "left": "issue:34567", + "right": "issue:35707", + "accept": false, + "reason": "Trainer token-count state bug vs generation with inputs_embeds/past_key_values; different subsystems." + }, + { + "left": "issue:44077", + "right": "issue:44625", + "accept": false, + "reason": "Patchtsmixer post_init policy issue is unrelated to Qwen3.5 num_labels propagation." 
+ }, + { + "left": "issue:42886", + "right": "issue:44843", + "accept": false, + "reason": "Both involve tokenizer loading/offline behavior, but one is cache fallback and the other is an unconditional model_info call in a regex patch." + }, + { + "left": "issue:44062", + "right": "issue:44451", + "accept": false, + "reason": "AddedToken keyword collision is a tokenizer-construction bug; ScandiBERT load failure is a different model-loading issue." + }, + { + "left": "issue:43381", + "right": "issue:43844", + "accept": false, + "reason": "Gradient checkpointing eval-mode limitation vs ZeRO-3 gradient growth; different training-path bugs." + }, + { + "left": "issue:44451", + "right": "issue:44991", + "accept": false, + "reason": "Both are load failures, but they concern different models and different root causes." + }, + { + "left": "issue:43421", + "right": "issue:43653", + "accept": false, + "reason": "Related tokenizer/special-token area, but not the same concrete bug: runtime post-processor refresh vs missing special-token registration." + }, + { + "left": "issue:44918", + "right": "issue:44936", + "accept": false, + "reason": "Different trainer failures: SFT embedding unpacking vs evaluate-after-train behavior." + }, + { + "left": "issue:43531", + "right": "issue:44589", + "accept": false, + "reason": "Sliding-window Qwen3-MoE bug is unrelated to Float8 storage object lookup." + }, + { + "left": "issue:44987", + "right": "issue:45042", + "accept": false, + "reason": "Model-loading regression vs image processor torchvision dependency regression; different code paths." + }, + { + "left": "issue:43994", + "right": "issue:44671", + "accept": false, + "reason": "Both are incorrect model outputs, but they are different models and different symptoms." + }, + { + "left": "issue:39692", + "right": "issue:44610", + "accept": false, + "reason": "Both mention processor/model mismatches, but one is a docs example error and the other is an OmDet-Turbo resolution mismatch." 
+ }, + { + "left": "issue:43421", + "right": "issue:44186", + "accept": false, + "reason": "Tokenizer-related, but one is special-token post-processing and the other is NER/batched padding truncation crash." + }, + { + "left": "issue:43596", + "right": "issue:44220", + "accept": false, + "reason": "IndexError under ZeRO-3 init is unrelated to the fbank feature extraction issue." + }, + { + "left": "issue:45081", + "right": "issue:45356", + "accept": false, + "reason": "Both mention fix_mistral_regex, but one is an AttributeError crash and the other is a different tokenizer regression/warning; not clearly the same concrete bug." + }, + { + "left": "issue:45183", + "right": "issue:45479", + "accept": false, + "reason": "Validation error messaging bug vs zero-loss classification bug; different underlying causes." + }, + { + "left": "issue:43746", + "right": "issue:45381", + "accept": false, + "reason": "PEFT local checkpoint loading bug is unrelated to Qwen2.5-VL video input position IDs." + }, + { + "left": "issue:39692", + "right": "issue:43873", + "accept": false, + "reason": "SigLIP2 docs/model mismatch and quantization offloading problems are different issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 12, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:30064", + "issue:36010", + "issue:40444", + "issue:43335", + "issue:43421", + "issue:43531", + "issue:43540", + "issue:43550", + "issue:43577", + "issue:43582", + "issue:43618", + "issue:43643", + "issue:43644", + "issue:43653", + "issue:43697", + "issue:43716", + "issue:43720", + "issue:43746", + "issue:43749", + "issue:43756", + "issue:43782", + "issue:43818", + "issue:43819", + "issue:43825", + "issue:43827", + "issue:43873", + "issue:43927", + "issue:43994", + "issue:44008", + "issue:44062", + "issue:44186", + "issue:44220", + "issue:44442", + "issue:44448", + "issue:44488", + "issue:44561", + "issue:44661", + "issue:44671", + "issue:44811", + "issue:44843", + "issue:44877", + "issue:44898", + "issue:44977", + "issue:45020", + "issue:45072", + "issue:45081", + "issue:45381" + ], + "soft_pairs": [ + "issue:43540|issue:44008", + "issue:43746|issue:43873", + "issue:40444|issue:43746", + "issue:43618|issue:43697", + "issue:44220|issue:44661", + "issue:43716|issue:44008", + "issue:43720|issue:43819", + "issue:44843|issue:45081", + "issue:43653|issue:43720", + "issue:43335|issue:43756", + "issue:44062|issue:44488", + "issue:43421|issue:44811", + "issue:44186|issue:44898", + "issue:30064|issue:36010", + "issue:43550|issue:45072", + "issue:43540|issue:45381", + "issue:43577|issue:43818", + "issue:43550|issue:43927", + "issue:43643|issue:44877", + "issue:43577|issue:43582", + "issue:43531|issue:43643", + "issue:43782|issue:44977", + "issue:43644|issue:43749", + 
"issue:44448|issue:44671", + "issue:43653|issue:43819", + "issue:44561|issue:45020", + "issue:43825|issue:43994", + "issue:43550|issue:43819", + "issue:44186|issue:44442", + "issue:43825|issue:43827" + ], + "prepared_review_unit_hash": "38fd18ffe6a92ad3f54bcb445cdaddbf0df954c5e8a3836077129f979c5a7b47", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 30, + "serialized_chars": 36314, + "estimated_input_tokens": 9079, + "estimated_eval_tokens": 18414 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is very heterogeneous; most items are unrelated model-specific bugs. No soft pair looks like the same concrete underlying bug/change, so all soft edges should be rejected. The broadest and most representative issue is the open remote_code regression report.", + "confidence": 0.49, + "canonical_issue_reason": "issue:45020 is the broadest, most central report in the set: it describes recent-version breakage across models using `remote_code`, which matches the general loading/regression theme better than the narrower single-model reports.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45020 is the best global representative because it is current, open, and describes an umbrella regression affecting multiple remote-code models rather than one narrow model-specific failure.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43540", + "right": "issue:44008", + "accept": false, + "reason": "Different models and failures: Qwen3OmniMoe video-input processing vs Gemma3n variable-name collision in forward()." 
+ }, + { + "left": "issue:43746", + "right": "issue:43873", + "accept": false, + "reason": "Both touch loading/offloading, but one is PEFT local checkpoint loading and the other is quantization offloading behavior; not the same bug." + }, + { + "left": "issue:40444", + "right": "issue:43746", + "accept": false, + "reason": "IterableDataset multi-image finetuning failure is unrelated to GraniteSpeech PEFT checkpoint loading." + }, + { + "left": "issue:43618", + "right": "issue:43697", + "accept": false, + "reason": "CLIP output attentions assignment and RTDetr output drift are different code paths and symptoms." + }, + { + "left": "issue:44220", + "right": "issue:44661", + "accept": false, + "reason": "Audio fbank extraction issue is unrelated to tokenizer mapping / add-new-model-like failure." + }, + { + "left": "issue:43716", + "right": "issue:44008", + "accept": false, + "reason": "Different modality bugs: image-preprocessor dtype mismatch vs audio/video tensor name collision." + }, + { + "left": "issue:43720", + "right": "issue:43819", + "accept": false, + "reason": "BitNet packed-weight loading and DAC STE mismatch are unrelated implementation bugs." + }, + { + "left": "issue:44843", + "right": "issue:45081", + "accept": false, + "reason": "Both involve `_patch_mistral_regex`, but one is offline `model_info()` access and the other is a backend_tokenizer attribute crash; distinct failure modes." + }, + { + "left": "issue:43653", + "right": "issue:43720", + "accept": false, + "reason": "BigBirdTokenizer special-token decode bug is unrelated to BitNet accelerate loading." + }, + { + "left": "issue:43335", + "right": "issue:43756", + "accept": false, + "reason": "SwitchTransformers sparse-layer construction and Smollm3 RoPE-layer dropping are different model-specific configuration bugs." 
+ }, + { + "left": "issue:44062", + "right": "issue:44488", + "accept": false, + "reason": "AddedToken constructor conflict is not the same as a model failing to load a specific checkpoint." + }, + { + "left": "issue:43421", + "right": "issue:44811", + "accept": false, + "reason": "Runtime post-processor update for special tokens is different from Whisper batch_decode skipping special tokens." + }, + { + "left": "issue:44186", + "right": "issue:44898", + "accept": false, + "reason": "LayoutLMv2 tokenizer padding/NER crash is unrelated to Perceiver non-default resolution inference." + }, + { + "left": "issue:30064", + "right": "issue:36010", + "accept": false, + "reason": "Image processor segmentation-map handling is unrelated to GenerationMixin import failure." + }, + { + "left": "issue:43550", + "right": "issue:45072", + "accept": false, + "reason": "Bamba torch.compile+SDPA failure and dtype mismatches in other models are different regressions." + }, + { + "left": "issue:43540", + "right": "issue:45381", + "accept": false, + "reason": "Qwen3OmniMoe video-input failure is unrelated to Qwen2.5-VL video position id issues." + }, + { + "left": "issue:43577", + "right": "issue:43818", + "accept": false, + "reason": "BLIP2 dtype persistence and Video-LLaVA temporal-attention/weight sharing are unrelated." + }, + { + "left": "issue:43550", + "right": "issue:43927", + "accept": false, + "reason": "torch.compile+SDPA failure is unrelated to DiaConfig save/load token-id loss." + }, + { + "left": "issue:43643", + "right": "issue:44877", + "accept": false, + "reason": "Both involve config loading, but one is missing fields from `trust_remote_code` and the other is strict config blocking granite_speech; not the same defect." + }, + { + "left": "issue:43577", + "right": "issue:43582", + "accept": false, + "reason": "BLIP2 dtype mismatch and Apple Silicon caching allocator TypeError are unrelated." 
+ }, + { + "left": "issue:43531", + "right": "issue:43643", + "accept": false, + "reason": "Sliding-window behavior in Qwen3-MoE is unrelated to missing fields in AutoConfig with `trust_remote_code`." + }, + { + "left": "issue:43782", + "right": "issue:44977", + "accept": false, + "reason": "Qwen3VL weight_only loading error is different from Qwen3.5 flash-attention generation behavior." + }, + { + "left": "issue:43644", + "right": "issue:43749", + "accept": false, + "reason": "Non-persistent buffer junk in v5 and FSDP CPU RAM efficient loading are different regressions." + }, + { + "left": "issue:44448", + "right": "issue:44671", + "accept": false, + "reason": "Both are output regressions in v5, but they affect different models and code paths." + }, + { + "left": "issue:43653", + "right": "issue:43819", + "accept": false, + "reason": "Tokenizer special-token registration bug is unrelated to DAC latent reconstruction mismatch." + }, + { + "left": "issue:44561", + "right": "issue:45020", + "accept": false, + "reason": "44561 is one specific remote_code breakage cause, while 45020 is a broad umbrella report; not the same concrete bug." + }, + { + "left": "issue:43825", + "right": "issue:43994", + "accept": false, + "reason": "Pipeline error-message wording is unrelated to SigLIP2 nonsensical outputs." + }, + { + "left": "issue:43550", + "right": "issue:43819", + "accept": false, + "reason": "SDPA/torch.compile failure and DAC from_latents mismatch are unrelated model implementations." + }, + { + "left": "issue:44186", + "right": "issue:44442", + "accept": false, + "reason": "LayoutLMv2 tokenizer crashes and FastSpeech2Conformer tokenizer loading failure are different tokenizer bugs." + }, + { + "left": "issue:43825", + "right": "issue:43827", + "accept": false, + "reason": "One is an incorrect pipeline error message; the other is stale docs referencing removed pipelines. Related area, but not the same bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 13, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:30064", + "issue:34634", + "issue:36010", + "issue:36331", + "issue:41720", + "issue:42175", + "issue:43257", + "issue:43262", + "issue:43329", + "issue:43452", + "issue:43454", + "issue:43526", + "issue:43577", + "issue:43582", + "issue:43606", + "issue:43638", + "issue:43650", + "issue:43653", + "issue:43697", + "issue:43720", + "issue:43746", + "issue:43749", + "issue:43782", + "issue:43819", + "issue:43906", + "issue:44060", + "issue:44291", + "issue:44368", + "issue:44485", + "issue:44509", + "issue:44554", + "issue:44568", + "issue:44610", + "issue:44671", + "issue:44805", + "issue:44811", + "issue:44821", + "issue:44857", + "issue:44912", + "issue:44936", + "issue:44991", + "issue:45072", + "issue:45081", + "issue:45084", + "issue:45325", + "issue:45406", + "issue:45488", + "issue:45538" + ], + "soft_pairs": [ + "issue:44821|issue:45084", + "issue:43650|issue:43906", + "issue:41720|issue:44991", + "issue:44805|issue:44936", + "issue:45081|issue:45406", + "issue:43606|issue:45072", + "issue:30064|issue:36331", + "issue:42175|issue:45081", + "issue:44610|issue:45072", + "issue:43782|issue:45325", + "issue:44485|issue:44509", + "issue:34634|issue:36010", + "issue:43720|issue:45072", + "issue:44060|issue:44368", + "issue:43262|issue:43819", + "issue:43582|issue:44857", + "issue:43749|issue:44291", + "issue:43653|issue:44671", + "issue:43526|issue:43697", + "issue:43526|issue:44554", + "issue:45084|issue:45538", + "issue:43653|issue:44912", + "issue:43454|issue:43577", + 
"issue:44568|issue:45488", + "issue:34634|issue:36331", + "issue:43452|issue:44991", + "issue:43638|issue:44936", + "issue:43257|issue:43329", + "issue:43746|issue:44811" + ], + "prepared_review_unit_hash": "2fa2ced6f8b03f58f5db9f2131400b92a7e881cdd9be57f2f98359e9e132ded2", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 29, + "serialized_chars": 36899, + "estimated_input_tokens": 9225, + "estimated_eval_tokens": 18706 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are a heterogeneous mix of unrelated bug reports; none of the soft-similarity pairs look like true duplicates or the same fixable code-path change. I would not merge any pair.", + "confidence": 0.97, + "canonical_issue_reason": "No single true canonical duplicate stands out because the cluster is heterogeneous. If one issue must be chosen as the representative anchor, issue #45072 is a concrete, well-scoped loading/inference bug report rather than a vague or documentation-only report.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #45072 is the best representative issue in this set because it is a specific, reproducible runtime bug with a clear failure mode and actionable description; several other items are similarly about model loading/inference, but none are close enough to be duplicates.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44821", + "right": "issue:45084", + "accept": false, + "reason": "Different failures: loading AutoImageProcessor from URL versus a Jinja/template compilation TypeError. No shared code-path." 
+ }, + { + "left": "issue:43650", + "right": "issue:43906", + "accept": false, + "reason": "Unrelated titles and problems; one is a filler issue, the other is a reproduced bug report." + }, + { + "left": "issue:41720", + "right": "issue:44991", + "accept": false, + "reason": "Different models and subsystems: Qwen3 device-mapping CUDA assert versus tokenizer loading for EMBEDDIA/est-roberta." + }, + { + "left": "issue:44805", + "right": "issue:44936", + "accept": false, + "reason": "Mask/index shape error in model forward path versus trainer evaluate failing after train; not the same bug." + }, + { + "left": "issue:45081", + "right": "issue:45406", + "accept": false, + "reason": "Both are loading-time attribute errors, but they hit different objects and code paths (_patch_mistral_regex/tokenizer backend vs Gemma4Processor _tokenizer)." + }, + { + "left": "issue:43606", + "right": "issue:45072", + "accept": false, + "reason": "Both involve inference/loading mismatches, but the concrete failures are different: CPU offload device mismatch versus bfloat16 dtype mismatches." + }, + { + "left": "issue:30064", + "right": "issue:36331", + "accept": false, + "reason": "Different components and symptoms: image processor segmentation maps versus custom trainer loss signature." + }, + { + "left": "issue:42175", + "right": "issue:45081", + "accept": false, + "reason": "Package install/backend dependency issue versus tokenizer regex patch crash; unrelated." + }, + { + "left": "issue:44610", + "right": "issue:45072", + "accept": false, + "reason": "Processor output size mismatch versus dtype mismatch in inference; not the same underlying bug." + }, + { + "left": "issue:43782", + "right": "issue:45325", + "accept": false, + "reason": "Different Qwen VL bugs: weight_only load error versus RoPE position-id scaling regression." 
+ }, + { + "left": "issue:44485", + "right": "issue:44509", + "accept": false, + "reason": "One is a model implementation discussion, the other is a docs cleanup issue; no duplicate relation." + }, + { + "left": "issue:34634", + "right": "issue:36010", + "accept": false, + "reason": "Different processors and failures: Bark voice_preset versus GenerationMixin import error." + }, + { + "left": "issue:43720", + "right": "issue:45072", + "accept": false, + "reason": "Accelerate loading of packed weights versus bfloat16 inference dtype mismatch; related theme but different concrete bugs." + }, + { + "left": "issue:44060", + "right": "issue:44368", + "accept": false, + "reason": "Both mention tied-weights warnings, but one reports an incorrect tie to a bias tensor while the other reports a warning emitted during LoRA fine-tuning." + }, + { + "left": "issue:43262", + "right": "issue:43819", + "accept": false, + "reason": "Audio chat-template sampling-rate default bug versus DAC latent/forward mismatch; unrelated." + }, + { + "left": "issue:43582", + "right": "issue:44857", + "accept": false, + "reason": "Apple Silicon TypeError in allocator warmup versus CUDA AMP crash in a loss function; different platforms and code paths." + }, + { + "left": "issue:43749", + "right": "issue:44291", + "accept": false, + "reason": "Both are loading-related, but FSDP CPU RAM efficient loading and init_empty_weights unexpected argument are distinct compatibility issues." + }, + { + "left": "issue:43653", + "right": "issue:44671", + "accept": false, + "reason": "Tokenizer special-token registration bug versus masked LM prediction regression; not the same issue." + }, + { + "left": "issue:43526", + "right": "issue:43697", + "accept": false, + "reason": "Processor label reduction bug versus RTDetr output divergence in v5; different layers of the stack." 
+ }, + { + "left": "issue:43526", + "right": "issue:44554", + "accept": false, + "reason": "Vision processor label handling versus MPS attention correctness issue; unrelated." + }, + { + "left": "issue:45084", + "right": "issue:45538", + "accept": false, + "reason": "Template-node compilation error versus CLIPTokenizer max-length constant bug; unrelated." + }, + { + "left": "issue:43653", + "right": "issue:44912", + "accept": false, + "reason": "Tokenizer special token decoding issue versus MXFP4 quantization fallback; different subsystems." + }, + { + "left": "issue:43454", + "right": "issue:43577", + "accept": false, + "reason": "Weight tying bug in AyaVision versus dtype propagation bug in Blip2; different fixes." + }, + { + "left": "issue:44568", + "right": "issue:45488", + "accept": false, + "reason": "Both are tokenizer regressions, but they affect different tokenizer implementations and failure modes." + }, + { + "left": "issue:34634", + "right": "issue:36331", + "accept": false, + "reason": "BarkProcessor voice preset issue is unrelated to CustomTrainer.compute_loss signature incompatibility." + }, + { + "left": "issue:43452", + "right": "issue:44991", + "accept": false, + "reason": "gguf_file/from_pretrained handling versus a specific tokenizer loading regression; not the same code-path." + }, + { + "left": "issue:43638", + "right": "issue:44936", + "accept": false, + "reason": "DeepSpeed ZeRO-3 empty-index error versus trainer.evaluate failing after train; different behaviors and likely causes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 14, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:36010", + "issue:36331", + "issue:38175", + "issue:39692", + "issue:41720", + "issue:42175", + "issue:42915", + "issue:43257", + "issue:43262", + "issue:43317", + "issue:43329", + "issue:43450", + "issue:43452", + "issue:43531", + "issue:43540", + "issue:43550", + "issue:43577", + "issue:43582", + "issue:43645", + "issue:43696", + "issue:43697", + "issue:43720", + "issue:43742", + "issue:43749", + "issue:43756", + "issue:43819", + "issue:43854", + "issue:43927", + "issue:43931", + "issue:44291", + "issue:44479", + "issue:44560", + "issue:44568", + "issue:44589", + "issue:44610", + "issue:44756", + "issue:44811", + "issue:44912", + "issue:44938", + "issue:44977", + "issue:44987", + "issue:45042", + "issue:45072", + "issue:45356", + "issue:45357", + "issue:45381", + "issue:45538" + ], + "soft_pairs": [ + "issue:43854|issue:44977", + "issue:38175|issue:44610", + "issue:44589|issue:45042", + "issue:39692|issue:44912", + "issue:43257|issue:43696", + "issue:43819|issue:43927", + "issue:43262|issue:44811", + "issue:36010|issue:43645", + "issue:43720|issue:44610", + "issue:43550|issue:44912", + "issue:44568|issue:45356", + "issue:43749|issue:44756", + "issue:43329|issue:44479", + "issue:43531|issue:45042", + "issue:43257|issue:43931", + "issue:36010|issue:36331", + "issue:43329|issue:43450", + "issue:43257|issue:43756", + "issue:43577|issue:45072", + "issue:43452|issue:43582", + "issue:43697|issue:45357", + "issue:41720|issue:43540", + "issue:42175|issue:44938", + 
"issue:44479|issue:45381", + "issue:44987|issue:45538", + "issue:43927|issue:44912", + "issue:43540|issue:44560", + "issue:43317|issue:45538", + "issue:42915|issue:43742", + "issue:43540|issue:44479", + "issue:44291|issue:45042" + ], + "prepared_review_unit_hash": "39fe55d49ec512f122f63e69482b751bcdf4bda86d08e1c76a19ef63b3beae1f", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 31, + "serialized_chars": 36508, + "estimated_input_tokens": 9127, + "estimated_eval_tokens": 18510 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is heterogeneous: the soft-similarity pairs mostly connect unrelated bugs across loading, quantization, video/multimodal processing, tokenizer behavior, and environment issues. I reject all soft edges and treat this as a broad mixed cluster rather than a duplicate set. 
If a single issue must anchor the cluster, 43749 is the strongest representative.", + "confidence": 0.79, + "canonical_issue_reason": "No true duplicate canonical spans this cluster; if one representative is needed, issue 43749 is the best anchor because it has the highest activity/inbound references and describes a broad loading/regression problem rather than a narrow model-specific symptom.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43749 is the best cluster representative: high discussion/inbound references and a broad infrastructure loading failure make it a stronger triage anchor than the many isolated model- or feature-specific bugs.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43854", + "right": "issue:44977", + "accept": false, + "reason": "Different failures: GLM-4.7 load/test problem vs Qwen3.5 flash-attention generation regression." + }, + { + "left": "issue:38175", + "right": "issue:44610", + "accept": false, + "reason": "Unrelated symptoms: zero probabilities in SigLIP2 vs processor/model input-size mismatch in OmDet-Turbo." + }, + { + "left": "issue:44589", + "right": "issue:45042", + "accept": false, + "reason": "Different layers: Float8 storage/type loading error vs PIL image processor incorrectly requiring torchvision." + }, + { + "left": "issue:39692", + "right": "issue:44912", + "accept": false, + "reason": "Docs/example mistakes for SigLIP2 vs MXFP4 quantization fallback for gpt-oss-20b." + }, + { + "left": "issue:43257", + "right": "issue:43696", + "accept": false, + "reason": "Different problems: Qwen3 MoE loading with accelerate+deepspeed vs plain CUDA out-of-memory on GPT-oss-20b." + }, + { + "left": "issue:43819", + "right": "issue:43927", + "accept": false, + "reason": "DAC forward/STE mismatch is unrelated to DiaConfig save/load token ID corruption." 
+ }, + { + "left": "issue:43262", + "right": "issue:44811", + "accept": false, + "reason": "Audio chat-template sampling-rate default bug is unrelated to Whisper batch_decode skip_special_tokens handling." + }, + { + "left": "issue:36010", + "right": "issue:43645", + "accept": false, + "reason": "Importing GenerationMixin and Jupyter custom-model initialization are separate breakages." + }, + { + "left": "issue:43720", + "right": "issue:44610", + "accept": false, + "reason": "Packed-weight unpacking during accelerate loading is unrelated to OmDet processor output-shape mismatch." + }, + { + "left": "issue:43550", + "right": "issue:44912", + "accept": false, + "reason": "torch.compile/SDPA failure in Bamba is not the same as MXFP4 loading fallback in gpt-oss-20b." + }, + { + "left": "issue:44568", + "right": "issue:45356", + "accept": false, + "reason": "Tokenizer add_special_tokens regression is distinct from Kimi-K2.5 codec/warning behavior." + }, + { + "left": "issue:43749", + "right": "issue:44756", + "accept": false, + "reason": "Both involve loading/performance, but one is FSDP CPU RAM efficient loading and the other is mmap on Strix Halo OOM; not the same bug." + }, + { + "left": "issue:43329", + "right": "issue:44479", + "accept": false, + "reason": "Undefined video-branch helper variables are a code bug; the other is a broader v5.3 video regression across Qwen models." + }, + { + "left": "issue:43531", + "right": "issue:45042", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior has no overlap with PIL backend torchvision dependency regression." + }, + { + "left": "issue:43257", + "right": "issue:43931", + "accept": false, + "reason": "MoE weights not converted under accelerate+deepspeed is different from Qwen3-VL-30B shape-mismatch loading." + }, + { + "left": "issue:36010", + "right": "issue:36331", + "accept": false, + "reason": "GenerationMixin import path breakage is unrelated to CustomTrainer.compute_loss signature mismatch." 
+ }, + { + "left": "issue:43329", + "right": "issue:43450", + "accept": false, + "reason": "Undefined video token-count helpers differ from batched video processor output-shape issues." + }, + { + "left": "issue:43257", + "right": "issue:43756", + "accept": false, + "reason": "Qwen3 MoE loading/conversion failure is unrelated to Smollm3 RoPE-layer drop." + }, + { + "left": "issue:43577", + "right": "issue:45072", + "accept": false, + "reason": "Blip2 dtype staying float32 is a different dtype bug than SwitchTransformers/TimmWrapperModel bfloat16 mismatches." + }, + { + "left": "issue:43452", + "right": "issue:43582", + "accept": false, + "reason": "gguf_file tokenizer/model loading failure is unrelated to Apple Silicon caching_allocator_warmup TypeError." + }, + { + "left": "issue:43697", + "right": "issue:45357", + "accept": false, + "reason": "RTDetrV2 output drift is not the same as Qwen3.5 save_pretrained visual-encoder key regression." + }, + { + "left": "issue:41720", + "right": "issue:43540", + "accept": false, + "reason": "Qwen3 auto device-map CUDA assert on A800 and Qwen3OmniMoe video ValueError are different failures." + }, + { + "left": "issue:42175", + "right": "issue:44938", + "accept": false, + "reason": "Missing TensorFlow in extras install and Python 3.14 import failure are unrelated environment/package issues." + }, + { + "left": "issue:44479", + "right": "issue:45381", + "accept": false, + "reason": "The former is a broad video-input regression across several Qwen models; the latter is a specific vision_position_ids bug in qwen2.5-vl." + }, + { + "left": "issue:44987", + "right": "issue:45538", + "accept": false, + "reason": "Loading physical-intelligence/fast on transformers>=5.1.0 and CLIPTokenizer model_max_length are unrelated." + }, + { + "left": "issue:43927", + "right": "issue:44912", + "accept": false, + "reason": "DiaConfig token-ID persistence and MXFP4 quantization fallback affect different code paths and model families." 
+ }, + { + "left": "issue:43540", + "right": "issue:44560", + "accept": false, + "reason": "Qwen3OmniMoe video processing ValueError is distinct from Qwen3-vl-embedding StopIteration." + }, + { + "left": "issue:43317", + "right": "issue:45538", + "accept": false, + "reason": "dequantized model offload/device_map=auto failure is unrelated to CLIPTokenizer max-length handling." + }, + { + "left": "issue:42915", + "right": "issue:43742", + "accept": false, + "reason": "Qwen3Moe FineGrainedFP8Config failure and MobileLLM key error are different model-loading bugs." + }, + { + "left": "issue:43540", + "right": "issue:44479", + "accept": false, + "reason": "A specific Qwen3OmniMoe video ValueError is not the same as the broader v5.3 video regression across multiple Qwen models." + }, + { + "left": "issue:44291", + "right": "issue:45042", + "accept": false, + "reason": "init_empty_weights unexpected argument error is unrelated to the torchvision requirement regression in image processors." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 15, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:28282", + "issue:29127", + "issue:33357", + "issue:34567", + "issue:34689", + "issue:36010", + "issue:36331", + "issue:38175", + "issue:42175", + "issue:42915", + "issue:43452", + "issue:43493", + "issue:43526", + "issue:43577", + "issue:43582", + "issue:43643", + "issue:43646", + "issue:43688", + "issue:43742", + "issue:43749", + "issue:43756", + "issue:43782", + "issue:43824", + "issue:43906", + "issue:43994", + "issue:44062", + "issue:44291", + "issue:44484", + "issue:44488", + "issue:44514", + 
"issue:44561", + "issue:44589", + "issue:44704", + "issue:44792", + "issue:44821", + "issue:44898", + "issue:44964", + "issue:44991", + "issue:45042", + "issue:45081", + "issue:45137", + "issue:45290", + "issue:45325", + "issue:45375", + "issue:45381", + "issue:45405", + "issue:45479", + "issue:45538" + ], + "soft_pairs": [ + "issue:33357|issue:34567", + "issue:45137|issue:45405", + "issue:44062|issue:44991", + "issue:43906|issue:45381", + "issue:42915|issue:45538", + "issue:42915|issue:44821", + "issue:42175|issue:43824", + "issue:42175|issue:44488", + "issue:43526|issue:45479", + "issue:38175|issue:44898", + "issue:43582|issue:43782", + "issue:28282|issue:34689", + "issue:43646|issue:43749", + "issue:43493|issue:43994", + "issue:43742|issue:44291", + "issue:36010|issue:42175", + "issue:43643|issue:45375", + "issue:28282|issue:29127", + "issue:43756|issue:45325", + "issue:44484|issue:44704", + "issue:34567|issue:36331", + "issue:44062|issue:45081", + "issue:43452|issue:44488", + "issue:44291|issue:44821", + "issue:44792|issue:44964", + "issue:43688|issue:43749", + "issue:44514|issue:45290", + "issue:43452|issue:43577", + "issue:43742|issue:44589", + "issue:43643|issue:44561", + "issue:44514|issue:45381", + "issue:45042|issue:45081" + ], + "prepared_review_unit_hash": "beb48c37b6c02c005b90f9cdecc8251348a8ff7d32cb9792226d58c9399aa3b2", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 32, + "serialized_chars": 37451, + "estimated_input_tokens": 9363, + "estimated_eval_tokens": 18982 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most items are unrelated issue reports, mostly around distinct model-loading, tokenizer, or backend regressions. 
The only strong duplicate-looking pair is the Qwen2.5-VL apply_chat_template crash reports (44514/45290); the SigLIP2 pair is a plausible same-bug match but less certain. The rest look like false-positive similarity matches.", + "confidence": 0.66, + "canonical_issue_reason": "Issue 44514 is the better canonical representative for the apply_chat_template crash pair: both 44514 and 45290 describe crashes in the Qwen2.5-VL chat-template/tokenization path, and 44514 is the broader repro while 45290 is a more specific trigger.", + "canonical_pr_reason": null, + "best_issue_reason": "44514 is the clearest issue to keep as canonical because it directly names the shared apply_chat_template crash and has the broader repro description.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:33357", + "right": "issue:34567", + "accept": false, + "reason": "Different subsystems and failures: a CLIP Mac bus error vs TrainerState token-count tracking." + }, + { + "left": "issue:45137", + "right": "issue:45405", + "accept": false, + "reason": "DeepSpeed ZeRO3 deque errors and a PEFT version bump are unrelated." + }, + { + "left": "issue:44062", + "right": "issue:44991", + "accept": false, + "reason": "Both involve tokenizer/model loading, but the reported failures and likely causes differ." + }, + { + "left": "issue:43906", + "right": "issue:45381", + "accept": false, + "reason": "Different Qwen2.5-VL regressions affecting different behaviors and code paths." + }, + { + "left": "issue:42915", + "right": "issue:45538", + "accept": false, + "reason": "FP8 MoE loading and CLIP tokenizer max length are unrelated." + }, + { + "left": "issue:42915", + "right": "issue:44821", + "accept": false, + "reason": "Different model/load paths; no clear shared underlying bug." + }, + { + "left": "issue:42175", + "right": "issue:43824", + "accept": false, + "reason": "Backend packaging vs missing model-class import are unrelated." 
+ }, + { + "left": "issue:42175", + "right": "issue:44488", + "accept": false, + "reason": "Dependency packaging and a specific model load failure are not the same bug." + }, + { + "left": "issue:43526", + "right": "issue:45479", + "accept": false, + "reason": "Beit label reduction and sequence-classification zero loss are different problems." + }, + { + "left": "issue:38175", + "right": "issue:44898", + "accept": false, + "reason": "Same broad vision area, but different models and failure modes." + }, + { + "left": "issue:43582", + "right": "issue:43782", + "accept": false, + "reason": "Apple Silicon allocator TypeError and Qwen3VL weight_only loading error are unrelated." + }, + { + "left": "issue:28282", + "right": "issue:34689", + "accept": false, + "reason": "Missing PyTorch ImportError and Llama 3.2 Vision load breakage are different issues." + }, + { + "left": "issue:43646", + "right": "issue:43749", + "accept": false, + "reason": "Custom model initialization breakage and FSDP CPU RAM-efficient loading are separate changes." + }, + { + "left": "issue:43493", + "right": "issue:43994", + "accept": true, + "reason": "Both report SigLIP2 correctness regressions on the same checkpoint, so they plausibly describe the same underlying model bug." + }, + { + "left": "issue:43742", + "right": "issue:44291", + "accept": false, + "reason": "A MobileLLM key error and an init_empty_weights TypeError are not the same failure." + }, + { + "left": "issue:36010", + "right": "issue:42175", + "accept": false, + "reason": "GenerationMixin import failure and TensorFlow backend packaging are unrelated." + }, + { + "left": "issue:43643", + "right": "issue:45375", + "accept": false, + "reason": "Both mention missing config fields, but the model/config contexts differ too much to treat as one bug." + }, + { + "left": "issue:28282", + "right": "issue:29127", + "accept": false, + "reason": "Missing PyTorch dependency error vs LayoutLMv3 message clarity are unrelated." 
+ }, + { + "left": "issue:43756", + "right": "issue:45325", + "accept": false, + "reason": "RoPE layer dropping and Qwen2.5-VL temporal position scaling are distinct model-specific bugs." + }, + { + "left": "issue:44484", + "right": "issue:44704", + "accept": false, + "reason": "save_pretrained shard sizing and AutoProcessor kwargs propagation are different code paths." + }, + { + "left": "issue:34567", + "right": "issue:36331", + "accept": false, + "reason": "TrainerState token counting and compute_loss signature compatibility are separate Trainer issues." + }, + { + "left": "issue:44062", + "right": "issue:45081", + "accept": false, + "reason": "AddedToken special-kw handling and Mistral regex patching are unrelated tokenizer bugs." + }, + { + "left": "issue:43452", + "right": "issue:44488", + "accept": false, + "reason": "gguf_file loading breakage and a specific sleng-bert load failure are not clearly the same bug." + }, + { + "left": "issue:44291", + "right": "issue:44821", + "accept": false, + "reason": "init_empty_weights TypeError and loading AutoImageProcessor from URL are unrelated." + }, + { + "left": "issue:44792", + "right": "issue:44964", + "accept": false, + "reason": "Janus test failure and Phi-4 multimodal model load failure are different multimodal issues." + }, + { + "left": "issue:43688", + "right": "issue:43749", + "accept": false, + "reason": "Auxiliary-loss normalization and FSDP loading are unrelated." + }, + { + "left": "issue:44514", + "right": "issue:45290", + "accept": true, + "reason": "Both are apply_chat_template/tokenize crashes in the Qwen2.5-VL path; the triggers differ, but they look close enough to be the same underlying bug." + }, + { + "left": "issue:43452", + "right": "issue:43577", + "accept": false, + "reason": "gguf_file loading and BLIP-2 dtype handling are different bugs." 
+ }, + { + "left": "issue:43742", + "right": "issue:44589", + "accept": false, + "reason": "Model-load key errors and missing Float8 storage errors are unrelated." + }, + { + "left": "issue:43643", + "right": "issue:44561", + "accept": false, + "reason": "Missing config fields and removal of is_torch_fx_available are different breakages." + }, + { + "left": "issue:44514", + "right": "issue:45381", + "accept": false, + "reason": "An apply_chat_template crash and a position-id scaling bug are unrelated." + }, + { + "left": "issue:45042", + "right": "issue:45081", + "accept": false, + "reason": "Image-processor torchvision dependency regression and Mistral regex patching are unrelated." + } + ] + }, + "evaluator_result": { + "accept": false, + "feedback": "The writeup overstates the evidence in the packet. In particular, issue 44514 vs 45290 are not shown as exact duplicates by the titles alone: one is about batched input with `padding=False`, the other about assistant tool-call messages with no content. The canonical-issue reason also claims they match \"almost verbatim\" and specifies a concrete shared failure mode that is not grounded in the packet text. The SigLIP2 pair may be plausible, but the overall summary and the accepted soft-edge verdicts should be phrased more conservatively." 
+ }, + "evaluator_used": true, + "retried": true, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 16, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:24643", + "issue:28282", + "issue:29127", + "issue:30064", + "issue:32090", + "issue:33357", + "issue:33666", + "issue:35141", + "issue:38175", + "issue:40444", + "issue:41720", + "issue:41762", + "issue:43122", + "issue:43295", + "issue:43452", + "issue:43525", + "issue:43531", + "issue:43577", + "issue:43643", + "issue:43653", + "issue:43688", + "issue:43697", + "issue:43720", + "issue:43742", + "issue:43819", + "issue:43906", + "issue:43927", + "issue:44361", + "issue:44442", + "issue:44464", + "issue:44466", + "issue:44479", + "issue:44554", + "issue:44568", + "issue:44625", + "issue:44743", + "issue:44811", + "issue:44821", + "issue:44843", + "issue:44877", + "issue:44898", + "issue:44936", + "issue:44977", + "issue:44991", + "issue:45005", + "issue:45216", + "issue:45356", + "issue:45440" + ], + "soft_pairs": [ + "issue:43697|issue:45216", + "issue:43531|issue:44821", + "issue:44625|issue:44877", + "issue:43452|issue:44843", + "issue:43819|issue:44811", + "issue:43720|issue:44898", + "issue:43688|issue:45440", + "issue:43653|issue:44568", + "issue:41762|issue:44936", + "issue:44464|issue:44743", + "issue:44466|issue:45005", + "issue:43295|issue:45356", + "issue:24643|issue:29127", + "issue:41720|issue:43906", + "issue:40444|issue:44811", + "issue:43525|issue:43643", + "issue:44361|issue:44442", + "issue:43577|issue:43697", + "issue:38175|issue:43295", + "issue:43653|issue:44361", + "issue:44843|issue:44991", + "issue:44479|issue:44936", + "issue:28282|issue:32090", + "issue:28282|issue:33357", + 
"issue:28282|issue:35141", + "issue:43122|issue:43906", + "issue:44977|issue:45356", + "issue:30064|issue:33666", + "issue:44464|issue:44977", + "issue:43531|issue:43742", + "issue:43653|issue:43927", + "issue:43577|issue:44554" + ], + "prepared_review_unit_hash": "94ed7c71b5d7e2361920365a8b2a93d9a93b88c255acd0f0d5b410e3b6333623", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 32, + "serialized_chars": 37547, + "estimated_input_tokens": 9387, + "estimated_eval_tokens": 19030 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is highly heterogeneous: the soft pairs mostly share only broad Transformer-style themes (tokenizers, configs, model loading, regressions), but they describe different models, code paths, and failure modes. I would not merge any of the proposed pairs as duplicates.", + "confidence": 0.95, + "canonical_issue_reason": "No single issue cleanly represents the cluster; the items span unrelated bugs across tokenization, model loading, training, vision, and generation.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a strong global duplicate target because the cluster lacks a single underlying bug.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43697", + "right": "issue:45216", + "accept": false, + "reason": "Both are regressions, but one is RTDetr output mismatch and the other is Qwen3.5 save_pretrained corruption; different code paths and fixes." + }, + { + "left": "issue:43531", + "right": "issue:44821", + "accept": false, + "reason": "Qwen3-MoE sliding window recurrence bug vs AutoImageProcessor URL loading; unrelated subsystems." 
+ }, + { + "left": "issue:44625", + "right": "issue:44877", + "accept": false, + "reason": "Different config propagation issues for Qwen3.5 versus strict Granite speech config loading." + }, + { + "left": "issue:43452", + "right": "issue:44843", + "accept": false, + "reason": "GGUF tokenizer/model loading breakage is not the same as Mistral regex/offline model_info() behavior." + }, + { + "left": "issue:43819", + "right": "issue:44811", + "accept": false, + "reason": "DAC latent reconstruction mismatch is unrelated to Whisper batch_decode skip_special_tokens handling." + }, + { + "left": "issue:43720", + "right": "issue:44898", + "accept": false, + "reason": "BitNet packed-weight loading and Perceiver interpolation are distinct model-specific bugs." + }, + { + "left": "issue:43688", + "right": "issue:45440", + "accept": false, + "reason": "Auxiliary loss normalization in MoE models is unrelated to DeepSeekV3 native-vs-remote divergence." + }, + { + "left": "issue:43653", + "right": "issue:44568", + "accept": false, + "reason": "BigBirdTokenizer special-token registration is unrelated to mdeberta-v3 BOS/EOS add_special_tokens behavior." + }, + { + "left": "issue:41762", + "right": "issue:44936", + "accept": false, + "reason": "Gemma3 ZeRO-3 load failure and trainer.evaluate() after train() are different failure modes." + }, + { + "left": "issue:44464", + "right": "issue:44743", + "accept": false, + "reason": "Chunked generation with compiled forward is not the same as Qwen3.5 recurrent-state reset in modular_qwen3_5.py." + }, + { + "left": "issue:44466", + "right": "issue:45005", + "accept": false, + "reason": "lm_head serialization/tied-weights issue and translation-model tied-weights issues are related only at a broad level, not the same bug." + }, + { + "left": "issue:43295", + "right": "issue:45356", + "accept": false, + "reason": "Custom model processor/tokenizer regression differs from Kimi-K2.5 tokenizer codec/fix_mistral_regex regression." 
+ }, + { + "left": "issue:24643", + "right": "issue:29127", + "accept": false, + "reason": "DeepSpeed training weight-dimension error is not the same as LayoutLMv3 box validation messaging." + }, + { + "left": "issue:41720", + "right": "issue:43906", + "accept": false, + "reason": "Qwen3 auto device mapping cuda assert and an isolated reproduction of another issue are not the same concrete bug." + }, + { + "left": "issue:40444", + "right": "issue:44811", + "accept": false, + "reason": "IterableDataset multi-image finetuning failure is unrelated to Whisper batch_decode special-token decoding." + }, + { + "left": "issue:43525", + "right": "issue:43643", + "accept": false, + "reason": "Missing pad_token_id on Llama4Config is distinct from trust_remote_code returning incomplete AutoConfig fields." + }, + { + "left": "issue:44361", + "right": "issue:44442", + "accept": false, + "reason": "MLukeTokenizer task AttributeError and FastSpeech2ConformerTokenizer loading failure are different tokenizer implementations." + }, + { + "left": "issue:43577", + "right": "issue:43697", + "accept": false, + "reason": "BLIP2 dtype preservation and RTDetr output differences are unrelated model behavior bugs." + }, + { + "left": "issue:38175", + "right": "issue:43295", + "accept": false, + "reason": "SigLIP2 zero probabilities and processor/tokenizer regression involve different models and symptoms." + }, + { + "left": "issue:43653", + "right": "issue:44361", + "accept": false, + "reason": "BigBirdTokenizer special-token registration and MLukeTokenizer AttributeError are separate tokenizer bugs." + }, + { + "left": "issue:44843", + "right": "issue:44991", + "accept": false, + "reason": "Offline model_info() behavior in _patch_mistral_regex is not the same as tokenizer loading failure for est-roberta." + }, + { + "left": "issue:44479", + "right": "issue:44936", + "accept": false, + "reason": "Qwen video-input regression and trainer lifecycle failure are unrelated." 
+ }, + { + "left": "issue:28282", + "right": "issue:32090", + "accept": false, + "reason": "Generic PyTorch import failure and Trainer _gpu_broadcast_one NoneType error are clearly different." + }, + { + "left": "issue:28282", + "right": "issue:33357", + "accept": false, + "reason": "Missing PyTorch import and MacOS bus error with community CLIP are unrelated." + }, + { + "left": "issue:28282", + "right": "issue:35141", + "accept": false, + "reason": "PyTorch missing import versus embedding reinitialization after resizing are different layers of failure." + }, + { + "left": "issue:43122", + "right": "issue:43906", + "accept": false, + "reason": "Tokenizer version drift and an isolated reproduction of another issue are not the same bug." + }, + { + "left": "issue:44977", + "right": "issue:45356", + "accept": false, + "reason": "Qwen3.5 flash-attention generation regression and Kimi-K2.5 tokenizer regression involve different model families and paths." + }, + { + "left": "issue:30064", + "right": "issue:33666", + "accept": false, + "reason": "Void segmentation map processing and Qwen2-VL multi-GPU training are unrelated vision/training issues." + }, + { + "left": "issue:44464", + "right": "issue:44977", + "accept": false, + "reason": "Compiled-forward chunked generation inconsistency is not the same as Qwen3.5 flash-attention generation failure." + }, + { + "left": "issue:43531", + "right": "issue:43742", + "accept": false, + "reason": "Qwen3-MoE sliding-window state handling is unrelated to MobileLLM-125M key error on load." + }, + { + "left": "issue:43653", + "right": "issue:43927", + "accept": false, + "reason": "BigBirdTokenizer special-token registration and DiaConfig custom token IDs save/load corruption are different tokenizer/config issues." + }, + { + "left": "issue:43577", + "right": "issue:44554", + "accept": false, + "reason": "BLIP2 dtype loading bug and MPS attention correctness issue are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 17, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:32090", + "issue:33666", + "issue:34567", + "issue:36010", + "issue:39692", + "issue:41720", + "issue:43012", + "issue:43257", + "issue:43262", + "issue:43278", + "issue:43299", + "issue:43329", + "issue:43334", + "issue:43452", + "issue:43493", + "issue:43504", + "issue:43525", + "issue:43540", + "issue:43550", + "issue:43572", + "issue:43577", + "issue:43606", + "issue:43688", + "issue:43716", + "issue:43720", + "issue:43742", + "issue:43756", + "issue:43825", + "issue:43844", + "issue:43873", + "issue:43927", + "issue:44361", + "issue:44451", + "issue:44464", + "issue:44479", + "issue:44610", + "issue:44625", + "issue:44821", + "issue:44849", + "issue:44913", + "issue:44964", + "issue:44977", + "issue:45084", + "issue:45200", + "issue:45229", + "issue:45325", + "issue:45357", + "issue:45405" + ], + "soft_pairs": [ + "issue:43329|issue:45325", + "issue:44479|issue:44625", + "issue:43012|issue:43577", + "issue:41720|issue:44464", + "issue:43550|issue:44361", + "issue:43504|issue:43720", + "issue:43452|issue:44451", + "issue:43504|issue:43550", + "issue:32090|issue:33666", + "issue:32090|issue:34567", + "issue:43262|issue:45084", + "issue:43504|issue:44610", + "issue:43278|issue:43688", + "issue:44849|issue:45357", + "issue:45200|issue:45229", + "issue:43334|issue:43572", + "issue:43756|issue:44913", + "issue:43540|issue:43716", + "issue:43493|issue:43825", + "issue:43742|issue:44821", + "issue:33666|issue:36010", + "issue:43525|issue:43572", + "issue:43720|issue:43927", + 
"issue:43606|issue:43927", + "issue:43688|issue:43873", + "issue:43278|issue:43844", + "issue:43540|issue:44977", + "issue:43720|issue:44361", + "issue:43257|issue:43299", + "issue:44964|issue:45405", + "issue:39692|issue:43493", + "issue:43329|issue:43606" + ], + "prepared_review_unit_hash": "ef75676425bce8efaec28f2219f33b26c1e20460a0e842dbe7cbb5e499dddd27", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 32, + "serialized_chars": 37319, + "estimated_input_tokens": 9330, + "estimated_eval_tokens": 18916 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a loose semantic grouping of unrelated Transformers regressions (config-loading, multimodal/video bugs, dtype/offload issues, and compile/runtime errors). 
None of the soft pairs look like exact duplicate reports; at best they are related by broad theme, not the same concrete bug.", + "confidence": 0.41, + "canonical_issue_reason": "No single exact duplicate stands out, but issue 43334 is the best anchor because it cleanly represents the recurring v5 config-loading regression where a model-specific config is missing a pad token field and loading fails with an AttributeError.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43334 is the most representative issue for the dominant configuration-regression theme in this set: it is specific, reproducible, and clearly tied to a loader breakage in dev versions.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43329", + "right": "issue:45325", + "accept": false, + "reason": "Both are Qwen VL/video-related, but they describe different bugs: undefined video-token helpers in multimodal token counting vs wrong still-image temporal position scaling." + }, + { + "left": "issue:44479", + "right": "issue:44625", + "accept": false, + "reason": "One is a video-input regression across several Qwen VL models; the other is a Qwen3.5 config propagation bug for num_labels. Different failure modes and fixes." + }, + { + "left": "issue:43012", + "right": "issue:43577", + "accept": false, + "reason": "Both mention dtype/precision, but one is a PyTorch warning during compile and the other is model/qformer staying float32 after load. Not the same code-path problem." + }, + { + "left": "issue:41720", + "right": "issue:44464", + "accept": false, + "reason": "Different problems: CUDA assert with auto device mapping for Qwen3 vs inconsistent outputs from chunked generation with compiled forward." + }, + { + "left": "issue:43550", + "right": "issue:44361", + "accept": false, + "reason": "Unrelated subsystems: SDPA/torch.compile failure in Bamba vs tokenizer AttributeError on tasks in MLukeTokenizer." 
+ }, + { + "left": "issue:43504", + "right": "issue:43720", + "accept": false, + "reason": "Both are loading-related, but one is a legacy field in Beit semantic segmentation config and the other is packed-weight unpacking for BitNet during accelerate loading." + }, + { + "left": "issue:43452", + "right": "issue:44451", + "accept": false, + "reason": "Both concern model loading, but the concrete bugs differ: gguf_file handling in tokenizer/model loaders vs failure to load a specific pretrained model due to changed behavior." + }, + { + "left": "issue:43504", + "right": "issue:43550", + "accept": false, + "reason": "Different models and different failures: legacy config field loading vs compile/SDPA runtime failure." + }, + { + "left": "issue:32090", + "right": "issue:33666", + "accept": false, + "reason": "Trainer broadcast TypeError and Qwen2-VL multi-GPU training are too broad and do not describe the same underlying issue." + }, + { + "left": "issue:32090", + "right": "issue:34567", + "accept": false, + "reason": "Trainer broadcast NoneType error and TrainerState token counter not updating are unrelated Trainer bugs." + }, + { + "left": "issue:43262", + "right": "issue:45084", + "accept": false, + "reason": "Audio chat-template sampling-rate defaulting and template-node compilation failure are different components and symptoms." + }, + { + "left": "issue:43504", + "right": "issue:44610", + "accept": false, + "reason": "One is a legacy preset-loading issue in BEiT; the other is an OmDet-Turbo processor/model input-size mismatch." + }, + { + "left": "issue:43278", + "right": "issue:43688", + "accept": false, + "reason": "Both involve training/inference behavior, but one is dtype drift between training and evaluate while the other is auxiliary loss normalization in MoE models." 
+ }, + { + "left": "issue:44849", + "right": "issue:45357", + "accept": false, + "reason": "Qwen3.5 hidden-states bug and incorrect visual encoder keys on save_pretrained are distinct regressions." + }, + { + "left": "issue:45200", + "right": "issue:45229", + "accept": false, + "reason": "Different Gemma 4 issues: text-only mm_token_type_ids defaults vs multi-GPU inference OOM." + }, + { + "left": "issue:43334", + "right": "issue:43572", + "accept": false, + "reason": "Both are missing-pad-token-field config regressions, but they affect different config classes and attribute names; the evidence is too broad to treat them as the same bug." + }, + { + "left": "issue:43756", + "right": "issue:44913", + "accept": false, + "reason": "Smollm3 RoPE-layer behavior and GPTNeoX rotary_pct reload behavior are separate architecture/config issues." + }, + { + "left": "issue:43540", + "right": "issue:43716", + "accept": false, + "reason": "Different multimodal bugs: Qwen3OmniMoe video processing ValueError vs Mistral-3 image preprocessor/model dtype mismatch." + }, + { + "left": "issue:43493", + "right": "issue:43825", + "accept": false, + "reason": "SigLIP2 HF-vs-JAX discrepancy and pipeline translation-task error message are unrelated." + }, + { + "left": "issue:43742", + "right": "issue:44821", + "accept": false, + "reason": "MobileLLM key error on load and AutoImageProcessor-from-URL failure are different loader paths with different root causes." + }, + { + "left": "issue:33666", + "right": "issue:36010", + "accept": false, + "reason": "Qwen2-VL multi-GPU training and GenerationMixin import failure are not the same underlying bug." + }, + { + "left": "issue:43525", + "right": "issue:43572", + "accept": false, + "reason": "Both are config attribute regressions after v5, but they involve different models and fields; not enough evidence of one shared fix." 
+ }, + { + "left": "issue:43720", + "right": "issue:43927", + "accept": false, + "reason": "Packed-weight unpacking during accelerate loading and DiaConfig custom token IDs lost on save/load are different bugs." + }, + { + "left": "issue:43606", + "right": "issue:43927", + "accept": false, + "reason": "CPU offload device mismatch and DiaConfig save/load token ID loss are unrelated." + }, + { + "left": "issue:43688", + "right": "issue:43873", + "accept": false, + "reason": "Aux-loss normalization in MoE models and quantization/offloading behavior are different change areas." + }, + { + "left": "issue:43278", + "right": "issue:43844", + "accept": false, + "reason": "Embedding dtype drift in evaluate and abnormal gradient growth under DeepSpeed ZeRO-3 are different training issues." + }, + { + "left": "issue:43540", + "right": "issue:44977", + "accept": false, + "reason": "Both concern Qwen3.5-family behavior, but one is video-input processing and the other is flash-attention generation correctness." + }, + { + "left": "issue:43720", + "right": "issue:44361", + "accept": false, + "reason": "BitNet packed-weight loading and MLukeTokenizer task AttributeError are unrelated." + }, + { + "left": "issue:43257", + "right": "issue:43299", + "accept": false, + "reason": "Both involve Qwen3/MoE loading, but one is accelerate+deepspeed weight conversion and the other is a dev-version loading regression in Qwen3VL MoE. Too different to merge." + }, + { + "left": "issue:44964", + "right": "issue:45405", + "accept": false, + "reason": "Model loading failure and a version-pin release issue are not the same bug or change." + }, + { + "left": "issue:39692", + "right": "issue:43493", + "accept": false, + "reason": "SigLIP2 doc example errors and HF-vs-JAX implementation discrepancy are different issues; one is documentation/example correctness, the other is model parity." 
+ }, + { + "left": "issue:43329", + "right": "issue:43606", + "accept": false, + "reason": "Video token counting bug and Bark CPU offload device mismatch are unrelated." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 18, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:15354", + "issue:33357", + "issue:33666", + "issue:36331", + "issue:38175", + "issue:39692", + "issue:43257", + "issue:43278", + "issue:43295", + "issue:43493", + "issue:43504", + "issue:43540", + "issue:43572", + "issue:43575", + "issue:43582", + "issue:43606", + "issue:43646", + "issue:43653", + "issue:43688", + "issue:43701", + "issue:43746", + "issue:43756", + "issue:43824", + "issue:43844", + "issue:43927", + "issue:43994", + "issue:44060", + "issue:44112", + "issue:44291", + "issue:44361", + "issue:44423", + "issue:44466", + "issue:44488", + "issue:44493", + "issue:44521", + "issue:44554", + "issue:44589", + "issue:44734", + "issue:44792", + "issue:44843", + "issue:44977", + "issue:45084", + "issue:45200", + "issue:45290", + "issue:45405", + "issue:45446", + "issue:45520" + ], + "soft_pairs": [ + "issue:44488|issue:44843", + "issue:44589|issue:45520", + "issue:33666|issue:36331", + "issue:43493|issue:43844", + "issue:43756|issue:44060", + "issue:43646|issue:44466", + "issue:44423|issue:44734", + "issue:43688|issue:43746", + "issue:44521|issue:45290", + "issue:43824|issue:44589", + "issue:33357|issue:33666", + "issue:39692|issue:43295", + "issue:43994|issue:44493", + "issue:43504|issue:43606", + "issue:43504|issue:43653", + "issue:43575|issue:45405", + "issue:43572|issue:44291", + "issue:43927|issue:44361", 
+ "issue:43278|issue:43701", + "issue:44792|issue:45405", + "issue:43257|issue:45405", + "issue:43606|issue:44112", + "issue:15354|issue:38175", + "issue:45084|issue:45520", + "issue:44554|issue:45446", + "issue:43540|issue:45200", + "issue:43582|issue:44977" + ], + "prepared_review_unit_hash": "60577a898ffde87ef031e4d79b015b747d8b15dfea12936bb0d1afc557ba0e81", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 27, + "serialized_chars": 35425, + "estimated_input_tokens": 8857, + "estimated_eval_tokens": 17970 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous. A few pairs share a subsystem or API surface, but none look like the same underlying bug/change, so I reject all soft edges. No PRs are present.", + "confidence": 0.89, + "canonical_issue_reason": "Issue 44521 is the closest representative of the only coherent subtopic in the set (multimodal/chat-template behavior) and is an active user-facing regression, but it is not a duplicate of the other items.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44521 is the best umbrella issue because it is concrete, current, and in a broadly used path; the rest of the set is too mixed to form a better canonical duplicate target.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44488", + "right": "issue:44843", + "accept": false, + "reason": "Both involve loading/tokenizer-related behavior, but one is a model-loading failure and the other is an offline-mode tokenizer patch bug; different failure modes and code paths." 
+ }, + { + "left": "issue:44589", + "right": "issue:45520", + "accept": false, + "reason": "These are unrelated import/storage/type errors in different areas; no shared bug or change." + }, + { + "left": "issue:33666", + "right": "issue:36331", + "accept": false, + "reason": "Multi-GPU training for Qwen2-VL and a CustomTrainer signature regression are unrelated." + }, + { + "left": "issue:43493", + "right": "issue:43844", + "accept": false, + "reason": "SigLIP2 implementation discrepancy and ZeRO-3 gradient growth are different correctness issues." + }, + { + "left": "issue:43756", + "right": "issue:44060", + "accept": false, + "reason": "Both mention model internals, but one is RoPE layer dropping and the other is a tied-weights warning involving Qwen3-Next; not the same bug." + }, + { + "left": "issue:43646", + "right": "issue:44466", + "accept": false, + "reason": "Both touch serialization/tied weights, but one is custom model initialization breakage and the other is device-dependent lm_head serialization; different concrete problems." + }, + { + "left": "issue:44423", + "right": "issue:44734", + "accept": false, + "reason": "Both are server crashes, but one is a multimodal string/.to error and the other is KV-cache tensor indexing; different code paths." + }, + { + "left": "issue:43688", + "right": "issue:43746", + "accept": false, + "reason": "Auxiliary-loss normalization and PEFT checkpoint loading are unrelated." + }, + { + "left": "issue:44521", + "right": "issue:45290", + "accept": false, + "reason": "Both use apply_chat_template, but one is wrong assistant mask output for multimodal inputs and the other is a tool-call/no-content crash; distinct bugs." + }, + { + "left": "issue:43824", + "right": "issue:44589", + "accept": false, + "reason": "An import error for Qwen2.5-VL and a missing Float8 storage type are unrelated." 
+ }, + { + "left": "issue:33357", + "right": "issue:33666", + "accept": false, + "reason": "MacOS bus error on a CLIP model and Qwen2-VL multi-GPU training are different issues." + }, + { + "left": "issue:39692", + "right": "issue:43295", + "accept": false, + "reason": "A SigLIP2 docs example bug and a regression in processor/tokenizer access are not the same underlying problem." + }, + { + "left": "issue:43994", + "right": "issue:44493", + "accept": false, + "reason": "Nonsensical SigLIP2 outputs and unexpected position-id keys point to different model/config issues." + }, + { + "left": "issue:43504", + "right": "issue:43606", + "accept": false, + "reason": "Legacy-field load failure for Beit and a CPU offload device mismatch in bark-small are unrelated." + }, + { + "left": "issue:43504", + "right": "issue:43653", + "accept": false, + "reason": "Legacy field loading for Beit and BigBirdTokenizer special-token registration are different bugs." + }, + { + "left": "issue:43575", + "right": "issue:45405", + "accept": false, + "reason": "Tensor-parallel OOM for a model load and a PEFT version bump are not the same issue." + }, + { + "left": "issue:43572", + "right": "issue:44291", + "accept": false, + "reason": "Missing pad_token_idx in a config and an init_empty_weights/_is_hf_initialized argument mismatch are different compatibility problems." + }, + { + "left": "issue:43927", + "right": "issue:44361", + "accept": false, + "reason": "Config token-ID persistence and MLukeTokenizer AttributeError are unrelated." + }, + { + "left": "issue:43278", + "right": "issue:43701", + "accept": false, + "reason": "Embedding dtype drift between train/eval and resume_from_checkpoint key mismatch are different regressions." + }, + { + "left": "issue:44792", + "right": "issue:45405", + "accept": false, + "reason": "A janus image-generation test failure and a PEFT version pin issue are unrelated." 
+ }, + { + "left": "issue:43257", + "right": "issue:45405", + "accept": false, + "reason": "Qwen3 MOE weight conversion under accelerate/deepspeed and a dependency-version bump are not the same bug." + }, + { + "left": "issue:43606", + "right": "issue:44112", + "accept": false, + "reason": "Both are CI/device-mismatch themed, but one is real CPU offload mismatch and the other is a stale GraniteSpeech test; not duplicates." + }, + { + "left": "issue:15354", + "right": "issue:38175", + "accept": false, + "reason": "JIT scripting/export failure and zero probabilities on SigLIP2 are unrelated." + }, + { + "left": "issue:45084", + "right": "issue:45520", + "accept": false, + "reason": "Template-node compilation error and flash_attn import failure on Python 3.13 are different issues." + }, + { + "left": "issue:44554", + "right": "issue:45446", + "accept": false, + "reason": "MPS attention correctness with mismatched head dims and a PyTorch version check bug for AuxRequest import are unrelated." + }, + { + "left": "issue:43540", + "right": "issue:45200", + "accept": false, + "reason": "Qwen3OmniMoe video processing error and Gemma 4 mm_token_type_ids defaulting are different multimodal bugs." + }, + { + "left": "issue:43582", + "right": "issue:44977", + "accept": false, + "reason": "AppleSilicon caching_allocator_warmup TypeError and Qwen3.5 flash-attention generation failure are not the same underlying defect." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 19, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:37428", + "issue:38175", + "issue:39401", + "issue:40990", + "issue:43232", + "issue:43278", + "issue:43295", + "issue:43334", + "issue:43388", + "issue:43450", + "issue:43493", + "issue:43525", + "issue:43526", + "issue:43572", + "issue:43575", + "issue:43701", + "issue:43824", + "issue:43844", + "issue:43856", + "issue:43873", + "issue:43957", + "issue:43975", + "issue:43994", + "issue:44315", + "issue:44360", + "issue:44442", + "issue:44464", + "issue:44466", + "issue:44514", + "issue:44521", + "issue:44554", + "issue:44561", + "issue:44568", + "issue:44589", + "issue:44610", + "issue:44625", + "issue:44849", + "issue:44898", + "issue:44936", + "issue:44964", + "issue:44987", + "issue:45072", + "issue:45127", + "issue:45198", + "issue:45200", + "issue:45325", + "issue:45356", + "issue:45412" + ], + "soft_pairs": [ + "issue:45072|issue:45198", + "issue:43232|issue:44849", + "issue:43844|issue:44964", + "issue:39401|issue:44568", + "issue:44315|issue:44849", + "issue:37428|issue:43957", + "issue:44554|issue:45356", + "issue:43334|issue:43525", + "issue:43295|issue:44610", + "issue:43493|issue:43701", + "issue:45200|issue:45325", + "issue:44898|issue:45412", + "issue:43873|issue:44464", + "issue:44466|issue:45127", + "issue:40990|issue:44625", + "issue:43388|issue:43526", + "issue:44514|issue:44521", + "issue:43844|issue:44936", + "issue:37428|issue:43824", + "issue:43575|issue:44464", + "issue:43572|issue:44987", + "issue:43295|issue:43450", + "issue:43572|issue:44589", + 
"issue:44442|issue:45198", + "issue:43824|issue:43975", + "issue:43295|issue:43873", + "issue:38175|issue:45412", + "issue:43278|issue:43856", + "issue:43295|issue:43994", + "issue:44360|issue:44561" + ], + "prepared_review_unit_hash": "9ccc3ce2f4f2b3714fe60080e04f08dc563b68bb5843857dfb2ddc3070e85c87", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36888, + "estimated_input_tokens": 9222, + "estimated_eval_tokens": 18700 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is very heterogeneous: it mixes unrelated tokenizer, multimodal, loading, training, and memory issues. None of the soft pairs looks like the same underlying bug or change, so I rejected all of them.", + "confidence": 0.96, + "canonical_issue_reason": "issue:44521 is the best cluster representative: it is active, well-discussed, and describes a concrete regression in a core multimodal API with clear repro details.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44521 has the strongest combination of impact, discussion, and reproducibility among this set, even though it does not duplicate the other issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45072", + "right": "issue:45198", + "accept": false, + "reason": "Different failures: dtype mismatch in bfloat16 inference vs save_pretrained/tokenization problems in Wav2Vec2." + }, + { + "left": "issue:43232", + "right": "issue:44849", + "accept": false, + "reason": "Different code paths: generation kwargs sync issue vs Qwen3.5 hidden-states output bug." 
+ }, + { + "left": "issue:43844", + "right": "issue:44964", + "accept": false, + "reason": "Unrelated problems: ZeRO-3 gradient growth vs model-loading failure for Phi-4 multimodal." + }, + { + "left": "issue:39401", + "right": "issue:44568", + "accept": false, + "reason": "Both are tokenizer bugs, but they affect different models and behaviors: offset mapping vs missing BOS/EOS on add_special_tokens." + }, + { + "left": "issue:44315", + "right": "issue:44849", + "accept": false, + "reason": "Liger kernel not applied during model_init is unrelated to Qwen3.5 output_hidden_states behavior." + }, + { + "left": "issue:37428", + "right": "issue:43957", + "accept": false, + "reason": "ImportError for a flash-attention helper is not the same as torch.device('meta') model-loading breakage." + }, + { + "left": "issue:44554", + "right": "issue:45356", + "accept": false, + "reason": "MPS attention correctness issue is unrelated to Kimi-K2.5 tokenizer codec/regression handling." + }, + { + "left": "issue:43334", + "right": "issue:43525", + "accept": false, + "reason": "Both mention missing pad_token_id, but they are different model families and distinct config objects, not the same bug." + }, + { + "left": "issue:43295", + "right": "issue:44610", + "accept": false, + "reason": "Processor.tokenizer/images-to-tokenizer regression is unrelated to OmDet-Turbo image-size mismatch." + }, + { + "left": "issue:43493", + "right": "issue:43701", + "accept": false, + "reason": "SigLIP2 implementation discrepancy is unrelated to resume_from_checkpoint key mismatch." + }, + { + "left": "issue:45200", + "right": "issue:45325", + "accept": false, + "reason": "Gemma 4 token-type defaults and Qwen2.5-VL rope-index scaling are different multimodal config bugs." + }, + { + "left": "issue:44898", + "right": "issue:45412", + "accept": false, + "reason": "Perceiver interpolation failure is unrelated to RT-DETR memory not being released." 
+ }, + { + "left": "issue:43873", + "right": "issue:44464", + "accept": false, + "reason": "Quantization/offloading behavior is a different problem from chunked generation with compiled forward." + }, + { + "left": "issue:44466", + "right": "issue:45127", + "accept": false, + "reason": "lm_head serialization inconsistency is unrelated to LoRA merge collapse with extended vocabulary." + }, + { + "left": "issue:40990", + "right": "issue:44625", + "accept": false, + "reason": "High perplexity on gpt-oss-20b is unrelated to Qwen3.5 num_labels propagation." + }, + { + "left": "issue:43388", + "right": "issue:43526", + "accept": false, + "reason": "Both involve labels, but one drops tuple labels in gather_for_metrics while the other reduces BEiT labels incorrectly." + }, + { + "left": "issue:44514", + "right": "issue:44521", + "accept": false, + "reason": "Same API area, but different symptoms: batched padding=False crash vs all-zero assistant masks for multimodal inputs." + }, + { + "left": "issue:43844", + "right": "issue:44936", + "accept": false, + "reason": "ZeRO-3 gradient inflation and trainer.evaluate-after-train failure are unrelated training issues." + }, + { + "left": "issue:37428", + "right": "issue:43824", + "accept": false, + "reason": "Different import/loading failures: flash-attention helper ImportError vs missing Qwen2_5_VLForConditionalGeneration export." + }, + { + "left": "issue:43575", + "right": "issue:44464", + "accept": false, + "reason": "OOM on loading Qwen2-57B with tp is not the same as chunked generation inconsistency with compiled forward." + }, + { + "left": "issue:43572", + "right": "issue:44987", + "accept": false, + "reason": "StableLmConfig pad_token_idx regression is unrelated to loading physical-intelligence/fast on transformers>=5.1.0." + }, + { + "left": "issue:43295", + "right": "issue:43450", + "accept": false, + "reason": "Custom processor/tokenizer regression is not the same as batched video processor shape handling." 
+ }, + { + "left": "issue:43572", + "right": "issue:44589", + "accept": false, + "reason": "Missing pad_token_idx in StableLmConfig is unrelated to Float8 storage lookup failure." + }, + { + "left": "issue:44442", + "right": "issue:45198", + "accept": false, + "reason": "AutoTokenizer loading failure for FastSpeech2ConformerTokenizer is unrelated to Wav2Vec2 save/tokenization failure." + }, + { + "left": "issue:43824", + "right": "issue:43975", + "accept": false, + "reason": "Qwen2_5_VL import failure is unrelated to deepseek-coder detokenization regression." + }, + { + "left": "issue:43295", + "right": "issue:43873", + "accept": false, + "reason": "Processor/tokenizer regression and quantization offloading bug are different subsystems and different failures." + }, + { + "left": "issue:38175", + "right": "issue:45412", + "accept": false, + "reason": "SigLIP2 zero-probabilities bug is unrelated to RT-DETR memory release behavior." + }, + { + "left": "issue:43278", + "right": "issue:43856", + "accept": false, + "reason": "Embedding dtype drift in eval is not the same as Qwen3 MoE training memory inefficiency." + }, + { + "left": "issue:43295", + "right": "issue:43994", + "accept": false, + "reason": "Custom processor/tokenizer regression is unrelated to SigLIP2 nonsensical outputs." + }, + { + "left": "issue:44360", + "right": "issue:44561", + "accept": false, + "reason": "DSA indexer/ReLU discussion is unrelated to removal of is_torch_fx_available breaking trust_remote_code models." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 20, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:29942", + "issue:32090", + "issue:37428", + "issue:38175", + "issue:39692", + "issue:42915", + "issue:43262", + "issue:43278", + "issue:43335", + "issue:43404", + "issue:43425", + "issue:43450", + "issue:43493", + "issue:43526", + "issue:43550", + "issue:43575", + "issue:43606", + "issue:43611", + "issue:43653", + "issue:43819", + "issue:43824", + "issue:43825", + "issue:43827", + "issue:43901", + "issue:43931", + "issue:44112", + "issue:44351", + "issue:44448", + "issue:44462", + "issue:44493", + "issue:44561", + "issue:44568", + "issue:44610", + "issue:44743", + "issue:44779", + "issue:44849", + "issue:44855", + "issue:44857", + "issue:44991", + "issue:45003", + "issue:45083", + "issue:45127", + "issue:45200", + "issue:45245", + "issue:45290", + "issue:45310", + "issue:45381", + "issue:45478" + ], + "soft_pairs": [ + "issue:43827|issue:43901", + "issue:44849|issue:45003", + "issue:38175|issue:43493", + "issue:42915|issue:43824", + "issue:43653|issue:44112", + "issue:44568|issue:44991", + "issue:43450|issue:43493", + "issue:45127|issue:45245", + "issue:45290|issue:45381", + "issue:43819|issue:45083", + "issue:43450|issue:43575", + "issue:44462|issue:44561", + "issue:44561|issue:44779", + "issue:44610|issue:44857", + "issue:43819|issue:44112", + "issue:43262|issue:43450", + "issue:44448|issue:44743", + "issue:43606|issue:45083", + "issue:43278|issue:43825", + "issue:37428|issue:43425", + "issue:29942|issue:32090", + "issue:45310|issue:45478", + "issue:43819|issue:44493", + 
"issue:43611|issue:43931", + "issue:39692|issue:43526", + "issue:43335|issue:43550", + "issue:37428|issue:44351", + "issue:43450|issue:43526", + "issue:44855|issue:45083", + "issue:45200|issue:45381", + "issue:43404|issue:45200", + "issue:39692|issue:44493", + "issue:43335|issue:43653", + "issue:44448|issue:44779" + ], + "prepared_review_unit_hash": "97499c222d4baee33989bbca5f48ebb47a1e9a6d038cbaf1b4b0e10d6e95b63d", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 34, + "serialized_chars": 37953, + "estimated_input_tokens": 9489, + "estimated_eval_tokens": 19234 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly heterogeneous issue set. The only soft-edge duplicate that is well supported from the packet is the Qwen3.5 MoE from_pretrained pair (issue:45310 / issue:45478); the other candidate pairs look like broader thematic similarities rather than the same underlying bug.", + "confidence": 0.94, + "canonical_issue_reason": "issue:45310 is the best canonical issue because it is the earlier report of the same Qwen3.5 MoE from_pretrained failure and has the same concrete title/scope as issue:45478.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45310 is the best representative issue for this duplicate pair: it states the concrete failure directly, predates issue:45478, and the packet supports treating 45478 as the later near-identical report.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43827", + "right": "issue:43901", + "accept": false, + "reason": "Both are v5/docs-related, but they describe different concrete problems: leftover pipeline references versus return_all_scores docs/behavior." 
+ }, + { + "left": "issue:44849", + "right": "issue:45003", + "accept": false, + "reason": "Different models and different code paths; one is a Qwen3.5 hidden-states bug, the other is unsafe sys.modules access." + }, + { + "left": "issue:38175", + "right": "issue:43493", + "accept": false, + "reason": "Both mention SigLIP2, but the reported failures are different: zero probabilities versus HF/JAX implementation discrepancy." + }, + { + "left": "issue:42915", + "right": "issue:43824", + "accept": false, + "reason": "Different model families and failure modes; FP8 config failure is unrelated to the Qwen2.5-VL import error." + }, + { + "left": "issue:43653", + "right": "issue:44112", + "accept": false, + "reason": "BigBirdTokenizer special-token registration and GraniteSpeech CI device override are unrelated bugs." + }, + { + "left": "issue:44568", + "right": "issue:44991", + "accept": false, + "reason": "These are separate tokenizer regressions affecting different models and different behaviors." + }, + { + "left": "issue:43450", + "right": "issue:43493", + "accept": false, + "reason": "A batched video-shape bug is not the same as the SigLIP2 implementation mismatch." + }, + { + "left": "issue:45127", + "right": "issue:45245", + "accept": false, + "reason": "LoRA merge/vocab collapse and the category-count runtime error are different defects." + }, + { + "left": "issue:45290", + "right": "issue:45381", + "accept": false, + "reason": "Different multimodal bugs: chat-template tool-call handling versus Qwen2.5-VL video position ids." + }, + { + "left": "issue:43819", + "right": "issue:45083", + "accept": false, + "reason": "DAC latent/STE mismatch and qwen3_omni_moe feature-length behavior do not describe the same code-path problem." + }, + { + "left": "issue:43450", + "right": "issue:43575", + "accept": false, + "reason": "Video processor batch-shape handling and TP OOM on model load are unrelated." 
+ }, + { + "left": "issue:44462", + "right": "issue:44561", + "accept": false, + "reason": "Tokenizer.json loading and removal of is_torch_fx_available affect different subsystems." + }, + { + "left": "issue:44561", + "right": "issue:44779", + "accept": false, + "reason": "Both are v5 regressions, but one is trust_remote_code compatibility and the other is Deepseek tokenizer correctness." + }, + { + "left": "issue:44610", + "right": "issue:44857", + "accept": false, + "reason": "OmDet-Turbo processor sizing and LwDetrImageLoss AMP crash are different problems." + }, + { + "left": "issue:43819", + "right": "issue:44112", + "accept": false, + "reason": "Audio/DAC latent handling is unrelated to GraniteSpeech CI device override behavior." + }, + { + "left": "issue:43262", + "right": "issue:43450", + "accept": false, + "reason": "Different processor bugs: audio sampling-rate defaulting versus batched video output shape." + }, + { + "left": "issue:44448", + "right": "issue:44743", + "accept": false, + "reason": "Pegasus v4/v5 output difference and qwen3_5 recurrent-state reset are unrelated." + }, + { + "left": "issue:43606", + "right": "issue:45083", + "accept": false, + "reason": "CPU offload device mismatch is not the same as qwen3_omni_moe feature-length behavior." + }, + { + "left": "issue:43278", + "right": "issue:43825", + "accept": false, + "reason": "Embedding dtype regression and pipeline error-message behavior are different issues." + }, + { + "left": "issue:37428", + "right": "issue:43425", + "accept": false, + "reason": "Two version-related errors, but with different missing symbols / incompatibility surfaces." + }, + { + "left": "issue:29942", + "right": "issue:32090", + "accept": false, + "reason": "Flash Attention 2 test failures and a Trainer _gpu_broadcast_one NoneType error are unrelated." 
+ }, + { + "left": "issue:45310", + "right": "issue:45478", + "accept": true, + "reason": "Same title and same concrete Qwen3.5 MoE from_pretrained failure; 45478 reads as a later duplicate report of 45310." + }, + { + "left": "issue:43819", + "right": "issue:44493", + "accept": false, + "reason": "DAC latent/STE mismatch and position-id warnings across many models are different bugs." + }, + { + "left": "issue:43611", + "right": "issue:43931", + "accept": false, + "reason": "base_model_prefix loading breakage and Qwen3-VL weight-shape mismatch are distinct model-loading problems." + }, + { + "left": "issue:39692", + "right": "issue:43526", + "accept": false, + "reason": "SigLIP2 documentation/model-processor issues and BEiT fast label reduction are unrelated." + }, + { + "left": "issue:43335", + "right": "issue:43550", + "accept": false, + "reason": "SwitchTransformers sparse-layer config and Bamba SDPA torch.compile failure are different defects." + }, + { + "left": "issue:37428", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but for different missing symbols and likely different regressions." + }, + { + "left": "issue:43450", + "right": "issue:43526", + "accept": false, + "reason": "Video batch-shape handling and BEiT label reduction are unrelated." + }, + { + "left": "issue:44855", + "right": "issue:45083", + "accept": false, + "reason": "Python 3.13 torch.jit parsing and qwen3_omni_moe helper behavior are unrelated." + }, + { + "left": "issue:45200", + "right": "issue:45381", + "accept": false, + "reason": "Gemma 4 mm_token_type_ids defaulting and Qwen2.5-VL video position ids are different multimodal bugs." + }, + { + "left": "issue:43404", + "right": "issue:45200", + "accept": false, + "reason": "Mistral3 weight tying and Gemma 4 text-only fine-tuning inputs are unrelated." 
+ }, + { + "left": "issue:39692", + "right": "issue:44493", + "accept": false, + "reason": "SigLIP2 example errors and widespread position-id warnings are not the same underlying issue." + }, + { + "left": "issue:43335", + "right": "issue:43653", + "accept": false, + "reason": "SwitchTransformers sparse-layer creation and BigBirdTokenizer special-token registration are unrelated." + }, + { + "left": "issue:44448", + "right": "issue:44779", + "accept": false, + "reason": "Pegasus generation differences and Deepseek tokenizer regressions are distinct." + } + ] + }, + "evaluator_result": { + "accept": false, + "feedback": "The main duplicate call is plausible, but the summary is not fully grounded: issue:45310 is not open in the packet (it is closed), so the canonical-issue rationale overstates the evidence. Please revise any status claims and keep the duplicate verdict limited to what the packet supports." + }, + "evaluator_used": true, + "retried": true, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 21, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:25251", + "issue:29127", + "issue:29942", + "issue:32090", + "issue:33357", + "issue:34689", + "issue:35141", + "issue:37428", + "issue:38175", + "issue:40444", + "issue:41720", + "issue:43065", + "issue:43066", + "issue:43278", + "issue:43329", + "issue:43335", + "issue:43504", + "issue:43526", + "issue:43550", + "issue:43606", + "issue:43701", + "issue:43720", + "issue:43723", + "issue:43873", + "issue:43976", + "issue:43994", + "issue:44112", + "issue:44292", + "issue:44448", + "issue:44451", + "issue:44462", + "issue:44466", + "issue:44493", + "issue:44554", + "issue:44568", + "issue:44857", + "issue:44977", + 
"issue:44987", + "issue:45072", + "issue:45083", + "issue:45229", + "issue:45372", + "issue:45406", + "issue:45412", + "issue:45440", + "issue:45459", + "issue:45491", + "issue:45520" + ], + "soft_pairs": [ + "issue:44977|issue:45229", + "issue:29127|issue:32090", + "issue:44292|issue:45520", + "issue:38175|issue:44493", + "issue:43335|issue:43504", + "issue:43335|issue:45083", + "issue:43550|issue:44112", + "issue:43335|issue:45072", + "issue:44568|issue:45491", + "issue:43720|issue:44112", + "issue:44448|issue:44977", + "issue:40444|issue:43526", + "issue:43723|issue:44554", + "issue:43335|issue:44112", + "issue:34689|issue:35141", + "issue:43606|issue:44857", + "issue:44857|issue:45072", + "issue:45372|issue:45406", + "issue:43873|issue:44493", + "issue:43066|issue:44462", + "issue:43873|issue:45229", + "issue:43278|issue:43329", + "issue:43701|issue:45083", + "issue:29942|issue:35141", + "issue:43550|issue:45083", + "issue:41720|issue:45229", + "issue:37428|issue:44987", + "issue:32090|issue:33357", + "issue:43976|issue:45440", + "issue:44451|issue:45520", + "issue:45372|issue:45459", + "issue:44448|issue:44466", + "issue:43065|issue:45412", + "issue:43065|issue:43994", + "issue:25251|issue:32090" + ], + "prepared_review_unit_hash": "c729571a62cc493c123410759a22d1ffce878ede92ddd5ec3b4a66b72674cd2f", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 35, + "serialized_chars": 38319, + "estimated_input_tokens": 9580, + "estimated_eval_tokens": 19416 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "All suggested soft edges look like false positives: they share broad language (model loading, attention, quantization, CI, etc.) 
but describe different failures, code paths, or model families. No duplicate cluster is strong enough to merge.", + "confidence": 0.93, + "canonical_issue_reason": "No single canonical issue emerges; the candidate pairs are mostly unrelated bugs with only superficial subsystem overlap.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a safe global representative because the set is heterogeneous across distinct models, loaders, and runtime errors.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44977", + "right": "issue:45229", + "accept": false, + "reason": "Both are model/inference problems, but one is flash-attention generation corruption and the other is multi-GPU CUDA OOM; different failure modes and fixes." + }, + { + "left": "issue:29127", + "right": "issue:32090", + "accept": false, + "reason": "One is a LayoutLMv3 validation/error-message issue, the other is a Trainer broadcast TypeError; unrelated code paths." + }, + { + "left": "issue:44292", + "right": "issue:45520", + "accept": false, + "reason": "Qwen-3 NVFP4 runtime failure and a Python 3.13 flash_attn import KeyError are distinct issues with different causes." + }, + { + "left": "issue:38175", + "right": "issue:44493", + "accept": false, + "reason": "SigLIP2 zero probabilities and unexpected position-id keys are separate model-behavior bugs with no clear shared root cause." + }, + { + "left": "issue:43335", + "right": "issue:43504", + "accept": false, + "reason": "A SwitchTransformers config edge case and a BEiT pretrained-load legacy-field issue are unrelated." + }, + { + "left": "issue:43335", + "right": "issue:45083", + "accept": false, + "reason": "Sparse-layer construction in SwitchTransformers and a qwen3_omni_moe feature-length helper bug are different code paths." 
+ }, + { + "left": "issue:43550", + "right": "issue:44112", + "accept": false, + "reason": "torch.compile/SDPA failure in Bamba is unrelated to a GraniteSpeech CI test stale-device override." + }, + { + "left": "issue:43335", + "right": "issue:45072", + "accept": false, + "reason": "SwitchTransformers sparse-layer config and dtype mismatch CI failures in other models are not the same bug." + }, + { + "left": "issue:44568", + "right": "issue:45491", + "accept": false, + "reason": "Tokenizer special-token behavior and Gemma3 NaN embeddings under sliding-window attention are unrelated." + }, + { + "left": "issue:43720", + "right": "issue:44112", + "accept": false, + "reason": "Packed-weight loading failure in BitNet does not match a GraniteSpeech CI device-override test issue." + }, + { + "left": "issue:44448", + "right": "issue:44977", + "accept": false, + "reason": "Pegasus v4/v5 output drift is a model regression, while the other issue is Qwen3.5 flash-attention generation failure." + }, + { + "left": "issue:40444", + "right": "issue:43526", + "accept": false, + "reason": "Qwen2.5-VL multi-image iterable dataset finetuning and BEiT reduce_labels returning one label are different preprocessing bugs." + }, + { + "left": "issue:43723", + "right": "issue:44554", + "accept": false, + "reason": "AutoTokenizer loading in v5 and MPS attention correctness with mismatched head dims are unrelated." + }, + { + "left": "issue:43335", + "right": "issue:44112", + "accept": false, + "reason": "SwitchTransformers sparse-layer config and GraniteSpeech CI test failure are unrelated." + }, + { + "left": "issue:34689", + "right": "issue:35141", + "accept": false, + "reason": "Llama 3.2 vision model loading breakage and embedding reinitialization after resizing are distinct issues." + }, + { + "left": "issue:43606", + "right": "issue:44857", + "accept": false, + "reason": "CPU offload device mismatch and float16 AMP CUDA crash are different runtime/device problems." 
+ }, + { + "left": "issue:44857", + "right": "issue:45072", + "accept": false, + "reason": "LwDetrImageLoss AMP crash and SwitchTransformers/TimmWrapperModel bfloat16 dtype mismatch are separate failures." + }, + { + "left": "issue:45372", + "right": "issue:45406", + "accept": false, + "reason": "Gemma4 processor loading blocked by a mistral_common import error is not the same as Gemma4Processor missing _tokenizer in serve." + }, + { + "left": "issue:43873", + "right": "issue:44493", + "accept": false, + "reason": "Quantization offloading behavior and unexpected position-id keys are not the same underlying bug." + }, + { + "left": "issue:43066", + "right": "issue:44462", + "accept": false, + "reason": "Wrong tokenizer decoder type in v5 and AutoTokenizer ignoring tokenizer.json are related to tokenizers broadly but describe different failures." + }, + { + "left": "issue:43873", + "right": "issue:45229", + "accept": false, + "reason": "Quantization offloading and Gemma4 multi-GPU OOM are both memory-related at a high level, but not the same concrete bug." + }, + { + "left": "issue:43278", + "right": "issue:43329", + "accept": false, + "reason": "Embedding dtype drift in evaluate and a multimodal token-count helper using undefined video variables are unrelated." + }, + { + "left": "issue:43701", + "right": "issue:45083", + "accept": false, + "reason": "resume_from_checkpoint key mismatch and qwen3_omni_moe feature-length behavior are different parts of the codebase." + }, + { + "left": "issue:29942", + "right": "issue:35141", + "accept": false, + "reason": "Flash Attention 2 test failures and embedding reinitialization after resize are unrelated." + }, + { + "left": "issue:43550", + "right": "issue:45083", + "accept": false, + "reason": "Bamba SDPA compile failure and qwen3_omni_moe feature-length helper behavior are distinct bugs." 
+ }, + { + "left": "issue:41720", + "right": "issue:45229", + "accept": false, + "reason": "Qwen3 auto device mapping cudaErrorAssert and Gemma4 multi-GPU OOM are different runtime failures." + }, + { + "left": "issue:37428", + "right": "issue:44987", + "accept": false, + "reason": "Flash attention utility import breakage and loading physical-intelligence/fast are not the same issue." + }, + { + "left": "issue:32090", + "right": "issue:33357", + "accept": false, + "reason": "Trainer _gpu_broadcast_one NoneType error and MacOS bus error with community CLIP are unrelated." + }, + { + "left": "issue:43976", + "right": "issue:45440", + "accept": false, + "reason": "Python version compatibility issue and DeepseekV3MoE divergence from remote implementation are different classes of bugs." + }, + { + "left": "issue:44451", + "right": "issue:45520", + "accept": false, + "reason": "Cannot load a specific tokenizer/model and flash_attn KeyError on Python 3.13 are unrelated." + }, + { + "left": "issue:45372", + "right": "issue:45459", + "accept": false, + "reason": "Gemma4 processor import problem and tokenizer error masking when protobuf is absent are different error paths." + }, + { + "left": "issue:44448", + "right": "issue:44466", + "accept": false, + "reason": "Pegasus output regression and lm_head.weight serialization inconsistency are distinct v4/v5 issues." + }, + { + "left": "issue:43065", + "right": "issue:45412", + "accept": false, + "reason": "Dummy Conv2d in Sam3PixelDecoder and RT-DETR memory not being released are unrelated model implementation issues." + }, + { + "left": "issue:43065", + "right": "issue:43994", + "accept": false, + "reason": "Sam3PixelDecoder dummy Conv2d and SigLIP2 nonsensical outputs are not the same bug." + }, + { + "left": "issue:25251", + "right": "issue:32090", + "accept": false, + "reason": "Pipeline top_k nesting change and Trainer GPU broadcast TypeError are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 22, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:29127", + "issue:29942", + "issue:30064", + "issue:32090", + "issue:33290", + "issue:33357", + "issue:34567", + "issue:35141", + "issue:36010", + "issue:39692", + "issue:42915", + "issue:42994", + "issue:43257", + "issue:43262", + "issue:43278", + "issue:43334", + "issue:43381", + "issue:43421", + "issue:43504", + "issue:43526", + "issue:43531", + "issue:43653", + "issue:43701", + "issue:43756", + "issue:43824", + "issue:43994", + "issue:44186", + "issue:44206", + "issue:44265", + "issue:44279", + "issue:44361", + "issue:44442", + "issue:44464", + "issue:44479", + "issue:44610", + "issue:44743", + "issue:44857", + "issue:44871", + "issue:44977", + "issue:45072", + "issue:45103", + "issue:45198", + "issue:45229", + "issue:45245", + "issue:45372", + "issue:45412", + "issue:45440", + "issue:45491" + ], + "soft_pairs": [ + "issue:44186|issue:45198", + "issue:43278|issue:43381", + "issue:43653|issue:44743", + "issue:44265|issue:45198", + "issue:44361|issue:45198", + "issue:43257|issue:43278", + "issue:44871|issue:44977", + "issue:43994|issue:45412", + "issue:43421|issue:45491", + "issue:42915|issue:42994", + "issue:44206|issue:44479", + "issue:43756|issue:45440", + "issue:43504|issue:44857", + "issue:44610|issue:45412", + "issue:29942|issue:30064", + "issue:33357|issue:39692", + "issue:45103|issue:45372", + "issue:32090|issue:33290", + "issue:29127|issue:34567", + "issue:45072|issue:45229", + "issue:43824|issue:44279", + "issue:43381|issue:43701", + "issue:43653|issue:44442", + 
"issue:43334|issue:43531", + "issue:43526|issue:44186", + "issue:44464|issue:45491", + "issue:43262|issue:44206", + "issue:44977|issue:45245", + "issue:29942|issue:36010", + "issue:29127|issue:33357", + "issue:29127|issue:35141" + ], + "prepared_review_unit_hash": "2bfec5282f4862bf48952da2cb856cb58505e8bf595199b75bb75aab0a579968", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 31, + "serialized_chars": 37065, + "estimated_input_tokens": 9267, + "estimated_eval_tokens": 18790 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous: the soft-similarity pairs share broad themes (tokenizers, Qwen, vision, trainer, quantization) but not the same concrete code-path bug. I would not merge any of the proposed soft pairs as duplicates.", + "confidence": 0.91, + "canonical_issue_reason": "issue:45198 is the most self-contained, concrete bug report in the set (clear failure mode plus save_pretrained/tokenization reproduction), so it is the best single representative if one must be chosen, though it does not subsume the rest.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45198 is a strong issue to anchor triage because it describes a specific, reproducible runtime failure with a narrow surface area and clear user impact.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44186", + "right": "issue:45198", + "accept": false, + "reason": "Both are tokenizer failures, but they hit different models and code paths: LayoutLMv2 NER/padding vs Wav2Vec2 save_pretrained/tokenization." 
+ }, + { + "left": "issue:43278", + "right": "issue:43381", + "accept": false, + "reason": "One is a dtype change between train/eval, the other is a gradient-checkpointing restriction in eval mode; related context, not the same bug." + }, + { + "left": "issue:43653", + "right": "issue:44743", + "accept": false, + "reason": "BigBird tokenizer special-token registration and Qwen3-MoE recurrent-state resetting are unrelated subsystems and failure modes." + }, + { + "left": "issue:44265", + "right": "issue:45198", + "accept": false, + "reason": "torch.export with torch_compilable_check is a model export problem; 45198 is a tokenizer/save_pretrained issue." + }, + { + "left": "issue:44361", + "right": "issue:45198", + "accept": false, + "reason": "Different tokenizer/model families and different failures: MLuke task AttributeError vs Wav2Vec2 save/tokenization failure." + }, + { + "left": "issue:43257", + "right": "issue:43278", + "accept": false, + "reason": "Both involve Qwen/precision-adjacent complaints, but one is MoE weight conversion under accelerate+deepspeed and the other is eval-time dtype drift." + }, + { + "left": "issue:44871", + "right": "issue:44977", + "accept": false, + "reason": "Gemma EOS config mismatch and Qwen3.5 flash-attention generation issues are separate model-specific bugs." + }, + { + "left": "issue:43994", + "right": "issue:45412", + "accept": false, + "reason": "SigLIP2 inference nonsense and RT-DETR memory not released are unrelated symptoms and code paths." + }, + { + "left": "issue:43421", + "right": "issue:45491", + "accept": false, + "reason": "Runtime special-token post-processor updates and Gemma3 NaN embeddings from sliding-window attention are different tokenizer/model bugs." + }, + { + "left": "issue:42915", + "right": "issue:42994", + "accept": false, + "reason": "Both mention quantization, but one is Qwen3Moe with FineGrainedFP8Config and the other is quantized model saving; not the same concrete defect." 
+ }, + { + "left": "issue:44206", + "right": "issue:44479", + "accept": false, + "reason": "Feature extractor unsupported-arg crash and Qwen VL video regression are distinct input/architecture problems." + }, + { + "left": "issue:43756", + "right": "issue:45440", + "accept": false, + "reason": "Both are model implementation divergences, but for different models and different missing behaviors; not mergeable as one fix." + }, + { + "left": "issue:43504", + "right": "issue:44857", + "accept": false, + "reason": "Beit pretrained-load legacy field handling and LwDetr AMP/CUDA loss crash are unrelated." + }, + { + "left": "issue:44610", + "right": "issue:45412", + "accept": false, + "reason": "Processor image-size mismatch and RT-DETR memory retention are different bugs with no shared code-path." + }, + { + "left": "issue:29942", + "right": "issue:30064", + "accept": false, + "reason": "Flash Attention 2 test failures and void segmentation-map processing are separate CI/runtime issues." + }, + { + "left": "issue:33357", + "right": "issue:39692", + "accept": false, + "reason": "Both involve vision-language models, but one is a MacOS bus error on CLIP and the other is a SigLIP2 doc example with model/processor and quantization errors." + }, + { + "left": "issue:45103", + "right": "issue:45372", + "accept": false, + "reason": "auto_docstring annotation handling and Gemma 4 processor loading/import errors are unrelated packaging/runtime problems." + }, + { + "left": "issue:32090", + "right": "issue:33290", + "accept": false, + "reason": "Trainer broadcast NoneType and deepspeed+adafactor OOM are different training failures." + }, + { + "left": "issue:29127", + "right": "issue:34567", + "accept": false, + "reason": "LayoutLMv3 error messaging and TrainerState token-count tracking are unrelated issues." + }, + { + "left": "issue:45072", + "right": "issue:45229", + "accept": false, + "reason": "dtype mismatch in bfloat16 inference and multi-GPU CUDA OOM are not the same bug." 
+ }, + { + "left": "issue:43824", + "right": "issue:44279", + "accept": false, + "reason": "Missing Qwen2.5-VL import and a generic transformers dependency issue are too broad to treat as the same failure." + }, + { + "left": "issue:43381", + "right": "issue:43701", + "accept": false, + "reason": "Gradient checkpointing in eval mode and resume-from-checkpoint key mismatch do not share the same root cause." + }, + { + "left": "issue:43653", + "right": "issue:44442", + "accept": false, + "reason": "BigBird tokenizer special-token registration and AutoTokenizer failing to load FastSpeech2ConformerTokenizer are different tokenizer-loading bugs." + }, + { + "left": "issue:43334", + "right": "issue:43531", + "accept": false, + "reason": "Qwen3-VL pad_token_id loading failure and Qwen3-MoE sliding-window behavior are separate model/config issues." + }, + { + "left": "issue:43526", + "right": "issue:44186", + "accept": false, + "reason": "BeitImageProcessorFast label reduction and LayoutLMv2 tokenizer crashes are unrelated preprocessing bugs." + }, + { + "left": "issue:44464", + "right": "issue:45491", + "accept": false, + "reason": "Compiled-forward chunked generation inconsistency and Gemma3 NaN embeddings from mixed-length batching are different generation paths." + }, + { + "left": "issue:43262", + "right": "issue:44206", + "accept": false, + "reason": "Audio chat-template sampling-rate defaulting and unsupported center-arg crash are unrelated processor bugs." + }, + { + "left": "issue:44977", + "right": "issue:45245", + "accept": false, + "reason": "Qwen3.5 flash-attention generation and category-count overflow are entirely different failures." + }, + { + "left": "issue:29942", + "right": "issue:36010", + "accept": false, + "reason": "Flash Attention test failures and GenerationMixin import failure are separate issues." 
+ }, + { + "left": "issue:29127", + "right": "issue:33357", + "accept": false, + "reason": "LayoutLMv3 error messaging and CLIP MacOS bus error are unrelated." + }, + { + "left": "issue:29127", + "right": "issue:35141", + "accept": false, + "reason": "LayoutLMv3 box-validation messaging and embedding reinitialization after resize are different code paths and symptoms." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 23, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:29942", + "issue:33290", + "issue:33666", + "issue:34567", + "issue:34689", + "issue:35141", + "issue:39692", + "issue:41628", + "issue:42915", + "issue:43232", + "issue:43316", + "issue:43381", + "issue:43388", + "issue:43493", + "issue:43525", + "issue:43526", + "issue:43575", + "issue:43611", + "issue:43646", + "issue:43673", + "issue:43701", + "issue:43723", + "issue:43761", + "issue:43827", + "issue:43873", + "issue:43931", + "issue:43994", + "issue:44077", + "issue:44079", + "issue:44188", + "issue:44206", + "issue:44387", + "issue:44442", + "issue:44479", + "issue:44610", + "issue:44743", + "issue:44811", + "issue:44857", + "issue:44898", + "issue:44964", + "issue:45003", + "issue:45072", + "issue:45216", + "issue:45290", + "issue:45372", + "issue:45440", + "issue:45479", + "issue:45491" + ], + "soft_pairs": [ + "issue:43761|issue:44079", + "issue:44442|issue:45072", + "issue:42915|issue:43931", + "issue:43526|issue:44857", + "issue:43232|issue:43388", + "issue:43526|issue:44610", + "issue:44479|issue:45290", + "issue:44743|issue:44811", + "issue:43723|issue:45479", + "issue:45003|issue:45440", + 
"issue:43575|issue:43701", + "issue:43931|issue:45216", + "issue:43646|issue:44077", + "issue:43761|issue:44188", + "issue:43316|issue:43525", + "issue:43575|issue:44387", + "issue:43994|issue:44206", + "issue:44964|issue:45440", + "issue:29942|issue:33666", + "issue:39692|issue:45491", + "issue:44442|issue:44898", + "issue:33290|issue:34567", + "issue:43673|issue:44743", + "issue:34689|issue:43611", + "issue:43381|issue:43873", + "issue:43493|issue:43827", + "issue:44077|issue:44479", + "issue:33290|issue:35141", + "issue:33290|issue:34689", + "issue:41628|issue:45372" + ], + "prepared_review_unit_hash": "5ea9d4784489ce40c2dbdf05b0015ee7a815f5979c09f5a438c9eaf9d68a8ff0", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36717, + "estimated_input_tokens": 9180, + "estimated_eval_tokens": 18616 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the soft pairs mostly connect unrelated issues that share only broad keywords (loading, OOM, config, cache, etc.) rather than the same bug. I would not merge any of the candidate pairs.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43761", + "right": "issue:44079", + "accept": false, + "reason": "Different bugs: CLIPVisionModel hidden_states regression vs ModelOutput key assignment behavior." + }, + { + "left": "issue:44442", + "right": "issue:45072", + "accept": false, + "reason": "Tokenizer loading failure and bfloat16 dtype mismatch are unrelated problems." 
+ }, + { + "left": "issue:42915", + "right": "issue:43931", + "accept": false, + "reason": "Different models and failures: Qwen3Moe FP8 config vs Qwen3-VL weight shape mismatch." + }, + { + "left": "issue:43526", + "right": "issue:44857", + "accept": false, + "reason": "BeitImageProcessorFast label reduction bug is unrelated to LwDetrImageLoss AMP crash." + }, + { + "left": "issue:43232", + "right": "issue:43388", + "accept": false, + "reason": "Generation kwargs handling and metric label gathering are distinct code paths." + }, + { + "left": "issue:43526", + "right": "issue:44610", + "accept": false, + "reason": "Different processor bugs: label reduction vs image size mismatch." + }, + { + "left": "issue:44479", + "right": "issue:45290", + "accept": false, + "reason": "Video input regression and chat template tool-call crash are separate issues." + }, + { + "left": "issue:44743", + "right": "issue:44811", + "accept": false, + "reason": "Qwen recurrent-state reset and Whisper batch_decode token skipping are unrelated." + }, + { + "left": "issue:43723", + "right": "issue:45479", + "accept": false, + "reason": "Tokenizer loading error and sequence-classification zero-loss bug do not match." + }, + { + "left": "issue:45003", + "right": "issue:45440", + "accept": false, + "reason": "sys.modules access bug and DeepseekV3MoE divergence are unrelated." + }, + { + "left": "issue:43575", + "right": "issue:43701", + "accept": false, + "reason": "Tensor-parallel OOM and resume_from_checkpoint key mismatch are different failures." + }, + { + "left": "issue:43931", + "right": "issue:45216", + "accept": false, + "reason": "Both involve model loading/saving, but the concrete bugs differ: shape mismatch vs bad checkpoint save." + }, + { + "left": "issue:43646", + "right": "issue:44077", + "accept": false, + "reason": "Custom model initialization regression is not the same as patchtsmixer post_init handling." 
+ }, + { + "left": "issue:43761", + "right": "issue:44188", + "accept": false, + "reason": "Hidden_states regression and attention-kernel divergence under compile are unrelated." + }, + { + "left": "issue:43316", + "right": "issue:43525", + "accept": false, + "reason": "Different config APIs for different models; not the same missing-attribute bug." + }, + { + "left": "issue:43575", + "right": "issue:44387", + "accept": false, + "reason": "Both mention OOM, but one is tensor-parallel load OOM and the other is int4 reserved-memory growth." + }, + { + "left": "issue:43994", + "right": "issue:44206", + "accept": false, + "reason": "SigLIP2 bad outputs and LasrFeatureExtractor center-arg crash are separate issues." + }, + { + "left": "issue:44964", + "right": "issue:45440", + "accept": false, + "reason": "Phi-4 multimodal loading and DeepseekV3MoE divergence are unrelated." + }, + { + "left": "issue:29942", + "right": "issue:33666", + "accept": false, + "reason": "Flash Attention 2 test failures and Qwen2-VL multi-GPU training are not the same bug." + }, + { + "left": "issue:39692", + "right": "issue:45491", + "accept": false, + "reason": "Documentation/example errors are unrelated to Gemma3 NaN embeddings." + }, + { + "left": "issue:44442", + "right": "issue:44898", + "accept": false, + "reason": "Tokenizer load failure and Perceiver non-default-resolution failure are different problems." + }, + { + "left": "issue:33290", + "right": "issue:34567", + "accept": false, + "reason": "Adafactor/DeepSpeed OOM is unrelated to TrainerState token counter not updating." + }, + { + "left": "issue:43673", + "right": "issue:44743", + "accept": false, + "reason": "Both mention cache, but they concern different models and different cache/state logic." + }, + { + "left": "issue:34689", + "right": "issue:43611", + "accept": false, + "reason": "Two separate model-loading regressions with different causes and versions." 
+ }, + { + "left": "issue:43381", + "right": "issue:43873", + "accept": false, + "reason": "Gradient checkpointing eval-mode restriction is unrelated to quantization offloading." + }, + { + "left": "issue:43493", + "right": "issue:43827", + "accept": false, + "reason": "SigLIP2 JAX parity and docs still using pipeline() are unrelated." + }, + { + "left": "issue:44077", + "right": "issue:44479", + "accept": false, + "reason": "patchtsmixer post_init handling and Qwen video-input regression do not share a code path." + }, + { + "left": "issue:33290", + "right": "issue:35141", + "accept": false, + "reason": "OOM in DeepSpeed AdaFactor and embedding reinit after resize are unrelated." + }, + { + "left": "issue:33290", + "right": "issue:34689", + "accept": false, + "reason": "Different failures: optimizer OOM vs model-loading breakage." + }, + { + "left": "issue:41628", + "right": "issue:45372", + "accept": false, + "reason": "Both are import errors, but for different symbols, modules, and processor-loading flows." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 24, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:16998", + "issue:22355", + "issue:25251", + "issue:28282", + "issue:33290", + "issue:34689", + "issue:36010", + "issue:36331", + "issue:39692", + "issue:41950", + "issue:42175", + "issue:43065", + "issue:43295", + "issue:43316", + "issue:43381", + "issue:43388", + "issue:43404", + "issue:43441", + "issue:43475", + "issue:43540", + "issue:43643", + "issue:43688", + "issue:43716", + "issue:43720", + "issue:43749", + "issue:43761", + "issue:43825", + "issue:43856", + "issue:43873", + "issue:43931", + "issue:44077", + "issue:44162", + "issue:44190", + "issue:44206", + "issue:44442", + "issue:44479", + "issue:44496", + "issue:44514", + "issue:44534", + "issue:44617", + "issue:44792", + "issue:44855", + "issue:44871", + "issue:44877", + "issue:44913", + "issue:45127", + "issue:45137", + "issue:45200" + ], + "soft_pairs": [ + "issue:43720|issue:44442", + "issue:43749|issue:43931", + "issue:43643|issue:44913", + "issue:43873|issue:44871", + "issue:39692|issue:44206", + "issue:44190|issue:44496", + "issue:33290|issue:36331", + "issue:43381|issue:43856", + "issue:44855|issue:45137", + "issue:34689|issue:36010", + "issue:43316|issue:44877", + "issue:43540|issue:44514", + "issue:44162|issue:44792", + "issue:43388|issue:43688", + "issue:42175|issue:43316", + "issue:43404|issue:43441", + "issue:16998|issue:22355", + "issue:25251|issue:28282", + "issue:43316|issue:43716", + "issue:43065|issue:44617", + "issue:41950|issue:43475", + "issue:43295|issue:43825", + "issue:45127|issue:45200", + 
"issue:43761|issue:44479", + "issue:44077|issue:44534" + ], + "prepared_review_unit_hash": "0f3b53f93390986d3682abf2eaa5de210c407358b4438bcb9354c12892273305", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 25, + "serialized_chars": 35154, + "estimated_input_tokens": 8789, + "estimated_eval_tokens": 17834 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a heterogeneous grab-bag of unrelated Transformers issues; none of the soft pairs look like the same underlying bug/change closely enough to merge. The most representative issue is the high-activity core loading regression around FSDP CPU RAM efficient loading.", + "confidence": 0.34, + "canonical_issue_reason": "issue:43749 is the strongest anchor: it has the highest discussion/inbound activity and describes a core loading-path regression with broad impact and a clear bug report.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43749 is the best overall representative because it is a substantive, widely impactful runtime bug rather than a niche model-specific, docs, or API-edge case report.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43720", + "right": "issue:44442", + "accept": false, + "reason": "Different subsystems and failures: BitNet packed-weight loading vs AutoTokenizer loading a specific tokenizer class." + }, + { + "left": "issue:43749", + "right": "issue:43931", + "accept": false, + "reason": "Both involve loading, but one is FSDP CPU-efficient loading and the other is a Qwen3-VL shape mismatch; not the same bug." 
+ }, + { + "left": "issue:43643", + "right": "issue:44913", + "accept": false, + "reason": "Unrelated config issues: trust_remote_code missing fields vs GPTNeoX rotary_pct persistence on reload." + }, + { + "left": "issue:43873", + "right": "issue:44871", + "accept": false, + "reason": "Quantization/offloading behavior is unrelated to Gemma-3 eos_token_id config inconsistency." + }, + { + "left": "issue:39692", + "right": "issue:44206", + "accept": false, + "reason": "Docs example errors for SigLIP2 are not the same as LasrFeatureExtractor passing an unsupported argument." + }, + { + "left": "issue:44190", + "right": "issue:44496", + "accept": false, + "reason": "Local dataset loading in a training script is unrelated to an unrecognized model/config loading failure." + }, + { + "left": "issue:33290", + "right": "issue:36331", + "accept": false, + "reason": "OOM during Adafactor+DeepSpeed training is a different problem from a CustomTrainer signature mismatch." + }, + { + "left": "issue:43381", + "right": "issue:43856", + "accept": false, + "reason": "Gradient checkpointing in eval mode and Qwen3 MoE memory usage are different code paths and symptoms." + }, + { + "left": "issue:44855", + "right": "issue:45137", + "accept": false, + "reason": "Python 3.13 DeBERTa import parsing bug is unrelated to DeepSpeed ZeRO3 deque underflow." + }, + { + "left": "issue:34689", + "right": "issue:36010", + "accept": false, + "reason": "Llama 3.2 model loading regression is not the same as failing to import GenerationMixin from a moved module." + }, + { + "left": "issue:43316", + "right": "issue:44877", + "accept": false, + "reason": "Gemma3TextConfig API inconsistency is different from strict config rejection for granite_speech." + }, + { + "left": "issue:43540", + "right": "issue:44514", + "accept": false, + "reason": "Qwen3OmniMoe video-processing ValueError is unrelated to Qwen2_5_VL batched chat-template padding behavior." 
+ }, + { + "left": "issue:44162", + "right": "issue:44792", + "accept": false, + "reason": "ESM2 model breakage is not the same as a Janus test failure around image generation." + }, + { + "left": "issue:43388", + "right": "issue:43688", + "accept": false, + "reason": "Metric gathering drops tuple labels; auxiliary-loss normalization in OLMoE/GPT Oss is a different training-loss issue." + }, + { + "left": "issue:42175", + "right": "issue:43316", + "accept": false, + "reason": "Missing TensorFlow from a torch extra install has nothing to do with Gemma3TextConfig API discrepancies." + }, + { + "left": "issue:43404", + "right": "issue:43441", + "accept": false, + "reason": "These are different model bugs: lm_head tying in Mistral3 versus FlashAttention failure in Ministral-3." + }, + { + "left": "issue:16998", + "right": "issue:22355", + "accept": false, + "reason": "A model_max_length question for DeBERTa-V3 is unrelated to a missing transformers.onnx module import." + }, + { + "left": "issue:25251", + "right": "issue:28282", + "accept": false, + "reason": "Pipeline top_k output-shape behavior is unrelated to an AutoModel import error requiring PyTorch." + }, + { + "left": "issue:43316", + "right": "issue:43716", + "accept": false, + "reason": "Config API discrepancy and Mistral-3 image-preprocessor dtype mismatch are distinct issues." + }, + { + "left": "issue:43065", + "right": "issue:44617", + "accept": false, + "reason": "A dummy Conv2d in Sam3PixelDecoder is not the same as CUDA OOM in Sam3Video." + }, + { + "left": "issue:41950", + "right": "issue:43475", + "accept": false, + "reason": "Video-classification pipeline processor lookup and missing fpn_position_embeddings are different Sam3/video bugs." + }, + { + "left": "issue:43295", + "right": "issue:43825", + "accept": false, + "reason": "Custom processor.tokenizer regression is unrelated to an incorrect translation-task error message." 
+ }, + { + "left": "issue:45127", + "right": "issue:45200", + "accept": false, + "reason": "LoRA merge collapse with extended vocab is a different model-training issue from Gemma 4 mm_token_type_ids defaults." + }, + { + "left": "issue:43761", + "right": "issue:44479", + "accept": false, + "reason": "CLIPVisionModel hidden_states regression is unrelated to Qwen video-input regressions." + }, + { + "left": "issue:44077", + "right": "issue:44534", + "accept": false, + "reason": "patchtsmixer post_init validation and non-persistent buffer initialization are different v5 behavior changes." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 25, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:32090", + "issue:35141", + "issue:41720", + "issue:42617", + "issue:43065", + "issue:43295", + "issue:43388", + "issue:43452", + "issue:43525", + "issue:43531", + "issue:43550", + "issue:43575", + "issue:43582", + "issue:43606", + "issue:43646", + "issue:43650", + "issue:43701", + "issue:43742", + "issue:43761", + "issue:43827", + "issue:43873", + "issue:43906", + "issue:43931", + "issue:43994", + "issue:44060", + "issue:44190", + "issue:44220", + "issue:44242", + "issue:44464", + "issue:44479", + "issue:44496", + "issue:44521", + "issue:44534", + "issue:44610", + "issue:44734", + "issue:44792", + "issue:44871", + "issue:44913", + "issue:44933", + "issue:44938", + "issue:44964", + "issue:44977", + "issue:45245", + "issue:45290", + "issue:45362", + "issue:45381", + "issue:45406", + "issue:45542" + ], + "soft_pairs": [ + "issue:43550|issue:43701", + "issue:44734|issue:45406", + 
"issue:43295|issue:44871", + "issue:43606|issue:43701", + "issue:43452|issue:44496", + "issue:43646|issue:44913", + "issue:43906|issue:45290", + "issue:43761|issue:44242", + "issue:43575|issue:43742", + "issue:42617|issue:43575", + "issue:43931|issue:44060", + "issue:44521|issue:45381", + "issue:43575|issue:43873", + "issue:44479|issue:44734", + "issue:44792|issue:44977", + "issue:32090|issue:35141", + "issue:44464|issue:44871", + "issue:43525|issue:43531", + "issue:44933|issue:45362", + "issue:43065|issue:44610", + "issue:43827|issue:43994", + "issue:44534|issue:44913", + "issue:41720|issue:45245", + "issue:44938|issue:45542", + "issue:43650|issue:44220", + "issue:44190|issue:44964", + "issue:43388|issue:43582" + ], + "prepared_review_unit_hash": "7e46caf48aa19e73a253f83321bbcfb32838b198fd003a6b30db31a00cd1de64", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 27, + "serialized_chars": 35604, + "estimated_input_tokens": 8901, + "estimated_eval_tokens": 18058 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is heterogeneous: the issues span unrelated models, regressions, and subsystems. 
None of the suggested soft pairs look like the same underlying bug or change, so all should be rejected.", + "confidence": 0.96, + "canonical_issue_reason": "No clear canonical issue: the items are distinct bug reports across different code paths, models, and failure modes, with no strong duplicate center.", + "canonical_pr_reason": null, + "best_issue_reason": "No single issue stands out as a global representative because the set is not a cohesive duplicate cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43550", + "right": "issue:43701", + "accept": false, + "reason": "Different failures: torch.compile+SDPA crash vs resume_from_checkpoint key mismatch." + }, + { + "left": "issue:44734", + "right": "issue:45406", + "accept": false, + "reason": "Different areas: KV-cache continuation indexing vs Gemma4Processor missing _tokenizer." + }, + { + "left": "issue:43295", + "right": "issue:44871", + "accept": false, + "reason": "Both are regression/config issues, but one is processor/tokenizer API breakage and the other is eos_token_id mismatch." + }, + { + "left": "issue:43606", + "right": "issue:43701", + "accept": false, + "reason": "CPU offload device mismatch is unrelated to checkpoint resume key mismatch." + }, + { + "left": "issue:43452", + "right": "issue:44496", + "accept": false, + "reason": "gguf_file loading paths and unrecognized-model config errors are different model-loading problems." + }, + { + "left": "issue:43646", + "right": "issue:44913", + "accept": false, + "reason": "Custom initialization breakage is not the same as GPTNeoX rotary_pct not persisting on reload." + }, + { + "left": "issue:43906", + "right": "issue:45290", + "accept": false, + "reason": "An isolated reproduction of another issue is not the same as apply_chat_template crashing on tool-call messages." 
+ }, + { + "left": "issue:43761", + "right": "issue:44242", + "accept": false, + "reason": "CLIPVision hidden_states regression is unrelated to load balancing loss handling in MoE routing." + }, + { + "left": "issue:43575", + "right": "issue:43742", + "accept": false, + "reason": "OOM on tensor-parallel load and key error on loading MobileLLM are different load-time problems." + }, + { + "left": "issue:42617", + "right": "issue:43575", + "accept": false, + "reason": "3d_parallel.py failure is unrelated to Qwen2-57B tensor-parallel OOM." + }, + { + "left": "issue:43931", + "right": "issue:44060", + "accept": false, + "reason": "Weight-shape mismatch loading Qwen3-VL differs from tied-weights warning on Qwen3-Next." + }, + { + "left": "issue:44521", + "right": "issue:45381", + "accept": false, + "reason": "All-zero assistant masks in chat templating is distinct from Qwen2.5-VL video vision_position_ids regression." + }, + { + "left": "issue:43575", + "right": "issue:43873", + "accept": false, + "reason": "Tensor-parallel OOM and quantization offload behavior are not the same bug." + }, + { + "left": "issue:44479", + "right": "issue:44734", + "accept": false, + "reason": "Video-input regression for Qwen VL models is unrelated to serve KV-cache continuation indexing." + }, + { + "left": "issue:44792", + "right": "issue:44977", + "accept": false, + "reason": "Failed janus image-generation test is not the same as Qwen3.5 flash-attention generation bug." + }, + { + "left": "issue:32090", + "right": "issue:35141", + "accept": false, + "reason": "Trainer _gpu_broadcast_one NoneType error and embedding reinit on post_init are different bugs." + }, + { + "left": "issue:44464", + "right": "issue:44871", + "accept": false, + "reason": "Chunked generation with compiled forward is unrelated to eos_token_id configuration inconsistency." 
+ }, + { + "left": "issue:43525", + "right": "issue:43531", + "accept": false, + "reason": "Missing pad_token_id in Llama4Config and sliding_window issues in Qwen3-MoE are different model config bugs." + }, + { + "left": "issue:44933", + "right": "issue:45362", + "accept": false, + "reason": "Missing import from image_utils is unrelated to Qwen3.5 chat crashes." + }, + { + "left": "issue:43065", + "right": "issue:44610", + "accept": false, + "reason": "Dummy Conv2d in Sam3PixelDecoder and OmDet-Turbo input-size mismatch are different model-specific issues." + }, + { + "left": "issue:43827", + "right": "issue:43994", + "accept": false, + "reason": "Docs still referencing pipeline() is unrelated to SigLIP2 nonsensical outputs." + }, + { + "left": "issue:44534", + "right": "issue:44913", + "accept": false, + "reason": "Non-persistent buffer junk in v5 is not the same as rotary_pct resetting on reload." + }, + { + "left": "issue:41720", + "right": "issue:45245", + "accept": false, + "reason": "Qwen3 auto device mapping cuda assert and category-count limit error are unrelated runtime failures." + }, + { + "left": "issue:44938", + "right": "issue:45542", + "accept": false, + "reason": "Python 3.14 load failure is not the same as missing TensorFlow backend when only tensorboard is installed." + }, + { + "left": "issue:43650", + "right": "issue:44220", + "accept": false, + "reason": "ADD THE DATA is not a meaningful duplicate of _torch_extract_fbank_features()." + }, + { + "left": "issue:44190", + "right": "issue:44964", + "accept": false, + "reason": "Local dataset loading in an example script is unrelated to loading Phi-4-multimodal-instruct." + }, + { + "left": "issue:43388", + "right": "issue:43582", + "accept": false, + "reason": "gather_for_metrics tuple-label truncation is unrelated to Apple Silicon warmup TypeError." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 26, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:36246", + "issue:38175", + "issue:38617", + "issue:39692", + "issue:41628", + "issue:41950", + "issue:42175", + "issue:43065", + "issue:43116", + "issue:43295", + "issue:43335", + "issue:43404", + "issue:43479", + "issue:43540", + "issue:43550", + "issue:43643", + "issue:43644", + "issue:43761", + "issue:43854", + "issue:43976", + "issue:44016", + "issue:44077", + "issue:44112", + "issue:44186", + "issue:44190", + "issue:44220", + "issue:44246", + "issue:44279", + "issue:44355", + "issue:44466", + "issue:44496", + "issue:44683", + "issue:44734", + "issue:44743", + "issue:44792", + "issue:44855", + "issue:44871", + "issue:44877", + "issue:44913", + "issue:45003", + "issue:45127", + "issue:45216", + "issue:45276", + "issue:45335", + "issue:45357", + "issue:45372", + "issue:45375", + "issue:45446" + ], + "soft_pairs": [ + "issue:43761|issue:44743", + "issue:45127|issue:45357", + "issue:44355|issue:44855", + "issue:43644|issue:44913", + "issue:44077|issue:44877", + "issue:43540|issue:44734", + "issue:43976|issue:44855", + "issue:44496|issue:45372", + "issue:43295|issue:45276", + "issue:43550|issue:44190", + "issue:43335|issue:44855", + "issue:43404|issue:45216", + "issue:43116|issue:44190", + "issue:41950|issue:44016", + "issue:39692|issue:44190", + "issue:43644|issue:44871", + "issue:36246|issue:41628", + "issue:43479|issue:45375", + "issue:44186|issue:44734", + "issue:36246|issue:38617", + "issue:38175|issue:43065", + "issue:44246|issue:45003", + "issue:44016|issue:44220", + 
"issue:44466|issue:44913", + "issue:44683|issue:45446", + "issue:42175|issue:44279", + "issue:44112|issue:44792", + "issue:43854|issue:44792", + "issue:43295|issue:45335", + "issue:43643|issue:44496" + ], + "prepared_review_unit_hash": "6612d019d4a883a74825af21ff3eadfe6a609fa6f817e46079492f5915863ae8", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36997, + "estimated_input_tokens": 9250, + "estimated_eval_tokens": 18756 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: most pairs share only broad subsystem similarity (imports, serialization, multimodal, config handling) but not the same concrete bug. I would not merge any of the soft edges, and there is no clear canonical issue/PR for the whole set.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43761", + "right": "issue:44743", + "accept": false, + "reason": "Both are regression-style model bugs, but one is CLIPVision hidden states and the other is Qwen3.5 recurrent state reset; different code paths." + }, + { + "left": "issue:45127", + "right": "issue:45357", + "accept": false, + "reason": "Both involve tied/serialized weights, but they affect different models and different save/merge paths; not the same bug." + }, + { + "left": "issue:44355", + "right": "issue:44855", + "accept": false, + "reason": "One is compiled Python file errors, the other is a Python 3.13 torch.jit indentation parsing issue; unrelated failures." 
+ }, + { + "left": "issue:43644", + "right": "issue:44913", + "accept": false, + "reason": "Both concern state persistence on reload, but one is non-persistent buffers and the other is a specific GPTNeoX config field; too different to merge." + }, + { + "left": "issue:44077", + "right": "issue:44877", + "accept": false, + "reason": "Both mention config handling, but one is a patchtsmixer post_init policy issue and the other is granite_speech loading under strict config; different bugs." + }, + { + "left": "issue:43540", + "right": "issue:44734", + "accept": false, + "reason": "Qwen3OmniMoe video-input validation and transformers serve KV-cache indexing are unrelated code paths." + }, + { + "left": "issue:43976", + "right": "issue:44855", + "accept": false, + "reason": "One is a Python/version compatibility report, the other is a JIT parsing/import syntax problem; not the same underlying issue." + }, + { + "left": "issue:44496", + "right": "issue:45372", + "accept": false, + "reason": "Both are model-loading failures, but one is missing model_type and the other is a processor import dependency break; different root causes." + }, + { + "left": "issue:43295", + "right": "issue:45276", + "accept": false, + "reason": "Both mention embedding/token resizing, but one is a processor/tokenizer regression and the other is Gemma4 embedding propagation; not the same concrete change." + }, + { + "left": "issue:43550", + "right": "issue:44190", + "accept": false, + "reason": "Torch.compile/SDPA model failure and local dataset loading in an image-classification script are unrelated." + }, + { + "left": "issue:43335", + "right": "issue:44855", + "accept": false, + "reason": "SwitchTransformers sparse-layer config bug is unrelated to the Python 3.13 torch.jit parsing issue." 
+ }, + { + "left": "issue:43404", + "right": "issue:45216", + "accept": false, + "reason": "Both involve model weight handling, but one is Mistral3 lm_head tying and the other is Qwen3.5 save_pretrained checkpoint contents; not mergeable as one fix." + }, + { + "left": "issue:43116", + "right": "issue:44190", + "accept": false, + "reason": "A multi-label classification example bug and a local dataset loading issue in a different script are unrelated example problems." + }, + { + "left": "issue:41950", + "right": "issue:44016", + "accept": false, + "reason": "Video-classification pipeline processor lookup and a notebook syntax error are completely different classes of bug." + }, + { + "left": "issue:39692", + "right": "issue:44190", + "accept": false, + "reason": "SigLIP2 docs/example issues and image-classification script dataset loading are unrelated example failures." + }, + { + "left": "issue:43644", + "right": "issue:44871", + "accept": false, + "reason": "One is a global buffer serialization regression; the other is a Gemma3 eos_token_id config mismatch. Same broad area, different bugs." + }, + { + "left": "issue:36246", + "right": "issue:41628", + "accept": false, + "reason": "Both are import errors, but one is a missing class in a model submodule and the other is a missing top-level AutoImageProcessor export; different symbols and causes." + }, + { + "left": "issue:43479", + "right": "issue:45375", + "accept": false, + "reason": "Both are config-field handling issues, but one is default vision/audio config initialization and the other is a missing deepstack_visual_indexes field in Qwen3_5MoeVisionConfig." + }, + { + "left": "issue:44186", + "right": "issue:44734", + "accept": false, + "reason": "Tokenizer padding/crash in LayoutLMv2 and KV-cache continuation in transformers serve are unrelated." 
+ }, + { + "left": "issue:36246", + "right": "issue:38617", + "accept": false, + "reason": "Both are import errors, but they affect different modules and different missing names; no shared underlying fix is evident." + }, + { + "left": "issue:38175", + "right": "issue:43065", + "accept": false, + "reason": "Unexpected zero probabilities in SigLIP2 and a dummy Conv2d in Sam3PixelDecoder are unrelated model-specific bugs." + }, + { + "left": "issue:44246", + "right": "issue:45003", + "accept": false, + "reason": "Import-time slowness and unsafe sys.modules access are related only loosely; not the same concrete bug or fix." + }, + { + "left": "issue:44016", + "right": "issue:44220", + "accept": false, + "reason": "Notebook syntax error and _torch_extract_fbank_features() failure are unrelated." + }, + { + "left": "issue:44466", + "right": "issue:44913", + "accept": false, + "reason": "Both concern serialization/reload behavior, but one is tied-weight serialization and the other is a GPTNeoX config default reset; different fixes." + }, + { + "left": "issue:44683", + "right": "issue:45446", + "accept": false, + "reason": "Compiled flex_attention on torch>=2.9 and a PyTorch version check for AuxRequest import are different compatibility bugs." + }, + { + "left": "issue:42175", + "right": "issue:44279", + "accept": false, + "reason": "TensorFlow missing from a torch extra install and a generic dependency issue are not the same dependency bug." + }, + { + "left": "issue:44112", + "right": "issue:44792", + "accept": false, + "reason": "CI stale device override failure in GraniteSpeech and janus image-generation test failure are different test issues." + }, + { + "left": "issue:43854", + "right": "issue:44792", + "accept": false, + "reason": "Model loading failure for GLM-4.7-Flash and a janus generate-images test failure are unrelated." 
+ }, + { + "left": "issue:43295", + "right": "issue:45335", + "accept": false, + "reason": "Both are resize_token_embeddings regressions, but they hit different model architectures and different internal embeddings." + }, + { + "left": "issue:43643", + "right": "issue:44496", + "accept": false, + "reason": "A trust_remote_code config-field loss and an unrecognized-model error are separate loading issues with different root causes." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 27, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:41950", + "issue:42907", + "issue:43295", + "issue:43299", + "issue:43441", + "issue:43479", + "issue:43525", + "issue:43526", + "issue:43550", + "issue:43577", + "issue:43650", + "issue:43673", + "issue:43824", + "issue:43854", + "issue:43901", + "issue:43976", + "issue:44016", + "issue:44062", + "issue:44075", + "issue:44112", + "issue:44242", + "issue:44279", + "issue:44297", + "issue:44336", + "issue:44351", + "issue:44368", + "issue:44462", + "issue:44464", + "issue:44496", + "issue:44521", + "issue:44683", + "issue:44779", + "issue:44871", + "issue:44877", + "issue:44933", + "issue:44964", + "issue:44987", + "issue:45020", + "issue:45042", + "issue:45081", + "issue:45200", + "issue:45216", + "issue:45245", + "issue:45278", + "issue:45325", + "issue:45357", + "issue:45405", + "issue:45542" + ], + "soft_pairs": [ + "issue:43299|issue:45216", + "issue:42907|issue:43441", + "issue:44016|issue:44062", + "issue:44112|issue:44964", + "issue:44871|issue:45245", + "issue:43673|issue:44336", + "issue:43976|issue:44987", + "issue:43526|issue:43901", + 
"issue:44496|issue:45042", + "issue:43824|issue:44933", + "issue:43976|issue:44279", + "issue:43299|issue:45357", + "issue:43577|issue:45216", + "issue:43901|issue:44496", + "issue:44075|issue:44368", + "issue:44521|issue:45325", + "issue:43854|issue:44964", + "issue:44683|issue:45542", + "issue:44242|issue:45200", + "issue:44464|issue:45245", + "issue:43976|issue:45405", + "issue:44351|issue:45278", + "issue:41950|issue:43650", + "issue:43479|issue:43525", + "issue:41950|issue:45020", + "issue:44496|issue:44877", + "issue:44462|issue:45081", + "issue:44297|issue:44462", + "issue:43550|issue:44964", + "issue:44279|issue:45405", + "issue:43299|issue:43854", + "issue:43295|issue:44779", + "issue:44462|issue:44779" + ], + "prepared_review_unit_hash": "5212e2e145c5ba8d397fad818a7bc701447cdd0a5d1bc0842a59f4bedc4bdd31", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 33, + "serialized_chars": 37579, + "estimated_input_tokens": 9395, + "estimated_eval_tokens": 19046 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is a loose collection of unrelated Transformers issues, mostly version-specific regressions and model-loading bugs. 
None of the soft pairs look like true duplicates or mergeable PR-equivalents, so I would reject all soft edges.", + "confidence": 0.2, + "canonical_issue_reason": "issue:43824 is the strongest representative issue here: it is concrete, well-scoped, has the most discussion/inbound references, and clearly describes a reproducible import failure with a specific model class.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43824 is the best single issue to keep as the representative bug report because it is precise, high-signal, and appears to have broader community impact than the other items.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43299", + "right": "issue:45216", + "accept": false, + "reason": "Different regressions on different Qwen model families: one is loading Qwen3VL MoE models, the other is save_pretrained corruption for Qwen3.5." + }, + { + "left": "issue:42907", + "right": "issue:43441", + "accept": false, + "reason": "Both mention Ministral, but one is about saving dequantized models while the other is a FlashAttention loading/runtime failure; not the same bug." + }, + { + "left": "issue:44016", + "right": "issue:44062", + "accept": false, + "reason": "Notebook syntax error and AddedToken keyword collision are unrelated failures in different code paths." + }, + { + "left": "issue:44112", + "right": "issue:44964", + "accept": false, + "reason": "A stale CI test in GraniteSpeech and a Phi-4 multimodal loading failure are distinct issues." + }, + { + "left": "issue:44871", + "right": "issue:45245", + "accept": false, + "reason": "Gemma eos_token_id mismatch and a categories-limit runtime error are unrelated." + }, + { + "left": "issue:43673", + "right": "issue:44336", + "accept": false, + "reason": "Chunked_prefill cache handling and ANSI formatting in loading_report are different subsystems and symptoms." 
+ }, + { + "left": "issue:43976", + "right": "issue:44987", + "accept": false, + "reason": "Python version compatibility and loading physical-intelligence/fast are not the same underlying bug." + }, + { + "left": "issue:43526", + "right": "issue:43901", + "accept": false, + "reason": "BeitImageProcessorFast label reduction is unrelated to a docs mismatch about return_all_scores." + }, + { + "left": "issue:44496", + "right": "issue:45042", + "accept": false, + "reason": "One is an unrecognized model/config issue; the other is an image processor torchvision dependency regression." + }, + { + "left": "issue:43824", + "right": "issue:44933", + "accept": false, + "reason": "Different import failures: missing Qwen2_5_VL export versus a nonexistent image_utils import." + }, + { + "left": "issue:43976", + "right": "issue:44279", + "accept": false, + "reason": "A Python support issue and a generic dependency issue are too broad and distinct to be duplicates." + }, + { + "left": "issue:43299", + "right": "issue:45357", + "accept": false, + "reason": "Qwen3VL loading and Qwen3.5 visual-encoder save_pretrained corruption are separate regressions." + }, + { + "left": "issue:43577", + "right": "issue:45216", + "accept": false, + "reason": "Blip2 dtype propagation and Qwen3.5 checkpoint saving are unrelated." + }, + { + "left": "issue:43901", + "right": "issue:44496", + "accept": false, + "reason": "Documentation drift for return_all_scores is not the same as a model auto-mapping/load failure." + }, + { + "left": "issue:44075", + "right": "issue:44368", + "accept": false, + "reason": "SGD optimizer args being ignored and a tie_word_embeddings warning are unrelated." + }, + { + "left": "issue:44521", + "right": "issue:45325", + "accept": false, + "reason": "Assistant mask generation for multimodal chat templates is unrelated to Qwen2.5-VL rope index scaling." 
+ }, + { + "left": "issue:43854", + "right": "issue:44964", + "accept": false, + "reason": "Both are model-loading issues, but they affect different models and failure points; no evidence they share the same code path." + }, + { + "left": "issue:44683", + "right": "issue:45542", + "accept": false, + "reason": "Compiled flex_attention on torch>=2.9 and a missing TensorFlow backend import are unrelated." + }, + { + "left": "issue:44242", + "right": "issue:45200", + "accept": false, + "reason": "Load-balancing loss omission and Gemma 4 mm_token_type_ids defaulting are different training-time issues." + }, + { + "left": "issue:44464", + "right": "issue:45245", + "accept": false, + "reason": "Chunked generation inconsistency and a categories cardinality limit error are unrelated." + }, + { + "left": "issue:43976", + "right": "issue:45405", + "accept": false, + "reason": "Python compatibility and an unreleased PEFT version bump are different dependency problems." + }, + { + "left": "issue:44351", + "right": "issue:45278", + "accept": false, + "reason": "A specific missing HybridCache export may be one symptom of broader import breakage, but the evidence is too weak to call it the same bug as the generic multi-import failure." + }, + { + "left": "issue:41950", + "right": "issue:43650", + "accept": false, + "reason": "Video-classification processor lookup and a vague 'ADD THE DATA' issue are not related." + }, + { + "left": "issue:43479", + "right": "issue:43525", + "accept": false, + "reason": "Phi4MultimodalConfig default initialization and a Llama4Config pad_token_id AttributeError are separate model config bugs." + }, + { + "left": "issue:41950", + "right": "issue:45020", + "accept": false, + "reason": "A specific video-classification pipeline bug and a broad remote_code regression are not the same concrete issue." 
+ }, + { + "left": "issue:44496", + "right": "issue:44877", + "accept": false, + "reason": "Unrecognized model loading and strict granite_speech config validation are different failures." + }, + { + "left": "issue:44462", + "right": "issue:45081", + "accept": false, + "reason": "Tokenizer JSON loading and a Mistral regex patch crash are unrelated tokenizer issues." + }, + { + "left": "issue:44297", + "right": "issue:44462", + "accept": false, + "reason": "Tokenizer class metadata mismatch on save and AutoTokenizer ignoring tokenizer.json are different load/save bugs." + }, + { + "left": "issue:43550", + "right": "issue:44964", + "accept": false, + "reason": "Bamba torch.compile/SDPA failure and Phi-4 multimodal loading are unrelated." + }, + { + "left": "issue:44279", + "right": "issue:45405", + "accept": false, + "reason": "A generic transformers dependency complaint and a specific unreleased PEFT minimum version bump are not duplicates." + }, + { + "left": "issue:43299", + "right": "issue:43854", + "accept": false, + "reason": "Different model families and different failure modes; no shared code path is evident." + }, + { + "left": "issue:43295", + "right": "issue:44779", + "accept": false, + "reason": "Both are tokenizer regressions, but they affect different models and report different incorrect behaviors." + }, + { + "left": "issue:44462", + "right": "issue:44779", + "accept": false, + "reason": "Deepseek tokenizer correctness regression is not the same as AutoTokenizer ignoring repository tokenizer.json." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 28, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:38175", + "issue:39692", + "issue:42175", + "issue:42994", + "issue:43065", + "issue:43122", + "issue:43262", + "issue:43295", + "issue:43317", + "issue:43352", + "issue:43441", + "issue:43450", + "issue:43475", + "issue:43525", + "issue:43650", + "issue:43673", + "issue:43723", + "issue:43761", + "issue:43784", + "issue:43824", + "issue:43881", + "issue:43901", + "issue:43976", + "issue:43994", + "issue:44016", + "issue:44162", + "issue:44246", + "issue:44261", + "issue:44263", + "issue:44279", + "issue:44291", + "issue:44297", + "issue:44466", + "issue:44521", + "issue:44617", + "issue:44623", + "issue:44671", + "issue:44683", + "issue:44749", + "issue:44779", + "issue:44898", + "issue:44987", + "issue:45250", + "issue:45276", + "issue:45290", + "issue:45397", + "issue:45542" + ], + "soft_pairs": [ + "issue:38175|issue:44617", + "issue:39692|issue:43901", + "issue:43650|issue:45290", + "issue:44016|issue:44291", + "issue:44466|issue:45276", + "issue:43441|issue:44683", + "issue:43450|issue:44279", + "issue:42994|issue:43317", + "issue:43065|issue:44898", + "issue:43723|issue:44162", + "issue:43262|issue:44521", + "issue:44261|issue:44263", + "issue:44623|issue:45397", + "issue:43122|issue:43650", + "issue:44623|issue:44749", + "issue:43784|issue:45542", + "issue:44671|issue:44779", + "issue:43673|issue:43761", + "issue:44987|issue:45542", + "issue:43901|issue:43994", + "issue:44297|issue:45397", + "issue:42175|issue:43976", + "issue:43475|issue:44016", + 
"issue:43824|issue:43976", + "issue:43901|issue:44246", + "issue:43352|issue:45250", + "issue:43525|issue:43881", + "issue:43295|issue:44617" + ], + "prepared_review_unit_hash": "34618173c6223d8165cb271fc63780ab31b2be3e6568b867fcdde7c6efadd74d", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 28, + "serialized_chars": 35249, + "estimated_input_tokens": 8813, + "estimated_eval_tokens": 17882 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "All soft pairs are rejected: they share broad keywords or subsystems, but each pair points to a different concrete bug, regression, or docs issue. The most representative issue in this set is the SigLIP2 output-quality report, which is the clearest self-contained user-facing bug.", + "confidence": 0.86, + "canonical_issue_reason": "issue:43994 is the clearest, most self-contained bug report: it names the model, the wrong behavior, and the affected entry points (AutoModel and pipeline).", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43994 is the strongest standalone issue in this set because it gives a concrete end-user symptom and a broad repro surface, making it the best representative issue.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:38175", + "right": "issue:44617", + "accept": false, + "reason": "SigLIP2 zero-probability outputs vs SAM3 video CUDA OOM are unrelated failures." + }, + { + "left": "issue:39692", + "right": "issue:43901", + "accept": false, + "reason": "Both mention docs/examples, but one is a SigLIP2 example bug and the other is a pipeline-docs behavior mismatch." 
+ }, + { + "left": "issue:43650", + "right": "issue:45290", + "accept": false, + "reason": "Generic placeholder issue vs apply_chat_template crash on tool-call assistant messages; no shared underlying bug." + }, + { + "left": "issue:44016", + "right": "issue:44291", + "accept": false, + "reason": "Notebook syntax error is unrelated to the init_empty_weights TypeError regression." + }, + { + "left": "issue:44466", + "right": "issue:45276", + "accept": false, + "reason": "Both touch weights/serialization-adjacent behavior, but one is inconsistent lm_head serialization and the other is resize_token_embeddings not propagating." + }, + { + "left": "issue:43441", + "right": "issue:44683", + "accept": false, + "reason": "Both involve Flash Attention, but one is model support gating and the other is a compiled flex_attention failure on newer torch." + }, + { + "left": "issue:43450", + "right": "issue:44279", + "accept": false, + "reason": "Video processor batching shape bug vs dependency issue; different layers and symptoms." + }, + { + "left": "issue:42994", + "right": "issue:43317", + "accept": false, + "reason": "Quantized model saving failure and dequantized offload loading failure are different code paths." + }, + { + "left": "issue:43065", + "right": "issue:44898", + "accept": false, + "reason": "Dummy Conv2d in Sam3PixelDecoder is unrelated to Perceiver interpolation behavior." + }, + { + "left": "issue:43723", + "right": "issue:44162", + "accept": false, + "reason": "Tokenizer loading regression vs ESM2 model breakage; not the same underlying issue." + }, + { + "left": "issue:43262", + "right": "issue:44521", + "accept": false, + "reason": "Audio chat-template sampling-rate default and multimodal assistant_mask all-zero bug are different apply_chat_template problems." + }, + { + "left": "issue:44261", + "right": "issue:44263", + "accept": false, + "reason": "MLA layernorm precision issue and GlmMoeDsaIndexer torch.split return-value issue are unrelated." 
+ }, + { + "left": "issue:44623", + "right": "issue:45397", + "accept": false, + "reason": "processor.save_pretrained missing files is distinct from gemma-4 zero3 from_pretrained failure." + }, + { + "left": "issue:43122", + "right": "issue:43650", + "accept": false, + "reason": "Tokenizer behavior regression is unrelated to the placeholder 'ADD THE DATA' issue." + }, + { + "left": "issue:44623", + "right": "issue:44749", + "accept": false, + "reason": "Save-pretrained file omission vs Chinese-reported performance slowdown after upgrade are different bugs." + }, + { + "left": "issue:43784", + "right": "issue:45542", + "accept": false, + "reason": "Missing nn import in sentence-transformers is unrelated to TensorBoard/TensorFlow backend installation behavior." + }, + { + "left": "issue:44671", + "right": "issue:44779", + "accept": false, + "reason": "CamemBERT masked-LM prediction regression and Deepseek tokenizer regression are different model/tokenizer bugs." + }, + { + "left": "issue:43673", + "right": "issue:43761", + "accept": false, + "reason": "Generation cache missing in chunked_prefill vs CLIP hidden_states output regression are separate code paths." + }, + { + "left": "issue:44987", + "right": "issue:45542", + "accept": false, + "reason": "Model loading failure on transformers>=5.1.0 is unrelated to missing TensorFlow backend after pip install '.[torch]'." + }, + { + "left": "issue:43901", + "right": "issue:43994", + "accept": false, + "reason": "Docs mention of return_all_scores is not the same as SigLIP2 producing nonsensical outputs." + }, + { + "left": "issue:44297", + "right": "issue:45397", + "accept": false, + "reason": "Tokenizer save metadata mismatch and gemma-4 zero3 loading failure do not describe the same bug." + }, + { + "left": "issue:42175", + "right": "issue:43976", + "accept": false, + "reason": "Missing TensorFlow in the torch extra and Python-version compatibility failure are different dependency issues." 
+ }, + { + "left": "issue:43475", + "right": "issue:44016", + "accept": false, + "reason": "SAM3 video encoder attribute error is unrelated to a notebook syntax error." + }, + { + "left": "issue:43824", + "right": "issue:43976", + "accept": false, + "reason": "Missing Qwen2_5_VL import and Python version support regression are unrelated." + }, + { + "left": "issue:43901", + "right": "issue:44246", + "accept": false, + "reason": "Pipeline docs mismatch vs import being slow sometimes are not the same problem." + }, + { + "left": "issue:43352", + "right": "issue:45250", + "accept": false, + "reason": "Specific model lacking Flash Attention 2.0 support is not the same as the generic Flash Attention 2.0 issue/request." + }, + { + "left": "issue:43525", + "right": "issue:43881", + "accept": false, + "reason": "Llama4Config missing pad_token_id and glm-4v-9b loading failed are different attribute/loading failures." + }, + { + "left": "issue:43295", + "right": "issue:44617", + "accept": false, + "reason": "Processor.tokenizer regression with image passing is unrelated to SAM3 video CUDA OOM." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 29, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:36246", + "issue:41628", + "issue:41950", + "issue:42491", + "issue:42757", + "issue:43066", + "issue:43299", + "issue:43441", + "issue:43450", + "issue:43526", + "issue:43531", + "issue:43540", + "issue:43596", + "issue:43673", + "issue:43976", + "issue:43994", + "issue:44162", + "issue:44188", + "issue:44220", + "issue:44242", + "issue:44246", + "issue:44295", + "issue:44336", + "issue:44351", + "issue:44373", + "issue:44462", + "issue:44485", + "issue:44496", + "issue:44561", + "issue:44589", + "issue:44617", + "issue:44661", + "issue:44749", + "issue:44779", + "issue:44857", + "issue:44871", + "issue:44877", + "issue:44987", + "issue:45003", + "issue:45200", + "issue:45276", + "issue:45292", + "issue:45356", + "issue:45362", + "issue:45375", + "issue:45397", + "issue:45459", + "issue:45542" + ], + "soft_pairs": [ + "issue:44589|issue:45362", + "issue:43450|issue:44617", + "issue:44188|issue:44336", + "issue:41628|issue:43976", + "issue:41950|issue:44220", + "issue:44295|issue:45397", + "issue:42491|issue:43299", + "issue:44246|issue:44496", + "issue:44336|issue:44373", + "issue:44661|issue:45542", + "issue:44462|issue:45356", + "issue:43526|issue:45003", + "issue:43994|issue:45276", + "issue:43531|issue:45362", + "issue:36246|issue:42757", + "issue:36246|issue:44351", + "issue:43673|issue:44561", + "issue:44336|issue:44485", + "issue:44162|issue:45200", + "issue:44242|issue:44857", + "issue:44246|issue:44871", + "issue:44295|issue:44749", + "issue:44779|issue:45292", + 
"issue:44877|issue:45375", + "issue:36246|issue:43596", + "issue:43441|issue:43540", + "issue:44987|issue:45459", + "issue:43066|issue:44779" + ], + "prepared_review_unit_hash": "c2ba531fa00f668233b267b3f6d2f1ffb8696230ac230bd6865849558a52c2a4", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 28, + "serialized_chars": 35863, + "estimated_input_tokens": 8966, + "estimated_eval_tokens": 18188 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is highly heterogeneous: most pairs are only broad Transformers v5 regressions or entirely unrelated model/pipeline bugs, so no soft edge looks like the same concrete issue. If forced to pick a representative, issue 41628 is the broadest user-facing import-regression report.", + "confidence": 0.27, + "canonical_issue_reason": "Issue 41628 is the cleanest, most generic top-level API breakage report in the set (a direct import failure from `transformers`), so it is the least-wrong representative of the cluster\u2019s loose v5 import/regression theme.", + "canonical_pr_reason": null, + "best_issue_reason": "41628 is the most broadly applicable and user-visible failure mode among these issues, making it the best canonical issue candidate despite the overall heterogeneity.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44589", + "right": "issue:45362", + "accept": false, + "reason": "Different failures: a storage/type import/runtime error versus a Qwen chat crash. Same ecosystem, not the same bug." + }, + { + "left": "issue:43450", + "right": "issue:44617", + "accept": false, + "reason": "One is a batched video processor shape bug; the other is CUDA OOM in Sam3Video. 
Not the same underlying issue." + }, + { + "left": "issue:44188", + "right": "issue:44336", + "accept": false, + "reason": "Diverging attention kernels under `torch.compile` is unrelated to ANSI code output from loading_report." + }, + { + "left": "issue:41628", + "right": "issue:43976", + "accept": false, + "reason": "ImportError for `AutoImageProcessor` and a Python version compatibility report are different problem classes." + }, + { + "left": "issue:41950", + "right": "issue:44220", + "accept": false, + "reason": "Video pipeline choosing image processors and `_torch_extract_fbank_features()` failures are unrelated code paths." + }, + { + "left": "issue:44295", + "right": "issue:45397", + "accept": false, + "reason": "Buffer access on `position_ids` and Zero3 `from_pretrained` breakage are different bugs." + }, + { + "left": "issue:42491", + "right": "issue:43299", + "accept": false, + "reason": "Both involve Qwen3 MoE, but one is HF4.x LoRA compatibility and the other is v5 loading breakage for Qwen3VL MoE; not mergeable as one bug." + }, + { + "left": "issue:44246", + "right": "issue:44496", + "accept": false, + "reason": "Slow import and unrecognized model/config loading are not the same underlying failure." + }, + { + "left": "issue:44336", + "right": "issue:44373", + "accept": false, + "reason": "Terminal ANSI escape output is unrelated to a wrong docstring for `position_ids`." + }, + { + "left": "issue:44661", + "right": "issue:45542", + "accept": false, + "reason": "Tokenizer mapping-name handling and missing TensorFlow backend installation are unrelated." + }, + { + "left": "issue:44462", + "right": "issue:45356", + "accept": false, + "reason": "Both are tokenizer regressions, but they affect different models and different failure modes; too broad to treat as one bug." + }, + { + "left": "issue:43526", + "right": "issue:45003", + "accept": false, + "reason": "BeitImageProcessorFast label reduction and unsafe `sys.modules` access are unrelated." 
+ }, + { + "left": "issue:43994", + "right": "issue:45276", + "accept": false, + "reason": "Nonsensical model outputs and resize-token-embedding propagation are different issues." + }, + { + "left": "issue:43531", + "right": "issue:45362", + "accept": false, + "reason": "Sliding-window behavior in Qwen3-MoE is unrelated to a Qwen3.5-35B chat crash." + }, + { + "left": "issue:36246", + "right": "issue:42757", + "accept": false, + "reason": "Both are import errors, but for different missing symbols and different root causes." + }, + { + "left": "issue:36246", + "right": "issue:44351", + "accept": false, + "reason": "Missing `Qwen2_5_VLImageProcessor` and missing `HybridCache` are separate export regressions." + }, + { + "left": "issue:43673", + "right": "issue:44561", + "accept": false, + "reason": "Chunked-prefill cache behavior and removal of `is_torch_fx_available` break different code paths." + }, + { + "left": "issue:44336", + "right": "issue:44485", + "accept": false, + "reason": "ANSI output noise and GLM-5 RoPE implementation are unrelated." + }, + { + "left": "issue:44162", + "right": "issue:45200", + "accept": false, + "reason": "ESM2 breakage and Gemma 4 `mm_token_type_ids` defaults are different model-specific bugs." + }, + { + "left": "issue:44242", + "right": "issue:44857", + "accept": false, + "reason": "Missing load-balancing loss and float16 AMP crash are unrelated training/runtime failures." + }, + { + "left": "issue:44246", + "right": "issue:44871", + "accept": false, + "reason": "Import slowness and Gemma eos-token-id inconsistency are not the same bug." + }, + { + "left": "issue:44295", + "right": "issue:44749", + "accept": false, + "reason": "`position_ids` buffer access and data-filtering slowdown after upgrade are unrelated." + }, + { + "left": "issue:44779", + "right": "issue:45292", + "accept": false, + "reason": "Tokenizer regression and `resize_token_embeddings` not updating output embeddings are different code paths." 
+ }, + { + "left": "issue:44877", + "right": "issue:45375", + "accept": false, + "reason": "Strict config rejection for granite_speech and a silently dropped Qwen config field are similar only at a high level, not the same concrete defect." + }, + { + "left": "issue:36246", + "right": "issue:43596", + "accept": false, + "reason": "An import error and a DeepSpeed ZeRO3/BertModel index error are unrelated." + }, + { + "left": "issue:43441", + "right": "issue:43540", + "accept": false, + "reason": "Ministral flash-attention failure and Qwen3OmniMoe video-input ValueError are different bugs." + }, + { + "left": "issue:44987", + "right": "issue:45459", + "accept": false, + "reason": "Loading a specific model and masking tokenizer decode errors when protobuf is absent are unrelated." + }, + { + "left": "issue:43066", + "right": "issue:44779", + "accept": false, + "reason": "Both are tokenizer regressions in v5, but they concern different models and different decoder/codec issues, so they should not be merged." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 30, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:41628", + "issue:41950", + "issue:42673", + "issue:42886", + "issue:43065", + "issue:43232", + "issue:43257", + "issue:43262", + "issue:43295", + "issue:43381", + "issue:43408", + "issue:43441", + "issue:43475", + "issue:43540", + "issue:43606", + "issue:43646", + "issue:43653", + "issue:43673", + "issue:43704", + "issue:43901", + "issue:43931", + "issue:44060", + "issue:44077", + "issue:44162", + "issue:44246", + "issue:44297", + "issue:44336", + "issue:44361", + "issue:44368", + "issue:44466", + "issue:44496", + "issue:44623", + "issue:44734", + "issue:44749", + "issue:44869", + "issue:44933", + "issue:44977", + "issue:44995", + "issue:45003", + "issue:45137", + "issue:45200", + "issue:45229", + "issue:45278", + "issue:45292", + "issue:45335", + "issue:45356", + "issue:45459" + ], + "soft_pairs": [ + "issue:43901|issue:45003", + "issue:41628|issue:41950", + "issue:42886|issue:45229", + "issue:44995|issue:45200", + "issue:43257|issue:45137", + "issue:43065|issue:43408", + "issue:43232|issue:44336", + "issue:44496|issue:45003", + "issue:43262|issue:44246", + "issue:43295|issue:45292", + "issue:44060|issue:44077", + "issue:43475|issue:43540", + "issue:44466|issue:45335", + "issue:43653|issue:44162", + "issue:44368|issue:44977", + "issue:44933|issue:45459", + "issue:43441|issue:43606", + "issue:43673|issue:44734", + "issue:44869|issue:45356", + "issue:43232|issue:43673", + "issue:43931|issue:44368", + "issue:44297|issue:44623", + "issue:43646|issue:44162", + 
"issue:43673|issue:45278", + "issue:43381|issue:45292", + "issue:43381|issue:45335", + "issue:42673|issue:43704", + "issue:44361|issue:44749" + ], + "prepared_review_unit_hash": "ae9788d5886e100cfb50f20a3865eadf72b0c49c1e740f1075b739b94d0932d9", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 28, + "serialized_chars": 35530, + "estimated_input_tokens": 8883, + "estimated_eval_tokens": 18022 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a loose topical grouping, not a real duplicate set. The only clear duplicate pair is the Qwen3ForCausalLM VRAM leak report; the rest differ by model, API surface, or failure mode.", + "confidence": 0.89, + "canonical_issue_reason": "issue:42673 is the original, more detailed report of the Qwen3ForCausalLM VRAM leak; issue:43704 repeats the same title/symptoms and reads as the duplicate.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:42673 is the clearest canonical issue in the cluster because it is the true duplicate anchor and has the fuller report.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43901", + "right": "issue:45003", + "accept": false, + "reason": "Docs mismatch for text classification vs unsafe sys.modules access in modeling_utils; unrelated bugs." + }, + { + "left": "issue:41628", + "right": "issue:41950", + "accept": false, + "reason": "Both mention image processors, but one is an import error and the other is a video pipeline lookup bug; different failure modes." 
+ }, + { + "left": "issue:42886", + "right": "issue:45229", + "accept": false, + "reason": "Offline cache loading for tokenizers is unrelated to Gemma4 multi-GPU CUDA OOM." + }, + { + "left": "issue:44995", + "right": "issue:45200", + "accept": false, + "reason": "GlmMoeDsa stale indexer cache on second forward is not the same as Gemma4 mm_token_type_ids defaults." + }, + { + "left": "issue:43257", + "right": "issue:45137", + "accept": false, + "reason": "Both involve DeepSpeed, but one is MOE weight conversion and the other is a ZeRO3 deque error; different code paths." + }, + { + "left": "issue:43065", + "right": "issue:43408", + "accept": false, + "reason": "SAM3 dummy Conv2d vs sam3_video/sam3_tracker config warning are distinct model issues." + }, + { + "left": "issue:43232", + "right": "issue:44336", + "accept": false, + "reason": "Generation kwargs after sync_gpus is unrelated to ANSI output in loading_report." + }, + { + "left": "issue:44496", + "right": "issue:45003", + "accept": false, + "reason": "Unrecognized model/config loading error is unrelated to sys.modules access in modeling_utils." + }, + { + "left": "issue:43262", + "right": "issue:44246", + "accept": false, + "reason": "Audio processor sampling-rate default is unrelated to slow import timing." + }, + { + "left": "issue:43295", + "right": "issue:45292", + "accept": false, + "reason": "Custom processor.tokenizer regression is not the same bug as resize_token_embeddings not updating output embeddings." + }, + { + "left": "issue:44060", + "right": "issue:44077", + "accept": false, + "reason": "Tied-weights warning bug and patchtsmixer post_init deprecation are unrelated." + }, + { + "left": "issue:43475", + "right": "issue:43540", + "accept": false, + "reason": "SAM 3 Video missing attribute vs Qwen3OmniMoe video ValueError are different model failures." 
+ }, + { + "left": "issue:44466", + "right": "issue:45335", + "accept": false, + "reason": "Serialization inconsistency of tied weights is different from resize_token_embeddings not affecting decoder.embed_tokens." + }, + { + "left": "issue:43653", + "right": "issue:44162", + "accept": false, + "reason": "BigBirdTokenizer special-token registration bug is unrelated to ESM2 breakage." + }, + { + "left": "issue:44368", + "right": "issue:44977", + "accept": false, + "reason": "Tie_word_embeddings warning during LoRA fine-tuning is not the same as flash-attention generation failure." + }, + { + "left": "issue:44933", + "right": "issue:45459", + "accept": false, + "reason": "Missing import from image_utils and protobuf decode-error masking are different import/exception issues." + }, + { + "left": "issue:43441", + "right": "issue:43606", + "accept": false, + "reason": "FlashAttention failure in Ministral-3 is unrelated to Bark-small CPU offload device mismatch." + }, + { + "left": "issue:43673", + "right": "issue:44734", + "accept": false, + "reason": "Missing GenerationMixin cache in chunked_prefill and server KV-cache tensor indexing are both cache-related but not the same concrete bug." + }, + { + "left": "issue:44869", + "right": "issue:45356", + "accept": false, + "reason": "Whisper trailing replacement-character crash is unrelated to Kimi-K2.5 codec handling." + }, + { + "left": "issue:43232", + "right": "issue:43673", + "accept": false, + "reason": "Generation kwargs sync_gpus issue and missing cache in chunked_prefill are separate generation bugs." + }, + { + "left": "issue:43931", + "right": "issue:44368", + "accept": false, + "reason": "Qwen3-VL weight-shape mismatch is unrelated to tie_word_embeddings warnings." + }, + { + "left": "issue:44297", + "right": "issue:44623", + "accept": false, + "reason": "Tokenizer save_pretrained class mismatch and processor save_pretrained missing files are different serialization defects." 
+ }, + { + "left": "issue:43646", + "right": "issue:44162", + "accept": false, + "reason": "Generic custom model initialization breakage does not match the ESM2-specific failure." + }, + { + "left": "issue:43673", + "right": "issue:45278", + "accept": false, + "reason": "Generation cache bug is not the same as broad import errors after version upgrade." + }, + { + "left": "issue:43381", + "right": "issue:45292", + "accept": false, + "reason": "Gradient checkpointing in eval mode is unrelated to resize_token_embeddings behavior." + }, + { + "left": "issue:43381", + "right": "issue:45335", + "accept": false, + "reason": "Gradient checkpointing in eval mode is unrelated to t5gemma embedding resizing." + }, + { + "left": "issue:42673", + "right": "issue:43704", + "accept": true, + "reason": "Same Qwen3ForCausalLM VRAM leak, same title, same symptom, and the later issue is a duplicate report." + }, + { + "left": "issue:44361", + "right": "issue:44749", + "accept": false, + "reason": "MLukeTokenizer AttributeError and Chinese import/filtering slowdown are unrelated." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is well grounded in the packet. The canonical duplicate choice for issue:42673 vs issue:43704 is supported by identical titles and the fuller earlier report, and the soft-edge verdicts stay conservative overall. I don\u2019t see any overstated evidence that would require rejection." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 31, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:28282", + "issue:30990", + "issue:35141", + "issue:36010", + "issue:41628", + "issue:42222", + "issue:42371", + "issue:42831", + "issue:42898", + "issue:42994", + "issue:43232", + "issue:43377", + "issue:43475", + "issue:43479", + "issue:43482", + "issue:43525", + "issue:43643", + "issue:43673", + "issue:43701", + "issue:43873", + "issue:44038", + "issue:44077", + "issue:44079", + "issue:44164", + "issue:44188", + "issue:44222", + "issue:44242", + "issue:44351", + "issue:44568", + "issue:44623", + "issue:44661", + "issue:44749", + "issue:44792", + "issue:44869", + "issue:44933", + "issue:44945", + "issue:45003", + "issue:45081", + "issue:45092", + "issue:45103", + "issue:45200", + "issue:45230", + "issue:45310", + "issue:45362", + "issue:45375", + "issue:45381", + "issue:45397", + "issue:45478" + ], + "soft_pairs": [ + "issue:28282|issue:30990", + "issue:44077|issue:45003", + "issue:44749|issue:45310", + "issue:45230|issue:45397", + "issue:44038|issue:44623", + "issue:42994|issue:43482", + "issue:43475|issue:45381", + "issue:43475|issue:43643", + "issue:43701|issue:44749", + "issue:45081|issue:45103", + "issue:42371|issue:45092", + "issue:45375|issue:45478", + "issue:41628|issue:44933", + "issue:45310|issue:45375", + "issue:42994|issue:43525", + "issue:43475|issue:44661", + "issue:43873|issue:44945", + "issue:42222|issue:45200", + "issue:42831|issue:44222", + "issue:45362|issue:45375", + "issue:45310|issue:45397", + "issue:43232|issue:44188", + "issue:41628|issue:44351", + "issue:44222|issue:45310", + 
"issue:42994|issue:44164", + "issue:43232|issue:44792", + "issue:44038|issue:45310", + "issue:30990|issue:35141", + "issue:43479|issue:44079", + "issue:43377|issue:44242", + "issue:36010|issue:41628", + "issue:43673|issue:44188", + "issue:42898|issue:44568", + "issue:43479|issue:44869" + ], + "prepared_review_unit_hash": "2e1580dc417ca862b9b6712231ac31fa5693f1a23d2fe69909c16c95c79e8d50", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 34, + "serialized_chars": 37538, + "estimated_input_tokens": 9385, + "estimated_eval_tokens": 19026 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set: it mixes many unrelated import, config, generation, tokenizer, quantization, and model-specific regressions. No soft pair is close enough to merge as the same underlying bug/change.", + "confidence": 0.96, + "canonical_issue_reason": "No single issue is a safe canonical duplicate because the items span distinct failure modes and different code paths.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45310 is the most narrowly scoped, concrete loader regression in the set, so it is the best standalone representative, but it does not subsume the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:28282", + "right": "issue:30990", + "accept": false, + "reason": "PyTorch-missing import error vs Sentence Transformers hanging during load; different symptoms and code paths." + }, + { + "left": "issue:44077", + "right": "issue:45003", + "accept": false, + "reason": "Optional post_init policy change vs unsafe sys.modules access; unrelated bugs." 
+ }, + { + "left": "issue:44749", + "right": "issue:45310", + "accept": false, + "reason": "Data filtering slowdown after upgrade vs Qwen3.5 from_pretrained failure; not the same issue." + }, + { + "left": "issue:45230", + "right": "issue:45397", + "accept": false, + "reason": "Generic bug report vs Gemma-4 zero3 from_pretrained failure; no shared concrete defect." + }, + { + "left": "issue:44038", + "right": "issue:44623", + "accept": false, + "reason": "Qwen3-VL-Moe loading bug vs processor.save_pretrained missing files; different subsystems." + }, + { + "left": "issue:42994", + "right": "issue:43482", + "accept": false, + "reason": "Quantized model saving failure vs Qwen2.5-GGUF loading failure; unrelated." + }, + { + "left": "issue:43475", + "right": "issue:45381", + "accept": false, + "reason": "SAM 3 video attribute error vs qwen2.5-vl video position-id bug; different models and paths." + }, + { + "left": "issue:43475", + "right": "issue:43643", + "accept": false, + "reason": "SAM 3 VisionEncoder output bug vs AutoConfig trust_remote_code field loss; unrelated." + }, + { + "left": "issue:43701", + "right": "issue:44749", + "accept": false, + "reason": "resume_from_checkpoint key mismatch vs performance regression in data filtering; not the same bug." + }, + { + "left": "issue:45081", + "right": "issue:45103", + "accept": false, + "reason": "Mistral tokenizer patch crash vs auto_docstring annotation crash; unrelated code paths." + }, + { + "left": "issue:42371", + "right": "issue:45092", + "accept": false, + "reason": "TF32 settings warning vs remote-code meta-init incompatibility; different problems." + }, + { + "left": "issue:45375", + "right": "issue:45478", + "accept": false, + "reason": "Missing vision-config field in strict handling vs Qwen3.5 Moe from_pretrained error; related family but not the same defect." 
+ }, + { + "left": "issue:41628", + "right": "issue:44933", + "accept": false, + "reason": "AutoImageProcessor export error vs missing image_utils import; similar theme, different symbols and fixes." + }, + { + "left": "issue:45310", + "right": "issue:45375", + "accept": false, + "reason": "Both concern Qwen3.5-family models, but one is a from_pretrained loader failure and the other is a missing config field; not mergeable as one bug." + }, + { + "left": "issue:42994", + "right": "issue:43525", + "accept": false, + "reason": "Quantized save failure vs Llama4Config missing pad_token_id; unrelated." + }, + { + "left": "issue:43475", + "right": "issue:44661", + "accept": false, + "reason": "SAM 3 attribute error vs add-new-model-like/tokenizer mapping failure; no shared code path." + }, + { + "left": "issue:43873", + "right": "issue:44945", + "accept": false, + "reason": "Quantization offloading issue vs pipeline-parallel output mismatch; different runtime behaviors." + }, + { + "left": "issue:42222", + "right": "issue:45200", + "accept": false, + "reason": "vitpose model breakage vs Gemma 4 token-type defaulting; unrelated model families." + }, + { + "left": "issue:42831", + "right": "issue:44222", + "accept": false, + "reason": "FP8 accuracy regression vs FP8 save_pretrained moe; different failure modes." + }, + { + "left": "issue:45362", + "right": "issue:45375", + "accept": false, + "reason": "Qwen3.5-35B chat crash vs missing deepstack_visual_indexes field; not the same bug." + }, + { + "left": "issue:45310", + "right": "issue:45397", + "accept": false, + "reason": "Qwen3.5 Moe from_pretrained error vs Gemma-4 zero3 from_pretrained issue; unrelated models." + }, + { + "left": "issue:43232", + "right": "issue:44188", + "accept": false, + "reason": "Generation kwargs update bug vs torch.compile attention-kernel divergence; different code paths." 
+ }, + { + "left": "issue:41628", + "right": "issue:44351", + "accept": false, + "reason": "AutoImageProcessor import error vs HybridCache import error; both imports, but not the same missing symbol or fix." + }, + { + "left": "issue:44222", + "right": "issue:45310", + "accept": false, + "reason": "FP8 save_pretrained moe vs Qwen3.5 Moe from_pretrained error; unrelated." + }, + { + "left": "issue:42994", + "right": "issue:44164", + "accept": false, + "reason": "Quantized saving failure vs extra_state save/from_pretrained handling; different persistence bugs." + }, + { + "left": "issue:43232", + "right": "issue:44792", + "accept": false, + "reason": "Generation sync_gpus kwargs bug vs janus test failure; no shared underlying defect." + }, + { + "left": "issue:44038", + "right": "issue:45310", + "accept": false, + "reason": "Qwen3-VL-Moe bug vs Qwen3.5 Moe from_pretrained error; same broad family, but not the same concrete failure." + }, + { + "left": "issue:30990", + "right": "issue:35141", + "accept": false, + "reason": "Loading stall vs embedding resizing/post_init reinitialization; unrelated." + }, + { + "left": "issue:43479", + "right": "issue:44079", + "accept": false, + "reason": "Phi4Multimodal config defaults vs ModelOutput key assignment; different object semantics." + }, + { + "left": "issue:43377", + "right": "issue:44242", + "accept": false, + "reason": "MIMI batched-vs-single padding-mask bug vs MoE load-balancing loss omission; unrelated." + }, + { + "left": "issue:36010", + "right": "issue:41628", + "accept": false, + "reason": "GenerationMixin import missing vs AutoImageProcessor import missing; both are import regressions, but they concern different exports and fixes." + }, + { + "left": "issue:43673", + "right": "issue:44188", + "accept": false, + "reason": "GenerationMixin cache missing in chunked prefill vs torch.compile branching divergence; not the same code-path problem." 
+ }, + { + "left": "issue:42898", + "right": "issue:44568", + "accept": false, + "reason": "v5 clean_up_tokenization_spaces behavior change vs missing BOS/EOS when add_special_tokens=True; different tokenizer regressions." + }, + { + "left": "issue:43479", + "right": "issue:44869", + "accept": false, + "reason": "Config initialization bug vs Whisper word timestamp decode crash; unrelated." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 32, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:22355", + "issue:28282", + "issue:41628", + "issue:42175", + "issue:42757", + "issue:42831", + "issue:42898", + "issue:43278", + "issue:43295", + "issue:43479", + "issue:43644", + "issue:43723", + "issue:43784", + "issue:43824", + "issue:43874", + "issue:43976", + "issue:44060", + "issue:44077", + "issue:44188", + "issue:44206", + "issue:44222", + "issue:44242", + "issue:44263", + "issue:44295", + "issue:44297", + "issue:44360", + "issue:44373", + "issue:44442", + "issue:44448", + "issue:44483", + "issue:44484", + "issue:44514", + "issue:44568", + "issue:44623", + "issue:44734", + "issue:44779", + "issue:44933", + "issue:44964", + "issue:44977", + "issue:45092", + "issue:45103", + "issue:45125", + "issue:45137", + "issue:45276", + "issue:45310", + "issue:45356", + "issue:45406", + "issue:45459" + ], + "soft_pairs": [ + "issue:44964|issue:45137", + "issue:43976|issue:44933", + "issue:42175|issue:44933", + "issue:43784|issue:43824", + "issue:43278|issue:45137", + "issue:44623|issue:45310", + "issue:44188|issue:44373", + "issue:44060|issue:45125", + "issue:44734|issue:45103", + 
"issue:42898|issue:45356", + "issue:43723|issue:44568", + "issue:45137|issue:45276", + "issue:42898|issue:44779", + "issue:43874|issue:45092", + "issue:44077|issue:44242", + "issue:42898|issue:44448", + "issue:41628|issue:45459", + "issue:44222|issue:44297", + "issue:43479|issue:43644", + "issue:42898|issue:43295", + "issue:44442|issue:45459", + "issue:44222|issue:44623", + "issue:42831|issue:44623", + "issue:44206|issue:45103", + "issue:22355|issue:28282", + "issue:44297|issue:45310", + "issue:41628|issue:42757", + "issue:44483|issue:45406", + "issue:44222|issue:44484", + "issue:44263|issue:44360", + "issue:43723|issue:44977", + "issue:44263|issue:45092", + "issue:44514|issue:45103", + "issue:44295|issue:44623" + ], + "prepared_review_unit_hash": "44cde44f61b66aba35f1964e9338ffb340a6e9acfd71528fb171ec472ba92471", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 34, + "serialized_chars": 37829, + "estimated_input_tokens": 9458, + "estimated_eval_tokens": 19172 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a loose semantic neighborhood of unrelated Transformers issues, not true duplicates. 
The shared wording (tokenizer, save_pretrained, import, v5 regression, multimodal) is superficial and usually points to different code paths or different models/components.", + "confidence": 0.96, + "canonical_issue_reason": "issue:42898 is the broadest, most general regression in the set and has the clearest cross-cutting impact; it is also more central than the highly model-specific or packaging-only reports.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:42898 is the best representative issue for the cluster because it describes a concrete Transformers v5 behavior regression with wider applicability than the other narrowly scoped bugs.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44964", + "right": "issue:45137", + "accept": false, + "reason": "Different problems: Phi-4 multimodal loading vs a DeepSpeed ZeRO3 deque/indexing failure." + }, + { + "left": "issue:43976", + "right": "issue:44933", + "accept": false, + "reason": "Python version/backend support issue vs an import name missing from image_utils; unrelated." + }, + { + "left": "issue:42175", + "right": "issue:44933", + "accept": false, + "reason": "Packaging/backend dependency omission vs missing image_utils import; not the same bug." + }, + { + "left": "issue:43784", + "right": "issue:43824", + "accept": false, + "reason": "Sentence-transformers import NameError vs missing Qwen2.5-VL class export; different failure modes." + }, + { + "left": "issue:43278", + "right": "issue:45137", + "accept": false, + "reason": "Training/eval dtype mismatch vs DeepSpeed ZeRO3 empty deque error; unrelated code paths." + }, + { + "left": "issue:44623", + "right": "issue:45310", + "accept": false, + "reason": "Processor save_pretrained file emission bug vs Qwen3.5 MoE from_pretrained regression." + }, + { + "left": "issue:44188", + "right": "issue:44373", + "accept": false, + "reason": "Attention kernel divergence under torch.compile vs a docstring issue; not a duplicate." 
+ }, + { + "left": "issue:44060", + "right": "issue:45125", + "accept": false, + "reason": "Incorrect tied-weights warning vs missing _tp_plan for tensor parallelism; separate model bugs." + }, + { + "left": "issue:44734", + "right": "issue:45103", + "accept": false, + "reason": "Serving KV-cache indexing crash vs auto_docstring crash from future annotations; different subsystems." + }, + { + "left": "issue:42898", + "right": "issue:45356", + "accept": false, + "reason": "Tokenizer cleanup behavior change vs Kimi codec handling / warning regression; not the same underlying bug." + }, + { + "left": "issue:43723", + "right": "issue:44568", + "accept": false, + "reason": "AutoTokenizer loading failure vs BOS/EOS not being added; both tokenizer-related but distinct." + }, + { + "left": "issue:45137", + "right": "issue:45276", + "accept": false, + "reason": "ZeRO3 deque/index failure vs resize_token_embeddings not updating Gemma4 embeddings; unrelated." + }, + { + "left": "issue:42898", + "right": "issue:44779", + "accept": false, + "reason": "clean_up_tokenization_spaces regression vs Deepseek tokenizer output regression; different tokenizer bugs." + }, + { + "left": "issue:43874", + "right": "issue:45092", + "accept": false, + "reason": "Missing image-patch method vs remote-code/meta-init incompatibility; different multimodal issues." + }, + { + "left": "issue:44077", + "right": "issue:44242", + "accept": false, + "reason": "patchtsmixer post_init API restriction vs MoE load-balancing loss omission; unrelated." + }, + { + "left": "issue:42898", + "right": "issue:44448", + "accept": false, + "reason": "Tokenization cleanup behavior change vs Pegasus output mismatch; not the same defect." + }, + { + "left": "issue:41628", + "right": "issue:45459", + "accept": false, + "reason": "Missing AutoImageProcessor import vs protobuf-related tokenizer error masking; different import paths." 
+ }, + { + "left": "issue:44222", + "right": "issue:44297", + "accept": false, + "reason": "FP8 save_pretrained/MoE issue vs tokenizer_class metadata mismatch; unrelated." + }, + { + "left": "issue:43479", + "right": "issue:43644", + "accept": false, + "reason": "Phi4MultimodalConfig default initialization bug vs non-persistent buffer junk in v5; different layers of the stack." + }, + { + "left": "issue:42898", + "right": "issue:43295", + "accept": false, + "reason": "Tokenization cleanup regression vs processor.tokenizer / image passing regression; separate processor changes." + }, + { + "left": "issue:44442", + "right": "issue:45459", + "accept": false, + "reason": "FastSpeech2ConformerTokenizer loading failure vs hidden tokenizer errors when protobuf is absent; not a duplicate." + }, + { + "left": "issue:44222", + "right": "issue:44623", + "accept": false, + "reason": "FP8 save_pretrained/MoE bug vs processor.save_pretrained missing files; different save paths and objects." + }, + { + "left": "issue:42831", + "right": "issue:44623", + "accept": false, + "reason": "FineGrainedFP8 accuracy problem vs processor.save_pretrained file omission; unrelated." + }, + { + "left": "issue:44206", + "right": "issue:45103", + "accept": false, + "reason": "Unsupported center argument in feature extractor vs auto_docstring annotations crash; different failures." + }, + { + "left": "issue:22355", + "right": "issue:28282", + "accept": false, + "reason": "Missing transformers.onnx module import vs AutoModel requiring PyTorch; not the same import error." + }, + { + "left": "issue:44297", + "right": "issue:45310", + "accept": false, + "reason": "Tokenizer config metadata mismatch vs Qwen3.5 MoE from_pretrained error; separate issues." + }, + { + "left": "issue:41628", + "right": "issue:42757", + "accept": false, + "reason": "AutoImageProcessor import failure vs is_offline_mode import failure from huggingface_hub; unrelated imports." 
+ }, + { + "left": "issue:44483", + "right": "issue:45406", + "accept": false, + "reason": "chat/completions request handling vs Gemma4Processor missing _tokenizer; different serve failures." + }, + { + "left": "issue:44222", + "right": "issue:44484", + "accept": false, + "reason": "FP8 save_pretrained/MoE regression vs max_shard_size default question; not a duplicate." + }, + { + "left": "issue:44263", + "right": "issue:44360", + "accept": false, + "reason": "torch.split return values issue vs DSA indexer lacking ReLU; same area, but different concrete bug." + }, + { + "left": "issue:44295", + "right": "issue:45459", + "accept": false, + "reason": "position_ids buffer read error vs protobuf import masking tokenizer errors; unrelated." + }, + { + "left": "issue:44263", + "right": "issue:45092", + "accept": false, + "reason": "GlmMoeDsaIndexer split behavior vs InternVL2 meta-initialization incompatibility; different components." + }, + { + "left": "issue:44514", + "right": "issue:45103", + "accept": false, + "reason": "Batched apply_chat_template padding bug vs auto_docstring future-annotations crash; unrelated." + }, + { + "left": "issue:44295", + "right": "issue:44623", + "accept": false, + "reason": "buffer registration/read error vs missing processor save files; different bugs." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 33, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:30990", + "issue:33357", + "issue:38175", + "issue:39401", + "issue:41628", + "issue:42371", + "issue:42757", + "issue:42831", + "issue:42886", + "issue:42907", + "issue:42994", + "issue:43066", + "issue:43122", + "issue:43232", + "issue:43408", + "issue:43475", + "issue:43479", + "issue:43576", + "issue:43723", + "issue:43784", + "issue:43824", + "issue:43937", + "issue:43976", + "issue:44038", + "issue:44117", + "issue:44261", + "issue:44263", + "issue:44295", + "issue:44297", + "issue:44373", + "issue:44442", + "issue:44485", + "issue:44488", + "issue:44514", + "issue:44521", + "issue:44704", + "issue:44743", + "issue:44871", + "issue:44913", + "issue:44945", + "issue:44991", + "issue:45092", + "issue:45245", + "issue:45278", + "issue:45290", + "issue:45310", + "issue:45357" + ], + "soft_pairs": [ + "issue:43479|issue:44913", + "issue:43723|issue:44871", + "issue:44373|issue:44485", + "issue:44261|issue:45092", + "issue:43723|issue:44991", + "issue:30990|issue:38175", + "issue:43723|issue:44442", + "issue:45092|issue:45357", + "issue:42886|issue:42994", + "issue:42831|issue:45310", + "issue:41628|issue:43824", + "issue:42831|issue:44038", + "issue:43576|issue:43784", + "issue:43122|issue:44521", + "issue:42757|issue:45278", + "issue:44263|issue:44373", + "issue:44117|issue:44704", + "issue:42886|issue:43066", + "issue:43122|issue:44514", + "issue:30990|issue:33357", + "issue:44704|issue:45290", + "issue:43232|issue:45245", + "issue:42371|issue:44263", + 
"issue:42907|issue:44945", + "issue:43232|issue:44743", + "issue:44295|issue:44297", + "issue:43976|issue:44945", + "issue:39401|issue:44488", + "issue:42907|issue:43937", + "issue:43408|issue:43475" + ], + "prepared_review_unit_hash": "09652ddf11c18f0946e8e7e087db6ed28985595bd08192c20c6f5ab5045f217e", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 30, + "serialized_chars": 35658, + "estimated_input_tokens": 8915, + "estimated_eval_tokens": 18086 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a loose theme of Transformers v5 regressions, but the paired items point to different concrete bugs, models, or API surfaces. I would not merge any of the soft-edge pairs as duplicates.", + "confidence": 0.93, + "canonical_issue_reason": "Issue 43723 is the best representative issue for the cluster because it captures the broad tokenizer-loading regression theme seen in several nearby reports, even though the overall cluster is not a true duplicate set.", + "canonical_pr_reason": null, + "best_issue_reason": "43723 is the most generally representative issue title in this set for v5 loading regressions, especially around tokenizer / from_pretrained behavior.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43479", + "right": "issue:44913", + "accept": false, + "reason": "Different bugs: default sub-config initialization vs rotary_pct not persisting on reload." + }, + { + "left": "issue:43723", + "right": "issue:44871", + "accept": false, + "reason": "Both involve config/loading behavior, but one is a tokenizer-loading regression and the other is an eos_token_id inconsistency in Gemma-3." 
+ }, + { + "left": "issue:44373", + "right": "issue:44485", + "accept": false, + "reason": "Docstring fix for position_ids is unrelated to GLM-5 RoPE implementation behavior." + }, + { + "left": "issue:44261", + "right": "issue:45092", + "accept": false, + "reason": "Different model families and code paths: MLA layernorm epsilon precision issue vs InternVL2 meta-init compatibility." + }, + { + "left": "issue:43723", + "right": "issue:44991", + "accept": false, + "reason": "Both are tokenizer load failures, but the reports describe different models and likely different root causes." + }, + { + "left": "issue:30990", + "right": "issue:38175", + "accept": false, + "reason": "Sentence Transformers loading hang and SIGLIP2 zero probabilities are unrelated." + }, + { + "left": "issue:43723", + "right": "issue:44442", + "accept": false, + "reason": "AutoTokenizer failing for a specific tokenizer is not the same as the broader tokenizer-loading regression in 43723." + }, + { + "left": "issue:45092", + "right": "issue:45357", + "accept": false, + "reason": "Both are Qwen-related, but one is meta initialization incompatibility and the other is save_pretrained writing wrong visual encoder keys." + }, + { + "left": "issue:42886", + "right": "issue:42994", + "accept": false, + "reason": "Offline cache tokenizer loading and quantized model saving are different operations and failure modes." + }, + { + "left": "issue:42831", + "right": "issue:45310", + "accept": false, + "reason": "FineGrainedFP8 accuracy regression is not the same bug as Qwen3.5 from_pretrained loading failure." + }, + { + "left": "issue:41628", + "right": "issue:43824", + "accept": false, + "reason": "Different missing imports for different symbols; same broad API-surface area, but not the same underlying bug." + }, + { + "left": "issue:42831", + "right": "issue:44038", + "accept": false, + "reason": "These are different Qwen/transformers issues with different symptoms and likely different fixes." 
+ }, + { + "left": "issue:43576", + "right": "issue:43784", + "accept": false, + "reason": "transformers env command and sentence-transformers import NameError are unrelated." + }, + { + "left": "issue:43122", + "right": "issue:44521", + "accept": false, + "reason": "Tokenization differences across versions and all-zero assistant masks are distinct behaviors in different code paths." + }, + { + "left": "issue:42757", + "right": "issue:45278", + "accept": false, + "reason": "A missing huggingface_hub symbol and a broad set of import errors after upgrade are not the same concrete issue." + }, + { + "left": "issue:44263", + "right": "issue:44373", + "accept": false, + "reason": "torch.split return handling in GlmMoeDsaIndexer is unrelated to a wrong docstring for position_ids." + }, + { + "left": "issue:44117", + "right": "issue:44704", + "accept": false, + "reason": "TOKENIZER_MAPPING_NAMES returning None and AutoProcessor kwargs forwarding are different loading bugs." + }, + { + "left": "issue:42886", + "right": "issue:43066", + "accept": false, + "reason": "Offline tokenizer cache loading is unrelated to wrong tokenizer decoder type in v5." + }, + { + "left": "issue:43122", + "right": "issue:44514", + "accept": false, + "reason": "General tokenization changes are not the same as batched chat template crash with padding=False." + }, + { + "left": "issue:30990", + "right": "issue:33357", + "accept": false, + "reason": "Sentence Transformers loading hang and MacOS bus error on CLIP model are unrelated failures." + }, + { + "left": "issue:44704", + "right": "issue:45290", + "accept": false, + "reason": "AutoProcessor kwargs forwarding and chat_template crash with tool-call messages affect different code paths." + }, + { + "left": "issue:43232", + "right": "issue:45245", + "accept": false, + "reason": "Generation kwargs update logic and category-cardinality runtime error are unrelated." 
+ }, + { + "left": "issue:42371", + "right": "issue:44263", + "accept": false, + "reason": "TF32 settings guidance and GlmMoeDsaIndexer split behavior are unrelated." + }, + { + "left": "issue:42907", + "right": "issue:44945", + "accept": false, + "reason": "Saving dequantized models and incorrect output under pipeline parallelism are different bugs." + }, + { + "left": "issue:43232", + "right": "issue:44743", + "accept": false, + "reason": "Both touch generation/caching, but one is a sync_gpus kwargs issue and the other is recurrent state reset in modular_qwen3_5.py." + }, + { + "left": "issue:44295", + "right": "issue:44297", + "accept": false, + "reason": "Reading position_ids after buffer registration and tokenizer_class mismatch on save_pretrained are unrelated." + }, + { + "left": "issue:43976", + "right": "issue:44945", + "accept": false, + "reason": "Python version compatibility and pipeline-parallel output correctness are not the same issue." + }, + { + "left": "issue:39401", + "right": "issue:44488", + "accept": false, + "reason": "Offset_mapping bug in Qwen3 tokenizer and failure to load a specific model checkpoint are not the same underlying problem." + }, + { + "left": "issue:42907", + "right": "issue:43937", + "accept": false, + "reason": "Quantized model saving and invalid GenerationConfig for GLM-5 are unrelated." + }, + { + "left": "issue:43408", + "right": "issue:43475", + "accept": false, + "reason": "Model-type warning on load and missing fpn_position_embeddings are different SAM 3 issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 34, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:22355", + "issue:29127", + "issue:30990", + "issue:36296", + "issue:38617", + "issue:39401", + "issue:41628", + "issue:42222", + "issue:42371", + "issue:42673", + "issue:42757", + "issue:42831", + "issue:42907", + "issue:43064", + "issue:43116", + "issue:43122", + "issue:43278", + "issue:43441", + "issue:43452", + "issue:43550", + "issue:43673", + "issue:43784", + "issue:43824", + "issue:43937", + "issue:43992", + "issue:44038", + "issue:44222", + "issue:44261", + "issue:44295", + "issue:44297", + "issue:44351", + "issue:44360", + "issue:44373", + "issue:44485", + "issue:44521", + "issue:44625", + "issue:44704", + "issue:44779", + "issue:44829", + "issue:44912", + "issue:44945", + "issue:44977", + "issue:45072", + "issue:45200", + "issue:45216", + "issue:45278", + "issue:45446", + "issue:45478" + ], + "soft_pairs": [ + "issue:44625|issue:44829", + "issue:29127|issue:30990", + "issue:41628|issue:45278", + "issue:44360|issue:44485", + "issue:43116|issue:45200", + "issue:39401|issue:44521", + "issue:44038|issue:44297", + "issue:43122|issue:44779", + "issue:44261|issue:44373", + "issue:42371|issue:42831", + "issue:44038|issue:44295", + "issue:42907|issue:43452", + "issue:43992|issue:44704", + "issue:36296|issue:43116", + "issue:44038|issue:44222", + "issue:43122|issue:44977", + "issue:43278|issue:44945", + "issue:45216|issue:45478", + "issue:43824|issue:44351", + "issue:42907|issue:44222", + "issue:42757|issue:44351", + "issue:42907|issue:44912", + "issue:42222|issue:43441", + 
"issue:43784|issue:45446", + "issue:42673|issue:45072", + "issue:43064|issue:43673", + "issue:43550|issue:43784", + "issue:38617|issue:45278", + "issue:22355|issue:29127", + "issue:43937|issue:44485" + ], + "prepared_review_unit_hash": "02fef8d29295dea7d9c150fa3e082e69e8aaee1c808fe7307d971b1d6117ea8d", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36239, + "estimated_input_tokens": 9060, + "estimated_eval_tokens": 18376 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No true duplicate pairs stand out here; this is a noisy mix of unrelated Transformers bugs. I selected the broadest umbrella issue as the best representative of the cluster.", + "confidence": 0.62, + "canonical_issue_reason": "Issue 45278 is the broadest umbrella in the set, covering multiple import errors after upgrading to 5.5.0, so it is the closest representative issue for this noisy cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45278 is the most general and cluster-representative report; it can subsume several of the import-error style items better than the more specific bugs.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44625", + "right": "issue:44829", + "accept": false, + "reason": "Different model/task bugs: num_labels propagation vs flash-attention training collapse." + }, + { + "left": "issue:29127", + "right": "issue:30990", + "accept": false, + "reason": "Different subsystems and symptoms: layoutlmv3 error messaging vs sentence-transformers loading hang." 
+ }, + { + "left": "issue:41628", + "right": "issue:45278", + "accept": false, + "reason": "Both are import-related, but one is a specific missing symbol and the other is a broad multi-import regression; not the same concrete bug." + }, + { + "left": "issue:44360", + "right": "issue:44485", + "accept": false, + "reason": "Unrelated topics: DSA indexer/ReLU vs GLM-5 RoPE implementation." + }, + { + "left": "issue:43116", + "right": "issue:45200", + "accept": false, + "reason": "Different functionality: example-script multi-label empty results vs Gemma 4 multimodal token-id defaults." + }, + { + "left": "issue:39401", + "right": "issue:44521", + "accept": false, + "reason": "Different tokenizer bugs: offset_mapping for Qwen3 vs multimodal assistant_masks in apply_chat_template." + }, + { + "left": "issue:44038", + "right": "issue:44297", + "accept": false, + "reason": "One is a Qwen3-VL-Moe model issue; the other is tokenizer save metadata mismatch. Different bugs." + }, + { + "left": "issue:43122", + "right": "issue:44779", + "accept": false, + "reason": "Both mention tokenization regressions, but they affect different models and likely different code paths." + }, + { + "left": "issue:44261", + "right": "issue:44373", + "accept": false, + "reason": "Precision/config bug vs a docstring issue; not the same underlying change." + }, + { + "left": "issue:42371", + "right": "issue:42831", + "accept": false, + "reason": "TF32 API guidance and FineGrainedFP8 accuracy are unrelated." + }, + { + "left": "issue:44038", + "right": "issue:44295", + "accept": false, + "reason": "Different failures: Qwen3-VL-Moe compatibility vs position_ids buffer access." + }, + { + "left": "issue:42907", + "right": "issue:43452", + "accept": false, + "reason": "Both involve save/load workflows, but one is dequantized MoE saving and the other is gguf_file loading with Auto* APIs." 
+ }, + { + "left": "issue:43992", + "right": "issue:44704", + "accept": false, + "reason": "Different call sites and bugs: missing embed_tokens.weight in UMT5 vs kwargs not forwarded in AutoProcessor." + }, + { + "left": "issue:36296", + "right": "issue:43116", + "accept": false, + "reason": "Tensor-parallel training bug is unrelated to the multi-label classification example issue." + }, + { + "left": "issue:44038", + "right": "issue:44222", + "accept": false, + "reason": "Both touch MoE/FP8 areas, but one is a general Qwen3-VL-Moe issue and the other is an FP8 save_pretrained bug; not clearly the same bug." + }, + { + "left": "issue:43122", + "right": "issue:44977", + "accept": false, + "reason": "Both are tokenizer regressions in v5, but they concern different models and do not look like one concrete fix." + }, + { + "left": "issue:43278", + "right": "issue:44945", + "accept": false, + "reason": "Embedding dtype mismatch during evaluate is unrelated to incorrect outputs under pipeline parallelism." + }, + { + "left": "issue:45216", + "right": "issue:45478", + "accept": false, + "reason": "Same model family, but one is a save_pretrained regression and the other is a from_pretrained load error; different code paths." + }, + { + "left": "issue:43824", + "right": "issue:44351", + "accept": false, + "reason": "Two separate import errors for different symbols; broad API breakage, but not the same underlying bug." + }, + { + "left": "issue:42907", + "right": "issue:44222", + "accept": false, + "reason": "Both are save_pretrained-related MoE/quantization reports, but the model and failure modes differ too much to treat as duplicates." + }, + { + "left": "issue:42757", + "right": "issue:44351", + "accept": false, + "reason": "Different missing imports from different packages/symbols." + }, + { + "left": "issue:42907", + "right": "issue:44912", + "accept": false, + "reason": "Save/dequantize issues versus MXFP4 load fallback are different problems." 
+ }, + { + "left": "issue:42222", + "right": "issue:43441", + "accept": false, + "reason": "Vitpose model breakage is unrelated to Ministral-3 FlashAttention failures." + }, + { + "left": "issue:43784", + "right": "issue:45446", + "accept": false, + "reason": "Importing sentence-transformers NameError is unrelated to the flex_attention version-check bug." + }, + { + "left": "issue:42673", + "right": "issue:45072", + "accept": false, + "reason": "VRAM leak in Qwen3ForCausalLM threads is unrelated to dtype mismatches in inference." + }, + { + "left": "issue:43064", + "right": "issue:43673", + "accept": false, + "reason": "Different training/runtime areas: FSDP2 optimizer state corruption vs generation cache missing in chunked prefill." + }, + { + "left": "issue:43550", + "right": "issue:43784", + "accept": false, + "reason": "torch.compile/SDPA model failure is unrelated to the sentence-transformers import NameError." + }, + { + "left": "issue:38617", + "right": "issue:45278", + "accept": false, + "reason": "Both are import errors, but one is a specific missing internal symbol and the other is a broad upgrade regression; not a duplicate pair." + }, + { + "left": "issue:22355", + "right": "issue:29127", + "accept": false, + "reason": "No overlap in bug area: ONNX import failure vs LayoutLMv3 error-message clarity." + }, + { + "left": "issue:43937", + "right": "issue:44485", + "accept": false, + "reason": "GLM-5 generation config validation and GLM-5 RoPE implementation are related only by model name, not the same bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 35, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:33453", + "issue:38617", + "issue:39401", + "issue:41628", + "issue:42222", + "issue:42371", + "issue:42673", + "issue:43116", + "issue:43408", + "issue:43421", + "issue:43450", + "issue:43493", + "issue:43577", + "issue:43582", + "issue:43742", + "issue:43825", + "issue:43827", + "issue:43874", + "issue:43906", + "issue:43931", + "issue:43937", + "issue:43976", + "issue:43986", + "issue:44038", + "issue:44075", + "issue:44117", + "issue:44220", + "issue:44222", + "issue:44261", + "issue:44291", + "issue:44410", + "issue:44484", + "issue:44496", + "issue:44779", + "issue:44829", + "issue:44936", + "issue:44945", + "issue:44977", + "issue:45071", + "issue:45081", + "issue:45083", + "issue:45216", + "issue:45310", + "issue:45405", + "issue:45446", + "issue:45478", + "issue:45542" + ], + "soft_pairs": [ + "issue:43116|issue:43493", + "issue:42371|issue:43874", + "issue:44075|issue:44936", + "issue:44222|issue:45478", + "issue:43116|issue:43825", + "issue:44038|issue:45478", + "issue:43976|issue:45446", + "issue:39401|issue:44779", + "issue:43931|issue:44945", + "issue:33453|issue:39401", + "issue:45083|issue:45446", + "issue:43937|issue:44977", + "issue:43827|issue:45446", + "issue:42673|issue:43906", + "issue:44291|issue:45071", + "issue:43450|issue:43986", + "issue:45405|issue:45446", + "issue:44829|issue:45216", + "issue:44484|issue:45310", + "issue:39401|issue:43421", + "issue:38617|issue:41628", + "issue:44117|issue:45081", + "issue:42673|issue:43582", + 
"issue:44410|issue:45310", + "issue:43408|issue:43742", + "issue:42673|issue:43577", + "issue:42222|issue:45542", + "issue:38617|issue:44220", + "issue:42371|issue:44261", + "issue:43937|issue:44496" + ], + "prepared_review_unit_hash": "c4d9d1f92af415baa20bd048b2ecb62fce42c9b599670e7a70da04b83a226089", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 30, + "serialized_chars": 36030, + "estimated_input_tokens": 9008, + "estimated_eval_tokens": 18272 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a false-positive grouping: the items share broad themes like tokenizers, Qwen, loading, or v5 regressions, but the concrete bugs and code paths are different. No soft edge looks safe to merge, and there are no PRs in the set.", + "confidence": 0.83, + "canonical_issue_reason": "No single canonical issue emerges; the set is a loose mix of unrelated bugs, so any canonical choice would be arbitrary.", + "canonical_pr_reason": null, + "best_issue_reason": "If forced, issue:45216 is the closest thing to a representative item because it is a concrete version-bound regression report, but it does not unify the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43116", + "right": "issue:43493", + "accept": false, + "reason": "Different bugs and models: example script multi-label output vs SigLIP2 implementation discrepancy." + }, + { + "left": "issue:42371", + "right": "issue:43874", + "accept": false, + "reason": "Unrelated topics: TF32 settings vs a missing GLM46V image-processor method." 
+ }, + { + "left": "issue:44075", + "right": "issue:44936", + "accept": false, + "reason": "Different failure modes: optimizer args ignored vs trainer.evaluate() failing after train()." + }, + { + "left": "issue:44222", + "right": "issue:45478", + "accept": false, + "reason": "Both mention MoE, but one is FP8 save_pretrained and the other is a from_pretrained loading error; not the same concrete bug." + }, + { + "left": "issue:43116", + "right": "issue:43825", + "accept": false, + "reason": "Example-script classification bug vs pipeline deprecation/error-message issue; unrelated paths." + }, + { + "left": "issue:44038", + "right": "issue:45478", + "accept": false, + "reason": "Same family name only; the reported symptoms and likely code paths are different." + }, + { + "left": "issue:43976", + "right": "issue:45446", + "accept": false, + "reason": "Python version compatibility issue vs a PyTorch version check bug; unrelated." + }, + { + "left": "issue:39401", + "right": "issue:44779", + "accept": false, + "reason": "Both tokenizer issues, but different models and distinct symptoms (offset mapping vs incorrect results)." + }, + { + "left": "issue:43931", + "right": "issue:44945", + "accept": false, + "reason": "Model weight-shape mismatch vs pipeline parallelism producing wrong outputs; different problems." + }, + { + "left": "issue:33453", + "right": "issue:39401", + "accept": false, + "reason": "Generic tokenizer loading regression vs Qwen3 offset_mapping bug; not the same underlying defect." + }, + { + "left": "issue:45083", + "right": "issue:45446", + "accept": false, + "reason": "Feature-extraction helper behavior vs a flex_attention import/version check; unrelated." + }, + { + "left": "issue:43937", + "right": "issue:44977", + "accept": false, + "reason": "GenerationConfig validation error vs flash-attention generation malfunction; different code paths." 
+ }, + { + "left": "issue:43827", + "right": "issue:45446", + "accept": false, + "reason": "Docs still mentioning pipeline() vs a runtime import/version check bug; unrelated." + }, + { + "left": "issue:42673", + "right": "issue:43906", + "accept": false, + "reason": "VRAM leak in multithreaded inference vs an isolated reproduction of a different issue; no evidence they are the same bug." + }, + { + "left": "issue:44291", + "right": "issue:45071", + "accept": false, + "reason": "init_empty_weights/_is_hf_initialized TypeError vs PretrainedConfig type-checking breakage; different failures." + }, + { + "left": "issue:43450", + "right": "issue:43986", + "accept": false, + "reason": "Batched video processor shape bug vs crash from missing torchvision during AutoProcessor loading; unrelated." + }, + { + "left": "issue:45405", + "right": "issue:45446", + "accept": false, + "reason": "Released-version bump concern vs PyTorch import/version check bug; unrelated." + }, + { + "left": "issue:44829", + "right": "issue:45216", + "accept": false, + "reason": "Training degeneracy under flash_attention_3 vs a save_pretrained checkpoint regression; different behaviors." + }, + { + "left": "issue:44484", + "right": "issue:45310", + "accept": false, + "reason": "save_pretrained shard-size question vs Qwen3.5 MoE from_pretrained error; unrelated." + }, + { + "left": "issue:39401", + "right": "issue:43421", + "accept": false, + "reason": "Qwen3 offset mapping bug vs runtime post-processor update behavior; related area only, not same bug." + }, + { + "left": "issue:38617", + "right": "issue:41628", + "accept": false, + "reason": "Different import errors with different missing symbols and likely different fixes." + }, + { + "left": "issue:44117", + "right": "issue:45081", + "accept": false, + "reason": "Tokenizer registry None handling vs Mistral regex patch crash; different root causes." 
+ }, + { + "left": "issue:42673", + "right": "issue:43582", + "accept": false, + "reason": "VRAM leak in model use vs Apple Silicon TypeError in warmup helper; unrelated." + }, + { + "left": "issue:44410", + "right": "issue:45310", + "accept": false, + "reason": "Missing projections in qwen3next vs Qwen3.5 MoE loading error; not the same defect." + }, + { + "left": "issue:43408", + "right": "issue:43742", + "accept": false, + "reason": "SAM3 tracker/video model warning vs MobileLLM loading key error; unrelated." + }, + { + "left": "issue:42673", + "right": "issue:43577", + "accept": false, + "reason": "VRAM leak vs incorrect dtype propagation in Blip2 loading; different bugs." + }, + { + "left": "issue:42222", + "right": "issue:45542", + "accept": false, + "reason": "VitPose model breakage vs undefined tf backend error; no connection." + }, + { + "left": "issue:38617", + "right": "issue:44220", + "accept": false, + "reason": "ImportError for a config symbol vs an audio feature-extraction helper issue; unrelated." + }, + { + "left": "issue:42371", + "right": "issue:44261", + "accept": false, + "reason": "TF32 behavior settings vs missing rms_norm_eps in MLA layernorm; different subsystems." + }, + { + "left": "issue:43937", + "right": "issue:44496", + "accept": false, + "reason": "Invalid generation config vs unrecognized model/config loading failure; not the same bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 36, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:38617", + "issue:41628", + "issue:42175", + "issue:42222", + "issue:42617", + "issue:42757", + "issue:43012", + "issue:43064", + "issue:43066", + "issue:43097", + "issue:43122", + "issue:43335", + "issue:43408", + "issue:43441", + "issue:43452", + "issue:43531", + "issue:43673", + "issue:43874", + "issue:43931", + "issue:43976", + "issue:43986", + "issue:44038", + "issue:44117", + "issue:44261", + "issue:44263", + "issue:44315", + "issue:44368", + "issue:44410", + "issue:44483", + "issue:44484", + "issue:44485", + "issue:44493", + "issue:44704", + "issue:44829", + "issue:44933", + "issue:44987", + "issue:45003", + "issue:45071", + "issue:45103", + "issue:45245", + "issue:45278", + "issue:45341", + "issue:45372", + "issue:45375", + "issue:45459", + "issue:45464", + "issue:45478", + "issue:45542" + ], + "soft_pairs": [ + "issue:44483|issue:45464", + "issue:42175|issue:42222", + "issue:42617|issue:45341", + "issue:44933|issue:45278", + "issue:44484|issue:45478", + "issue:43066|issue:43122", + "issue:43673|issue:45341", + "issue:43097|issue:44493", + "issue:42222|issue:45459", + "issue:44410|issue:45478", + "issue:43874|issue:44261", + "issue:43441|issue:44829", + "issue:43986|issue:44484", + "issue:43012|issue:43408", + "issue:44263|issue:44485", + "issue:41628|issue:45071", + "issue:45071|issue:45542", + "issue:45003|issue:45341", + "issue:43097|issue:43452", + "issue:43986|issue:45341", + "issue:44117|issue:44987", + "issue:43931|issue:44410", + "issue:43335|issue:45071", + 
"issue:43408|issue:44368", + "issue:43931|issue:44038", + "issue:43874|issue:44263", + "issue:44829|issue:45245", + "issue:43986|issue:45372", + "issue:43976|issue:45071", + "issue:43531|issue:45341", + "issue:43064|issue:44315", + "issue:38617|issue:42757", + "issue:44261|issue:44485", + "issue:44410|issue:45375", + "issue:44704|issue:45103" + ], + "prepared_review_unit_hash": "7407466d1c0568032398e29de58392d79961549310deaeff42c48d9fb4813de7", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 35, + "serialized_chars": 38050, + "estimated_input_tokens": 9513, + "estimated_eval_tokens": 19282 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is mostly a set of similarly worded but distinct regressions/import failures. I would not merge any of the soft-edge pairs; the best triage anchor is the broad import-regression report, not a true duplicate set.", + "confidence": 0.89, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:45278 is the broadest, most umbrella-style report of upgrade-related import failures, so it is the best triage anchor, but it does not duplicate the other issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44483", + "right": "issue:45464", + "accept": false, + "reason": "Both mention chat/completions failures, but one is a generic v5.3 API break and the other is Qwen3.5-0.8B streaming behavior; not the same bug." + }, + { + "left": "issue:42175", + "right": "issue:42222", + "accept": false, + "reason": "TensorFlow backend packaging vs broken VitPose model files; unrelated problems." 
+ }, + { + "left": "issue:42617", + "right": "issue:45341", + "accept": false, + "reason": "A script execution failure and a testing utility bug are unrelated." + }, + { + "left": "issue:44933", + "right": "issue:45278", + "accept": false, + "reason": "A specific missing import from image_utils is not the same as a broad post-upgrade import-error report." + }, + { + "left": "issue:44484", + "right": "issue:45478", + "accept": false, + "reason": "Save/sharding behavior and Qwen3.5-Moe loading errors are different code paths." + }, + { + "left": "issue:43066", + "right": "issue:43122", + "accept": false, + "reason": "Both are tokenizer regressions in v5, but one is decoder-type metadata and the other is changed tokenization output; too different to merge." + }, + { + "left": "issue:43673", + "right": "issue:45341", + "accept": false, + "reason": "Generation cache behavior and a testing utility bug are unrelated." + }, + { + "left": "issue:43097", + "right": "issue:44493", + "accept": false, + "reason": "Removed config field vs unexpected position-id keys are different symptoms and code paths." + }, + { + "left": "issue:42222", + "right": "issue:45459", + "accept": false, + "reason": "VitPose model breakage and protobuf-related tokenizer error masking are unrelated." + }, + { + "left": "issue:44410", + "right": "issue:45478", + "accept": false, + "reason": "Different Qwen model families and different failure modes." + }, + { + "left": "issue:43874", + "right": "issue:44261", + "accept": false, + "reason": "Image-token counting bug in GLM46V vs RMS-epsilon precision issue in MLA are distinct." + }, + { + "left": "issue:43441", + "right": "issue:44829", + "accept": false, + "reason": "Both involve flash attention, but one is a model-loading failure and the other is degenerate training; not the same concrete bug." 
+ }, + { + "left": "issue:43986", + "right": "issue:44484", + "accept": false, + "reason": "AutoProcessor/torchvision crash is unrelated to max_shard_size behavior." + }, + { + "left": "issue:43012", + "right": "issue:43408", + "accept": false, + "reason": "A numerical warning during bfloat16 compilation is unrelated to a model-type mismatch warning." + }, + { + "left": "issue:44263", + "right": "issue:44485", + "accept": false, + "reason": "Torch.split indexing bug and RoPE implementation concern are separate GLM issues." + }, + { + "left": "issue:41628", + "right": "issue:45071", + "accept": false, + "reason": "Missing AutoImageProcessor import and PretrainedConfig type-checking regression are unrelated." + }, + { + "left": "issue:45071", + "right": "issue:45542", + "accept": false, + "reason": "Type-checking regression and missing TensorFlow backend wiring are unrelated." + }, + { + "left": "issue:45003", + "right": "issue:45341", + "accept": false, + "reason": "Unsafe sys.modules access and a testing_utils bug do not describe the same fault." + }, + { + "left": "issue:43097", + "right": "issue:43452", + "accept": false, + "reason": "Removed tie_embeddings behavior and gguf_file loading failures are different issues." + }, + { + "left": "issue:43986", + "right": "issue:45341", + "accept": false, + "reason": "Video-model processor crash and testing_utils bug are unrelated." + }, + { + "left": "issue:44117", + "right": "issue:44987", + "accept": false, + "reason": "Tokenizer mapping returning None may contribute to loading failures, but the model-specific load error is not clearly the same bug." + }, + { + "left": "issue:43931", + "right": "issue:44410", + "accept": false, + "reason": "Weight-shape mismatch for Qwen3-VL vs missing projections in Qwen3Next are different model/code-path problems." 
+ }, + { + "left": "issue:43335", + "right": "issue:45071", + "accept": false, + "reason": "SwitchTransformers sparse-layer construction and PretrainedConfig type checking are unrelated." + }, + { + "left": "issue:43408", + "right": "issue:44368", + "accept": false, + "reason": "Model-type warning and tie_word_embeddings warning are different warnings with different causes." + }, + { + "left": "issue:43931", + "right": "issue:44038", + "accept": false, + "reason": "Both are Qwen3-VL related, but one is a shape mismatch and the other is a transformers-version/MoE compatibility issue." + }, + { + "left": "issue:43874", + "right": "issue:44263", + "accept": false, + "reason": "Different GLM bugs: missing image-patch helper vs torch.split return handling." + }, + { + "left": "issue:44829", + "right": "issue:45245", + "accept": false, + "reason": "FlashAttention training degradation and category-cardinality limit are unrelated." + }, + { + "left": "issue:43986", + "right": "issue:45372", + "accept": false, + "reason": "Processor loading without torchvision and mistral_common import failure are unrelated dependency issues." + }, + { + "left": "issue:43976", + "right": "issue:45071", + "accept": false, + "reason": "Python-version incompatibility and PretrainedConfig type checking are different regressions." + }, + { + "left": "issue:43531", + "right": "issue:45341", + "accept": false, + "reason": "Qwen3-MoE sliding-window behavior and a testing utility bug are unrelated." + }, + { + "left": "issue:43064", + "right": "issue:44315", + "accept": false, + "reason": "Distributed optimizer-state corruption and Liger Kernel application timing are different training issues." + }, + { + "left": "issue:38617", + "right": "issue:42757", + "accept": false, + "reason": "Both are import errors, but they are missing different symbols from different packages." 
+ }, + { + "left": "issue:44261", + "right": "issue:44485", + "accept": false, + "reason": "RMS-epsilon precision bug and GLM-5 RoPE implementation concern are separate issues." + }, + { + "left": "issue:44410", + "right": "issue:45375", + "accept": false, + "reason": "Missing projections in Qwen3Next and a missing vision-config field are unrelated." + }, + { + "left": "issue:44704", + "right": "issue:45103", + "accept": false, + "reason": "Passing kwargs to cached_file and auto_docstring annotation handling are different helper bugs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 37, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:36683", + "issue:38617", + "issue:42757", + "issue:42886", + "issue:42907", + "issue:43012", + "issue:43064", + "issue:43066", + "issue:43097", + "issue:43208", + "issue:43299", + "issue:43352", + "issue:43381", + "issue:43502", + "issue:43525", + "issue:43526", + "issue:43550", + "issue:43582", + "issue:43618", + "issue:43761", + "issue:43824", + "issue:43825", + "issue:43867", + "issue:43873", + "issue:43901", + "issue:43906", + "issue:43937", + "issue:43976", + "issue:43986", + "issue:43992", + "issue:44038", + "issue:44062", + "issue:44117", + "issue:44230", + "issue:44351", + "issue:44368", + "issue:44380", + "issue:44410", + "issue:44483", + "issue:44484", + "issue:44514", + "issue:44655", + "issue:44683", + "issue:45290", + "issue:45341", + "issue:45381", + "issue:45479", + "issue:45538" + ], + "soft_pairs": [ + "issue:44062|issue:44117", + "issue:43582|issue:45341", + "issue:38617|issue:44351", + "issue:43986|issue:44117", + 
"issue:44038|issue:44410", + "issue:43502|issue:43992", + "issue:43097|issue:43873", + "issue:43992|issue:44484", + "issue:43986|issue:45290", + "issue:43937|issue:45538", + "issue:43066|issue:43906", + "issue:38617|issue:43097", + "issue:43825|issue:44655", + "issue:43352|issue:44380", + "issue:43097|issue:43901", + "issue:43012|issue:43550", + "issue:43012|issue:44368", + "issue:42886|issue:43381", + "issue:43208|issue:43550", + "issue:42907|issue:44655", + "issue:44380|issue:45479", + "issue:43525|issue:44117", + "issue:43352|issue:43976", + "issue:43064|issue:44380", + "issue:43208|issue:43526", + "issue:44483|issue:44514", + "issue:43618|issue:44380", + "issue:43618|issue:43761", + "issue:36683|issue:43582", + "issue:43012|issue:44683", + "issue:44483|issue:45290", + "issue:42757|issue:43824", + "issue:44038|issue:44230", + "issue:44483|issue:45381", + "issue:38617|issue:43502", + "issue:43299|issue:43867", + "issue:43901|issue:44655" + ], + "prepared_review_unit_hash": "f7a729ad872cee7bf1c3403eab50404b1bf78cd5b8254cc2770efcabd9faa567", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 37, + "serialized_chars": 38528, + "estimated_input_tokens": 9632, + "estimated_eval_tokens": 19520 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set does not form a single duplicate cluster; the items span unrelated import errors, model-specific regressions, tokenizer/pipeline issues, and training/runtime bugs. 
The soft-similarity links are mostly superficial topic overlaps, not the same underlying defect.", + "confidence": 0.97, + "canonical_issue_reason": "No clear canonical issue: the issues are heterogeneous and do not share one concrete bug or fix path.", + "canonical_pr_reason": null, + "best_issue_reason": "No single issue is a good global representative because the cluster is not cohesive enough to anchor on one bug.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44062", + "right": "issue:44117", + "accept": false, + "reason": "Different failures: AddedToken keyword collision vs tokenizer mapping returning None during from_pretrained." + }, + { + "left": "issue:43582", + "right": "issue:45341", + "accept": false, + "reason": "Unrelated: Apple Silicon caching allocator TypeError vs a testing_utils bug." + }, + { + "left": "issue:38617", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but for different missing symbols and different call paths." + }, + { + "left": "issue:43986", + "right": "issue:44117", + "accept": false, + "reason": "Different areas: AutoProcessor/video-model crash without torchvision vs tokenizer mapping assumptions." + }, + { + "left": "issue:44038", + "right": "issue:44410", + "accept": false, + "reason": "Both involve Qwen-family models, but they describe different model loading/projection issues." + }, + { + "left": "issue:43502", + "right": "issue:43992", + "accept": false, + "reason": "Different symptoms: unintended API calls with local_files_only vs missing embed_tokens.weight in UMT5Encoder loading." + }, + { + "left": "issue:43097", + "right": "issue:43873", + "accept": false, + "reason": "Tie-embeddings API removal and quantized offloading behavior are unrelated." + }, + { + "left": "issue:43992", + "right": "issue:44484", + "accept": false, + "reason": "Model weight loading bug vs shard-size question for save_pretrained; no shared code-path defect." 
+ }, + { + "left": "issue:43986", + "right": "issue:45290", + "accept": false, + "reason": "Both touch chat/processor flows, but one is missing torchvision and the other is a tool-call templating crash." + }, + { + "left": "issue:43937", + "right": "issue:45538", + "accept": false, + "reason": "GenerationConfig validation for GLM-5 is unrelated to CLIPTokenizer max length." + }, + { + "left": "issue:43066", + "right": "issue:43906", + "accept": false, + "reason": "Wrong tokenizer decoder type is not the same as an isolated reproduction of a different issue." + }, + { + "left": "issue:38617", + "right": "issue:43097", + "accept": false, + "reason": "Different deprecations/import surface: missing configuration_utils symbol vs removed tie_embeddings_and_encoder_decoder." + }, + { + "left": "issue:43825", + "right": "issue:44655", + "accept": false, + "reason": "Pipeline docs/message issue vs inability to save Pipeline objects; different behavior and fix." + }, + { + "left": "issue:43352", + "right": "issue:44380", + "accept": false, + "reason": "FlashAttention support error for a Nemotron model is unrelated to GPT2 attention scaling under SDPA/FlashAttention." + }, + { + "left": "issue:43097", + "right": "issue:43901", + "accept": false, + "reason": "Removed config API vs stale TextClassificationPipeline docs; not the same bug." + }, + { + "left": "issue:43012", + "right": "issue:43550", + "accept": false, + "reason": "Different warnings/errors: bf16 compile warning vs Bamba torch.compile SDPA failure." + }, + { + "left": "issue:43012", + "right": "issue:44368", + "accept": false, + "reason": "Both mention warnings, but one is a PyTorch precision warning and the other is a tie_word_embeddings config warning." + }, + { + "left": "issue:42886", + "right": "issue:43381", + "accept": false, + "reason": "Offline cache loading and gradient checkpointing in eval mode are unrelated." 
+ }, + { + "left": "issue:43208", + "right": "issue:43550", + "accept": false, + "reason": "xLSTM training bugs and Bamba SDPA compile failure affect different models and code paths." + }, + { + "left": "issue:42907", + "right": "issue:44655", + "accept": false, + "reason": "Saving dequantized models and saving Pipeline objects are different persistence problems." + }, + { + "left": "issue:44380", + "right": "issue:45479", + "accept": false, + "reason": "Attention scaling backend bug is unrelated to sequence-classification zero-loss behavior." + }, + { + "left": "issue:43525", + "right": "issue:44117", + "accept": false, + "reason": "Missing Llama4Config attribute and tokenizer mapping None are different configuration/loading defects." + }, + { + "left": "issue:43352", + "right": "issue:43976", + "accept": false, + "reason": "FlashAttention support for one model is unrelated to Python version compatibility." + }, + { + "left": "issue:43064", + "right": "issue:44380", + "accept": false, + "reason": "Distributed optimizer-state corruption and GPT2 attention scaling are unrelated." + }, + { + "left": "issue:43208", + "right": "issue:43526", + "accept": false, + "reason": "xLSTM training bugs and BeitImageProcessorFast label reduction are unrelated." + }, + { + "left": "issue:44483", + "right": "issue:44514", + "accept": false, + "reason": "OpenAI-style chat/completions endpoint rejection is not the same as apply_chat_template crashing on batched tool-call input." + }, + { + "left": "issue:43618", + "right": "issue:44380", + "accept": false, + "reason": "CLIP attentions regression and GPT2 attention scaling are different model-specific issues." + }, + { + "left": "issue:43618", + "right": "issue:43761", + "accept": false, + "reason": "Both are CLIP regressions, but one drops attentions and the other drops hidden_states; not enough evidence of one shared bug." 
+ }, + { + "left": "issue:36683", + "right": "issue:43582", + "accept": false, + "reason": "Gemma3Config missing vocab_size and Apple Silicon allocator TypeError are unrelated." + }, + { + "left": "issue:43012", + "right": "issue:44683", + "accept": false, + "reason": "bf16 warning vs compiled flex_attention failure are separate backend/precision issues." + }, + { + "left": "issue:44483", + "right": "issue:45290", + "accept": false, + "reason": "Different request paths: /v1/chat/completions API handling vs tokenizer/template crash on tool-call messages." + }, + { + "left": "issue:42757", + "right": "issue:43824", + "accept": false, + "reason": "Import errors for different missing names from different modules and packages." + }, + { + "left": "issue:44038", + "right": "issue:44230", + "accept": false, + "reason": "Qwen3-VL-Moe loading issue and fp8 support request are not the same defect." + }, + { + "left": "issue:44483", + "right": "issue:45381", + "accept": false, + "reason": "Chat/completions request handling is unrelated to Qwen2.5-VL video vision_position_ids." + }, + { + "left": "issue:38617", + "right": "issue:43502", + "accept": false, + "reason": "Missing configuration_utils symbol and unwanted API calls despite local_files_only concern different mechanisms." + }, + { + "left": "issue:43299", + "right": "issue:43867", + "accept": false, + "reason": "Qwen3VL-Moe loading regression and sorted-state_dict load error are different model-loading problems." + }, + { + "left": "issue:43901", + "right": "issue:44655", + "accept": false, + "reason": "Docs mismatch for return_all_scores is unrelated to Pipeline save_pretrained behavior." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 38, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:28282", + "issue:33453", + "issue:36010", + "issue:36296", + "issue:36683", + "issue:41669", + "issue:42757", + "issue:42890", + "issue:42994", + "issue:43010", + "issue:43064", + "issue:43066", + "issue:43352", + "issue:43475", + "issue:43525", + "issue:43575", + "issue:43618", + "issue:43756", + "issue:43867", + "issue:43931", + "issue:43937", + "issue:43976", + "issue:43992", + "issue:44077", + "issue:44112", + "issue:44188", + "issue:44220", + "issue:44242", + "issue:44246", + "issue:44263", + "issue:44360", + "issue:44380", + "issue:44484", + "issue:44485", + "issue:44492", + "issue:44655", + "issue:44857", + "issue:44938", + "issue:44964", + "issue:45071", + "issue:45216", + "issue:45278", + "issue:45310", + "issue:45357", + "issue:45375", + "issue:45381", + "issue:45440", + "issue:45478" + ], + "soft_pairs": [ + "issue:43756|issue:44485", + "issue:42994|issue:44655", + "issue:36683|issue:43475", + "issue:43867|issue:45216", + "issue:44220|issue:44655", + "issue:43066|issue:45381", + "issue:43352|issue:43937", + "issue:44077|issue:44380", + "issue:36683|issue:44077", + "issue:44263|issue:45440", + "issue:43064|issue:44492", + "issue:43976|issue:44938", + "issue:36683|issue:43525", + "issue:36010|issue:42757", + "issue:44188|issue:44380", + "issue:43618|issue:45216", + "issue:36683|issue:44242", + "issue:43992|issue:45310", + "issue:43010|issue:45278", + "issue:36683|issue:43352", + "issue:43352|issue:43931", + "issue:41669|issue:44246", + "issue:43867|issue:45375", + 
"issue:36683|issue:43064", + "issue:43992|issue:45478", + "issue:44938|issue:45071", + "issue:43867|issue:45357", + "issue:43010|issue:43976", + "issue:44938|issue:45381", + "issue:44360|issue:45440", + "issue:43618|issue:44077", + "issue:36296|issue:43575", + "issue:43867|issue:44484", + "issue:43618|issue:44857", + "issue:42890|issue:44964", + "issue:28282|issue:33453", + "issue:42890|issue:44112" + ], + "prepared_review_unit_hash": "aa39ae4710c11f5f1aafc12c005651cef821deefac86a2c357340618d99c93ca", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 37, + "serialized_chars": 38190, + "estimated_input_tokens": 9548, + "estimated_eval_tokens": 19352 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly unrelated bug reports across model loading, saving, attention, config, and CI. The soft pairs share surface keywords but not the same concrete defect, so none should be merged as duplicates. No PRs were provided.", + "confidence": 0.79, + "canonical_issue_reason": "No single true duplicate target stands out; if one issue must serve as an umbrella, issue:45278 is the broadest open report about widespread import regressions after an upgrade.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45278 is the best global issue candidate only as a broad tracker for import failures, but it is not a real canonical duplicate for this set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43756", + "right": "issue:44485", + "accept": false, + "reason": "Different bugs: Smollm3 RoPE-layer mismatch vs a save_pretrained shard-size question." 
+ }, + { + "left": "issue:42994", + "right": "issue:44655", + "accept": false, + "reason": "Both involve saving, but one is quantized model saving and the other is Pipeline.save_pretrained; different code paths." + }, + { + "left": "issue:36683", + "right": "issue:43475", + "accept": false, + "reason": "Missing config attribute in Gemma3 vs missing output field in SAM 3 Video; different models and failures." + }, + { + "left": "issue:43867", + "right": "issue:45216", + "accept": false, + "reason": "Load-time state_dict ordering error vs a save_pretrained regression; opposite operations and different root causes." + }, + { + "left": "issue:44220", + "right": "issue:44655", + "accept": false, + "reason": "Feature extraction bug vs pipeline saving bug; no shared concrete defect." + }, + { + "left": "issue:43066", + "right": "issue:45381", + "accept": false, + "reason": "Tokenizer decoder type regression vs Qwen2.5-VL video position-id issue; unrelated." + }, + { + "left": "issue:43352", + "right": "issue:43937", + "accept": false, + "reason": "FlashAttention support check in Nemotron vs invalid GenerationConfig in GLM-5; different validation failures." + }, + { + "left": "issue:44077", + "right": "issue:44380", + "accept": false, + "reason": "patchtsmixer post_init policy vs GPT2 attention scaling under SDPA/FlashAttention; different subsystems and bugs." + }, + { + "left": "issue:36683", + "right": "issue:44077", + "accept": false, + "reason": "Different model/config issues: Gemma3 missing vocab_size vs patchtsmixer post_init handling." + }, + { + "left": "issue:44263", + "right": "issue:45440", + "accept": false, + "reason": "GlmMoeDsaIndexer torch.split issue vs DeepseekV3MoE implementation divergence; not the same defect." + }, + { + "left": "issue:43064", + "right": "issue:44492", + "accept": false, + "reason": "FSDP2/PEFT optimizer-state bug vs a typo in cache strategies; no overlap in underlying change." 
+ }, + { + "left": "issue:43976", + "right": "issue:44938", + "accept": false, + "reason": "Both are version-compatibility reports, but one is Python 3.9+ and the other Python 3.14; different compatibility breaks." + }, + { + "left": "issue:36683", + "right": "issue:43525", + "accept": false, + "reason": "Both mention missing config fields, but Gemma3 vocab_size and Llama4 pad_token_id are different models and different attributes." + }, + { + "left": "issue:36010", + "right": "issue:42757", + "accept": false, + "reason": "Both are import errors, but for different symbols from different modules; not the same underlying regression." + }, + { + "left": "issue:44188", + "right": "issue:44380", + "accept": false, + "reason": "Attention-related, but one is torch.compile branching divergence and the other is GPT2 scaling ignored under backend dispatch." + }, + { + "left": "issue:43618", + "right": "issue:45216", + "accept": false, + "reason": "CLIPOutput attentions missing and Qwen3.5 save_pretrained regression are unrelated bugs." + }, + { + "left": "issue:36683", + "right": "issue:44242", + "accept": false, + "reason": "Gemma3 config attribute error vs load-balancing loss omission; different features and code paths." + }, + { + "left": "issue:43992", + "right": "issue:45310", + "accept": false, + "reason": "UMT5Encoder missing embed_tokens weight vs Qwen3.5 MoE from_pretrained error; different loading failures." + }, + { + "left": "issue:43010", + "right": "issue:45278", + "accept": false, + "reason": "Cache update no_grad decoration request vs broad import errors after upgrade; unrelated." + }, + { + "left": "issue:36683", + "right": "issue:43352", + "accept": false, + "reason": "Different model-specific issues: Gemma3 config field missing vs Nemotron FlashAttention support error." + }, + { + "left": "issue:43352", + "right": "issue:43931", + "accept": false, + "reason": "Unsupported FlashAttention vs weight-shape mismatch during load; distinct bugs." 
+ }, + { + "left": "issue:41669", + "right": "issue:44246", + "accept": false, + "reason": "One is about removing import * for startup cost, the other about intermittent import slowness; related topic but not the same defect." + }, + { + "left": "issue:43867", + "right": "issue:45375", + "accept": false, + "reason": "Sorted state_dict loading error vs missing deepstack_visual_indexes being dropped by strict config handling; different root causes." + }, + { + "left": "issue:36683", + "right": "issue:43064", + "accept": false, + "reason": "Gemma3 config attribute issue vs FSDP2 optimizer-state corruption on nonzero ranks; unrelated." + }, + { + "left": "issue:43992", + "right": "issue:45478", + "accept": false, + "reason": "Both are Qwen3.5 loading problems, but they concern different model variants and different failure modes." + }, + { + "left": "issue:44938", + "right": "issue:45071", + "accept": false, + "reason": "Python 3.14 import failure vs PretrainedConfig type-checking regression; not the same bug." + }, + { + "left": "issue:43867", + "right": "issue:45357", + "accept": false, + "reason": "One is load failure from sorted state_dict, the other is save_pretrained writing wrong visual encoder keys." + }, + { + "left": "issue:43010", + "right": "issue:43976", + "accept": false, + "reason": "Cache no_grad API change vs Python version incompatibility; unrelated." + }, + { + "left": "issue:44938", + "right": "issue:45381", + "accept": false, + "reason": "Python 3.14 load failure and Qwen2.5-VL vision_position_ids bug are separate regressions." + }, + { + "left": "issue:44360", + "right": "issue:45440", + "accept": false, + "reason": "DSA indexer missing ReLU vs DeepseekV3MoE divergence from remote implementation; different functionality." + }, + { + "left": "issue:43618", + "right": "issue:44077", + "accept": false, + "reason": "CLIPOutput attentions regression vs patchtsmixer post_init restriction; unrelated." 
+ }, + { + "left": "issue:36296", + "right": "issue:43575", + "accept": false, + "reason": "Both mention tensor parallelism, but one is a training correctness bug and the other is expected OOM for a very large model." + }, + { + "left": "issue:43867", + "right": "issue:44484", + "accept": false, + "reason": "Load error when state_dict is sorted vs a question about max_shard_size default; no shared defect." + }, + { + "left": "issue:43618", + "right": "issue:44857", + "accept": false, + "reason": "Missing attentions in CLIPOutput vs float16 AMP crash in LwDetrImageLoss; different areas entirely." + }, + { + "left": "issue:42890", + "right": "issue:44964", + "accept": false, + "reason": "Stale device override CI test vs Phi-4 multimodal load failure; unrelated." + }, + { + "left": "issue:28282", + "right": "issue:33453", + "accept": false, + "reason": "PyTorch-missing ImportError for AutoModel vs tokenizer-loading regression; not the same issue." + }, + { + "left": "issue:42890", + "right": "issue:44112", + "accept": false, + "reason": "Both mention stale device override tests, but the actual failing models and test cases are different." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 39, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:30990", + "issue:31795", + "issue:33357", + "issue:33453", + "issue:34689", + "issue:36296", + "issue:38175", + "issue:38617", + "issue:42548", + "issue:42757", + "issue:42831", + "issue:42890", + "issue:43010", + "issue:43065", + "issue:43232", + "issue:43299", + "issue:43502", + "issue:43576", + "issue:43618", + "issue:43673", + "issue:43723", + "issue:43824", + "issue:43827", + "issue:43874", + "issue:44062", + "issue:44075", + "issue:44222", + "issue:44230", + "issue:44315", + "issue:44351", + "issue:44355", + "issue:44360", + "issue:44368", + "issue:44373", + "issue:44387", + "issue:44485", + "issue:44492", + "issue:44556", + "issue:44655", + "issue:44749", + "issue:44871", + "issue:44961", + "issue:44977", + "issue:45071", + "issue:45125", + "issue:45341", + "issue:45375", + "issue:45478" + ], + "soft_pairs": [ + "issue:43618|issue:44977", + "issue:43502|issue:44062", + "issue:43010|issue:44075", + "issue:42890|issue:45341", + "issue:43723|issue:44556", + "issue:44222|issue:45125", + "issue:44387|issue:44556", + "issue:43010|issue:43065", + "issue:43010|issue:43232", + "issue:42548|issue:44351", + "issue:45125|issue:45478", + "issue:44355|issue:44749", + "issue:45125|issue:45375", + "issue:30990|issue:31795", + "issue:43502|issue:44485", + "issue:44075|issue:44871", + "issue:43010|issue:44368", + "issue:44315|issue:44360", + "issue:42890|issue:44373", + "issue:38175|issue:43502", + "issue:43299|issue:44556", + "issue:43502|issue:44961", + "issue:44355|issue:44655", + 
"issue:44222|issue:44230", + "issue:44355|issue:45071", + "issue:42831|issue:43874", + "issue:42890|issue:43232", + "issue:33453|issue:34689", + "issue:42757|issue:43576", + "issue:33453|issue:36296", + "issue:43827|issue:44961", + "issue:43576|issue:43824", + "issue:43576|issue:45071", + "issue:43673|issue:44492", + "issue:38617|issue:43576", + "issue:33357|issue:33453" + ], + "prepared_review_unit_hash": "102bd00af91a52fed393764fcd2a8f11a9e55fd5e987b99f836cd37f3d1f048b", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 36, + "serialized_chars": 37891, + "estimated_input_tokens": 9473, + "estimated_eval_tokens": 19202 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is mostly a grab-bag of Transformers v5 regressions and unrelated model/test/docs issues. The only clear common thread is broad loading/import breakage, but none of the soft pairs look like true duplicates.", + "confidence": 0.69, + "canonical_issue_reason": "44556 is the broadest representative of the shared theme: checkpoint reload breaks after upgrading from v4.57 to v5.x. It best serves as an umbrella issue for the loading-regression subset.", + "canonical_pr_reason": null, + "best_issue_reason": "44556 is the most globally representative issue in this set because it captures the main v5 migration/loading regression pattern without being overly model-specific.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43618", + "right": "issue:44977", + "accept": false, + "reason": "Different failures: missing CLIPOutput attentions vs Qwen3.5 generation with flash-attention." 
+ }, + { + "left": "issue:43502", + "right": "issue:44062", + "accept": false, + "reason": "Unrelated bugs: offline-mode network requests vs AddedToken constructor argument collision." + }, + { + "left": "issue:43010", + "right": "issue:44075", + "accept": false, + "reason": "Different code paths: cache update no_grad behavior vs SGD optimizer args not being used." + }, + { + "left": "issue:42890", + "right": "issue:45341", + "accept": false, + "reason": "Both are test-related, but one is flaky seeds in SamHQ integration tests and the other is a bug in testing_utils.py." + }, + { + "left": "issue:43723", + "right": "issue:44556", + "accept": false, + "reason": "Both are loading regressions, but one is tokenizer loading and the other is checkpoint reload after v5 upgrade; not the same concrete bug." + }, + { + "left": "issue:44222", + "right": "issue:45125", + "accept": false, + "reason": "Different MoE/FP8 issues: save_pretrained behavior vs missing tensor-parallel plan." + }, + { + "left": "issue:44387", + "right": "issue:44556", + "accept": false, + "reason": "Int4 quantization OOM is a memory/quantization issue, not a checkpoint reload regression." + }, + { + "left": "issue:43010", + "right": "issue:43065", + "accept": false, + "reason": "Cache update semantics and a dummy Conv2d in Sam3PixelDecoder are unrelated." + }, + { + "left": "issue:43010", + "right": "issue:43232", + "accept": false, + "reason": "Both touch generation utilities, but the specific bugs are different: no_grad on cache update vs sync_gpus kwargs handling." + }, + { + "left": "issue:42548", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but for different symbols and modules." + }, + { + "left": "issue:45125", + "right": "issue:45478", + "accept": false, + "reason": "Same model family, but one is missing _tp_plan and the other is a broader from_pretrained failure; not the same bug." 
+ }, + { + "left": "issue:44355", + "right": "issue:44749", + "accept": false, + "reason": "Compiled-file errors and a Chinese-described performance slowdown are unrelated." + }, + { + "left": "issue:45125", + "right": "issue:45375", + "accept": false, + "reason": "Both are Qwen3.5 MoE-related, but one is tensor-parallel metadata and the other is a missing config field dropped by strict validation." + }, + { + "left": "issue:30990", + "right": "issue:31795", + "accept": false, + "reason": "Sentence Transformers loading stall and a forward-docstring issue are unrelated." + }, + { + "left": "issue:43502", + "right": "issue:44485", + "accept": false, + "reason": "Offline network requests and GLM-5 RoPE implementation are different subsystems and symptoms." + }, + { + "left": "issue:44075", + "right": "issue:44871", + "accept": false, + "reason": "Optimizer argument handling and Gemma eos_token_id config mismatch are unrelated." + }, + { + "left": "issue:43010", + "right": "issue:44368", + "accept": false, + "reason": "Cache update decoration and tie_word_embeddings warning are different issues." + }, + { + "left": "issue:44315", + "right": "issue:44360", + "accept": false, + "reason": "Liger Kernel application in model_init and DSA indexer ReLU are unrelated." + }, + { + "left": "issue:42890", + "right": "issue:44373", + "accept": false, + "reason": "Test seed flakiness and a wrong docstring are not the same defect." + }, + { + "left": "issue:38175", + "right": "issue:43502", + "accept": false, + "reason": "Unexpected zero probabilities in SigLIP2 is unrelated to local_files_only network access." + }, + { + "left": "issue:43299", + "right": "issue:44556", + "accept": false, + "reason": "Both are load regressions, but for different model families and code paths." + }, + { + "left": "issue:43502", + "right": "issue:44961", + "accept": false, + "reason": "Local-files-only network leakage and the unrelated 'racoon' issue do not match." 
+ }, + { + "left": "issue:44355", + "right": "issue:44655", + "accept": false, + "reason": "Compiled Python-file errors and pipeline save_pretrained support are unrelated." + }, + { + "left": "issue:44222", + "right": "issue:44230", + "accept": false, + "reason": "Both mention FP8/MoE, but one is save_pretrained and the other is inference support; different bugs." + }, + { + "left": "issue:44355", + "right": "issue:45071", + "accept": false, + "reason": "Compiled-file runtime errors and PretrainedConfig type-checking are unrelated." + }, + { + "left": "issue:42831", + "right": "issue:43874", + "accept": false, + "reason": "FineGrainedFP8 accuracy drift and a missing image-processor method are different defects." + }, + { + "left": "issue:42890", + "right": "issue:43232", + "accept": false, + "reason": "Flaky tests due to seeding and generation kwargs after sync_gpus are unrelated." + }, + { + "left": "issue:33453", + "right": "issue:34689", + "accept": false, + "reason": "Both are model-loading regressions, but for different models and different failure modes." + }, + { + "left": "issue:42757", + "right": "issue:43576", + "accept": false, + "reason": "ImportError for is_offline_mode and a broken transformers env command are unrelated." + }, + { + "left": "issue:33453", + "right": "issue:36296", + "accept": false, + "reason": "Tokenizer loading regression and tensor-parallel training bug are not the same issue." + }, + { + "left": "issue:43827", + "right": "issue:44961", + "accept": false, + "reason": "Docs still referencing pipeline() and 'racoon' are unrelated." + }, + { + "left": "issue:43576", + "right": "issue:43824", + "accept": false, + "reason": "Broken env command and missing Qwen2_5_VLForConditionalGeneration import are different failures." + }, + { + "left": "issue:43673", + "right": "issue:44492", + "accept": false, + "reason": "Generation cache missing in chunked_prefill and a cache-strategy typo are not the same bug." 
+ }, + { + "left": "issue:38617", + "right": "issue:43576", + "accept": false, + "reason": "Importing layer_type_validation and the env command failure are unrelated." + }, + { + "left": "issue:33357", + "right": "issue:33453", + "accept": false, + "reason": "Different regressions: MacOS bus error on CLIP vs tokenizer loading regression." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 40, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:31795", + "issue:33453", + "issue:36296", + "issue:38617", + "issue:41628", + "issue:41669", + "issue:42371", + "issue:42548", + "issue:42890", + "issue:43012", + "issue:43352", + "issue:43408", + "issue:43526", + "issue:43576", + "issue:43673", + "issue:43901", + "issue:43976", + "issue:44038", + "issue:44077", + "issue:44246", + "issue:44261", + "issue:44273", + "issue:44295", + "issue:44297", + "issue:44315", + "issue:44351", + "issue:44355", + "issue:44360", + "issue:44393", + "issue:44485", + "issue:44492", + "issue:44556", + "issue:44623", + "issue:44704", + "issue:44829", + "issue:44861", + "issue:44908", + "issue:44945", + "issue:44961", + "issue:45003", + "issue:45071", + "issue:45092", + "issue:45125", + "issue:45230", + "issue:45341", + "issue:45468", + "issue:45478", + "issue:45542" + ], + "soft_pairs": [ + "issue:44961|issue:45468", + "issue:31795|issue:38617", + "issue:43576|issue:44351", + "issue:44908|issue:44961", + "issue:41628|issue:42548", + "issue:36296|issue:43901", + "issue:44485|issue:44961", + "issue:36296|issue:43526", + "issue:36296|issue:43012", + "issue:44273|issue:45003", + "issue:41628|issue:44355", + 
"issue:44261|issue:44360", + "issue:44273|issue:45542", + "issue:44623|issue:45230", + "issue:43576|issue:44556", + "issue:41669|issue:43673", + "issue:44393|issue:45071", + "issue:44038|issue:45125", + "issue:44556|issue:45092", + "issue:42371|issue:44492", + "issue:42890|issue:44908", + "issue:44704|issue:44908", + "issue:44246|issue:44273", + "issue:43408|issue:44315", + "issue:44297|issue:45230", + "issue:43576|issue:44908", + "issue:43352|issue:44393", + "issue:44908|issue:45071", + "issue:43976|issue:44393", + "issue:44295|issue:45230", + "issue:41669|issue:44351", + "issue:41669|issue:45341", + "issue:44492|issue:44945", + "issue:41669|issue:44492", + "issue:44315|issue:44829", + "issue:44393|issue:45478", + "issue:44077|issue:44861", + "issue:31795|issue:33453" + ], + "prepared_review_unit_hash": "ae419bdddcfe4001e061f812d8f51e72be5ffe7a7fd5aa86252487dc5e24613b", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 38, + "serialized_chars": 38384, + "estimated_input_tokens": 9596, + "estimated_eval_tokens": 19448 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is highly heterogeneous: it mixes import/export regressions, lazy-loading/performance problems, model-specific bugs, docs issues, and unrelated training/runtime errors. 
None of the soft pairs look like the same concrete bug or change.", + "confidence": 0.91, + "canonical_issue_reason": "No single issue is representative; the items do not form one duplicate set and span unrelated subsystems and failure modes.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a good global canonical for this cluster because the themes are too diverse and the pairwise similarities are mostly superficial.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44961", + "right": "issue:45468", + "accept": false, + "reason": "Unrelated titles: a nonsensical issue title vs a Gemma-4 audio positional-encoding bug." + }, + { + "left": "issue:31795", + "right": "issue:38617", + "accept": false, + "reason": "Documentation confusion about forward args vs an import error for a missing symbol." + }, + { + "left": "issue:43576", + "right": "issue:44351", + "accept": false, + "reason": "Both involve v5 imports, but one is a broken env command and the other is a missing cache class export." + }, + { + "left": "issue:44908", + "right": "issue:44961", + "accept": false, + "reason": "Completely unrelated: scheduler kwargs vs an unrelated placeholder issue title." + }, + { + "left": "issue:41628", + "right": "issue:42548", + "accept": false, + "reason": "Both are import errors, but for different top-level symbols and likely different missing exports." + }, + { + "left": "issue:36296", + "right": "issue:43901", + "accept": false, + "reason": "Tensor-parallel training bug vs pipeline docs mismatch; different subsystem and failure mode." + }, + { + "left": "issue:44485", + "right": "issue:44961", + "accept": false, + "reason": "RoPE implementation discussion is unrelated to the placeholder issue." + }, + { + "left": "issue:36296", + "right": "issue:43526", + "accept": false, + "reason": "Tensor parallelism bug vs BeitImageProcessorFast label-reduction bug." 
+ }, + { + "left": "issue:36296", + "right": "issue:43012", + "accept": false, + "reason": "Training parallelism bug vs bfloat16 compilation warning; no shared code-path." + }, + { + "left": "issue:44273", + "right": "issue:45003", + "accept": false, + "reason": "Both mention loading/import behavior, but one is a vague lazy-loading failure and the other is a specific sys.modules access bug; not clearly the same defect." + }, + { + "left": "issue:41628", + "right": "issue:44355", + "accept": false, + "reason": "Importing a symbol from transformers vs errors in compiled Python files; unrelated." + }, + { + "left": "issue:44261", + "right": "issue:44360", + "accept": false, + "reason": "MLA RMS norm epsilon precision issue vs a DSA indexer ReLU omission." + }, + { + "left": "issue:44273", + "right": "issue:45542", + "accept": false, + "reason": "Lazy-loading problem vs undefined TF backend when only tensorboard is installed." + }, + { + "left": "issue:44623", + "right": "issue:45230", + "accept": false, + "reason": "Processor save_pretrained missing files vs a generic bug report with no concrete overlap." + }, + { + "left": "issue:43576", + "right": "issue:44556", + "accept": false, + "reason": "v5 env-command breakage vs checkpoint reload incompatibility; similar release era, but different concrete failures." + }, + { + "left": "issue:41669", + "right": "issue:43673", + "accept": false, + "reason": "Import-time slowdown from model imports vs missing GenerationMixin cache in chunked prefill." + }, + { + "left": "issue:44393", + "right": "issue:45071", + "accept": false, + "reason": "Qwen3-VL bbox output issue vs PretrainedConfig type-checking regression; different code paths." + }, + { + "left": "issue:44038", + "right": "issue:45125", + "accept": false, + "reason": "Both mention Qwen3-Moe, but one is a general v5/Qwen3-VL-Moe bug and the other is a missing tensor-parallel plan for Qwen3_5Moe." 
+ }, + { + "left": "issue:44556", + "right": "issue:45092", + "accept": false, + "reason": "Both are checkpoint/loading compatibility issues, but one is generic version upgrade reload failure and the other is remote-code/meta-init incompatibility; too broad to call duplicates." + }, + { + "left": "issue:42371", + "right": "issue:44492", + "accept": false, + "reason": "TF32 API guidance vs a typo in cache strategies." + }, + { + "left": "issue:42890", + "right": "issue:44908", + "accept": false, + "reason": "Test flakiness from missing set_seed vs scheduler kwargs handling." + }, + { + "left": "issue:44704", + "right": "issue:44908", + "accept": false, + "reason": "AutoProcessor cached_file kwargs bug vs scheduler kwargs bug; different APIs and behavior." + }, + { + "left": "issue:44246", + "right": "issue:44273", + "accept": false, + "reason": "Import performance complaint vs lazy-loading failure; related theme, not the same bug." + }, + { + "left": "issue:43408", + "right": "issue:44315", + "accept": false, + "reason": "Model-type warning in SAM3 vs Liger Kernel not applied with model_init." + }, + { + "left": "issue:44297", + "right": "issue:45230", + "accept": false, + "reason": "Tokenizer save_pretrained mismatch vs generic bug report." + }, + { + "left": "issue:43352", + "right": "issue:44393", + "accept": false, + "reason": "Flash Attention 2 unsupported in Nemotron vs Qwen3-VL bbox hallucination/error." + }, + { + "left": "issue:43976", + "right": "issue:44393", + "accept": false, + "reason": "Python version compatibility issue vs a vision-language output bug." + }, + { + "left": "issue:44295", + "right": "issue:45230", + "accept": false, + "reason": "position_ids buffer access error vs generic bug report." + }, + { + "left": "issue:44315", + "right": "issue:44829", + "accept": false, + "reason": "Liger Kernel application problem vs degenerate training with flash_attention_3; distinct training issues." 
+ }, + { + "left": "issue:44351", + "right": "issue:44861", + "accept": false, + "reason": "Missing HybridCache export vs tied-weights key AttributeError." + }, + { + "left": "issue:44360", + "right": "issue:45003", + "accept": false, + "reason": "DSA indexer implementation issue vs sys.modules access in modeling_utils." + }, + { + "left": "issue:44945", + "right": "issue:44961", + "accept": false, + "reason": "Pipeline parallelism output corruption vs placeholder issue title." + }, + { + "left": "issue:45003", + "right": "issue:45071", + "accept": false, + "reason": "Unsafe sys.modules access vs PretrainedConfig typing regression." + }, + { + "left": "issue:41669", + "right": "issue:44492", + "accept": false, + "reason": "Import-star performance regression vs a typo in cache strategy docs/code." + }, + { + "left": "issue:31795", + "right": "issue:33453", + "accept": false, + "reason": "Docs confusion in model.forward vs tokenizer loading regression; no shared bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 41, + "review_unit_count": 41, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-364", + "nodes": [ + "issue:31515", + "issue:31795", + "issue:33453", + "issue:34689", + "issue:36296", + "issue:36683", + "issue:37428", + "issue:38617", + "issue:41669", + "issue:42371", + "issue:42890", + "issue:42913", + "issue:43066", + "issue:43298", + "issue:43502", + "issue:43519", + "issue:43525", + "issue:43572", + "issue:43576", + "issue:43606", + "issue:43618", + "issue:43704", + "issue:43723", + "issue:43827", + "issue:44230", + "issue:44261", + "issue:44263", + "issue:44273", + "issue:44315", + "issue:44360", + "issue:44393", + 
"issue:44485", + "issue:44492", + "issue:44556", + "issue:44623", + "issue:44655", + "issue:44861", + "issue:44908", + "issue:44998", + "issue:45092", + "issue:45230", + "issue:45290", + "issue:45310", + "issue:45341", + "issue:45468" + ], + "soft_pairs": [ + "issue:43827|issue:44908", + "issue:41669|issue:44908", + "issue:43723|issue:44393", + "issue:44655|issue:44861", + "issue:43576|issue:44273", + "issue:44360|issue:45468", + "issue:45230|issue:45310", + "issue:44273|issue:44908", + "issue:41669|issue:43704", + "issue:43519|issue:44485", + "issue:43519|issue:44908", + "issue:31795|issue:34689", + "issue:31795|issue:37428", + "issue:44315|issue:45092", + "issue:43704|issue:44485", + "issue:44485|issue:45468", + "issue:42371|issue:43704", + "issue:43502|issue:43519", + "issue:43519|issue:44623", + "issue:43519|issue:43704", + "issue:44263|issue:45468", + "issue:44261|issue:45468", + "issue:43606|issue:45468", + "issue:42890|issue:43704", + "issue:42913|issue:43066", + "issue:44908|issue:44998", + "issue:44861|issue:45290", + "issue:44998|issue:45341", + "issue:44492|issue:44998", + "issue:43618|issue:44998", + "issue:44556|issue:44998", + "issue:43525|issue:44861", + "issue:43572|issue:44861", + "issue:31515|issue:38617", + "issue:44230|issue:44623", + "issue:31515|issue:36296", + "issue:31515|issue:31795", + "issue:44230|issue:45310", + "issue:31515|issue:33453", + "issue:31515|issue:36683", + "issue:43298|issue:44230" + ], + "prepared_review_unit_hash": "91b8a4b00e517b7f2d11a8fcd7dd8383ff091f008d7a742f64cbed4a58c7a950", + "component_budget": { + "node_count": 364, + "item_count": 364, + "soft_pair_count": 1360, + "serialized_chars": 701640, + "estimated_input_tokens": 175410, + "estimated_eval_tokens": 351076 + }, + "budget": { + "node_count": 45, + "item_count": 45, + "soft_pair_count": 41, + "serialized_chars": 37507, + "estimated_input_tokens": 9377, + "estimated_eval_tokens": 19010 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + 
"aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a noisy mix of Transformers v5 loading/tokenizer/config regressions and several unrelated model-specific bugs. There is no strong exact-duplicate pair; the best anchor is a broad checkpoint reload compatibility issue, but confidence is low-moderate.", + "confidence": 0.43, + "canonical_issue_reason": "Issue 44556 is the broadest, clearest representative of the main cluster theme: checkpoint reload incompatibility after upgrading to v5. It is more general than the model-specific loading reports and closer to a canonical tracker issue.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44556 is the best single issue to anchor this set because it captures the overarching v4-to-v5 loading regression theme that recurs across several nearby reports, even though the cluster itself is heterogeneous.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43827", + "right": "issue:44908", + "accept": false, + "reason": "Different subsystems: pipeline-removal docs issue vs scheduler keyword handling." + }, + { + "left": "issue:41669", + "right": "issue:44908", + "accept": false, + "reason": "Import-star performance/cleanup issue is unrelated to inverse_sqrt scheduler kwargs." + }, + { + "left": "issue:43723", + "right": "issue:44393", + "accept": false, + "reason": "Tokenizer loading regression and Qwen3-VL bbox output bug are different code paths." + }, + { + "left": "issue:44655", + "right": "issue:44861", + "accept": false, + "reason": "Saving Pipeline objects and tied-weight-key crash are separate save-path bugs." + }, + { + "left": "issue:43576", + "right": "issue:44273", + "accept": false, + "reason": "Broken env command and lazy loading failure are different features and failure modes." 
+ }, + { + "left": "issue:44360", + "right": "issue:45468", + "accept": false, + "reason": "Unrelated model internals: DSA indexer ReLU vs Gemma audio positional encoding." + }, + { + "left": "issue:45230", + "right": "issue:45310", + "accept": false, + "reason": "Generic bug report vs concrete Qwen3.5 MoE from_pretrained error; not the same bug." + }, + { + "left": "issue:44273", + "right": "issue:44908", + "accept": false, + "reason": "Lazy loading problem is unrelated to lr_scheduler_kwargs handling." + }, + { + "left": "issue:41669", + "right": "issue:43704", + "accept": false, + "reason": "Model import cleanup and VRAM leak in dataloader threads are unrelated." + }, + { + "left": "issue:43519", + "right": "issue:44485", + "accept": false, + "reason": "Timestamp calculation in Qwen3VL and GLM-5 RoPE implementation are different model-specific bugs." + }, + { + "left": "issue:43519", + "right": "issue:44908", + "accept": false, + "reason": "Timestamp calculation and scheduler kwargs are unrelated." + }, + { + "left": "issue:31795", + "right": "issue:34689", + "accept": false, + "reason": "Doc confusion in forward args does not match a model-loading regression." + }, + { + "left": "issue:31795", + "right": "issue:37428", + "accept": false, + "reason": "Documentation issue and flash-attention import error are unrelated." + }, + { + "left": "issue:44315", + "right": "issue:45092", + "accept": false, + "reason": "Both involve model creation/loading, but Liger kernel application and remote-code meta-init incompatibility are distinct code paths." + }, + { + "left": "issue:43704", + "right": "issue:44485", + "accept": false, + "reason": "VRAM leak in multi-threaded inference is unrelated to RoPE implementation." + }, + { + "left": "issue:44485", + "right": "issue:45468", + "accept": false, + "reason": "Both mention positional-style internals, but they are different models and different bugs." 
+ }, + { + "left": "issue:42371", + "right": "issue:43704", + "accept": false, + "reason": "TF32 API guidance and VRAM leak are unrelated." + }, + { + "left": "issue:43502", + "right": "issue:43519", + "accept": false, + "reason": "Local-files-only network leakage and timestamp math are unrelated." + }, + { + "left": "issue:43519", + "right": "issue:44623", + "accept": false, + "reason": "Timestamp calculation bug and processor.save_pretrained missing files are different features." + }, + { + "left": "issue:43519", + "right": "issue:43704", + "accept": false, + "reason": "Different model bugs: timestamp math vs VRAM leak." + }, + { + "left": "issue:44263", + "right": "issue:45468", + "accept": false, + "reason": "torch.split return handling and Gemma audio encoding are unrelated." + }, + { + "left": "issue:44261", + "right": "issue:45468", + "accept": false, + "reason": "Missing rms_norm_eps in MLA and Gemma audio positional encoding are different implementation bugs." + }, + { + "left": "issue:43606", + "right": "issue:45468", + "accept": false, + "reason": "CPU offload device mismatch is unrelated to Gemma audio positional encoding." + }, + { + "left": "issue:42890", + "right": "issue:43704", + "accept": false, + "reason": "Flaky integration test seeding and VRAM leak are unrelated." + }, + { + "left": "issue:42913", + "right": "issue:43066", + "accept": false, + "reason": "Both are tokenizer-related v5 reports, but they describe different symptoms and likely different fixes." + }, + { + "left": "issue:44908", + "right": "issue:44998", + "accept": false, + "reason": "Scheduler kwargs bug and unrelated placeholder issue." + }, + { + "left": "issue:44861", + "right": "issue:45290", + "accept": false, + "reason": "Tied-weight-key crash and chat template tool-call crash are distinct code paths." + }, + { + "left": "issue:44998", + "right": "issue:45341", + "accept": false, + "reason": "Placeholder unemployment issue is unrelated to testing_utils." 
+ }, + { + "left": "issue:44492", + "right": "issue:44998", + "accept": false, + "reason": "Cache-strategy typo and unrelated placeholder issue." + }, + { + "left": "issue:43618", + "right": "issue:44998", + "accept": false, + "reason": "CLIPOutput attentions regression is unrelated to the placeholder issue." + }, + { + "left": "issue:44556", + "right": "issue:44998", + "accept": false, + "reason": "Checkpoint reload regression and unrelated placeholder issue." + }, + { + "left": "issue:44230", + "right": "issue:44623", + "accept": false, + "reason": "FP8 inference support request and processor.save_pretrained missing files are unrelated." + }, + { + "left": "issue:31515", + "right": "issue:38617", + "accept": false, + "reason": "Checkpoint loading slowness and missing layer_type_validation import are different failures." + }, + { + "left": "issue:44230", + "right": "issue:45310", + "accept": false, + "reason": "FP8 support request is unrelated to Qwen3.5 MoE from_pretrained error." + }, + { + "left": "issue:31515", + "right": "issue:36296", + "accept": false, + "reason": "Slow from_pretrained loading and tensor-parallel training bug are unrelated." + }, + { + "left": "issue:31515", + "right": "issue:31795", + "accept": false, + "reason": "Checkpoint load performance and forward-argument docs confusion are unrelated." + }, + { + "left": "issue:31515", + "right": "issue:33453", + "accept": false, + "reason": "Slow loading and tokenizer-loading regression are different symptoms, not the same bug." + }, + { + "left": "issue:31515", + "right": "issue:36683", + "accept": false, + "reason": "Slow checkpoint loading and Gemma3Config missing vocab_size are unrelated." + }, + { + "left": "issue:43298", + "right": "issue:44230", + "accept": false, + "reason": "Audio-model batching support and FP8 inference support are separate requests." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 3, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-26350-2", + "nodes": [ + "issue:26350", + "issue:33260" + ], + "soft_pairs": [ + "issue:26350|issue:33260" + ], + "prepared_review_unit_hash": "614238314873a4bb7aa5a575268b45ebd5ff33c459a8a435a18facfb0a911fd9", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1589, + "estimated_input_tokens": 398, + "estimated_eval_tokens": 1052 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1589, + "estimated_input_tokens": 398, + "estimated_eval_tokens": 1052 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are both community-contribution issues, but they target different features (Flash Attention 2 vs GGUF) and different code paths, so they should not be deduplicated.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "No single issue is a canonical representative here; the two issues are unrelated feature requests despite similar wording.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:26350", + "right": "issue:33260", + "accept": false, + "reason": "Different requested features with different underlying implementations; the shared phrasing about supporting more architectures is too generic to imply the same bug/change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 4, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-27088-4", + "nodes": [ + "issue:27088", + "issue:43834", + "issue:44365", + "issue:44370" + ], + "soft_pairs": [ + "issue:27088|issue:44370", + "issue:27088|issue:44365", + "issue:27088|issue:43834", + "issue:43834|issue:44370", + "issue:43834|issue:44365", + "issue:44365|issue:44370" + ], + "prepared_review_unit_hash": "adc76524be98365e6a2c4a601a5dc3947fc7c5ea63d96672bffec0a5377b18cf", + "component_budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4455, + "estimated_input_tokens": 1114, + "estimated_eval_tokens": 2484 + }, + "budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4455, + "estimated_input_tokens": 1114, + "estimated_eval_tokens": 2484 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The cluster is dominated by near-duplicate i18n translation-request issues. 
The three placeholder-titled issues look effectively identical; the Turkish-specific issue is related thematically but not the same concrete request.", + "confidence": 0.91, + "canonical_issue_reason": "Issue #43834 is the earliest of the near-identical placeholder issues, with the same title/body pattern as the others, so it is the cleanest representative of the duplicate set.", + "canonical_pr_reason": null, + "best_issue_reason": "#43834 is the best global issue candidate because it most clearly represents the repeated template issue and is earlier than the other near-identical copies.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:27088", + "right": "issue:44370", + "accept": false, + "reason": "Turkish-specific translation request vs a generic placeholder translation issue; same theme, but not the same concrete request." + }, + { + "left": "issue:27088", + "right": "issue:44365", + "accept": false, + "reason": "Different concrete target language/request: Turkish versus a generic placeholder template, so this is not a true duplicate." + }, + { + "left": "issue:27088", + "right": "issue:43834", + "accept": false, + "reason": "The Turkish issue is more specific than the generic placeholder issue; they match on broad topic only, not the same underlying request." + }, + { + "left": "issue:43834", + "right": "issue:44370", + "accept": true, + "reason": "These two look like the same template issue: same placeholder title and near-identical body, with no evidence of a meaningful difference." + }, + { + "left": "issue:43834", + "right": "issue:44365", + "accept": true, + "reason": "Near-identical placeholder translation issues with no visible substantive distinction." + }, + { + "left": "issue:44365", + "right": "issue:44370", + "accept": true, + "reason": "Same placeholder title/body pattern and extremely high similarity indicate the same underlying issue." 
+ } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and the soft-edge verdicts are conservative. The three placeholder-titled issues are supported as near-identical by very high similarity scores and matching titles, and selecting the earliest as canonical is reasonable. The Turkish-specific issue is treated as related but not identical, which is appropriately cautious given the visible title difference and lack of direct evidence tying it to the same concrete request." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 5, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-36817-13", + "nodes": [ + "issue:36817", + "issue:37015", + "issue:41211", + "issue:42503", + "issue:42738", + "issue:43671", + "issue:43837", + "issue:43909", + "issue:44121", + "issue:44205", + "issue:45027", + "issue:45177", + "issue:45322" + ], + "soft_pairs": [ + "issue:36817|issue:41211", + "issue:36817|issue:43671", + "issue:37015|issue:43837", + "issue:37015|issue:44121", + "issue:37015|issue:43909", + "issue:37015|issue:42503", + "issue:37015|issue:45027", + "issue:41211|issue:43671", + "issue:42503|issue:43837", + "issue:42503|issue:43671", + "issue:42503|issue:42738", + "issue:42503|issue:45027", + "issue:42738|issue:45027", + "issue:43671|issue:43837", + "issue:43671|issue:45027", + "issue:43671|issue:45177", + "issue:43671|issue:44121", + "issue:43671|issue:43909", + "issue:43837|issue:43909", + "issue:43837|issue:45322", + "issue:43837|issue:44121", + "issue:43909|issue:44205", + "issue:44121|issue:45322", + "issue:43837|issue:45027", + "issue:43837|issue:45177" + ], + "prepared_review_unit_hash": 
"d17088d4f578a396881eee9dc4a2740d788586f17c38678bff8400e5529ffa78", + "component_budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 25, + "serialized_chars": 16009, + "estimated_input_tokens": 4003, + "estimated_eval_tokens": 8262 + }, + "budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 25, + "serialized_chars": 16009, + "estimated_input_tokens": 4003, + "estimated_eval_tokens": 8262 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are not duplicates; they are separate model-request issues spanning different model families and even different modalities (text, audio, OCR, vision).", + "confidence": 0.98, + "canonical_issue_reason": "No single issue cleanly represents the whole cluster as a duplicate set. If one must be chosen as the broadest umbrella, issue 42738 is the most generic, but it still only covers a subset of the requests.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 42738 is the broadest and least specific title ('BERT-like models with RoPE'), so it is the closest to an umbrella representative, though the cluster is too heterogeneous for true deduplication.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:36817", + "right": "issue:41211", + "accept": false, + "reason": "Different model requests: EuroBert vs DEIMv2; same broad area, but not the same underlying change." + }, + { + "left": "issue:36817", + "right": "issue:43671", + "accept": false, + "reason": "EuroBert config support vs Qwen3-TTS support are unrelated models and modalities." + }, + { + "left": "issue:37015", + "right": "issue:43837", + "accept": false, + "reason": "NeoBERT and Qwen3-ASR are different model additions with different code paths." 
+ }, + { + "left": "issue:37015", + "right": "issue:44121", + "accept": false, + "reason": "NeoBERT vs OpenAI weight-sparse transformer are distinct architectures and requests." + }, + { + "left": "issue:37015", + "right": "issue:43909", + "accept": false, + "reason": "NeoBERT and LFM2.5 Audio 1.5B are not the same model or feature." + }, + { + "left": "issue:37015", + "right": "issue:42503", + "accept": false, + "reason": "NeoBERT and ModernVBERT are separate model requests; related theme, not duplicate." + }, + { + "left": "issue:37015", + "right": "issue:45027", + "accept": false, + "reason": "NeoBERT vs Voxtral TTS support are different models and different functionality." + }, + { + "left": "issue:41211", + "right": "issue:43671", + "accept": false, + "reason": "DEIMv2 and Qwen3-TTS are unrelated model additions." + }, + { + "left": "issue:42503", + "right": "issue:43837", + "accept": false, + "reason": "ModernVBERT models and Qwen3-ASR support are distinct requests, not one concrete fix." + }, + { + "left": "issue:42503", + "right": "issue:43671", + "accept": false, + "reason": "ModernVBERT vs Qwen3-TTS spans different model families and modality." + }, + { + "left": "issue:42503", + "right": "issue:42738", + "accept": false, + "reason": "Both mention BERT-like ideas, but ModernVBERT models and generic RoPE BERT-like support are not the same request." + }, + { + "left": "issue:42503", + "right": "issue:45027", + "accept": false, + "reason": "ModernVBERT models and Voxtral TTS support are unrelated." + }, + { + "left": "issue:42738", + "right": "issue:45027", + "accept": false, + "reason": "BERT-like RoPE models vs Voxtral TTS support are different classes of model changes." + }, + { + "left": "issue:43671", + "right": "issue:43837", + "accept": false, + "reason": "Both are Qwen3 proposals, but TTS and ASR are different modalities and code paths." 
+ }, + { + "left": "issue:43671", + "right": "issue:45027", + "accept": false, + "reason": "Qwen3-TTS and Voxtral TTS are different model integrations, not one duplicate request." + }, + { + "left": "issue:43671", + "right": "issue:45177", + "accept": false, + "reason": "Audio TTS support and DeepSeek-OCR2 are unrelated model requests." + }, + { + "left": "issue:43671", + "right": "issue:44121", + "accept": false, + "reason": "Qwen3-TTS and weight-sparse transformer support are different changes." + }, + { + "left": "issue:43671", + "right": "issue:43909", + "accept": false, + "reason": "Qwen3-TTS and LFM2.5 Audio are different audio models, not the same bug/change." + }, + { + "left": "issue:43837", + "right": "issue:43909", + "accept": false, + "reason": "Qwen3-ASR and LFM2.5 Audio are different speech-model requests." + }, + { + "left": "issue:43837", + "right": "issue:45322", + "accept": false, + "reason": "Qwen3-ASR support and EUPE encoder support are unrelated model additions." + }, + { + "left": "issue:43837", + "right": "issue:44121", + "accept": false, + "reason": "ASR support and weight-sparse transformer support are not the same code-path problem." + }, + { + "left": "issue:43909", + "right": "issue:44205", + "accept": false, + "reason": "LFM2.5 Audio and SAM3-LiteText are different model families and modalities." + }, + { + "left": "issue:44121", + "right": "issue:45322", + "accept": false, + "reason": "Weight-sparse transformer support and EUPE encoder support are separate architecture requests." + }, + { + "left": "issue:43837", + "right": "issue:45027", + "accept": false, + "reason": "Qwen3-ASR and Voxtral-4B-TTS-2603 are different speech-model integrations." + }, + { + "left": "issue:43837", + "right": "issue:45177", + "accept": false, + "reason": "ASR support and DeepSeek-OCR2 are unrelated model requests." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 6, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-37934-6", + "nodes": [ + "issue:37934", + "issue:42936", + "issue:43240", + "issue:43668", + "issue:43813", + "issue:44031" + ], + "soft_pairs": [ + "issue:37934|issue:43668", + "issue:42936|issue:43240", + "issue:42936|issue:44031", + "issue:42936|issue:43813", + "issue:42936|issue:43668", + "issue:43240|issue:43668", + "issue:43668|issue:44031", + "issue:43668|issue:43813" + ], + "prepared_review_unit_hash": "d01e8031a2f96939f16383ae9fec1210926b19760a394a383cfb608fb6d94ffe", + "component_budget": { + "node_count": 6, + "item_count": 6, + "soft_pair_count": 8, + "serialized_chars": 6291, + "estimated_input_tokens": 1573, + "estimated_eval_tokens": 3402 + }, + "budget": { + "node_count": 6, + "item_count": 6, + "soft_pair_count": 8, + "serialized_chars": 6291, + "estimated_input_tokens": 1573, + "estimated_eval_tokens": 3402 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These issues are only superficially related (mostly around configs, norms, losses, or tokenizer warnings), but they describe different bugs/questions in different code paths. 
I would not merge any of the soft-edge pairs as duplicates.", + "confidence": 0.93, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "None of these is a clear canonical duplicate target; if one must be chosen as the most concrete user-facing bug report, issue 44031 stands out because it describes a specific regression affecting all tokenizers after a version change.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:37934", + "right": "issue:43668", + "accept": false, + "reason": "One asks whether Llama4TextL2Norm should actually be RMSNorm; the other reports an incorrect type hint for ModernBERTConfig.norm_eps. Different models and different defect types." + }, + { + "left": "issue:42936", + "right": "issue:43240", + "accept": false, + "reason": "Mask2Former ignore_value not being used is a model-specific loss-parameter bug; the other is a broader kwargs-forwarding issue in loss calculation. Related area, but not the same bug." + }, + { + "left": "issue:42936", + "right": "issue:44031", + "accept": false, + "reason": "Mask2Former loss handling and tokenizer regex warnings are unrelated code paths and symptoms." + }, + { + "left": "issue:42936", + "right": "issue:43813", + "accept": false, + "reason": "Mask2Former ignore_value behavior and a typo in a quantization attribute name are completely different issues." + }, + { + "left": "issue:42936", + "right": "issue:43668", + "accept": false, + "reason": "Different model/components and different defect classes: unused ignore_value vs an incorrect type hint." + }, + { + "left": "issue:43240", + "right": "issue:43668", + "accept": false, + "reason": "A generic kwargs-loss-forwarding bug is not the same as a ModernBERTConfig type annotation error." + }, + { + "left": "issue:43668", + "right": "issue:44031", + "accept": false, + "reason": "ModernBERTConfig norm_eps typing and tokenizer regex warnings affect different subsystems with different root causes." 
+ }, + { + "left": "issue:43668", + "right": "issue:43813", + "accept": false, + "reason": "An incorrect type hint and a typo in a quantization operation name are unrelated." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 7, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41084-2", + "nodes": [ + "issue:41084", + "issue:43037" + ], + "soft_pairs": [ + "issue:41084|issue:43037" + ], + "prepared_review_unit_hash": "6b41eb3638e1b4e28bb5458b24ab4af0d1c99e63e0b298d439579ad33d321e1b", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1479, + "estimated_input_tokens": 370, + "estimated_eval_tokens": 996 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1479, + "estimated_input_tokens": 370, + "estimated_eval_tokens": 996 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two issues are not duplicates: one is a generic request about block decoding, while the other is a model-specific request for DeepSeek v3.2 support.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41084", + "right": "issue:43037", + "accept": false, + "reason": "Different underlying topics: decoding behavior vs. support for a specific model/version. No indication they refer to the same bug or change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 8, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43598-2", + "nodes": [ + "issue:43598", + "issue:43600" + ], + "soft_pairs": [ + "issue:43598|issue:43600" + ], + "prepared_review_unit_hash": "dc9e5582b81ded1c85adc936663f831ea11c5a88f8e424f5f149599cf7b596e7", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1584, + "estimated_input_tokens": 396, + "estimated_eval_tokens": 1048 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1584, + "estimated_input_tokens": 396, + "estimated_eval_tokens": 1048 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two issues are superficially related through Trainer/custom trainer behavior, but they describe different underlying problems: one is about `remove_unused_column` customizability, the other about FLOPS estimation for custom trainers. They should not be treated as duplicates.", + "confidence": 0.92, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43598", + "right": "issue:43600", + "accept": false, + "reason": "Different bug reports with different goals: one asks to revisit `remove_unused_column` handling, the other asks to account for custom trainers in FLOPS estimation. Shared Trainer context is too broad to merge them as the same issue." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 9, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43684-2", + "nodes": [ + "issue:43684", + "issue:44405" + ], + "soft_pairs": [ + "issue:43684|issue:44405" + ], + "prepared_review_unit_hash": "6ab6852a2579e0cdbdbd428acca630119553107b42cbaf7989979c93280a98fc", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1611, + "estimated_input_tokens": 403, + "estimated_eval_tokens": 1062 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1611, + "estimated_input_tokens": 403, + "estimated_eval_tokens": 1062 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are related only by the Qwen naming family, but they request different capabilities: one adds Qwen3-Omni registration for AutoModel/AutoModelForConditionalGeneration, the other asks for AutoModelForSequenceClassification support for Qwen3.5. 
They do not look like the same underlying change.", + "confidence": 0.97, + "canonical_issue_reason": "No canonical issue: the issues target different model variants and different auto classes, so they are separate feature requests.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43684 is the broader model-registration request and is the closest representative of the cluster, but it is not a duplicate of 44405.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43684", + "right": "issue:44405", + "accept": false, + "reason": "Different Qwen variants and different target heads (conditional generation vs sequence classification); not the same underlying bug/change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 10, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43835-4", + "nodes": [ + "issue:43835", + "issue:43845", + "issue:43846", + "issue:43859" + ], + "soft_pairs": [ + "issue:43835|issue:43845", + "issue:43835|issue:43846", + "issue:43835|issue:43859", + "issue:43845|issue:43846", + "issue:43845|issue:43859", + "issue:43846|issue:43859" + ], + "prepared_review_unit_hash": "98aee86260290997919fd829372c8126987b9bca726e983070a6802d80bf190e", + "component_budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4265, + "estimated_input_tokens": 1067, + "estimated_eval_tokens": 2390 + }, + "budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4265, + "estimated_input_tokens": 1067, + "estimated_eval_tokens": 2390 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + 
"aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Three issues share the same very vague title (\u201chuggingface \u201d), but there isn\u2019t enough concrete content to confirm they are the same underlying bug. The lone outlier (\u201cFett\u201d) looks unrelated. No PRs are present.", + "confidence": 0.78, + "canonical_issue_reason": "issue:43845 is the best canonical issue by default: it is one of the duplicate-looking \u201chuggingface \u201d reports, is the earliest among them, and has a fuller body than 43859.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43845 is the best cluster representative because it is the earliest of the near-identical huggingface-titled issues and appears at least as informative as the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43835", + "right": "issue:43845", + "accept": false, + "reason": "Different titles (\u201cFett\u201d vs \u201chuggingface \u201d) and no shared targets/files or other concrete evidence of the same bug." + }, + { + "left": "issue:43835", + "right": "issue:43846", + "accept": false, + "reason": "Different titles and no substantive body/detail evidence tying them to the same underlying issue." + }, + { + "left": "issue:43835", + "right": "issue:43859", + "accept": false, + "reason": "Different titles and no concrete overlap beyond generic soft similarity." + }, + { + "left": "issue:43845", + "right": "issue:43846", + "accept": false, + "reason": "Titles match, but the available metadata is too thin to prove they describe the same specific bug rather than two separate vague reports." + }, + { + "left": "issue:43845", + "right": "issue:43859", + "accept": false, + "reason": "Same broad title, but no explicit issue target, filename, or detailed description to confirm a shared underlying problem." 
+ }, + { + "left": "issue:43846", + "right": "issue:43859", + "accept": false, + "reason": "Same broad title, but the cluster evidence is insufficient to safely merge them as duplicates of one concrete issue." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 11, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44371-5", + "nodes": [ + "issue:44371", + "issue:44418", + "issue:44572", + "issue:44573", + "issue:44937" + ], + "soft_pairs": [ + "issue:44371|issue:44572", + "issue:44371|issue:44573", + "issue:44371|issue:44418", + "issue:44371|issue:44937", + "issue:44418|issue:44572", + "issue:44418|issue:44573", + "issue:44572|issue:44573", + "issue:44572|issue:44937", + "issue:44573|issue:44937" + ], + "prepared_review_unit_hash": "6d883e2697d2ef0dd0c36fd597ac8d3c143ba2fb4c2368c8bbb87e57341f76c3", + "component_budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 9, + "serialized_chars": 5794, + "estimated_input_tokens": 1449, + "estimated_eval_tokens": 3154 + }, + "budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 9, + "serialized_chars": 5794, + "estimated_input_tokens": 1449, + "estimated_eval_tokens": 3154 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "This cluster is mostly low-information/spam issues with no clear shared bug or feature request; they do not look like duplicates of one another.", + "confidence": 0.97, + "canonical_issue_reason": "Issue #44371 is a reasonable anchor only because it is one of the earliest low-information spam tickets in the set; there is no 
substantive duplicate signal tying the issues together.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue here is a strong canonical duplicate target; #44371 is the least-bad representative by chronology, but the cluster does not contain a meaningful underlying issue.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44371", + "right": "issue:44572", + "accept": false, + "reason": "Both are spam-like placeholders, but there is no evidence they describe the same underlying bug or change." + }, + { + "left": "issue:44371", + "right": "issue:44573", + "accept": false, + "reason": "Same as above: identical low-quality titles are not enough to treat them as duplicates of a concrete issue." + }, + { + "left": "issue:44371", + "right": "issue:44418", + "accept": false, + "reason": "One is spam-like and the other is a generic documentation suggestion; they are not the same issue." + }, + { + "left": "issue:44371", + "right": "issue:44937", + "accept": false, + "reason": "The titles are unrelated; one is spam and the other is a strange named-item suggestion, not a duplicate bug report." + }, + { + "left": "issue:44418", + "right": "issue:44572", + "accept": false, + "reason": "A documentation enhancement request and a spam placeholder do not indicate the same underlying problem." + }, + { + "left": "issue:44418", + "right": "issue:44573", + "accept": false, + "reason": "Generic documentation request versus spam placeholder; no shared concrete issue." + }, + { + "left": "issue:44572", + "right": "issue:44573", + "accept": false, + "reason": "Both are spam-like placeholders, but there is no concrete shared bug/change to merge them on." + }, + { + "left": "issue:44572", + "right": "issue:44937", + "accept": false, + "reason": "Unrelated titles and no shared issue target or context." + }, + { + "left": "issue:44573", + "right": "issue:44937", + "accept": false, + "reason": "Unrelated titles and no shared issue target or context." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 12, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45145-2", + "nodes": [ + "issue:45145", + "issue:45151" + ], + "soft_pairs": [ + "issue:45145|issue:45151" + ], + "prepared_review_unit_hash": "a39b788154ed82dba4a68484e50f7f6fb4df5a6478ec99a494a29eb832b866a7", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1645, + "estimated_input_tokens": 412, + "estimated_eval_tokens": 1080 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1645, + "estimated_input_tokens": 412, + "estimated_eval_tokens": 1080 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The two closed issues are highly similar and likely refer to the same proposed energy-optimization idea, but the evidence is limited to title/body similarity with no explicit linkage, so the relationship should be treated as unresolved rather than a firm duplicate.", + "confidence": 0.87, + "canonical_issue_reason": "#45145 is the better canonical issue because it has the same title and slightly stronger linkage signal (one inbound reference), making it the more established thread.", + "canonical_pr_reason": null, + "best_issue_reason": "#45145 is the best global issue representative for the same reason: it appears to be the more established of two near-identical issue threads.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45145", + "right": "issue:45151", + "accept": false, + 
"reason": "The issues have identical titles and similar bodies, but the packet only provides soft_similarity evidence, no explicit cross-links, and deterministic_accept is false. That supports a likely duplicate, not a confident duplicate classification, so this edge should remain unresolved or be treated as low-confidence duplicate-likely." + } + ] + }, + "evaluator_result": { + "accept": false, + "feedback": "The canonical issue choice is reasonably grounded, but the soft-edge verdict is too strong for the evidence in the packet. The pair only has soft_similarity support, no explicit linkage, and deterministic_accept is false, so calling them a \"clear duplicate\" / \"almost certainly duplicates\" overstates confidence. Please downgrade to a more conservative duplicate-likelihood assessment or leave the edge unresolved." + }, + "evaluator_used": true, + "retried": true, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 13, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "cache", + "cache_hit": true, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45295-2", + "nodes": [ + "issue:45295", + "issue:45373" + ], + "soft_pairs": [], + "prepared_review_unit_hash": "f5fd95fada2f530ab0999e343c17c16266cde0c7182bb22509972ca44827cd49", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 0, + "serialized_chars": 1286, + "estimated_input_tokens": 322, + "estimated_eval_tokens": 900 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 0, + "serialized_chars": 1286, + "estimated_input_tokens": 322, + "estimated_eval_tokens": 900 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two issues describe the same missing Gemma 4 
sequence-classification support; the later, more specific open issue is the better cluster representative.", + "confidence": 0.94, + "canonical_issue_reason": "issue #45373 is the clearest canonical tracker: it is open, explicitly names the missing class `Gemma4ForSequenceClassification`, and states the module-level gap relative to Gemma 2/3.", + "canonical_pr_reason": null, + "best_issue_reason": "issue #45373 best captures the concrete request and likely remaining work, while #45295 is a shorter earlier duplicate with less detail and is already closed.", + "best_pr_reason": null, + "soft_edge_verdicts": [] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 1, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-39895-13", + "nodes": [ + "pull_request:39895", + "pull_request:42668", + "pull_request:43251", + "pull_request:43254", + "pull_request:43424", + "pull_request:43823", + "pull_request:43995", + "pull_request:44827", + "pull_request:45170", + "pull_request:45213", + "pull_request:45435", + "pull_request:45454", + "pull_request:45489" + ], + "soft_pairs": [ + "pull_request:39895|pull_request:43424", + "pull_request:42668|pull_request:43823", + "pull_request:43251|pull_request:43823", + "pull_request:43251|pull_request:44827", + "pull_request:43424|pull_request:44827", + "pull_request:43424|pull_request:45435", + "pull_request:43424|pull_request:43823", + "pull_request:43823|pull_request:45454", + "pull_request:43823|pull_request:45435", + "pull_request:43823|pull_request:43995", + "pull_request:43995|pull_request:45435", + "pull_request:43995|pull_request:45213", + "pull_request:43995|pull_request:44827", + 
"pull_request:44827|pull_request:45489", + "pull_request:44827|pull_request:45170", + "pull_request:43823|pull_request:44827", + "pull_request:45213|pull_request:45435", + "pull_request:44827|pull_request:45213", + "pull_request:43823|pull_request:45213", + "pull_request:44827|pull_request:45435", + "pull_request:45454|pull_request:45489", + "pull_request:43823|pull_request:45489", + "pull_request:43251|pull_request:43254" + ], + "prepared_review_unit_hash": "483dbb8c46708baf8cb704092db786ffbce9f3fca891548ec90f9a7ef6dc3f5f", + "component_budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 23, + "serialized_chars": 37543, + "estimated_input_tokens": 9386, + "estimated_eval_tokens": 19028 + }, + "budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 23, + "serialized_chars": 37543, + "estimated_input_tokens": 9386, + "estimated_eval_tokens": 19028 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The cluster is mostly a mix of unrelated PRs spanning model additions, docs, tests, and refactors. The only clear duplicate pair is the two `fixed_cross_entropy` PRs targeting the same issue and code path; the rest should stay separate.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 43251 is the best canonical choice for the `fixed_cross_entropy` fix: it explicitly targets issue 43240, updates the same loss helper as 43254, and has the more complete/intentional implementation and discussion. The other items are unrelated to each other.", + "best_issue_reason": null, + "best_pr_reason": "PR 43251 is the strongest overall PR in the set because it is a focused, well-scoped fix with an explicit issue link and clear behavior change. 
Most other PRs are unrelated feature or refactor work, or test/doc-only changes.", + "soft_edge_verdicts": [ + { + "left": "pull_request:39895", + "right": "pull_request:43424", + "accept": false, + "reason": "One adds VideoPrism support; the other adds an Executorch dynamic-shape test. Different models and different code paths." + }, + { + "left": "pull_request:42668", + "right": "pull_request:43823", + "accept": false, + "reason": "Processor robustness changes for existing models versus adding a new MobileLLM model. Not the same bug or change." + }, + { + "left": "pull_request:43251", + "right": "pull_request:43823", + "accept": false, + "reason": "Loss helper kwargs fix versus MobileLLM model creation. Unrelated." + }, + { + "left": "pull_request:43251", + "right": "pull_request:44827", + "accept": false, + "reason": "`fixed_cross_entropy` kwargs support is unrelated to Mistral4 test/model refactoring." + }, + { + "left": "pull_request:43424", + "right": "pull_request:44827", + "accept": false, + "reason": "Executorch dynamic-shape export tests and Mistral4 fixes are different features in different subsystems." + }, + { + "left": "pull_request:43424", + "right": "pull_request:45435", + "accept": false, + "reason": "Dynamic-shape export testing is unrelated to Whisper tokenizer bounds checking." + }, + { + "left": "pull_request:43424", + "right": "pull_request:43823", + "accept": false, + "reason": "Different model/export areas with no shared underlying bug." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45454", + "accept": false, + "reason": "MobileLLM model addition versus Gemma4 training behavior fix; no shared code-path problem." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45435", + "accept": false, + "reason": "MobileLLM model creation and Whisper tokenizer indexing are unrelated." 
+ }, + { + "left": "pull_request:43823", + "right": "pull_request:43995", + "accept": false, + "reason": "New model addition versus Falcon output-interface refactor; different changes." + }, + { + "left": "pull_request:43995", + "right": "pull_request:45435", + "accept": false, + "reason": "Falcon output-capture refactor is not the same issue as Whisper tokenization bounds handling." + }, + { + "left": "pull_request:43995", + "right": "pull_request:45213", + "accept": false, + "reason": "Falcon refactor and model-creation skill content are unrelated." + }, + { + "left": "pull_request:43995", + "right": "pull_request:44827", + "accept": false, + "reason": "Falcon output-interface refactor and Mistral4 test fixes are different code paths." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45489", + "accept": false, + "reason": "Mistral4 fixes and Gemma3n/Gemma4 cache-sharing alignment are different model families and bugs." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45170", + "accept": false, + "reason": "Mistral4 test fixes do not match the CLIP-like `layrnorm` rename/correction PR." + }, + { + "left": "pull_request:43823", + "right": "pull_request:44827", + "accept": false, + "reason": "MobileLLM model addition and Mistral4 fixes are unrelated." + }, + { + "left": "pull_request:45213", + "right": "pull_request:45435", + "accept": false, + "reason": "A new-model skill PR and a Whisper tokenizer bugfix are not the same underlying change." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45213", + "accept": false, + "reason": "Mistral4 bugfixes and a model-creation skill are unrelated." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45435", + "accept": false, + "reason": "Mistral4 test fixes and Whisper tokenizer indexing fixes are unrelated." 
+ }, + { + "left": "pull_request:45454", + "right": "pull_request:45489", + "accept": false, + "reason": "Both touch Gemma4-related files, but one is about training text-only samples and the other is about cache-sharing alignment; they fix different concrete problems." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45489", + "accept": false, + "reason": "MobileLLM addition and Gemma3n/Gemma4 cache-sharing alignment are unrelated." + }, + { + "left": "pull_request:43251", + "right": "pull_request:43254", + "accept": true, + "reason": "Both PRs fix the same `fixed_cross_entropy` helper to support additional kwargs/weight/label_smoothing for issue 43240, and they modify the same function on the same code path." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays conservative. The only clearly mergeable duplicate pair is 43251/43254, which both target the same `fixed_cross_entropy` helper and the same issue. The other pair rejections are consistent with the listed titles/files and do not overclaim shared bugs or mergeability." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 2, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-40546-2", + "nodes": [ + "pull_request:40546", + "pull_request:42781" + ], + "soft_pairs": [ + "pull_request:40546|pull_request:42781" + ], + "prepared_review_unit_hash": "7d5f44780d748776aece7817aedfcd212a3ef371ed53752c634e79f5fc8bc80b", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 7225, + "estimated_input_tokens": 1807, + "estimated_eval_tokens": 3870 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 7225, + "estimated_input_tokens": 1807, + "estimated_eval_tokens": 3870 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs are about VibeVoice, but they are not the same change: one is the base model integration and the other is a later realtime extension with additional tokenizer/generation work. 
They overlap heavily in shared model scaffolding, but the concrete goals differ enough that they should not be merged as duplicates.", + "confidence": 0.84, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #42781 is the broader, newer VibeVoice-related PR and appears to extend the base implementation with realtime support and the acoustic tokenizer, so it is the better representative of the cluster.", + "best_issue_reason": null, + "best_pr_reason": "PR #42781 is the most complete and current VibeVoice PR in the set, covering the broader feature set beyond the initial implementation.", + "soft_edge_verdicts": [ + { + "left": "pull_request:40546", + "right": "pull_request:42781", + "accept": false, + "reason": "40546 implements core VibeVoice support, while 42781 adds VibeVoice Realtime plus acoustic tokenizer and generation changes. They share the same model family and some files, but they are different concrete changes and not safely mergeable as one PR." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 3, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-40755-2", + "nodes": [ + "pull_request:40755", + "pull_request:41224" + ], + "soft_pairs": [ + "pull_request:40755|pull_request:41224" + ], + "prepared_review_unit_hash": "ac8c1b7a26caaa5d1d6cb181c5fd7df7885f8086155ce24b695016620f15f8d3", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4675, + "estimated_input_tokens": 1169, + "estimated_eval_tokens": 2594 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4675, + 
"estimated_input_tokens": 1169, + "estimated_eval_tokens": 2594 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The two pull requests are unrelated: one adds TimesFM covariate-based forecasting, while the other adds DINOv3 image-classification support. They do not appear to fix the same bug or implement the same change.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "No canonical PR: the PRs target different model families, different code paths, and different features.", + "best_issue_reason": null, + "best_pr_reason": "PR 41224 is the better standalone representative because it is a narrower, self-contained feature addition with a single clear model capability.", + "soft_edge_verdicts": [ + { + "left": "pull_request:40755", + "right": "pull_request:41224", + "accept": false, + "reason": "Different models and different functionality: TimesFM covariate forecasting versus DINOv3 image classification. They are not the same underlying change and should not be merged as duplicates." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 4, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "cache", + "cache_hit": true, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41356-2", + "nodes": [ + "pull_request:41356", + "pull_request:44339" + ], + "soft_pairs": [ + "pull_request:41356|pull_request:44339" + ], + "prepared_review_unit_hash": "88e60f926a84fb13a881f1700f827def51f6a2034775286ab5e8f054384e362c", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5929, + "estimated_input_tokens": 1483, + "estimated_eval_tokens": 3222 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5929, + "estimated_input_tokens": 1483, + "estimated_eval_tokens": 3222 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both pull requests are about adding DEIMv2 support to Transformers. 
PR #44339 is the more complete, later implementation and appears to supersede the smaller initial addition in #41356.", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #44339 is the stronger canonical PR because it is the later, more complete DEIMv2 integration: it adds the model, loss, auto-mapping, conversion script, and related docs/tests, while #41356 is an earlier partial model/image-processor addition.", + "best_issue_reason": null, + "best_pr_reason": "PR #44339 is the best representative of the cluster because it most fully implements the DEIMv2 feature and includes the broader set of integration changes expected for a merged model addition.", + "soft_edge_verdicts": [ + { + "left": "pull_request:41356", + "right": "pull_request:44339", + "accept": true, + "reason": "Same underlying feature addition for DEIMv2 support, with substantial overlap in model files, docs, and tests; #44339 looks like the expanded successor to the earlier partial PR." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet: both PRs target the same DEIMv2 issue, share several core files, and #44339 clearly adds broader integration pieces beyond #41356. The soft-edge verdict is conservative because it says the later PR 'looks like' an expanded successor rather than asserting a definitive relationship." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 5, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41797-2", + "nodes": [ + "pull_request:41797", + "pull_request:45186" + ], + "soft_pairs": [ + "pull_request:41797|pull_request:45186" + ], + "prepared_review_unit_hash": "a03af63a220b332b5d92c9a22f4904217612bf2f7168cc89b8c71fd7603b6c74", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6722, + "estimated_input_tokens": 1681, + "estimated_eval_tokens": 3618 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6722, + "estimated_input_tokens": 1681, + "estimated_eval_tokens": 3618 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are two separate model-addition PRs (DeepSeek OCR vs Isaac) that share only generic registration/docs scaffolding, not the same underlying change.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "No canonical PR: the diffs implement different models with different configs, processors, tests, and docs, so they are not duplicates.", + "best_issue_reason": null, + "best_pr_reason": "Neither PR is a global duplicate representative; each is a distinct model implementation and should be triaged independently.", + "soft_edge_verdicts": [ + { + "left": "pull_request:41797", + "right": "pull_request:45186", + "accept": false, + "reason": "Different model families and code paths (DeepSeek OCR vs Isaac); overlap is limited to shared Transformers registration/doc plumbing, which is 
not enough to treat them as the same change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 6, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42166-2", + "nodes": [ + "pull_request:42166", + "pull_request:45426" + ], + "soft_pairs": [ + "pull_request:42166|pull_request:45426" + ], + "prepared_review_unit_hash": "4e67818c4df0b90c59cfcccedacdc9b6215e7976cc4b08f75f1d6af750cad2d5", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5408, + "estimated_input_tokens": 1352, + "estimated_eval_tokens": 2960 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5408, + "estimated_input_tokens": 1352, + "estimated_eval_tokens": 2960 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both items are pull requests adding different model integrations: InternVLFlash vs AXK1. 
They share only common registration/boilerplate files, not the same underlying change.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "No canonical PR: these are unrelated feature additions for different models, so they should not be clustered as duplicates.", + "best_issue_reason": null, + "best_pr_reason": "Neither PR is a good global representative of the other; if forced to pick a cluster anchor, PR #42166 has more review activity, but it is not a duplicate of #45426.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42166", + "right": "pull_request:45426", + "accept": false, + "reason": "Reject: they add different models and different codepaths (InternVLFlash vs AXK1). Shared auto-registration files are generic boilerplate, not evidence of the same change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 7, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42227-4", + "nodes": [ + "pull_request:42227", + "pull_request:43291", + "pull_request:43665", + "pull_request:44815" + ], + "soft_pairs": [ + "pull_request:42227|pull_request:43291", + "pull_request:42227|pull_request:43665", + "pull_request:42227|pull_request:44815", + "pull_request:43291|pull_request:43665", + "pull_request:43291|pull_request:44815", + "pull_request:43665|pull_request:44815" + ], + "prepared_review_unit_hash": "128e96617927b4e100cff7e108ba48b6b7e4ecd9f70126e4ab65c49174dce506", + "component_budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 10170, + "estimated_input_tokens": 2543, + "estimated_eval_tokens": 5342 + }, + "budget": { + "node_count": 
4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 10170, + "estimated_input_tokens": 2543, + "estimated_eval_tokens": 5342 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The four PRs are not duplicates of one another. Two are Whisper-related, but they fix different areas (ASR pipeline language return vs tokenizer/test behavior). The CLIP/ViT test PR and the FP8 dequant PR are unrelated to the others.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 42227 is the cleanest representative: it has an explicit issue target, a focused code-path fix in the Whisper ASR pipeline, and matching test coverage.", + "best_issue_reason": null, + "best_pr_reason": "PR 42227 is the strongest standalone PR in the set because it is a concrete bug fix with a clear user-facing behavior change and direct issue linkage.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42227", + "right": "pull_request:43291", + "accept": false, + "reason": "Both are Whisper-related, but they fix different problems in different code paths: ASR pipeline return-language plumbing vs tokenizer decode/test behavior." + }, + { + "left": "pull_request:42227", + "right": "pull_request:43665", + "accept": false, + "reason": "Unrelated areas: Whisper ASR pipeline vs CLIP/ViT test-file changes. No shared underlying bug or mergeable combined fix." + }, + { + "left": "pull_request:42227", + "right": "pull_request:44815", + "accept": false, + "reason": "Different subsystems and bugs: Whisper return-language handling vs FP8 dequant/loading logic. Not the same change." + }, + { + "left": "pull_request:43291", + "right": "pull_request:43665", + "accept": false, + "reason": "Whisper tokenizer/test work does not match the CLIP/ViT test edits; these are separate and not mergeable as one fix." 
+ }, + { + "left": "pull_request:43291", + "right": "pull_request:44815", + "accept": false, + "reason": "No common concrete bug or code path: Whisper decode behavior vs FP8 quantization/loading. Unrelated PRs." + }, + { + "left": "pull_request:43665", + "right": "pull_request:44815", + "accept": false, + "reason": "CLIP/ViT test changes are unrelated to the FP8 dequant fix; they do not address the same issue." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 8, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42975-2", + "nodes": [ + "pull_request:42975", + "pull_request:42976" + ], + "soft_pairs": [ + "pull_request:42975|pull_request:42976" + ], + "prepared_review_unit_hash": "458ead408e6ff51ed837d143544655806020a3b67970cdb8757b0856add250ec", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5935, + "estimated_input_tokens": 1484, + "estimated_eval_tokens": 3224 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5935, + "estimated_input_tokens": 1484, + "estimated_eval_tokens": 3224 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two maintenance PRs for GitHub Actions upgrades, but they target different action families and change different workflows. 
They overlap in broad CI infrastructure, not in the same concrete bug or mergeable change set.", + "confidence": 0.89, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 42975 is the better representative because it has the clearer, more specific goal (Node 24 compatibility) and covers a broader set of workflow action-version bumps.", + "best_issue_reason": null, + "best_pr_reason": "PR 42975 is the most suitable canonical PR for the cluster: its title and diff make the maintenance intent explicit, while the other PR is a separate dependency bump focused on docker/build-push-action and related workflow updates.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42975", + "right": "pull_request:42976", + "accept": false, + "reason": "Do not merge as duplicates: 42975 updates checkout/cache/upload-artifact versions for Node 24 compatibility, while 42976 bumps docker/build-push-action and other workflow actions to latest versions. They are related maintenance changes but not the same concrete change or code-path fix." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 9, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42978-10", + "nodes": [ + "pull_request:42978", + "pull_request:43448", + "pull_request:43451", + "pull_request:43838", + "pull_request:43973", + "pull_request:44601", + "pull_request:45157", + "pull_request:45283", + "pull_request:45490", + "pull_request:45532" + ], + "soft_pairs": [ + "pull_request:42978|pull_request:43448", + "pull_request:42978|pull_request:43451", + "pull_request:42978|pull_request:45490", + "pull_request:43448|pull_request:43451", + "pull_request:43448|pull_request:45490", + "pull_request:43448|pull_request:45532", + "pull_request:43451|pull_request:45490", + "pull_request:43451|pull_request:45532", + "pull_request:43838|pull_request:45283", + "pull_request:43973|pull_request:45283", + "pull_request:44601|pull_request:45532", + "pull_request:45157|pull_request:45532", + "pull_request:45283|pull_request:45532", + "pull_request:45490|pull_request:45532" + ], + "prepared_review_unit_hash": "3495eeacad70709a7e9b98b84524250dae72090dbbf87f752acf60563196a188", + "component_budget": { + "node_count": 10, + "item_count": 10, + "soft_pair_count": 14, + "serialized_chars": 32085, + "estimated_input_tokens": 8022, + "estimated_eval_tokens": 16300 + }, + "budget": { + "node_count": 10, + "item_count": 10, + "soft_pair_count": 14, + "serialized_chars": 32085, + "estimated_input_tokens": 8022, + "estimated_eval_tokens": 16300 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "This cluster is mostly 
a set of unrelated model-addition and infrastructure PRs that share boilerplate docs/auto-registry edits. I found no true duplicate pairs worth merging.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": "No single canonical PR fits the cluster: the substantive changes span different models and unrelated infra work, and the overlaps are mostly shared documentation/registry boilerplate.", + "best_issue_reason": null, + "best_pr_reason": "45532 is the closest thing to a representative only because it has the strongest soft link and active discussion, but it still does not duplicate the rest of the cluster.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42978", + "right": "pull_request:43448", + "accept": false, + "reason": "Different model additions: ViT NEPA vs Molmo. Shared docs/auto-registry files are boilerplate, not the same change." + }, + { + "left": "pull_request:42978", + "right": "pull_request:43451", + "accept": false, + "reason": "Different model additions: ViT NEPA vs Molmo2. No shared concrete code-path or bug fix." + }, + { + "left": "pull_request:42978", + "right": "pull_request:45490", + "accept": false, + "reason": "Different model additions: ViT NEPA vs CTSM. Overlap is limited to generic docs/auto-registration edits." + }, + { + "left": "pull_request:43448", + "right": "pull_request:43451", + "accept": false, + "reason": "Molmo and Molmo2 are distinct model implementations with different files and behavior; not the same underlying change." + }, + { + "left": "pull_request:43448", + "right": "pull_request:45490", + "accept": false, + "reason": "Molmo vs CTSM are unrelated model additions; shared boilerplate files do not indicate duplication." + }, + { + "left": "pull_request:43448", + "right": "pull_request:45532", + "accept": false, + "reason": "Molmo vs SLANet are different model support PRs; they only share registry/docs plumbing." 
+ }, + { + "left": "pull_request:43451", + "right": "pull_request:45490", + "accept": false, + "reason": "Molmo2 and CTSM are separate model additions with different implementations and targets." + }, + { + "left": "pull_request:43451", + "right": "pull_request:45532", + "accept": false, + "reason": "Molmo2 vs SLANet are unrelated model support changes despite some shared auto-mapping boilerplate." + }, + { + "left": "pull_request:43838", + "right": "pull_request:45283", + "accept": false, + "reason": "Qwen3-ASR model support and Qwen3.5 GGUF loading support are different changes: one adds a model, the other adds loader mapping/quantization support." + }, + { + "left": "pull_request:43973", + "right": "pull_request:45283", + "accept": false, + "reason": "Lfm2 Audio model support vs Qwen3.5 GGUF loading support are different concrete code paths and features." + }, + { + "left": "pull_request:44601", + "right": "pull_request:45532", + "accept": false, + "reason": "Pipeline-parallel loading infrastructure and SLANet model support are unrelated; shared files are incidental registry/util updates." + }, + { + "left": "pull_request:45157", + "right": "pull_request:45532", + "accept": false, + "reason": "PrismML GGUF dequantization support vs SLANet model support are different feature areas and code paths." + }, + { + "left": "pull_request:45283", + "right": "pull_request:45532", + "accept": false, + "reason": "Qwen3.5 GGUF loading support and SLANet model support do not fix the same underlying problem." + }, + { + "left": "pull_request:45490", + "right": "pull_request:45532", + "accept": false, + "reason": "CTSM and SLANet are distinct model-addition PRs; the shared files are generic docs/auto-registration boilerplate, not a duplicate fix." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 10, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43488-3", + "nodes": [ + "pull_request:43488", + "pull_request:43532", + "pull_request:45476" + ], + "soft_pairs": [ + "pull_request:43488|pull_request:43532", + "pull_request:43488|pull_request:45476", + "pull_request:43532|pull_request:45476" + ], + "prepared_review_unit_hash": "42f5c34ba74a7caf1287e059191967d4327d2221323a541a38d492f4270b4d21", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7468, + "estimated_input_tokens": 1867, + "estimated_eval_tokens": 3990 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7468, + "estimated_input_tokens": 1867, + "estimated_eval_tokens": 3990 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are three unrelated throwaway PRs that all share the same \"[don't merge]\"/test-noise pattern, but they change different code paths: packaging/version checks, CI workflow plumbing, and a PR-CI caller plus tokenizer/test junk. 
The shared vit test file looks incidental rather than a common underlying fix.", + "confidence": 0.89, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 43532 is the most coherent representative of the cluster: it has a single CI/workflow-oriented theme and more substantial, connected edits than the others.", + "best_issue_reason": null, + "best_pr_reason": "PR 43532 is the best representative because its changes are most internally consistent around CI workflow behavior, whereas the other two are narrower or more obviously synthetic checks.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43488", + "right": "pull_request:43532", + "accept": false, + "reason": "Different underlying changes: 43488 is about setup/dependency/version formatting, while 43532 rewires self-comment CI and related utilities. Shared vit test edits look incidental." + }, + { + "left": "pull_request:43488", + "right": "pull_request:45476", + "accept": false, + "reason": "43488 targets packaging/version/dependency metadata; 45476 adds a PR-CI workflow caller and unrelated tokenizer/test edits. Not the same concrete bug or change." + }, + { + "left": "pull_request:43532", + "right": "pull_request:45476", + "accept": false, + "reason": "Both touch CI-adjacent files, but they address different mechanisms: self-comment CI versus PR CI caller setup. The overlap in tests is not enough to treat them as one change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 11, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43612-2", + "nodes": [ + "pull_request:43612", + "pull_request:43613" + ], + "soft_pairs": [ + "pull_request:43612|pull_request:43613" + ], + "prepared_review_unit_hash": "06899421bbc6a9f495591f0a33989aac0c0d17bd8e1ce2002360b382f478d260", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6083, + "estimated_input_tokens": 1521, + "estimated_eval_tokens": 3298 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6083, + "estimated_input_tokens": 1521, + "estimated_eval_tokens": 3298 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both pull requests add new, similarly named segmentation pipelines and share a doc/registry scaffolding pattern, but they implement different features and code paths: one is for Promptable Concept Segmentation, the other for Promptable Visual Segmentation. 
They are related, not duplicates.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": "If a representative PR must be chosen, #43613 is the broader one: it adds the visual segmentation pipeline plus more model/test integrations and touches a wider set of supporting files.", + "best_issue_reason": null, + "best_pr_reason": "#43613 is the better cluster representative because it is broader and more complete, but it should not be treated as a duplicate of #43612 since it targets a different pipeline and model path.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43612", + "right": "pull_request:43613", + "accept": false, + "reason": "Different underlying feature sets and code paths: concept segmentation vs visual segmentation. Shared docs/registry files and similar pipeline scaffolding are not enough to merge these as duplicates." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 12, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43651-2", + "nodes": [ + "pull_request:43651", + "pull_request:43663" + ], + "soft_pairs": [ + "pull_request:43651|pull_request:43663" + ], + "prepared_review_unit_hash": "003548d77de3a1260dd8af1fdb16080c8ae8bd408f6713e4a0f1695bc16bf598", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4292, + "estimated_input_tokens": 1073, + "estimated_eval_tokens": 2402 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4292, + "estimated_input_tokens": 1073, + "estimated_eval_tokens": 2402 + }, + "overflow_policy": "truncate_then_skip", + 
"trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two separate Trainer extensions in the same file: one adds a hook for gradient-accumulation loss scaling, the other adds a hook for signature-column filtering. They touch different code paths and are not duplicates.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #43663 is the cleaner, self-contained API extension and is the better representative of this cluster if one must be chosen.", + "best_issue_reason": null, + "best_pr_reason": "PR #43663 is the most suitable standalone change: it refactors column-signature handling into an overrideable method without altering training behavior.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43651", + "right": "pull_request:43663", + "accept": false, + "reason": "Both modify trainer.py, but they address unrelated concerns: gradient-accumulation loss scaling vs. dataloader column filtering. They do not fix the same concrete bug or change the same behavior." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 13, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43656-3", + "nodes": [ + "pull_request:43656", + "pull_request:43836", + "pull_request:43842" + ], + "soft_pairs": [ + "pull_request:43656|pull_request:43842", + "pull_request:43836|pull_request:43842" + ], + "prepared_review_unit_hash": "079e389db8d8793a091a9ed93cc581121bb98faf128b46629530ee8e281698b9", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 5992, + "estimated_input_tokens": 1498, + "estimated_eval_tokens": 3252 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 5992, + "estimated_input_tokens": 1498, + "estimated_eval_tokens": 3252 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All three PRs point at the same serve.py TypeAdapter runtime-annotation crash, with 43842 being the most direct and narrowly scoped fix.", + "confidence": 0.73, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 43842 most directly fixes the reported NameError by keeping TypeAdapter out of runtime evaluation when pydantic is absent, with a minimal targeted diff and matching issue linkage.", + "best_issue_reason": null, + "best_pr_reason": "PR 43842 is the best standalone fix: it addresses the concrete crash path in serve.py with the least extra churn and the clearest bug-specific intent.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43656", + "right": "pull_request:43842", + "accept": 
true, + "reason": "Both appear to fix the same serve.py TypeAdapter NameError in the CLI, using alternative annotation/runtime-import handling for the same crash path." + }, + { + "left": "pull_request:43836", + "right": "pull_request:43842", + "accept": true, + "reason": "Same file, same TypeAdapter NameError, and both change the import/annotation strategy to avoid evaluating TypeAdapter at runtime when pydantic is absent." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet: all three PRs target src/transformers/cli/serve.py and the TypeAdapter NameError/runtime-annotation issue, with 43842 clearly the narrowest fix. The soft-edge verdicts are conservative enough, and the pairs look mergeable as alternative fixes for the same bug." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 14, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43757-3", + "nodes": [ + "pull_request:43757", + "pull_request:45500", + "pull_request:45506" + ], + "soft_pairs": [ + "pull_request:45500|pull_request:45506", + "pull_request:43757|pull_request:45500", + "pull_request:43757|pull_request:45506" + ], + "prepared_review_unit_hash": "89e25286137d69b5eece3fecc1d687959f9116531fdbbe39cef479e20daed9bf", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7756, + "estimated_input_tokens": 1939, + "estimated_eval_tokens": 4134 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7756, + "estimated_input_tokens": 1939, + "estimated_eval_tokens": 4134 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": 
false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All three PRs are about GPT-OSS GGUF loading, but only 45506 looks like the complete end-state implementation. 45500 is a draft follow-up and 43757 is a narrower fallback-only stopgap.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": "45506 is the strongest canonical PR: it adds the GGUF metadata mapping, loader support, and tests, is not draft, and has review activity. 45500 is draft and 43757 only provides a fallback.", + "best_issue_reason": null, + "best_pr_reason": "45506 best represents the full fix for GPT-OSS GGUF support; it is the most complete and mergeable implementation among the three.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45500", + "right": "pull_request:45506", + "accept": true, + "reason": "Same underlying GPT-OSS GGUF loading work, same tracking issue, and nearly identical scope; 45506 is the fuller version of 45500." + }, + { + "left": "pull_request:43757", + "right": "pull_request:45500", + "accept": false, + "reason": "Related but not the same concrete change: 43757 only adds a fallback to avoid hard failure, while 45500 implements full GGUF support." + }, + { + "left": "pull_request:43757", + "right": "pull_request:45506", + "accept": false, + "reason": "43757 is a narrow compatibility fallback, whereas 45506 is a broader full-support PR; they are not the same fix and would not plausibly merge as one PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 15, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "cache", + "cache_hit": true, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43779-2", + "nodes": [ + "pull_request:43779", + "pull_request:43816" + ], + "soft_pairs": [ + "pull_request:43779|pull_request:43816" + ], + "prepared_review_unit_hash": "e7c674adcc031aa8883109a5dfc678b4407fb20b10764302092e384d031ba83a", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4362, + "estimated_input_tokens": 1091, + "estimated_eval_tokens": 2438 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4362, + "estimated_input_tokens": 1091, + "estimated_eval_tokens": 2438 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both pull requests appear to implement the same SwanLab integration change: adding run id/resume support in `integration_utils.py` for issue #43698, with one variant also expanding the docs. 
They are strong duplicate PR candidates.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #43816 is the cleaner representative: it carries the same SwanLab id/resume support plus the accompanying documentation updates, making it the more complete version of the shared change.", + "best_issue_reason": null, + "best_pr_reason": "PR #43816 is the best global representative because it appears to include the full feature change and docs for SwanLab run id/resume support, while #43779 is the narrower implementation-focused variant.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43779", + "right": "pull_request:43816", + "accept": true, + "reason": "Same underlying SwanLab callback enhancement for id/resume support in the same file and same issue target; the doc differences do not change the core code-path change, so these are duplicate PRs." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet: both PRs target issue #43698, touch the same file, and the previews indicate the same SwanLab id/resume support with #43816 adding docs. The duplicate/mergeable verdict is conservative enough given the shared target and overlapping code change." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 16, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43996", + "pull_request:43997", + "pull_request:44007", + "pull_request:44010", + "pull_request:44013", + "pull_request:44017", + "pull_request:44018", + "pull_request:44019", + "pull_request:44044", + "pull_request:44056", + "pull_request:44059", + "pull_request:44066", + "pull_request:44068", + "pull_request:44071", + "pull_request:44072", + "pull_request:44073", + "pull_request:44074", + "pull_request:44076", + "pull_request:44085", + "pull_request:44086", + "pull_request:44098", + "pull_request:44101", + "pull_request:44116", + "pull_request:44129", + "pull_request:44154", + "pull_request:44161", + "pull_request:44722" + ], + "soft_pairs": [ + "pull_request:43996|pull_request:44085", + "pull_request:43996|pull_request:44044", + "pull_request:44066|pull_request:44085", + "pull_request:44007|pull_request:44072", + "pull_request:44072|pull_request:44722", + "pull_request:44066|pull_request:44072", + "pull_request:44013|pull_request:44044", + "pull_request:44066|pull_request:44086", + "pull_request:44018|pull_request:44068", + "pull_request:44066|pull_request:44071", + "pull_request:44066|pull_request:44068", + "pull_request:44018|pull_request:44066", + "pull_request:44019|pull_request:44722", + "pull_request:44086|pull_request:44722", + "pull_request:44019|pull_request:44085", + "pull_request:44018|pull_request:44019", + "pull_request:44019|pull_request:44071", + "pull_request:44068|pull_request:44116", + "pull_request:44068|pull_request:44722", + "pull_request:44018|pull_request:44116", + 
"pull_request:44071|pull_request:44722", + "pull_request:44018|pull_request:44086", + "pull_request:44018|pull_request:44085", + "pull_request:44068|pull_request:44086", + "pull_request:44071|pull_request:44085", + "pull_request:44059|pull_request:44086", + "pull_request:44085|pull_request:44086", + "pull_request:44059|pull_request:44722", + "pull_request:44098|pull_request:44101", + "pull_request:44018|pull_request:44071", + "pull_request:44017|pull_request:44722", + "pull_request:44017|pull_request:44161", + "pull_request:44017|pull_request:44073", + "pull_request:44068|pull_request:44071", + "pull_request:44071|pull_request:44086", + "pull_request:43997|pull_request:44073", + "pull_request:44017|pull_request:44071", + "pull_request:44059|pull_request:44071", + "pull_request:44073|pull_request:44101", + "pull_request:44059|pull_request:44068", + "pull_request:44018|pull_request:44059", + "pull_request:44056|pull_request:44101", + "pull_request:44073|pull_request:44161", + "pull_request:44101|pull_request:44161", + "pull_request:44017|pull_request:44018", + "pull_request:43997|pull_request:44056", + "pull_request:44071|pull_request:44076", + "pull_request:44074|pull_request:44101", + "pull_request:44056|pull_request:44161", + "pull_request:43997|pull_request:44018", + "pull_request:43997|pull_request:44161", + "pull_request:44074|pull_request:44161", + "pull_request:44018|pull_request:44161", + "pull_request:43997|pull_request:44071", + "pull_request:44010|pull_request:44071", + "pull_request:44010|pull_request:44073", + "pull_request:44018|pull_request:44129", + "pull_request:43997|pull_request:44074", + "pull_request:43997|pull_request:44129", + "pull_request:44010|pull_request:44018", + "pull_request:44071|pull_request:44129", + "pull_request:43996|pull_request:44161", + "pull_request:43996|pull_request:44017", + "pull_request:44010|pull_request:44044", + "pull_request:44044|pull_request:44074", + "pull_request:44066|pull_request:44722", + 
"pull_request:44085|pull_request:44722", + "pull_request:44085|pull_request:44129", + "pull_request:43996|pull_request:44007", + "pull_request:43996|pull_request:44129", + "pull_request:44044|pull_request:44129", + "pull_request:44044|pull_request:44154" + ], + "prepared_review_unit_hash": "8e4bbb82040c6f2fadbbd9d411e63ae54af34b8876ae39d8893f031653b99f67", + "component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 78, + "serialized_chars": 84898, + "estimated_input_tokens": 21225, + "estimated_eval_tokens": 42706 + }, + "budget": { + "node_count": 27, + "item_count": 27, + "soft_pair_count": 72, + "serialized_chars": 75093, + "estimated_input_tokens": 18774, + "estimated_eval_tokens": 37804 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of model-specific refactors to standardized output tracing, not one single duplicate change. Only the GPTJ/CodeGen variants and the GPT-Neo variants look like true overlaps; the rest share the umbrella issue but not the same code path.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44722 is the best canonical PR for the GPTJ/CodeGen subcluster: it is the latest and most complete instance of that concrete output-tracing refactor.", + "best_issue_reason": null, + "best_pr_reason": "44722 is the strongest representative of the shared decorator-based output-tracing migration, with the broadest concrete GPTJ/CodeGen implementation among the overlapping PRs.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43996", + "right": "pull_request:44085", + "accept": false, + "reason": "Different model families and code paths; only the umbrella tracking issue matches." 
+ }, + { + "left": "pull_request:43996", + "right": "pull_request:44044", + "accept": false, + "reason": "Different model families and code paths; same issue target is too broad to merge." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44085", + "accept": true, + "reason": "Both are GPTJ output-tracing refactors on the same code path and could plausibly be one PR." + }, + { + "left": "pull_request:44007", + "right": "pull_request:44072", + "accept": false, + "reason": "ResNet/RT-DETR ResNet versus EfficientNet; same theme, but not the same concrete change." + }, + { + "left": "pull_request:44072", + "right": "pull_request:44722", + "accept": false, + "reason": "EfficientNet refactor versus GPTJ/CodeGen refactor; unrelated code paths." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44072", + "accept": false, + "reason": "GPTJ/CodeGen versus EfficientNet; shared output-tracing theme only." + }, + { + "left": "pull_request:44013", + "right": "pull_request:44044", + "accept": false, + "reason": "MobileNetV2 versus DeBERTa; separate model-specific migrations." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44086", + "accept": false, + "reason": "GPTJ/CodeGen versus MGP-STR; not the same underlying bug or change." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44068", + "accept": true, + "reason": "Both are GPT-Neo output-tracing refactors for the same model and code path." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44071", + "accept": false, + "reason": "GPTJ/CodeGen versus MPT; only the shared decorator pattern matches." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44068", + "accept": false, + "reason": "GPTJ versus GPT-Neo; similar infrastructure work but different model code paths." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44066", + "accept": false, + "reason": "GPT-Neo versus GPTJ/CodeGen; not the same refactor." 
+ }, + { + "left": "pull_request:44019", + "right": "pull_request:44722", + "accept": false, + "reason": "ResNet versus GPTJ/CodeGen; same umbrella issue, different implementations." + }, + { + "left": "pull_request:44086", + "right": "pull_request:44722", + "accept": false, + "reason": "MGP-STR versus GPTJ/CodeGen; no shared concrete code path." + }, + { + "left": "pull_request:44019", + "right": "pull_request:44085", + "accept": false, + "reason": "ResNet versus GPTJ; distinct model changes." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44019", + "accept": false, + "reason": "GPT-Neo versus ResNet; related pattern, but not a duplicate." + }, + { + "left": "pull_request:44019", + "right": "pull_request:44071", + "accept": false, + "reason": "ResNet versus MPT; different model-specific output handling." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44116", + "accept": false, + "reason": "GPT-Neo versus Flaubert/XLM; not the same underlying fix." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44722", + "accept": false, + "reason": "GPT-Neo versus GPTJ/CodeGen; same refactor style, different code paths." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44116", + "accept": false, + "reason": "GPT-Neo versus Flaubert/XLM; separate model migrations." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44722", + "accept": false, + "reason": "MPT versus GPTJ/CodeGen; only the output-tracing framework is shared." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT-Neo versus MGP-STR; not the same concrete change." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44085", + "accept": false, + "reason": "GPT-Neo versus GPTJ; similar theme, different implementation." 
+ }, + { + "left": "pull_request:44068", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT-Neo versus MGP-STR; unrelated model code paths." + }, + { + "left": "pull_request:44059", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT2 versus MGP-STR; only the umbrella output-tracing work overlaps." + }, + { + "left": "pull_request:44085", + "right": "pull_request:44086", + "accept": false, + "reason": "GPTJ versus MGP-STR; not the same bug or refactor." + }, + { + "left": "pull_request:44059", + "right": "pull_request:44722", + "accept": false, + "reason": "GPT2 versus GPTJ/CodeGen; distinct model-specific refactors." + }, + { + "left": "pull_request:44098", + "right": "pull_request:44101", + "accept": false, + "reason": "ViLT versus Flaubert/XLM; different model families." + }, + { + "left": "pull_request:44073", + "right": "pull_request:44101", + "accept": false, + "reason": "VisualBert versus Flaubert/XLM; not the same code path." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44722", + "accept": false, + "reason": "Segformer versus GPTJ/CodeGen; separate migrations." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44161", + "accept": false, + "reason": "Segformer versus LongT5; same pattern, different models." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44073", + "accept": false, + "reason": "Segformer versus VisualBert; unrelated implementations." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44073", + "accept": false, + "reason": "RegNet versus VisualBert; different model-specific code." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44071", + "accept": false, + "reason": "SqueezeBert versus MPT; not a duplicate change." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44073", + "accept": false, + "reason": "SqueezeBert versus VisualBert; separate output-tracing refactors." 
+ }, + { + "left": "pull_request:44010", + "right": "pull_request:44018", + "accept": false, + "reason": "SqueezeBert versus GPT-Neo; same theme only." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44056", + "accept": false, + "reason": "RegNet versus MPNet; different models and code paths." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44018", + "accept": false, + "reason": "RegNet versus GPT-Neo; not the same underlying fix." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44161", + "accept": false, + "reason": "RegNet versus LongT5; separate refactors." + }, + { + "left": "pull_request:44074", + "right": "pull_request:44101", + "accept": false, + "reason": "TextNet versus Flaubert/XLM; different model implementations." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44161", + "accept": false, + "reason": "MPNet versus LongT5; same umbrella issue, different code paths." + }, + { + "left": "pull_request:44074", + "right": "pull_request:44161", + "accept": false, + "reason": "TextNet versus LongT5; not the same concrete change." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44017", + "accept": false, + "reason": "FNet/CVT versus Segformer; unrelated model-specific work." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44044", + "accept": false, + "reason": "SqueezeBert versus DeBERTa; distinct changes." + }, + { + "left": "pull_request:44044", + "right": "pull_request:44074", + "accept": false, + "reason": "DeBERTa versus TextNet; same pattern, different code paths." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44722", + "accept": true, + "reason": "Both cover the same GPTJ/CodeGen decorator migration and could be merged into one PR." + }, + { + "left": "pull_request:44085", + "right": "pull_request:44722", + "accept": true, + "reason": "Both are GPTJ output-tracing refactors with overlapping concrete changes." 
+ }, + { + "left": "pull_request:44085", + "right": "pull_request:44129", + "accept": false, + "reason": "GPTJ versus SpeechT5; same umbrella issue, not the same bug." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44007", + "accept": false, + "reason": "FNet/CVT versus ResNet/RT-DETR ResNet; unrelated model refactors." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44129", + "accept": false, + "reason": "FNet/CVT versus SpeechT5; different code paths." + }, + { + "left": "pull_request:44044", + "right": "pull_request:44129", + "accept": false, + "reason": "DeBERTa versus SpeechT5; separate model-specific changes." + }, + { + "left": "pull_request:44044", + "right": "pull_request:44154", + "accept": false, + "reason": "DeBERTa versus ViTS; not the same underlying refactor." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays conservative about overlap versus shared umbrella issue. The accepted soft-edge pairs are all within the same model-family refactor and look plausibly mergeable into a single PR for the same bugfix, while the rejected pairs are appropriately cautious about only sharing the broader standardized output-tracing theme." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 16, + "component_count": 23, + "review_unit_index": 2, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43997", + "pull_request:44010", + "pull_request:44024", + "pull_request:44056", + "pull_request:44073", + "pull_request:44074", + "pull_request:44076", + "pull_request:44129" + ], + "soft_pairs": [ + "pull_request:44024|pull_request:44076", + "pull_request:44073|pull_request:44074", + "pull_request:44056|pull_request:44073", + "pull_request:44056|pull_request:44074", + "pull_request:43997|pull_request:44010", + "pull_request:44010|pull_request:44129" + ], + "prepared_review_unit_hash": "a39be23eb4eeaf89398285982d8c83e5ee8f2f9c5677e4d1af2a76e2e9da9b0b", + "component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 78, + "serialized_chars": 84898, + "estimated_input_tokens": 21225, + "estimated_eval_tokens": 42706 + }, + "budget": { + "node_count": 8, + "item_count": 8, + "soft_pair_count": 6, + "serialized_chars": 17126, + "estimated_input_tokens": 4282, + "estimated_eval_tokens": 8820 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These PRs are all model-specific refactors to standardized output tracing/capture_outputs, but they target different architectures and code paths, so they are not duplicates of one another.", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44129 is the strongest representative of the cluster: it has an explicit issue link, a clear output-tracing refactor, and enough surrounding changes to show the intended 
migration pattern.", + "best_issue_reason": null, + "best_pr_reason": "PR 44129 is the best single PR to anchor the cluster because it is the most explicitly scoped and documented, while the others are similar migrations in unrelated models.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44024", + "right": "pull_request:44076", + "accept": false, + "reason": "Same broad refactor theme, but different models (FocalNet vs ImageGPT) and different code paths; not the same underlying change." + }, + { + "left": "pull_request:44073", + "right": "pull_request:44074", + "accept": false, + "reason": "Both migrate output tracing, but one is VisualBert and the other TextNet; they are separate model implementations and cannot be merged as one concrete fix." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44073", + "accept": false, + "reason": "MPNet and VisualBert are unrelated model code paths; the similarity is only the shared standardized-output refactor pattern." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44074", + "accept": false, + "reason": "MPNet and TextNet touch different architectures and forward paths, so this is not the same underlying bug or change." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44010", + "accept": false, + "reason": "RegNet and SqueezeBert are unrelated implementations; these are separate output-tracing migrations, not one shared fix." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44129", + "accept": false, + "reason": "SqueezeBert and SpeechT5 both adjust output capture, but they affect different model internals and cannot plausibly be merged into one PR as the same concrete code-path fix." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 17, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43998-13", + "nodes": [ + "pull_request:43998", + "pull_request:43999", + "pull_request:44000", + "pull_request:44001", + "pull_request:44002", + "pull_request:44003", + "pull_request:44004", + "pull_request:44025", + "pull_request:44026", + "pull_request:44027", + "pull_request:44028", + "pull_request:44029", + "pull_request:44030" + ], + "soft_pairs": [ + "pull_request:43998|pull_request:44002", + "pull_request:43998|pull_request:44000", + "pull_request:43998|pull_request:44001", + "pull_request:43998|pull_request:44003", + "pull_request:43998|pull_request:44029", + "pull_request:43999|pull_request:44001", + "pull_request:43999|pull_request:44002", + "pull_request:43999|pull_request:44025", + "pull_request:43999|pull_request:44026", + "pull_request:43999|pull_request:44027", + "pull_request:44000|pull_request:44001", + "pull_request:44000|pull_request:44002", + "pull_request:44000|pull_request:44003", + "pull_request:44000|pull_request:44025", + "pull_request:44001|pull_request:44002", + "pull_request:44001|pull_request:44003", + "pull_request:44001|pull_request:44025", + "pull_request:44002|pull_request:44029", + "pull_request:44002|pull_request:44003", + "pull_request:44003|pull_request:44004", + "pull_request:43998|pull_request:44004", + "pull_request:44001|pull_request:44004", + "pull_request:44002|pull_request:44004", + "pull_request:44000|pull_request:44004", + "pull_request:44025|pull_request:44026", + "pull_request:44025|pull_request:44027", + "pull_request:44025|pull_request:44028", + 
"pull_request:44025|pull_request:44029", + "pull_request:44025|pull_request:44030", + "pull_request:44026|pull_request:44029", + "pull_request:44026|pull_request:44027", + "pull_request:44026|pull_request:44028", + "pull_request:44026|pull_request:44030", + "pull_request:44027|pull_request:44028", + "pull_request:44027|pull_request:44029", + "pull_request:44027|pull_request:44030", + "pull_request:44028|pull_request:44029", + "pull_request:44028|pull_request:44030", + "pull_request:44029|pull_request:44030" + ], + "prepared_review_unit_hash": "1ed95264fa2191610891d8373501d4dee45e5e4cbed692316150e57dc1dc82b1", + "component_budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 39, + "serialized_chars": 39774, + "estimated_input_tokens": 9944, + "estimated_eval_tokens": 20144 + }, + "budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 39, + "serialized_chars": 39774, + "estimated_input_tokens": 9944, + "estimated_eval_tokens": 20144 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These PRs are all similar output-tracing refactors, but they target different model implementations and code paths. They are related by theme, not by the same underlying bug/change, so the soft edges should be rejected. 
No issue is present; one PR can serve as the cluster representative.", + "confidence": 0.84, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44003 is the strongest representative because it is a broader output-tracing refactor that touches two related model files and introduces the new capturing plumbing, making it more central than the single-model variants.", + "best_issue_reason": null, + "best_pr_reason": "PR 44003 best represents the cluster\u2019s common pattern while still being a concrete, nontrivial code change across multiple files.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43998", + "right": "pull_request:44002", + "accept": false, + "reason": "Different model families and files (timm_backbone vs upernet); same refactor theme, not the same code-path bug." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44000", + "accept": false, + "reason": "Timm backbone and vision-text dual encoder are unrelated implementations; this is only a shared output-tracing pattern." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44001", + "accept": false, + "reason": "Different model code paths (timm_backbone vs univnet); no evidence of one shared defect." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44003", + "accept": false, + "reason": "These touch different model stacks (timm_backbone vs mamba/falcon_mamba) and are separate refactors." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44029", + "accept": false, + "reason": "Different model families and mechanics; same refactor motif, not mergeable as one fix." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44001", + "accept": false, + "reason": "MobileNetV1 and UnivNet are unrelated model paths; the overlap is only in output-tracing cleanup." 
+ }, + { + "left": "pull_request:43999", + "right": "pull_request:44002", + "accept": false, + "reason": "MobileNetV1 vs UperNet are separate implementations; no shared underlying bug is shown." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44025", + "accept": false, + "reason": "Different model families and files; both are refactors, but not the same concrete change." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44026", + "accept": false, + "reason": "MobileNetV1 and VisionEncoderDecoder are unrelated code paths despite similar signature cleanup." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44027", + "accept": false, + "reason": "MobileNetV1 and SpeechEncoderDecoder are separate model stacks; same pattern, different fixes." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44001", + "accept": false, + "reason": "Vision-Text Dual Encoder and UnivNet are different model paths; no shared bug or mergeable PR pair." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44002", + "accept": false, + "reason": "Different model families (vision-text dual encoder vs UperNet); only the refactor style matches." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44003", + "accept": false, + "reason": "Vision-text dual encoder and mamba/falcon_mamba are unrelated concrete changes." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44025", + "accept": false, + "reason": "Different targets and implementations; both are output-tracing refactors, but not the same fix." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44002", + "accept": false, + "reason": "UnivNet and UperNet are distinct model paths; no evidence they address one underlying bug." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44003", + "accept": false, + "reason": "UnivNet vs mamba/falcon_mamba are separate changes with only superficial similarity." 
+ }, + { + "left": "pull_request:44001", + "right": "pull_request:44025", + "accept": false, + "reason": "Different model stacks and changed files; not the same concrete issue." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44029", + "accept": false, + "reason": "UperNet and RWKV are unrelated model implementations; shared theme is too broad." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44003", + "accept": false, + "reason": "Different model code paths and refactor scopes; not plausibly one merged PR." + }, + { + "left": "pull_request:44003", + "right": "pull_request:44004", + "accept": false, + "reason": "Mamba/falcon_mamba output tracing and CodeGen output tracing are separate model-specific changes." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44004", + "accept": false, + "reason": "Timm backbone and CodeGen are unrelated subsystems; the similarity is only generic refactoring." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44004", + "accept": false, + "reason": "UnivNet and CodeGen fix different model code paths, so they should not be merged as duplicates." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44004", + "accept": false, + "reason": "UperNet and CodeGen are distinct implementations with no shared concrete defect." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44004", + "accept": false, + "reason": "Vision-text dual encoder vs CodeGen are different model paths; same broad theme is insufficient." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44026", + "accept": false, + "reason": "Depth Anything and VisionEncoderDecoder are unrelated model families; not the same bug or change." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44027", + "accept": false, + "reason": "Depth Anything and SpeechEncoderDecoder are different code paths; only the output-tracing refactor matches." 
+ }, + { + "left": "pull_request:44025", + "right": "pull_request:44028", + "accept": false, + "reason": "Depth Anything and SuperPoint are separate models; no shared underlying issue is evident." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44029", + "accept": false, + "reason": "Depth Anything and RWKV are unrelated implementations, so this is not one duplicate change." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44030", + "accept": false, + "reason": "Depth Anything and DPR touch different model stacks and should remain separate PRs." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44029", + "accept": false, + "reason": "VisionEncoderDecoder and RWKV are different code paths; common output-tracing refactor is too broad." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44027", + "accept": false, + "reason": "VisionEncoderDecoder and SpeechEncoderDecoder are separate encoder-decoder variants with different implementations." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44028", + "accept": false, + "reason": "Different model families (vision encoder-decoder vs SuperPoint), so no same-bug basis." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44030", + "accept": false, + "reason": "VisionEncoderDecoder and DPR are unrelated model code paths; not mergeable as one fix." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44028", + "accept": false, + "reason": "SpeechEncoderDecoder and SuperPoint are unrelated model implementations." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44029", + "accept": false, + "reason": "SpeechEncoderDecoder and RWKV are different concrete code paths; only the refactor style overlaps." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44030", + "accept": false, + "reason": "SpeechEncoderDecoder and DPR are unrelated targets, so this is not a duplicate change." 
+ }, + { + "left": "pull_request:44028", + "right": "pull_request:44029", + "accept": false, + "reason": "SuperPoint and RWKV address different model logic; same theme does not justify merging." + }, + { + "left": "pull_request:44028", + "right": "pull_request:44030", + "accept": false, + "reason": "SuperPoint and DPR are distinct model code paths with no shared bug." + }, + { + "left": "pull_request:44029", + "right": "pull_request:44030", + "accept": false, + "reason": "RWKV and DPR are unrelated implementations; these are separate model-specific refactors." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 18, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44662-3", + "nodes": [ + "pull_request:44662", + "pull_request:45075", + "pull_request:45401" + ], + "soft_pairs": [ + "pull_request:44662|pull_request:45401", + "pull_request:44662|pull_request:45075", + "pull_request:45075|pull_request:45401" + ], + "prepared_review_unit_hash": "89e02490afbfc5586ddd71e25599998ba6dac42e01bab8a9ce87a708572a10ae", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 11071, + "estimated_input_tokens": 2768, + "estimated_eval_tokens": 5792 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 11071, + "estimated_input_tokens": 2768, + "estimated_eval_tokens": 5792 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are three separate model-addition pull requests for different architectures (PenguinVL, 
DeepSeek-OCR-2, Voxtral TTS). They share only boilerplate registry/docs updates, not the same underlying bug or code change, so none should be clustered as duplicates.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "pull_request:44662", + "right": "pull_request:45401", + "accept": false, + "reason": "Different model implementations and code paths: PenguinVL vision-language model vs Voxtral TTS speech model. Shared auto-registration/doc scaffolding is generic boilerplate, not the same change." + }, + { + "left": "pull_request:44662", + "right": "pull_request:45075", + "accept": false, + "reason": "Different targets and functionality: PenguinVL vs DeepSeek-OCR-2. The overlap is limited to standard Transformers integration files, which is insufficient to treat them as one PR." + }, + { + "left": "pull_request:45075", + "right": "pull_request:45401", + "accept": false, + "reason": "These add unrelated models for OCR vs text-to-speech. They touch similar framework plumbing but do not fix or implement the same concrete feature or bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 19, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44979-2", + "nodes": [ + "pull_request:44979", + "pull_request:45363" + ], + "soft_pairs": [ + "pull_request:44979|pull_request:45363" + ], + "prepared_review_unit_hash": "7302f7c67423962ccec9fef113101cdd411e5b09ed6f79626d8ff25f77b1b004", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4407, + "estimated_input_tokens": 1102, + "estimated_eval_tokens": 2460 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4407, + "estimated_input_tokens": 1102, + "estimated_eval_tokens": 2460 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These PRs are related feature work around fusion, but they target different layers and code paths: one introduces a generic Module Fusion API, the other wires KernelConfig-based n-to-1 kernel fusion into model loading and adds an example.", + "confidence": 0.87, + "canonical_issue_reason": null, + "canonical_pr_reason": "No single canonical PR: 44979 is the foundational module-fusion API, while 45363 is a separate integration of KernelConfig-driven n-to-1 fusion built on top of that idea.", + "best_issue_reason": null, + "best_pr_reason": "44979 is the broader foundational abstraction and the cleaner cluster representative; 45363 is a more concrete integration/example change rather than the core API surface.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44979", + 
"right": "pull_request:45363", + "accept": false, + "reason": "Related but not duplicates: 44979 adds a new generic Module Fusion API (`src/transformers/module_fusion.py` and tests), while 45363 implements a specific `KernelConfig`-based n-to-1 fusion path across integration/model-loading files. They address different abstractions and code paths." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 20, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45086-2", + "nodes": [ + "pull_request:45086", + "pull_request:45317" + ], + "soft_pairs": [ + "pull_request:45086|pull_request:45317" + ], + "prepared_review_unit_hash": "14470d6c689f3a4471d4ec01fd794d0a50221ba7eb0186a5054faee4d0eaf933", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4414, + "estimated_input_tokens": 1104, + "estimated_eval_tokens": 2464 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4414, + "estimated_input_tokens": 1104, + "estimated_eval_tokens": 2464 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both pull requests target the same Mistral regex AttributeError in `_patch_mistral_regex` and make the same code fix in `tokenization_utils_tokenizers.py`; the later PR also adds a test, so it is the stronger canonical PR candidate.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45317 is the more complete version of the same fix: it applies the same code change as 45086 and also adds test 
coverage, making it the better canonical PR.", + "best_issue_reason": null, + "best_pr_reason": "PR 45317 is the best representative because it fixes the same concrete bug as 45086 but includes additional validation via tests and has more discussion activity.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45086", + "right": "pull_request:45317", + "accept": true, + "reason": "Accept: both PRs address the same AttributeError in `_patch_mistral_regex` with the same underlying code-path fix in the same file, and 45317 is a superset with tests." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 21, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45332-2", + "nodes": [ + "pull_request:45332", + "pull_request:45333" + ], + "soft_pairs": [ + "pull_request:45332|pull_request:45333" + ], + "prepared_review_unit_hash": "81cd4f0525f766dac1bba01b28a0b2a2db1e228b0e84f248d286c1357244d986", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5398, + "estimated_input_tokens": 1350, + "estimated_eval_tokens": 2956 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5398, + "estimated_input_tokens": 1350, + "estimated_eval_tokens": 2956 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two open PRs add adjacent heterogeneous support: #45333 introduces per-layer config infrastructure, while #45332 adds model/cache handling on top of it. 
They are related, but not duplicates.", + "confidence": 0.87, + "canonical_issue_reason": null, + "canonical_pr_reason": "Choose #45333 as the canonical PR because it introduces the foundational per-layer heterogeneous config API and helper utilities that the other PR builds on.", + "best_issue_reason": null, + "best_pr_reason": "Choose #45332 as the best representative PR because it covers the broader end-to-end heterogeneous model and cache integration, making it the more complete user-facing change.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45332", + "right": "pull_request:45333", + "accept": false, + "reason": "Reject: these are related prerequisite/follow-up PRs, not the same underlying change. #45333 adds config infrastructure; #45332 adds modeling/cache support and broader integration work." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 22, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45415-2", + "nodes": [ + "pull_request:45415", + "pull_request:45425" + ], + "soft_pairs": [ + "pull_request:45415|pull_request:45425" + ], + "prepared_review_unit_hash": "10750f0dc6fa0cb26f7c1ccc490a0f34e97e22ebf74b2d58a691713e74fa429c", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5306, + "estimated_input_tokens": 1327, + "estimated_eval_tokens": 2910 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5306, + "estimated_input_tokens": 1327, + "estimated_eval_tokens": 2910 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": 
false, + "analyst_result": { + "summary": "Two open PRs in the same typing-cleanup area, but they are not the same underlying change. PR 45415 is a broad sweep adding type checking across many transformers modules and relaxing type-ignore warnings, while PR 45425 is a narrower typing helper update around `modeling_utils`/PEFT-related protocols and aliases.", + "confidence": 0.86, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45415 is the best representative of the cluster because it is the broader, more central typing/type-checking sweep affecting many files and build config.", + "best_issue_reason": null, + "best_pr_reason": "PR 45415 is the strongest global representative: it covers the larger cross-cutting type-checking initiative, whereas PR 45425 is a narrower follow-up/helper change.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45415", + "right": "pull_request:45425", + "accept": false, + "reason": "Both are typing-related, but they address different scopes: 45415 is a broad repository-wide type-checking pass, while 45425 adds specific typing aliases/protocols for `modeling_utils`/PEFT. They are not the same concrete fix and would not naturally merge into one PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 23, + "component_count": 23, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45470-2", + "nodes": [ + "pull_request:45470", + "pull_request:45487" + ], + "soft_pairs": [ + "pull_request:45470|pull_request:45487" + ], + "prepared_review_unit_hash": "1661d1c946f4c4e43067e21000404becf0f43d59ec1853e950583fc551f1d05b", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5068, + "estimated_input_tokens": 1267, + "estimated_eval_tokens": 2790 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5068, + "estimated_input_tokens": 1267, + "estimated_eval_tokens": 2790 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The two pull requests are unrelated: one only skips flash-attn composite-model tests for Sam3LiteText, while the other fixes a real model-parallel/device-placement bug across several text models. 
They should not be clustered as duplicates.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45487 is the strongest canonical PR because it contains the substantive code fix (device placement for buffered token type ids and related model-parallel adjustments) across multiple affected models, unlike PR 45470 which only adds test skips.", + "best_issue_reason": null, + "best_pr_reason": "PR 45487 is the best representative artifact for this cluster since it addresses an actual implementation bug with concrete code changes and broader impact; PR 45470 is just a test skip and not a comparable fix.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45470", + "right": "pull_request:45487", + "accept": false, + "reason": "Different problems and change types: 45470 skips flash-attn tests for Sam3LiteText due to unsupported flash attention, while 45487 fixes a model-parallel/device mismatch in AltCLIP/ChineseCLIP and related models. They are not the same underlying bug or mergeable change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + } + ] +} diff --git a/snapshots/20260421T060039Z/analysis-runs/hybrid-model-20260421t060039z/manifest.json b/snapshots/20260421T060039Z/analysis-runs/hybrid-model-20260421t060039z/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..c8f432e1d5f12e421d58a90361e7f3448c0b84ab --- /dev/null +++ b/snapshots/20260421T060039Z/analysis-runs/hybrid-model-20260421t060039z/manifest.json @@ -0,0 +1,14 @@ +{ + "analysis_id": "hybrid-model-20260421t060039z", + "artifacts": { + "hybrid": "snapshots/20260421T060039Z/analysis-runs/hybrid-model-20260421t060039z/analysis-report-hybrid.json", + "hybrid_reviews": "snapshots/20260421T060039Z/analysis-runs/hybrid-model-20260421t060039z/analysis-report-hybrid.llm-reviews.json" + }, + "channel": "canonical", + "model": null, + "published_at": "2026-04-21T06:06:36Z", + "repo": "huggingface/transformers", + "schema_version": 1, + "snapshot_id": "20260421T060039Z", + "variant": "hybrid" +} diff --git a/snapshots/20260421T060039Z/manifest.json b/snapshots/20260421T060039Z/manifest.json index 2ecb1291018176a9341808612ebc5d1dd3a25c0d..e0b5e05e4b7798991c51b462a4e1fe3abd891888 100644 --- a/snapshots/20260421T060039Z/manifest.json +++ b/snapshots/20260421T060039Z/manifest.json @@ -32,6 +32,24 @@ "timeline_events": 274 }, "extracted_at": "2026-04-21T06:00:39Z", + "published_analysis": { + "canonical_analysis_id": "hybrid-model-20260421t060039z", + "runs": { + "hybrid-model-20260421t060039z": { + "analysis_id": "hybrid-model-20260421t060039z", + "artifacts": { + "hybrid": "snapshots/20260421T060039Z/analysis-runs/hybrid-model-20260421t060039z/analysis-report-hybrid.json", + "hybrid_reviews": "snapshots/20260421T060039Z/analysis-runs/hybrid-model-20260421t060039z/analysis-report-hybrid.llm-reviews.json" + }, + "channel": "canonical", + 
"manifest_path": "snapshots/20260421T060039Z/analysis-runs/hybrid-model-20260421t060039z/manifest.json", + "model": null, + "published_at": "2026-04-21T06:06:36Z", + "variant": "hybrid" + } + }, + "schema_version": 1 + }, "repo": "huggingface/transformers", "snapshot_id": "20260421T060039Z", "watermark": {