diff --git a/analysis/current/analysis-report-hybrid.json b/analysis/current/analysis-report-hybrid.json index 41e0512ee8312d893dae32a940a63de47dcbf125..60f63961e37de93f72d81efd205a8cdaff8e765d 100644 --- a/analysis/current/analysis-report-hybrid.json +++ b/analysis/current/analysis-report-hybrid.json @@ -1,8 +1,8 @@ { "schema_version": "1.0", "repo": "huggingface/transformers", - "snapshot_id": "20260430T000042Z", - "generated_at": "2026-04-30T00:10:16Z", + "snapshot_id": "20260430T060020Z", + "generated_at": "2026-04-30T06:08:49Z", "evidence_quality": "full", "llm_enrichment": true, "meta_bugs": [ @@ -1198,76 +1198,6 @@ } ] }, - { - "cluster_id": "cluster-43824-3", - "summary": "Cluster of 1 issues and 2 PRs centered on issue #43824.", - "status": "open", - "confidence": 0.75, - "canonical_issue_number": 43824, - "canonical_pr_number": 43836, - "issue_numbers": [ - 43824 - ], - "pr_numbers": [ - 43836, - 43842 - ], - "evidence_types": [ - "closing_reference", - "shared_issue_target" - ], - "pr_comparisons": [ - { - "left_pr_number": 43836, - "right_pr_number": 43842, - "code_similarity": 0.332, - "size_similarity": 0.088, - "file_overlap": 1.0, - "area_overlap": 0.033, - "patch_similarity": 0.017, - "shared_filenames": [ - "src/transformers/cli/serve.py" - ], - "shared_file_areas": [ - { - "filename": "src/transformers/cli/serve.py", - "left_ranges": [ - [ - 11, - 18 - ], - [ - 359, - 429 - ], - [ - 584, - 590 - ], - [ - 1892, - 1910 - ], - [ - 1917, - 1923 - ] - ], - "right_ranges": [ - [ - 54, - 61 - ], - [ - 587, - 593 - ] - ] - } - ] - } - ] - }, { "cluster_id": "cluster-41115-3", "summary": "Cluster of 1 issues and 2 PRs centered on issue #41115.", @@ -1289,16 +1219,14 @@ { "left_pr_number": 41116, "right_pr_number": 45613, - "code_similarity": 0.854, - "size_similarity": 0.901, - "file_overlap": 0.917, - "area_overlap": 0.842, + "code_similarity": 0.805, + "size_similarity": 0.885, + "file_overlap": 0.75, + "area_overlap": 0.856, "patch_similarity": 0.693, 
"shared_filenames": [ "docs/source/en/_toctree.yml", "docs/source/en/model_doc/minicpm3.md", - "src/transformers/models/__init__.py", - "src/transformers/models/auto/auto_mappings.py", "src/transformers/models/auto/modeling_auto.py", "src/transformers/models/minicpm3/__init__.py", "src/transformers/models/minicpm3/configuration_minicpm3.py", @@ -1318,8 +1246,8 @@ ], "right_ranges": [ [ - 711, - 718 + 713, + 720 ] ] }, @@ -1338,36 +1266,6 @@ ] ] }, - { - "filename": "src/transformers/models/__init__.py", - "left_ranges": [ - [ - 250, - 256 - ] - ], - "right_ranges": [ - [ - 248, - 254 - ] - ] - }, - { - "filename": "src/transformers/models/auto/auto_mappings.py", - "left_ranges": [ - [ - 331, - 337 - ] - ], - "right_ranges": [ - [ - 329, - 335 - ] - ] - }, { "filename": "src/transformers/models/auto/modeling_auto.py", "left_ranges": [ @@ -1386,16 +1284,12 @@ ], "right_ranges": [ [ - 283, - 289 - ], - [ - 696, - 702 + 699, + 705 ], [ - 1292, - 1298 + 1300, + 1306 ] ] }, @@ -1478,6 +1372,76 @@ } ] }, + { + "cluster_id": "cluster-43824-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #43824.", + "status": "open", + "confidence": 0.75, + "canonical_issue_number": 43824, + "canonical_pr_number": 43836, + "issue_numbers": [ + 43824 + ], + "pr_numbers": [ + 43836, + 43842 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target" + ], + "pr_comparisons": [ + { + "left_pr_number": 43836, + "right_pr_number": 43842, + "code_similarity": 0.332, + "size_similarity": 0.088, + "file_overlap": 1.0, + "area_overlap": 0.033, + "patch_similarity": 0.017, + "shared_filenames": [ + "src/transformers/cli/serve.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/cli/serve.py", + "left_ranges": [ + [ + 11, + 18 + ], + [ + 359, + 429 + ], + [ + 584, + 590 + ], + [ + 1892, + 1910 + ], + [ + 1917, + 1923 + ] + ], + "right_ranges": [ + [ + 54, + 61 + ], + [ + 587, + 593 + ] + ] + } + ] + } + ] + }, { "cluster_id": "cluster-45081-3", 
"summary": "Cluster of 1 issues and 2 PRs centered on issue #45081.", @@ -1599,7 +1563,7 @@ "cluster_id": "cluster-45561-3", "summary": "Cluster of 1 issues and 2 PRs centered on issue #45561.", "status": "open", - "confidence": 0.55, + "confidence": 0.8, "canonical_issue_number": 45561, "canonical_pr_number": 45645, "issue_numbers": [ @@ -1610,7 +1574,9 @@ 45645 ], "evidence_types": [ - "closing_reference" + "closing_reference", + "shared_issue_target", + "soft_similarity" ], "pr_comparisons": [ { @@ -2033,7 +1999,7 @@ 45639 ], "target_issue_number": 45561, - "reason": "PRs in cluster-45561-3 are treated as duplicates because they converge on issue #45561 with closing_reference evidence." + "reason": "PRs in cluster-45561-3 are treated as duplicates because they converge on issue #45561 with closing_reference, shared_issue_target, soft_similarity evidence." } ], "best_issue": { diff --git a/analysis/current/analysis-report-hybrid.llm-reviews.json b/analysis/current/analysis-report-hybrid.llm-reviews.json index f9edecb733ece301ebfd6e9db2e7a9d00e0970e4..320e1728d2387f1f94cf27bc794344970e112fb7 100644 --- a/analysis/current/analysis-report-hybrid.llm-reviews.json +++ b/analysis/current/analysis-report-hybrid.llm-reviews.json @@ -1,8 +1,8 @@ { "schema_version": "1.0", "repo": "huggingface/transformers", - "snapshot_id": "20260430T000042Z", - "generated_at": "2026-04-30T00:10:16Z", + "snapshot_id": "20260430T060020Z", + "generated_at": "2026-04-30T06:08:49Z", "model": "gpt-5.4-mini?service_tier=flex", "reviews": [ { @@ -26,12 +26,12 @@ "issue:13244|issue:16618", "issue:15980|issue:16618" ], - "prepared_review_unit_hash": "653a36cc0fa13814dd047e575350566e83973ed7e845de8e6418d49b2d89e3c5", + "prepared_review_unit_hash": "abf237f969daad485a12d4b0d204c1711bd7299e7fb440ef3dfe7c90d77acb11", "component_budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 2, - "serialized_chars": 2478, + "serialized_chars": 2477, "estimated_input_tokens": 620, 
"estimated_eval_tokens": 1496 }, @@ -39,7 +39,7 @@ "node_count": 3, "item_count": 3, "soft_pair_count": 2, - "serialized_chars": 2478, + "serialized_chars": 2477, "estimated_input_tokens": 620, "estimated_eval_tokens": 1496 }, @@ -48,24 +48,24 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These issues are only loosely related by Hugging Face model/tokenizer usage, but they describe different problems: tokenization mismatch, private-model auth/download messaging, and a failure loading bert-base-uncased. I would not merge any of the soft pairs as duplicates.", - "confidence": 0.82, - "canonical_issue_reason": "issue:16618 is the broadest and most user-visible report in the set, but the three issues do not appear to be duplicates of one another.", + "summary": "These three issues are not duplicates of one another: one is about Tapas tokenization mismatching TensorFlow, one is about a misleading auth/error message when downloading private models, and one is about failing to load bert-base-uncased. 
The only loosely related item is the generic model-loading report, but it still describes a different failure mode.", + "confidence": 0.93, + "canonical_issue_reason": "Issue #16618 is the broadest model-loading report, so it is the closest thing to a representative issue in this set, but it does not share the same underlying bug as the other two.", "canonical_pr_reason": null, - "best_issue_reason": "issue:16618 is the best representative only in the sense that it is the clearest general model-loading failure; the others are narrower and about different code paths.", + "best_issue_reason": "#16618 is the best representative issue because it is the most general and user-facing model loading failure among the three, whereas #13244 and #15980 are clearly about different code paths.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:13244", "right": "issue:16618", "accept": false, - "reason": "Tokenization behavior in Tapas versus TensorFlow code is a different underlying bug than failing to load bert-base-uncased." + "reason": "Tapas tokenization mismatch is a different problem from failing to load bert-base-uncased; different model/component and different underlying bug." }, { "left": "issue:15980", "right": "issue:16618", "accept": false, - "reason": "A bad error message for unauthenticated private-model downloads is an auth/download UX issue, not the same bug as loading a public model failing." + "reason": "A bad error message for unauthorized private-model downloads is an auth/download UX issue, not the same bug as a model load failure for bert-base-uncased." 
} ] }, @@ -177,8 +177,8 @@ "issue:43828|issue:44292", "issue:43716|issue:44292", "issue:44292|issue:44918", - "issue:41762|issue:43638", "issue:43039|issue:44918", + "issue:41762|issue:43638", "issue:43827|issue:44512", "issue:43716|issue:44560", "issue:43828|issue:45685", @@ -187,8 +187,8 @@ "issue:30277|issue:36331", "issue:43039|issue:44805", "issue:34928|issue:36010", - "issue:43901|issue:44512", "issue:41762|issue:43596", + "issue:43901|issue:44512", "issue:39290|issue:41720", "issue:43866|issue:44863", "issue:43039|issue:44560", @@ -206,20 +206,20 @@ "issue:44841|issue:45084", "issue:41093|issue:41762" ], - "prepared_review_unit_hash": "f2639e21844f4bc54c73177296a8ed4ce4b51c9e01eafc039cae3debc36da180", + "prepared_review_unit_hash": "33747c85a35b0f0d69b854d1c03460296fbb9dd31fbe61633ce71156d5e2309b", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 67, - "serialized_chars": 47120, + "serialized_chars": 47117, "estimated_input_tokens": 11780, "estimated_eval_tokens": 23816 }, @@ -228,300 +228,414 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly heterogeneous, but there is a small duplicate set around DeepSpeed ZeRO-3 loading failures with `index 0 is out of bounds for dimension 0 with size 0`. 
Most other pairs are only loosely related by broad subsystem or error family and should stay separate.", - "confidence": 0.79, - "canonical_issue_reason": "Issue 43596 is the cleanest representative of the shared ZeRO-3 loader failure; 43638 and 41762 look like model-specific variants of the same underlying `index 0` out-of-bounds bug.", + "summary": "The cluster is heterogeneous, with one clearly duplicate issue pair and many soft-similarity matches that are only broad theme overlaps. The best representative issue is a generic index/shape-error report, but this is not a clean single-topic duplicate cluster.", + "confidence": 0.64, + "canonical_issue_reason": "issue:44805 is the broadest representative of the repeated index/shape-error family in this cluster, with the most nearby duplicate-like reports; the rest are mostly model-specific variants or unrelated bugs.", "canonical_pr_reason": null, - "best_issue_reason": "43596 is the best issue anchor because it captures the common DeepSpeed ZeRO-3 initialization failure in a generic way, making it the most suitable duplicate target for the accepted variants.", + "best_issue_reason": "issue:44805 is the best single issue to keep as the cluster anchor because it captures the most reusable error pattern while still being specific enough to point at the likely code path.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44322", "right": "issue:45464", "accept": false, - "reason": "Different Qwen3.5 failures: missing config attribute vs streaming chat/completions inference error." + "reason": "Different failures: missing config attribute vs streaming chat/completions runtime issue; not the same bug." }, { "left": "issue:45237", "right": "issue:45685", "accept": false, - "reason": "Different backend/model problems: AMD GPU support for GPT-OSS vs MPS int histogram error in MoE." + "reason": "Different backends and error types (AMD GPU runtime vs MPS histogram op); same broad hardware theme only." 
}, { "left": "issue:43716", "right": "issue:45237", "accept": false, - "reason": "Different model families and failure modes: Mistral-3 dtype mismatch vs GPT-OSS on AMD GPUs." + "reason": "Different model paths and symptoms: dtype mismatch in image preprocessing vs AMD GPU execution failure." }, { "left": "issue:43828", "right": "issue:45237", "accept": false, - "reason": "Both are dtype/backend complaints, but they affect different models and code paths." + "reason": "Both are model/backend errors, but one is autocast dtype mismatch and the other is AMD GPU incompatibility; not one bug." }, { "left": "issue:45237", "right": "issue:45507", "accept": false, - "reason": "AMD GPU runtime issue vs GraniteMoEHybrid calling an invalid method; not the same bug." + "reason": "Different model families and failures; no shared concrete code-path fix." }, { "left": "issue:44292", "right": "issue:45237", "accept": false, - "reason": "Qwen3 NVFP4 runtime failure is unrelated to the GPT-OSS AMD GPU issue." + "reason": "Distinct model/runtime problems; too broad to treat as duplicates." }, { "left": "issue:30277", "right": "issue:35545", "accept": false, - "reason": "DeepSpeed collective mismatch is unrelated to ModernBERT ONNX export." + "reason": "DeepSpeed collective mismatch vs ONNX export error are unrelated." }, { "left": "issue:35545", "right": "issue:42915", "accept": false, - "reason": "ONNX export error and FineGrainedFP8Config failure are different model/tooling issues." + "reason": "Both are model-specific errors, but onnx export and FP8 runtime are different code paths." }, { "left": "issue:35545", "right": "issue:41720", "accept": false, - "reason": "ModernBERT export to ONNX is unrelated to Qwen3 auto-device-map CUDA assert." + "reason": "Different failures: ONNX export vs auto device-mapping cuda assert." 
}, { "left": "issue:44509", "right": "issue:44512", "accept": false, - "reason": "Both are stale docs problems, but they reference different removed features and do not look like the same underlying change." + "reason": "Both are v5 docs cleanups, but they target different removed references and would not be one concrete fix." }, { "left": "issue:35545", "right": "issue:36010", "accept": false, - "reason": "ONNX export error vs `GenerationMixin` import breakage are separate problems." + "reason": "ONNX export error and GenerationMixin import error are unrelated." }, { "left": "issue:43638", "right": "issue:44805", "accept": false, - "reason": "Both involve shape/index errors under ZeRO-3, but the specific failure messages and contexts differ enough to avoid deduping." + "reason": "Same general zero3/index-error family, but these reports are about different triggers and model setups; not enough to merge confidently." }, { "left": "issue:43596", "right": "issue:44805", "accept": false, - "reason": "ZeRO-3 BertModel index-out-of-bounds is not the same as the mask-shape mismatch report." + "reason": "Same broad error shape, but BertModel ZeRO-3 loading and mask-shape mismatch are not the same concrete bug." }, { "left": "issue:44805", "right": "issue:45161", "accept": false, - "reason": "Mask/tensor shape mismatch and GPT-OSS tensor-parallel failure are unrelated." + "reason": "Both mention tensor-shape style issues, but one is a mask mismatch and the other is GPT-OSS tensor-parallel behavior." + }, + { + "left": "issue:44805", + "right": "issue:44918", + "accept": false, + "reason": "Different failures: mask shape mismatch vs Qwen3.5 embedding unpacking with TRL." }, { "left": "issue:44661", "right": "issue:44805", "accept": false, - "reason": "Tokenizer mapping bug is unrelated to the mask/tensor shape mismatch issue." + "reason": "Tokenizer mapping add-model bug and mask/index shape error are unrelated." 
}, { "left": "issue:34634", "right": "issue:35545", "accept": false, - "reason": "BarkProcessor voice preset and ModernBERT ONNX export are different subsystems and failures." + "reason": "BarkProcessor voice_preset issue is unrelated to ModernBERT ONNX export." }, { "left": "issue:30277", "right": "issue:43638", "accept": false, - "reason": "Collective mismatch during ZeRO-3 is a different failure from the ZeRO-3 index-out-of-bounds loader bug." + "reason": "Different DeepSpeed-related bugs; one is collective mismatch, the other is ZeRO-3 index-out-of-bounds on Bert." }, { "left": "issue:43596", "right": "issue:43638", "accept": true, - "reason": "Same ZeRO-3 loader/index-out-of-bounds bug on BertModel; the wording differs but the failure is the same." + "reason": "These describe the same ZeRO-3 index-out-of-bounds loading bug, with only the Bert variant differing." }, { "left": "issue:43975", "right": "issue:44457", "accept": false, - "reason": "Incorrect detokenization and LoRA save/reload mismatch are distinct model-output issues." + "reason": "Different problems: detokenization bug vs LoRA merge/save/load result mismatch." }, { "left": "issue:43039", "right": "issue:45161", "accept": false, - "reason": "Liger Kernel cross-entropy path and GPT-OSS tensor parallel issue are not the same bug." + "reason": "Liger Kernel cross-entropy call and GPT-OSS tensor-parallel issue are different code paths." }, { "left": "issue:44292", "right": "issue:44457", "accept": false, - "reason": "Qwen NVFP4 runtime failure is unrelated to LoRA merge/save inconsistency." + "reason": "NVFP4 runtime failure and LoRA merge/save mismatch are unrelated." }, { "left": "issue:34928", "right": "issue:44805", "accept": false, - "reason": "Activation-checkpointing/FSDP recomputation mismatch is a different shape bug than the ZeRO-3 loader issue." + "reason": "Activation checkpointing/FSDP recomputation mismatch is a different bug from the mask/index shape errors." 
}, { "left": "issue:24643", "right": "issue:30277", "accept": false, - "reason": "DeepSpeed training `'weight' must be 2-D` is unrelated to the collective mismatch report." + "reason": "Both involve DeepSpeed, but the concrete failures differ: 2-D weight runtime error vs collective mismatch." }, { "left": "issue:43638", "right": "issue:45161", "accept": false, - "reason": "ZeRO-3 index-out-of-bounds and GPT-OSS tensor-parallel failure do not share the same code path." + "reason": "ZeRO-3 Bert loading and GPT-OSS TP behavior are different issues." }, { "left": "issue:34928", "right": "issue:36331", "accept": false, - "reason": "FSDP/activation checkpointing recomputation mismatch is unrelated to the `compute_loss` signature change." + "reason": "FSDP activation-checkpointing mismatch and CustomTrainer argument change are unrelated." }, { "left": "issue:43596", "right": "issue:45161", "accept": false, - "reason": "Different training/load failures: ZeRO-3 BertModel loading vs GPT-OSS tensor-parallel behavior." + "reason": "Different models and error classes; no common bug." }, { "left": "issue:43716", "right": "issue:45507", "accept": false, - "reason": "Mistral-3 dtype mismatch and GraniteMoEHybrid invalid method are unrelated." + "reason": "Different model-specific failures: dtype mismatch vs invalid method call in GraniteMoEHybrid." }, { "left": "issue:39290", "right": "issue:45081", "accept": false, - "reason": "Gemma3 sliding-window attribute error is unrelated to the Mistral tokenizer patch crash." + "reason": "Different model/API failures: Gemma3 config attribute vs tokenizer regex patch crash." }, { "left": "issue:43828", "right": "issue:45161", "accept": false, - "reason": "Autocast dtype mismatch in Phi-tiny-MoE is unrelated to GPT-OSS tensor-parallel failure." + "reason": "Autocast dtype mismatch and GPT-OSS MoE tensor-parallel failure are not the same bug." 
}, { "left": "issue:44918", "right": "issue:45161", "accept": false, - "reason": "Qwen3.5 embedding unpacking with TRL is unrelated to GPT-OSS MoE tensor-parallel behavior." + "reason": "TRL embedding unpacking and GPT-OSS tensor-parallel failure are unrelated." }, { - "left": "issue:43596", - "right": "issue:45161", + "left": "issue:34928", + "right": "issue:41720", "accept": false, - "reason": "ZeRO-3 loader bug and GPT-OSS tensor-parallel bug are different code paths." + "reason": "Activation checkpointing/FSDP mismatch and CUDA assert on device mapping are different problems." }, { - "left": "issue:41762", - "right": "issue:43638", - "accept": true, - "reason": "Same ZeRO-3 `index 0` out-of-bounds loader failure, just on a different model family." + "left": "issue:43716", + "right": "issue:45685", + "accept": false, + "reason": "Different backends and symptoms: image-preprocessor dtype mismatch vs MPS histogram not implemented." + }, + { + "left": "issue:43541", + "right": "issue:43716", + "accept": false, + "reason": "Torch dynamo grouped_mm tracing error and image-preprocessor dtype mismatch are unrelated." + }, + { + "left": "issue:45507", + "right": "issue:45685", + "accept": false, + "reason": "Invalid-method bug in GraniteMoEHybrid and MPS Int histogram failure are different code paths." + }, + { + "left": "issue:43828", + "right": "issue:44292", + "accept": false, + "reason": "Autocast dtype mismatch and NVFP4 model runtime failure do not share a concrete fix." + }, + { + "left": "issue:43716", + "right": "issue:44292", + "accept": false, + "reason": "Different model/backend failures; only a broad runtime-error similarity." + }, + { + "left": "issue:44292", + "right": "issue:44918", + "accept": false, + "reason": "Different model-specific runtime errors with no shared bug." }, { "left": "issue:43039", "right": "issue:44918", "accept": false, - "reason": "Liger Kernel cross-entropy call and TRL Qwen3.5 embedding unpacking are unrelated." 
+ "reason": "Liger Kernel cross-entropy call and TRL embedding unpacking are different issues." + }, + { + "left": "issue:41762", + "right": "issue:43638", + "accept": false, + "reason": "Both are ZeRO-3 loading errors, but different models and not enough evidence for the same root bug." }, { "left": "issue:43827", "right": "issue:44512", "accept": false, - "reason": "Both are docs issues, but they concern different removed v5 features and look like separate cleanup tasks." + "reason": "Both are docs-related, but one is pipeline-task removal and the other is the removed run command; not one change." + }, + { + "left": "issue:43716", + "right": "issue:44560", + "accept": false, + "reason": "Different model-specific bugs: Mistral-3 dtype mismatch vs Qwen3-vl video StopIteration." + }, + { + "left": "issue:43828", + "right": "issue:45685", + "accept": false, + "reason": "Different runtime backends and symptoms; not the same bug." + }, + { + "left": "issue:44918", + "right": "issue:45685", + "accept": false, + "reason": "TRL unpacking bug and MPS Int histogram limitation are unrelated." + }, + { + "left": "issue:43039", + "right": "issue:43828", + "accept": false, + "reason": "Both touch training/runtime behavior, but cross-entropy kernel routing and autocast mismatch are different defects." + }, + { + "left": "issue:30277", + "right": "issue:36331", + "accept": false, + "reason": "DeepSpeed collective mismatch and trainer signature mismatch are unrelated." + }, + { + "left": "issue:43039", + "right": "issue:44805", + "accept": false, + "reason": "Liger Kernel cross-entropy issue does not match the index/mask shape error family." + }, + { + "left": "issue:34928", + "right": "issue:36010", + "accept": false, + "reason": "Activation checkpointing mismatch and GenerationMixin import failure are unrelated." 
+ }, + { + "left": "issue:41762", + "right": "issue:43596", + "accept": false, + "reason": "Same broad ZeRO-3 theme, but different models and likely different underlying causes." + }, + { + "left": "issue:43901", + "right": "issue:44512", + "accept": false, + "reason": "Docs mentioning return_all_scores and docs mentioning run command removal are separate documentation issues." + }, + { + "left": "issue:39290", + "right": "issue:41720", + "accept": false, + "reason": "Gemma3 sliding-window attribute error and Qwen3 auto device mapping CUDA assert are unrelated." }, { "left": "issue:43866", - "right": "issue:43854", + "right": "issue:44863", + "accept": false, + "reason": "Corrupted checkpoint vs NemotronH load implementation bug are different problems." + }, + { + "left": "issue:43039", + "right": "issue:44560", + "accept": false, + "reason": "Liger Kernel cross-entropy routing and Qwen3-vl video StopIteration are unrelated." + }, + { + "left": "issue:43854", + "right": "issue:43866", "accept": false, - "reason": "Checkpoint corruption and unit-test load failure for GLM-4.7-Flash are different issues." + "reason": "Model-loading test failure and corrupted checkpoint are different issues." }, { "left": "issue:41093", "right": "issue:43638", "accept": false, - "reason": "Mask/index length mismatch and ZeRO-3 loader index-out-of-bounds are different failure modes." + "reason": "Both are index-shape style errors, but the masked-tensor mismatch and ZeRO-3 Bert loading failure are not the same bug." }, { "left": "issue:39290", "right": "issue:43531", "accept": false, - "reason": "Gemma3 sliding-window attribute error is unrelated to Qwen3-MoE sliding_window behavior." + "reason": "Different Qwen/Gemma sliding-window and configuration issues." }, { "left": "issue:43425", "right": "issue:45070", "accept": false, - "reason": "Torch version incompatibility and pydantic `PretrainedConfig` field breakage are unrelated." 
+ "reason": "Torch version incompatibility and pydantic PretrainedConfig breakage are unrelated." }, { "left": "issue:45362", "right": "issue:45464", "accept": false, - "reason": "Qwen3.5 chat crash and Qwen3.5 streaming inference failure are separate runtime problems." + "reason": "Qwen3.5 chat crash and streaming chat/completions failure are not the same concrete bug." }, { "left": "issue:43404", "right": "issue:43866", "accept": false, - "reason": "Mistral3 tied-weight bug is unrelated to Ovis2 checkpoint corruption." + "reason": "Mistral3 weight-tying bug and corrupted checkpoint are unrelated." }, { "left": "issue:43792", "right": "issue:45070", "accept": false, - "reason": "Whisper run failure is unrelated to the pydantic config-model issue." + "reason": "Whisper model load failure and pydantic config breakage are unrelated." }, { "left": "issue:45464", "right": "issue:45685", "accept": false, - "reason": "Qwen3.5 streaming failure and MoE MPS histogram error are unrelated." + "reason": "Streaming inference failure and MPS Int histogram backend limitation are different issues." }, { "left": "issue:43541", "right": "issue:45685", "accept": false, - "reason": "Grouped_mm tracing error and MPS histogram-not-implemented error are different execution paths." + "reason": "Torch dynamo tracing failure and MPS histogram backend limitation are unrelated." }, { "left": "issue:43541", "right": "issue:43828", "accept": false, - "reason": "Both are numerical/runtime issues in MoE-ish models, but they are not the same bug or fix." + "reason": "Grouped_mm tracing error and autocast dtype mismatch are different bugs." }, { "left": "issue:36010", "right": "issue:39290", "accept": false, - "reason": "GenerationMixin import breakage and Gemma3 sliding_window attribute error are unrelated." + "reason": "GenerationMixin import failure and Gemma3 sliding-window attribute error are unrelated." 
}, { "left": "issue:44841", "right": "issue:45084", "accept": false, - "reason": "Voxtral processor failure and template compilation error are different issues." + "reason": "Processor failure for Voxtral and template-node compile error are different problems." }, { "left": "issue:41093", "right": "issue:41762", "accept": false, - "reason": "Mask-shape mismatch and ZeRO-3 index-out-of-bounds are different bugs." + "reason": "Both are tensor shape/index errors, but they involve different model-loading paths and are not clearly the same bug." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is grounded in the packet, and the ZeRO-3 duplicate cluster around 43596/43638/41762 is supported by the titles. The negative soft-edge verdicts are mostly conservative and do not overclaim shared root causes beyond what the packet shows." + "feedback": "The evaluation is grounded in the packet and stays conservative. The cluster is indeed heterogeneous, the single accepted near-duplicate pair (issue:43596 / issue:43638) is well-supported by the shared ZeRO-3 index-out-of-bounds title pattern, and the other soft-edge rejections do not overstate similarity. The choice of issue:44805 as a broad index/shape-error anchor is plausible and not exaggerated." 
}, "evaluator_used": true, "retried": false, @@ -626,20 +740,20 @@ "issue:43828|issue:44928", "issue:43866|issue:44403" ], - "prepared_review_unit_hash": "95e5979fce976694d7de52a9d67a135591463c353438dfa4e55d43a090c767c7", + "prepared_review_unit_hash": "dbac255fda6533e7aa51ea4b9b8f9a2c901579a27a6620d9d550667541270fcc", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 33, - "serialized_chars": 37179, + "serialized_chars": 37180, "estimated_input_tokens": 9295, "estimated_eval_tokens": 18846 }, @@ -648,216 +762,216 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is mostly a grab-bag of unrelated Transformer/vLLM issues. The clearest duplicate set is the non-persistent buffer corruption regression in v5.x. A second likely duplicate pair is the SigLIP2 bad-output reports. Most other pairs only share a subsystem or vague symptom class and should stay separate.", - "confidence": 0.91, - "canonical_issue_reason": "issue:43950 is the clearest and broadest statement of the non-persistent-buffer regression, with an explicit `from_pretrained()` corruption description that subsumes the narrower \"junk\" reports.", + "summary": "This cluster is mostly heterogeneous and does not form one duplicate set. 
The only clear duplicate mini-cluster is the Transformers v5 non-persistent-buffer corruption regression.", + "confidence": 0.92, + "canonical_issue_reason": "issue:44534 is the broadest and clearest statement of the non-persistent-buffer regression, and it matches the other buffer-corruption reports.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43950 is the best representative issue for triage because it most precisely names the regression, its scope, and the loading path involved.", + "best_issue_reason": "issue:44534 is the best anchor for triage because it most directly names the underlying v5 buffer-corruption bug and generalizes the symptom.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43296", "right": "issue:43366", "accept": false, - "reason": "Different models and failures: PaddleOCR-VL load failure in vLLM vs GGUF/gpt-oss architecture support." + "reason": "Different problems: PaddleOCR-VL load failure vs gpt-oss GGUF support request." }, { "left": "issue:43572", "right": "issue:45070", "accept": false, - "reason": "Both involve config-related v5 breakage, but one is a missing StableLmConfig field and the other is a pydantic/PretrainedConfig field regression; not the same bug." + "reason": "Unrelated regressions: missing StableLmConfig field vs pydantic PretrainedConfig breakage." }, { "left": "issue:44387", "right": "issue:45005", "accept": false, - "reason": "One is int4 quantization memory growth/OOM, the other is tied-weights behavior on translation models; different code paths." + "reason": "OOM/memory-growth bug vs tied-weights regression; not the same code path." }, { "left": "issue:43541", "right": "issue:45161", "accept": false, - "reason": "Mixtral torch-dynamo grouped_mm tracing failure is unrelated to GPT-OSS tensor-parallel support." + "reason": "Grouped_mm tracing crash vs GPT-OSS tensor-parallel failure; distinct causes." 
}, { "left": "issue:43828", "right": "issue:45464", "accept": false, - "reason": "Both are runtime failures, but one is an autocast dtype mismatch in Phi-tiny-MoE and the other is a streaming chat/completions failure in Qwen3.5." + "reason": "Autocast dtype mismatch in Phi MoE vs streaming chat/completions failure in Qwen3.5." }, { "left": "issue:44291", "right": "issue:45070", "accept": false, - "reason": "Unexpected `_is_hf_initialized` during init_empty_weights is a different loading-path bug from the pydantic PretrainedConfig field issue." + "reason": "`init_empty_weights` argument error vs pydantic model-field regression; unrelated." }, { "left": "issue:43366", "right": "issue:45464", "accept": false, - "reason": "GPT-OSS GGUF support and Qwen3.5 streaming API failure are unrelated." + "reason": "General support request vs a specific streaming inference bug." }, { "left": "issue:43531", "right": "issue:45070", "accept": false, - "reason": "Qwen3-MoE sliding_window handling and pydantic config-field breakage are different regressions." + "reason": "Qwen3-MoE sliding-window bug vs pydantic field breakage; no shared underlying defect." }, { "left": "issue:39290", "right": "issue:43742", "accept": false, - "reason": "Gemma3TextConfig sliding-window attribute error and MobileLLM key error are different model-load failures." + "reason": "Gemma3 sliding_window_pattern attribute issue vs MobileLLM load key error; different models and failures." }, { "left": "issue:43054", "right": "issue:43994", - "accept": true, - "reason": "Both report SigLIP2 producing clearly wrong outputs; one frames it as much worse text embeddings, the other as nonsensical AutoModel/pipeline results, consistent with the same underlying model wiring bug." + "accept": false, + "reason": "Both mention SigLIP2, but one is embedding-quality degradation and the other is bad pipeline/AutoModel outputs; too vague to merge." 
}, { "left": "issue:41093", "right": "issue:43596", "accept": false, - "reason": "Mask-shape mismatch during indexing and a zero-sized-tensor IndexError under deepspeed zero3 are distinct symptoms and likely distinct causes." + "reason": "Mask length mismatch vs zero3/BertModel index error; different runtime failures." }, { "left": "issue:43541", "right": "issue:44589", "accept": false, - "reason": "Grouped_mm tracing failure is unrelated to missing Float8 storage type support." + "reason": "Torch-dynamo grouped_mm crash vs missing Float8 storage object; unrelated." }, { "left": "issue:45632", "right": "issue:45698", "accept": false, - "reason": "Both concern remote custom-module loading, but one is specifically a trust_remote_code cache-path collision and the other is a wrong-module load after save_pretrained; not enough evidence they are the same bug." + "reason": "Cache-path collision for local trust_remote_code models vs loading the wrong custom module after save_pretrained; similar area but not the same bug." }, { "left": "issue:40990", "right": "issue:44960", "accept": false, - "reason": "High perplexity on gpt-oss-20b and GLM5 are different model-quality reports with no shared failure mode." + "reason": "High perplexity on gpt-oss-20b vs GLM5 model title; no concrete overlap." }, { "left": "issue:43716", "right": "issue:44918", "accept": false, - "reason": "Mistral-3 image-preprocessor dtype mismatch and Qwen3.5 embedding unpacking failure in TRL are unrelated." + "reason": "Image preprocessor dtype mismatch vs Qwen3.5 embedding unpacking in TRL SFT; different failure modes." }, { "left": "issue:43404", "right": "issue:45005", "accept": false, - "reason": "Both mention tied weights, but one is a Mistral3 lm_head tie bug and the other is a broader translation-model v5 issue; not the same concrete defect." + "reason": "Specific Mistral3 tie-weights bug vs broader translation-model tied-weights regression; too broad to treat as one issue." 
}, { "left": "issue:43645", "right": "issue:45399", "accept": false, - "reason": "Custom-model notebook initialization and flash-attn2 fallback gating are unrelated issues." + "reason": "Notebook custom-model initialization breakage vs flash-attn fallback selection problem; unrelated." }, { "left": "issue:44530", "right": "issue:45005", "accept": false, - "reason": "PagedAttentionCache invalid group type on Qwen3.5 is not a tied-weights regression." + "reason": "PagedAttentionCache linear_attention crash vs tied-weights regression; different subsystems." }, { "left": "issue:44863", "right": "issue:45005", "accept": false, - "reason": "NemotronH checkpoint loading failure is unrelated to tied-weight handling." + "reason": "NemotronH checkpoint loading failure vs tied-weights issue; not the same bug." }, { "left": "issue:44360", "right": "issue:44512", "accept": false, - "reason": "DSA indexer/ReLU discussion and a docs typo about a removed run command are unrelated." + "reason": "DSA indexer ReLU discussion vs docs command removal typo; unrelated." }, { "left": "issue:42915", "right": "issue:45399", "accept": false, - "reason": "Qwen3Moe FP8 config failure and flash-attn2 fallback blocking are different problems." + "reason": "Qwen3Moe FineGrainedFP8Config failure vs flash-attn fallback blocking; distinct problems." }, { "left": "issue:43873", "right": "issue:45005", "accept": false, - "reason": "Quantization/offloading behavior is unrelated to tied weights." + "reason": "Quantization/offloading behavior vs tied-weights regression; no shared concrete code path." }, { "left": "issue:39186", "right": "issue:44918", "accept": false, - "reason": "FSDP 2-D weight runtime error is unrelated to Qwen3.5 input-embedding unpacking in TRL." + "reason": "FSDP 2-D weight runtime error vs Qwen3.5 SFT embedding unpacking; unrelated." 
}, { "left": "issue:44492", "right": "issue:44512", "accept": false, - "reason": "Cache-strategy typo and removed-doc-command mention are both documentation-level but not the same issue." + "reason": "Cache-strategy typo vs stale docs command mention; not duplicates." }, { "left": "issue:44918", "right": "issue:45507", "accept": false, - "reason": "TRL embedding unpacking failure and GraniteMoEHybrid invalid method call are different model/integration bugs." + "reason": "TRL SFT embedding issue vs GraniteMoE invalid-method call; unrelated." }, { "left": "issue:43975", "right": "issue:44292", "accept": false, - "reason": "DeepSeek detokenization regression and Qwen-3-8B-NVFP4 runtime failure are unrelated." + "reason": "Detokenization bug vs Qwen NVFP4 runtime error; different symptoms and likely causes." }, { "left": "issue:43950", "right": "issue:44534", "accept": true, - "reason": "Same regression: v5 corrupts non-persistent buffers during load; 44534 is a shorter restatement of the same bug." + "reason": "Same regression: non-persistent buffers are corrupted during `from_pretrained`; 44534 is the broader formulation." }, { "left": "issue:43644", "right": "issue:43950", "accept": true, - "reason": "Same non-persistent-buffer corruption regression; 43950 is the more explicit issue, while 43644 is the broader symptom description." + "reason": "Both report the same v5 non-persistent-buffer corruption/junk-filled buffer bug." }, { "left": "issue:44292", "right": "issue:45507", "accept": false, - "reason": "Qwen-3-8B-NVFP4 runtime failure and GraniteMoEHybrid invalid method are unrelated." + "reason": "Qwen NVFP4 execution failure vs GraniteMoE invalid method; unrelated." }, { "left": "issue:39692", "right": "issue:43054", "accept": false, - "reason": "SigLIP2 doc-example errors/quantization failure are not the same as the broader bad-embedding quality report." 
+ "reason": "SigLIP2 docs example/quantization problems vs poor SigLIP2 text embeddings; not the same concrete bug." }, { "left": "issue:41553", "right": "issue:43883", "accept": false, - "reason": "Bad AutoTokenizer error message for Voxtral and missing `all_tied_weights_keys` on MolmoForCausalLM are unrelated." + "reason": "Bad AutoTokenizer error for Voxtral vs missing `all_tied_weights_keys` on Molmo; unrelated." }, { "left": "issue:43828", "right": "issue:44928", "accept": false, - "reason": "Both involve numerics, but autocast dtype mismatch is not the same as Qwen3.5 RLHF NaNs from 3D position_ids/SDPA fallback." + "reason": "Autocast dtype mismatch in Phi MoE vs Qwen3.5 RLHF NaN from 3D position_ids; different root causes." }, { "left": "issue:43866", "right": "issue:44403", "accept": false, - "reason": "Corrupted Ovis2 checkpoint and generic loading noise are not the same defect." + "reason": "Corrupted checkpoint report vs loading-noise complaint; not the same bug." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is well grounded in the packet and stays mostly conservative. The canonical issue choice for the non-persistent-buffer regression is supported by the titles and pair evidence, and the only accepted soft-edge duplicate outside that cluster is reasonably framed as likely rather than certain. No mergeability issue applies here since these are issue pairs, not PR pairs." + "feedback": "The assessment is grounded in the packet and stays conservative on the soft edges. The only clear duplicate mini-cluster is indeed the v5 non-persistent-buffer corruption set around 43644/43950/44534, and the rejected pairings are mostly well-justified as distinct bugs or support requests. The cluster summary does not overclaim broader duplication." 
}, "evaluator_used": true, "retried": false, @@ -964,20 +1078,20 @@ "issue:43854|issue:44863", "issue:42915|issue:43872" ], - "prepared_review_unit_hash": "d6738f153ada2df681fb22c1db23dfee47938210a92023b71667259e6473cc42", + "prepared_review_unit_hash": "a29ee64e71a379dbd30c8d96af11219ce02cd1a36dd1e86edb4c585a80ad79ed", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 36, - "serialized_chars": 37867, + "serialized_chars": 37865, "estimated_input_tokens": 9467, "estimated_eval_tokens": 19190 }, @@ -986,222 +1100,216 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly heterogeneous. I found a few true duplicate pairs around the same underlying regression/theme (SigLIP2 quality, Whisper fbank extraction, a docs cleanup, and one `_is_hf_initialized` loading regression), but most pairs are distinct issues in different models or subsystems.", - "confidence": 0.61, - "canonical_issue_reason": "Issue 45020 is the best umbrella candidate: it is broad, open, and captures the recurring recent-transformers / remote-code model-loading breakage pattern better than the narrower model-specific reports.", + "summary": "This cluster is mostly a grab-bag of distinct Transformers regressions, not one tight duplicate set. 
Only a few pairs look like true duplicates: the docs/pipeline pair, the SigLIP2 discrepancy pair, and one Qwen3 ZeRO-3/loading pair.", + "confidence": 0.74, + "canonical_issue_reason": "Issue 45020 is the best umbrella-style issue here: it is open, broad, and captures the general class of recent-version model-loading regressions better than the more narrowly scoped reports.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 45020 is the most suitable global anchor for this mixed cluster because it is the broadest, still-active report and can absorb the repeated loading/regression theme better than the more symptom-specific issues.", + "best_issue_reason": "45020 is the most useful triage hub among these because it is broad enough to subsume several remote-code/loading breakages, while the other issues are mostly model-specific edge cases.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:36331", "right": "issue:41762", "accept": false, - "reason": "Different failures: training-time `compute_loss` signature mismatch vs DeepSpeed ZeRO-3 loading crash." + "reason": "Different failure surfaces: trainer custom loss signature vs Gemma3 ZeRO-3 loading IndexError." }, { "left": "issue:41553", "right": "issue:43881", "accept": false, - "reason": "Both are load failures, but for unrelated models and different error paths." + "reason": "Both are loading failures, but for different models and different bug classes." }, { "left": "issue:43872", "right": "issue:44589", "accept": false, - "reason": "Distinct incompatibilities: `_is_hf_initialized`/Int8Params vs missing Float8 storage." + "reason": "Both are compatibility/type errors, but the underlying causes differ (_is_hf_initialized vs Float8 storage lookup)." }, { "left": "issue:43054", "right": "issue:45701", "accept": false, - "reason": "SigLIP2 embedding regression vs a generic tokenization-change complaint; not the same bug." 
+ "reason": "SigLIP2 embedding quality regression vs tokenization version-change report; not the same bug." }, { "left": "issue:38175", "right": "issue:43054", - "accept": true, - "reason": "Both report SigLIP2 output quality/behavior regressions and look like symptoms of the same implementation mismatch." + "accept": false, + "reason": "Same model family, but one is zero probabilities and the other is worse text embeddings; too little to treat as the same defect." }, { "left": "issue:42947", "right": "issue:44387", "accept": false, - "reason": "LoRA gradient checkpointing vs int4 quantization memory growth are different issues." + "reason": "Both mention memory behavior, but checkpointing/LoRA and int4 OOM are different code paths." }, { "left": "issue:43054", "right": "issue:43493", "accept": true, - "reason": "43493 describes the likely root-cause SigLIP2 implementation discrepancy that matches 43054's degraded embeddings." + "reason": "Both point to a SigLIP2 implementation mismatch/discrepancy affecting outputs; plausible same underlying model bug." }, { "left": "issue:43881", "right": "issue:43883", "accept": false, - "reason": "Different models and different missing-attribute/load failures." + "reason": "Different models and different missing-attribute/loading failures." }, { "left": "issue:44403", "right": "issue:44451", "accept": false, - "reason": "Generic loading noise vs a specific model load failure for ScandiBERT." + "reason": "Generic loading noise vs a specific model load failure; not the same issue." }, { "left": "issue:43975", "right": "issue:45520", "accept": false, - "reason": "Tokenizer detokenization bug vs Python 3.13 flash_attn import error." + "reason": "Tokenizer detokenization bug vs flash-attn import/KeyError on Python 3.13." 
}, { "left": "issue:43643", "right": "issue:45020", "accept": false, - "reason": "`AutoConfig` missing fields under `trust_remote_code` is narrower than the broader remote-code breakage report; not clearly the same underlying bug." + "reason": "Remote-code handling is related, but this is a specific missing-fields AutoConfig bug, not clearly the same defect as the broad umbrella report." }, { "left": "issue:43975", "right": "issue:45237", "accept": false, - "reason": "Unrelated problems: detokenization vs AMD GPU support." + "reason": "Unrelated: tokenizer detokenization vs AMD GPU execution failure." }, { "left": "issue:44403", "right": "issue:44488", "accept": false, - "reason": "Different model-loading complaints with no clear shared root cause." + "reason": "One is logging noise, the other is a model load failure for a specific checkpoint." }, { "left": "issue:42947", "right": "issue:43856", "accept": false, - "reason": "Checkpointing effectiveness and MoE memory usage are separate training issues." + "reason": "Different training-memory regressions with different mechanisms and models." }, { "left": "issue:43782", "right": "issue:44960", "accept": false, - "reason": "Different models and different failure modes (`weight_only=True` vs GLM5 loading)." + "reason": "Different model families and different failure modes." }, { "left": "issue:42491", "right": "issue:44368", "accept": false, - "reason": "LoRA compatibility regression vs a tie-word-embeddings warning; not the same concrete bug." + "reason": "Both touch Qwen3/LoRA, but one is a hf4.x/hf5.x checkpoint incompatibility and the other is a tie_word_embeddings warning." }, { "left": "issue:41762", "right": "issue:42915", "accept": false, - "reason": "Gemma3 ZeRO-3 load failure vs Qwen3 MoE FP8 failure are unrelated." + "reason": "Both involve ZeRO-3/loading, but the models and reported failures are not the same concrete bug." 
}, { "left": "issue:43975", "right": "issue:44451", "accept": false, - "reason": "Different tokenizer/model-specific failures." + "reason": "Different tokenizer/model-specific issues." }, { "left": "issue:45020", "right": "issue:45356", "accept": false, - "reason": "Broad remote-code regressions vs a Kimi-K2.5 tokenizer regression; not the same issue." + "reason": "Broad remote_code breakage vs a Kimi tokenizer regression with codec handling and warning text; not clearly identical." }, { "left": "issue:44661", "right": "issue:45020", "accept": false, - "reason": "Model-addition helper failure vs general remote-code loading regressions." + "reason": "Model registration edge case vs broad remote_code regressions; too different to merge as duplicates." }, { "left": "issue:43824", "right": "issue:43872", "accept": false, - "reason": "Importing a missing class and an `_is_hf_initialized`/bitsandbytes TypeError are different bugs." + "reason": "Import/export issue for a missing class vs bitsandbytes initializer incompatibility." }, { "left": "issue:44991", "right": "issue:45399", "accept": false, - "reason": "Tokenizer load failure vs flash-attn fallback gating; unrelated code paths." + "reason": "Tokenizer loading regression vs flash-attn fallback gating; unrelated." }, { "left": "issue:43827", "right": "issue:44509", "accept": true, - "reason": "Both are the same docs regression: stale references to removed pipeline tasks after v5 cleanup." + "reason": "Both are the same docs regression: pipeline task references that remained after v5 removals." }, { "left": "issue:45081", "right": "issue:45399", "accept": false, - "reason": "Mistral regex patch crash vs flash-attn fallback checks are unrelated." + "reason": "Tokenizer regex crash vs flash-attn fallback gating; unrelated subsystems." 
}, { "left": "issue:43792", "right": "issue:44220", - "accept": true, - "reason": "Both point to Whisper audio feature extraction failing; `_torch_extract_fbank_features` is the concrete path behind the general can't-run report." + "accept": false, + "reason": "Possibly adjacent Whisper/audio code, but not enough evidence that they are the same bug." }, { "left": "issue:43404", "right": "issue:43950", "accept": false, - "reason": "Untied lm_head weights vs non-persistent buffer corruption are different model-state bugs." + "reason": "Weight tying in Mistral3 vs non-persistent buffer corruption; different defects." }, { "left": "issue:43257", "right": "issue:43866", "accept": false, - "reason": "Qwen3 MoE weight-conversion issue vs corrupted Ovis2 checkpoint." + "reason": "Different Qwen3 MoE and Ovis2 issues with no shared concrete failure." }, { "left": "issue:41553", "right": "issue:44991", "accept": false, - "reason": "Voxtral error messaging vs tokenizer load failure for a different model." + "reason": "Different model-loading failures with different root causes." }, { "left": "issue:43742", "right": "issue:45399", "accept": false, - "reason": "MobileLLM key error and flash-attn fallback gating do not share a concrete root cause." + "reason": "Different subsystems: MobileLLM config/key error vs flash-attn fallback gating." }, { "left": "issue:42915", "right": "issue:45020", "accept": false, - "reason": "Qwen3 MoE FP8 failure is too specific to be the same bug as broad remote-code breakage." + "reason": "FineGrainedFP8Config failure is not the same as the broad remote_code regression report." }, { "left": "issue:43742", "right": "issue:45020", "accept": false, - "reason": "Model-specific key error vs generic remote-code breakage." + "reason": "Specific MobileLLM load failure vs generic remote_code breakage." 
}, { "left": "issue:43940", "right": "issue:45313", - "accept": false, - "reason": "Both are Qwen DeepSpeed ZeRO-3 load failures, but for different model generations and with different reported symptoms." + "accept": true, + "reason": "Both report Qwen3-family ZeRO-3 weight loading failures with the same missing-weights symptom and loader path." }, { "left": "issue:43872", "right": "issue:44291", "accept": true, - "reason": "Same underlying `_is_hf_initialized` regression surfacing during model loading / parameter construction." - }, - { - "left": "issue:41553", - "right": "issue:44488", - "accept": false, - "reason": "Different model-load issues with no shared concrete failure path." + "reason": "These are the same unexpected `_is_hf_initialized` argument bug during model loading/init_empty_weights." } ] }, "evaluator_result": { "accept": true, - "feedback": "The cluster summary is grounded in the packet and stays conservative overall. The accepted soft pairs are all reasonably supported by the issue titles/descriptions, and the rejected pairs do not overreach by forcing unrelated model/subsystem failures together. The choice of 45020 as a broad anchor is also defensible for this mixed set." + "feedback": "The summary is grounded in the packet and stays conservative overall. The accepted duplicate calls are reasonably supported by matching symptoms or the same specific regression class (docs pipeline removal, `_is_hf_initialized`, ZeRO-3 weight loading, SigLIP2 output discrepancy). The rejected pairs are also framed cautiously and do not overclaim sameness from superficial similarity alone." 
}, "evaluator_used": true, "retried": false, @@ -1304,226 +1412,226 @@ "issue:38175|issue:39692", "issue:42491|issue:43720" ], - "prepared_review_unit_hash": "1e67f73b9376a7253e4c21b957f48177514053709b6302000a34634762090488", + "prepared_review_unit_hash": "b17b5455cc32a76c0855dc6b3026e9587863bab5c095e1a0d623f4e18c9ab0ef", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 31, - "serialized_chars": 37219, - "estimated_input_tokens": 9305, - "estimated_eval_tokens": 18866 + "serialized_chars": 37221, + "estimated_input_tokens": 9306, + "estimated_eval_tokens": 18868 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This set is mostly unrelated issue reports. Only a few pairs look like true duplicates: repeated Transformers v5 non-persistent-buffer regression, identical mask-shape IndexError reports, the same SigLIP2 output bug, and the custom-model initialization regression.", - "confidence": 0.74, - "canonical_issue_reason": "Issue 43644 is the clearest canonical representative: it states the non-persistent-buffer regression directly and is the strongest match for its near-identical duplicate (44534).", + "summary": "This cluster is mostly a noisy mix of unrelated Transformers issues; only a few soft pairs look like true duplicates of the same underlying bug/regression. 
The clearest repeated theme is the non-persistent-buffer corruption regression in v5.", + "confidence": 0.58, + "canonical_issue_reason": "Issue 44534 is the clearest representative of the non-persistent-buffer regression: concise, specific, and matching the same bug described in 43644.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 43644 is the best issue to anchor the cluster because it is specific, broadly descriptive, and maps cleanly to a near-exact duplicate report.", + "best_issue_reason": "44534 is the best single issue to anchor the shared buffer-corruption regression subgroup because it states the core problem directly and broadly.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44928", "right": "issue:45588", "accept": false, - "reason": "Different bugs: one is a Qwen3.5 training/SDPA gradient-collapse report, the other is an s_aux=None flash-attention crash." + "reason": "Different failures: 3D position_ids/BF16 SDPA fallback vs an s_aux=None AttributeError in flash_attention.py." }, { "left": "issue:43257", "right": "issue:45305", "accept": false, - "reason": "Both involve DeepSpeed, but the failures are different code paths: MoE weight conversion vs gradient averaging with GAS." + "reason": "Both involve Qwen3/MoE and DeepSpeed, but they describe different bugs: loading/conversion vs gradient averaging." }, { "left": "issue:36331", "right": "issue:41093", "accept": false, - "reason": "Unrelated errors: custom Trainer loss-signature incompatibility vs a tensor/mask shape mismatch." + "reason": "Different code paths and symptoms: custom trainer API mismatch vs a tensor/mask shape mismatch." }, { "left": "issue:41093", "right": "issue:44805", - "accept": true, - "reason": "Same underlying mask/index shape mismatch IndexError; the second report is essentially the same bug with genericized shapes." 
+ "accept": false, + "reason": "Same generic IndexError shape template, but no evidence they stem from the same model or code path." }, { "left": "issue:43872", "right": "issue:45538", "accept": false, - "reason": "Different subsystems and failures: bitsandbytes Int8Params constructor incompatibility vs tokenizer model_max_length behavior." + "reason": "Unrelated: bitsandbytes init incompatibility vs a CLIPTokenizer model_max_length issue." }, { "left": "issue:44479", "right": "issue:44560", - "accept": false, - "reason": "Both mention video/Qwen models, but the concrete failures and model paths differ; not enough evidence of one shared bug." + "accept": true, + "reason": "Both report the same 5.3.0 Qwen video-input regression/StopIteration class of failure across related model variants." }, { "left": "issue:44530", "right": "issue:44863", "accept": false, - "reason": "Different models and errors: PagedAttentionCache linear_attention crash vs NemotronH checkpoint-loading failure." + "reason": "Different bugs: Qwen3.5 PagedAttentionCache group-type crash vs NemotronH checkpoint loading." }, { "left": "issue:24643", "right": "issue:30064", "accept": false, - "reason": "Completely different areas: DeepSpeed training weight shape error vs image-processor segmentation-map handling." + "reason": "Completely different subsystems and errors: DeepSpeed training weight shape vs segmentation-map processing." }, { "left": "issue:43366", "right": "issue:43828", "accept": false, - "reason": "No shared bug: GGUF/gpt-oss support request vs Phi-tiny-MoE autocast dtype mismatch." + "reason": "Different issues: GGUF architecture support vs a dtype mismatch under autocast." }, { "left": "issue:44155", "right": "issue:45584", "accept": false, - "reason": "Different workloads and root causes: AudioFlamingo batched-track leakage vs Whisper empty-transcription generation failure." 
+ "reason": "Different domains and failures: batched audio track leakage vs empty-transcription generation after align_special_tokens." }, { "left": "issue:43366", "right": "issue:43531", "accept": false, - "reason": "Both are Qwen/GGUF-adjacent, but one is support for gpt-oss architecture and the other is a Qwen3-MoE sliding_window issue." + "reason": "Both touch Qwen3/MoE-adjacent areas, but one is GGUF architecture support and the other is a sliding_window bug." }, { "left": "issue:43550", "right": "issue:44910", "accept": false, - "reason": "Both are runtime/attention bugs, but the model families and concrete failure modes differ (Bamba compile/SDPA vs Qwen3.5 flash-attn position_ids)." + "reason": "Different concrete failures: torch.compile/SDPA on Bamba vs Qwen3.5 flash-attention crashes from 3D position_ids handling." }, { "left": "issue:44560", "right": "issue:45685", "accept": false, - "reason": "Unrelated bugs: Qwen3-VL video StopIteration vs an MPS histogram limitation for MoE integers." + "reason": "Unrelated: Qwen video StopIteration regression vs an MPS histogram op not implemented for Int." }, { "left": "issue:43645", "right": "issue:43646", "accept": true, - "reason": "Same Transformers 5 custom-model initialization regression; one report is just a Jupyter-specific manifestation of the same bug." + "reason": "These describe the same custom-model initialization regression; 43645 is just the Jupyter notebook repro variant." }, { "left": "issue:43950", "right": "issue:45325", "accept": false, - "reason": "Distinct regressions: non-persistent buffer corruption vs Qwen2.5-VL rope index scaling for still images." + "reason": "Different regressions: non-persistent buffer corruption vs Qwen2.5-VL temporal position_ids scaling." }, { "left": "issue:43824", "right": "issue:43957", "accept": false, - "reason": "Different problems: missing Qwen2_5_VL import symbol vs meta-device model-loading breakage." 
+ "reason": "Different loading failures: missing Qwen2.5-VL class import vs meta-device loading breakage." }, { "left": "issue:42915", "right": "issue:43957", "accept": false, - "reason": "Different failures: Qwen3-MoE with FineGrainedFP8Config vs general torch.device('meta') loading regressions." + "reason": "Different bugs: Qwen3Moe FineGrainedFP8Config failure vs meta-device model-loading breakage." }, { "left": "issue:43366", "right": "issue:44589", "accept": false, - "reason": "No common bug: gpt-oss support vs missing Float8 storage type during load/serialization." + "reason": "No clear overlap beyond general loading/runtime issues; gpt-oss GGUF support is unrelated to Float8 storage lookup." }, { "left": "issue:44560", "right": "issue:44918", "accept": false, - "reason": "Both mention Qwen3.5/video, but one is a video-input StopIteration regression and the other is a TRL embedding unpacking failure." + "reason": "Related model family, but the failures are different: video StopIteration vs embedding unpacking with TRL SFT trainer." }, { "left": "issue:42617", "right": "issue:43366", "accept": false, - "reason": "Different issues: 3d_parallel.py execution failure vs GGUF gpt-oss architecture support." + "reason": "Different problems: 3d_parallel.py launch failure vs GGUF gpt-oss architecture support." }, { "left": "issue:38175", "right": "issue:43994", - "accept": true, - "reason": "Same SigLIP2 bug report: zero probabilities / nonsensical outputs for google/siglip2-base-patch16-224." + "accept": false, + "reason": "Same model family, but the reports point to different symptoms and likely different root causes." }, { "left": "issue:43646", "right": "issue:43950", "accept": false, - "reason": "Both are Transformers 5 regressions, but one is custom model initialization and the other is non-persistent buffer corruption." + "reason": "Custom model initialization break vs non-persistent buffer corruption are separate regressions." 
}, { "left": "issue:36010", "right": "issue:42915", "accept": false, - "reason": "Unrelated: GenerationMixin import regression vs Qwen3-MoE FP8 configuration failure." + "reason": "Different bugs: missing GenerationMixin import vs Qwen3Moe FP8 failure." }, { "left": "issue:44560", "right": "issue:44805", "accept": false, - "reason": "Different bugs: Qwen3-VL video StopIteration vs the mask/index shape mismatch IndexError." + "reason": "Different errors: video StopIteration vs a mask/tensor shape mismatch." }, { "left": "issue:44877", "right": "issue:45030", "accept": false, - "reason": "Different config-validation problems affecting different models." + "reason": "Both are config-related, but the model families and failure modes are too different to treat as one bug." }, { "left": "issue:43638", "right": "issue:44661", "accept": false, - "reason": "Different bugs: DeepSpeed/zero3 Bert out-of-bounds index vs tokenizer-mapping add-new-model-like failure." + "reason": "Different failures: DeepSpeed ZeRO3 Bert index error vs add-new-model-like/tokenizer-mapping behavior." }, { "left": "issue:43596", "right": "issue:44661", "accept": false, - "reason": "Different bugs: DeepSpeed zero3 Bert index-out-of-bounds vs tokenizer mapping name handling." + "reason": "Different bugs: ZeRO3 Bert zero-size tensor index error vs tokenizer mapping interaction." }, { "left": "issue:43317", "right": "issue:44756", "accept": false, - "reason": "Different topics: dequantized-model loading with device_map=auto vs mmap/OOM on Strix Halo." + "reason": "Different resource/loading issues: gpu+cpu offload dequantization vs mmap OOM on Strix Halo." }, { "left": "issue:43644", "right": "issue:44534", "accept": true, - "reason": "Near-exact duplicate: both report the Transformers v5 non-persistent-buffer junk/corruption regression." + "reason": "Same underlying v5 regression corrupting non-persistent buffers; titles are effectively two phrasings of the same bug." 
}, { "left": "issue:38175", "right": "issue:39692", "accept": false, - "reason": "Related model family, but different bugs: unexpected zero probabilities vs documentation example errors." + "reason": "Shared SigLIP2 context, but one is zero probabilities while the other is a documentation example with multiple errors." }, { "left": "issue:42491", "right": "issue:43720", "accept": false, - "reason": "Different issues: Qwen3 MoE LoRA compatibility across versions vs BitNet packed-weight unpacking during accelerate loading." + "reason": "Different incompatibilities: Qwen3 MoE LoRA/hf4.x vs bitsandbytes Int8Params kwargs." } ] }, "evaluator_result": { "accept": true, - "feedback": "Grounded overall: the canonical choice (43644) is well supported by the near-exact duplicate 44534, and the other accepted pairs are plausible from the titles. The summary stays mostly conservative. The only pair that feels slightly less certain is 38175/43994, which reads more like a closely related SigLIP2 output issue than a guaranteed exact duplicate, but that does not undermine the cluster as a whole." + "feedback": "The summary is grounded in the packet and stays conservative overall. The three accepted pairs are plausibly same-bug groupings from the titles alone, and the rejected pairs are mostly justified by different model families, subsystems, or error modes. No clear overstatement stands out." 
}, "evaluator_used": true, "retried": false, @@ -1600,15 +1708,15 @@ "issue:43708|issue:44075", "issue:42491|issue:45276", "issue:41720|issue:42915", - "issue:43257|issue:43854", "issue:43856|issue:43873", "issue:39692|issue:43994", + "issue:43257|issue:43854", "issue:44488|issue:44991", "issue:42915|issue:44756", "issue:42491|issue:43931", "issue:45362|issue:45406", - "issue:43644|issue:43645", "issue:44661|issue:44683", + "issue:43644|issue:43645", "issue:43856|issue:44387", "issue:44910|issue:45588", "issue:42491|issue:45292", @@ -1621,245 +1729,248 @@ "issue:39692|issue:44960", "issue:43335|issue:45663", "issue:43425|issue:43957", - "issue:43873|issue:44387", "issue:42947|issue:45704", + "issue:43873|issue:44387", "issue:44387|issue:44756", "issue:43854|issue:45216", "issue:44291|issue:44756" ], - "prepared_review_unit_hash": "fdffd8c81c6072f8a365a9b343bb55d04b9bc936e8c7b932017e249082684122", + "prepared_review_unit_hash": "202d00b8dbabf16f6f4dcf935d784cfba66be1b94cfe284fe0e84e55e7423e26", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 33, - "serialized_chars": 37003, - "estimated_input_tokens": 9251, - "estimated_eval_tokens": 18758 + "serialized_chars": 37000, + "estimated_input_tokens": 9250, + "estimated_eval_tokens": 18756 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is mostly a grab bag of unrelated Transformers regression reports (model loading, quantization, generation, tokenizer, and embedding-resize bugs). 
There isn\u2019t a strong duplicate core, and the soft pairs mostly share superficial wording rather than the same concrete code path.", - "confidence": 0.46, - "canonical_issue_reason": null, + "summary": "This cluster is mostly heterogeneous Transformers bug reports with no single cluster-wide duplicate. I found two strong near-duplicate issue pairs around model weight tying and embedding resizing, but the rest are separate regressions across different models/features.", + "confidence": 0.63, + "canonical_issue_reason": "Issue 45276 is the best canonical anchor: it states the broader `resize_token_embeddings`/output-embedding regression, while 45335 is a narrower model-specific manifestation.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 45292 is the closest reusable representative of the small resize_token_embeddings subgroup, but the overall cluster is too heterogeneous for a single strong canonical issue.", + "best_issue_reason": "Issue 45276 is the strongest general representative of the duplicate-like subset because its symptom is model-agnostic and likely shared across variants.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:45313", "right": "issue:45357", "accept": false, - "reason": "Both are Qwen3.5 save/load regressions, but one is ZeRO-3 language_model weight loading and the other is incorrect visual-encoder keys; different code paths." + "reason": "Same model family, but different code paths: one is ZeRO-3 loading of `language_model` weights, the other is incorrect visual-encoder keys during `save_pretrained`." }, { "left": "issue:44960", "right": "issue:44977", "accept": false, - "reason": "Different models and failures: GLM5 unspecified load issue vs Qwen3.5 flash-attention generation failure." + "reason": "Both involve flash-attention and generation, but they affect different models and describe different failures; not the same concrete bug." 
}, { "left": "issue:44451", "right": "issue:44488", "accept": false, - "reason": "Both are model-loading failures, but they concern different BERT models and likely different tokenizer/model-specific breakage." + "reason": "Both are model-loading failures for different checkpoints, but the titles do not show a shared root cause or code path." }, { "left": "issue:43854", "right": "issue:43931", "accept": false, - "reason": "Different model families and symptoms: GLM-4.7-Flash test loading vs Qwen3-VL weight-shape mismatch." + "reason": "Different failure modes: unit-test loading for GLM-4.7 versus a shape-mismatch load error for Qwen3-VL." }, { "left": "issue:43708", "right": "issue:44075", "accept": false, - "reason": "One is Trainer checkpoint resumption math, the other is SGD argument handling; unrelated bugs." + "reason": "Unrelated bugs: Trainer checkpoint step calculation versus SGD optimizer arguments being ignored." }, { "left": "issue:42491", "right": "issue:45276", "accept": false, - "reason": "LoRA/Qwen3 MoE hf4.x-to-hf5.x compatibility is unrelated to Gemma4 token-embedding resize propagation." + "reason": "Qwen3 MoE LoRA compatibility across hf4.x/hf5.x is unrelated to the resize-token-embeddings regression." }, { "left": "issue:41720", "right": "issue:42915", "accept": false, - "reason": "Auto device-mapping CUDA assert and FineGrainedFP8Config failure are different Qwen3/MoE problems." - }, - { - "left": "issue:43257", - "right": "issue:43854", - "accept": false, - "reason": "Qwen3 MoE weight-conversion under accelerate+deepspeed is unrelated to GLM-4.7-Flash loading in unit tests." + "reason": "Separate issues: CUDA assert with auto device mapping versus FineGrainedFP8Config failure." }, { "left": "issue:43856", "right": "issue:43873", "accept": false, - "reason": "Both mention memory/quantization, but one is training-memory inefficiency and the other is offloading behavior; not the same bug." 
+ "reason": "Both mention memory/offloading, but one is training memory usage and the other is quantization/offload behavior; not the same bug." }, { "left": "issue:39692", "right": "issue:43994", "accept": false, - "reason": "Same model family (SigLIP2) but different failures: docs example errors vs nonsensical outputs from AutoModel/pipeline." + "reason": "Same model family, but one is a docs/example + quantization problem and the other is a runtime AutoModel/pipeline correctness issue." + }, + { + "left": "issue:43257", + "right": "issue:43854", + "accept": false, + "reason": "Different problems: Qwen3 MoE weight conversion with accelerate/deepspeed versus GLM-4.7 load failure." }, { "left": "issue:44488", "right": "issue:44991", "accept": false, - "reason": "Both are tokenizer/loading regressions, but they affect different models and are not clearly the same underlying tokenizer bug." + "reason": "Both fail to load models, but they involve different checkpoints and no shared concrete loading bug is shown." }, { "left": "issue:42915", "right": "issue:44756", "accept": false, - "reason": "FineGrainedFP8Config failure and Strix Halo mmap/OOM are unrelated." + "reason": "Qwen3 MoE FP8 training failure and Strix Halo mmap OOM are unrelated." }, { "left": "issue:42491", "right": "issue:43931", "accept": false, - "reason": "Different regressions: Qwen3 MoE LoRA compatibility vs Qwen3-VL shape mismatch on load." + "reason": "Different model-specific load/training bugs with no evidence of a common root cause." }, { "left": "issue:45362", "right": "issue:45406", "accept": false, - "reason": "Qwen3.5 chat crash and Gemma4Processor missing _tokenizer are unrelated model/serving bugs." + "reason": "A chat crash in Qwen3.5 is not the same as Gemma4Processor missing `_tokenizer` in `transformers serve`." 
}, { - "left": "issue:43644", - "right": "issue:43645", + "left": "issue:44661", + "right": "issue:44683", "accept": false, - "reason": "Both are Transformers 5.0.0 regressions, but one concerns junk-filled buffers and the other Jupyter custom-model initialization." + "reason": "Tokenizer-mapping registration bug versus compiled flex_attention incompatibility; unrelated code paths." }, { - "left": "issue:44661", - "right": "issue:44683", + "left": "issue:43644", + "right": "issue:43645", "accept": false, - "reason": "Tokenizer-mapping model creation and compiled flex_attention on torch>=2.9 are unrelated." + "reason": "Both are Transformers 5.0 regressions, but one is about non-persistent buffers and the other about custom model initialization in notebooks." }, { "left": "issue:43856", "right": "issue:44387", "accept": false, - "reason": "Training memory inefficiency vs int4 quantization reserved-memory growth; similar area, different concrete bug." + "reason": "Both relate to memory, but one is Qwen3 MoE training efficiency and the other is int4 quantization reserved-memory growth leading to OOM." }, { "left": "issue:44910", "right": "issue:45588", "accept": false, - "reason": "Flash-attention packed-sequence misinterpretation and s_aux=None AttributeError are different failure modes." + "reason": "Different flash-attention failures: misinterpreted 3D position_ids versus `s_aux=None` on sink-less models." }, { "left": "issue:42491", "right": "issue:45292", "accept": false, - "reason": "Qwen3 MoE checkpoint compatibility is unrelated to a generic resize_token_embeddings/output_embeddings bug." + "reason": "Qwen3 MoE LoRA compatibility is unrelated to the generic `resize_token_embeddings`/output_embeddings bug." }, { "left": "issue:43404", "right": "issue:43454", - "accept": false, - "reason": "Both are weight-tying issues, but they affect different model classes and likely different implementation paths." 
+ "accept": true, + "reason": "Both report the same underlying weight-tying regression: `lm_head` is not tied correctly, causing broken generation across model variants." }, { "left": "issue:45276", "right": "issue:45335", - "accept": false, - "reason": "Both say resize_token_embeddings fails to update model-specific embeddings, but the affected architectures differ and there\u2019s not enough evidence this is one shared code-path bug." + "accept": true, + "reason": "Same concrete bug: `resize_token_embeddings` does not propagate to the output/decoder embeddings, with 45335 being a model-specific instance." }, { "left": "issue:36010", "right": "issue:39404", "accept": false, - "reason": "ImportError for GenerationMixin and Whisper return_language regression are unrelated." + "reason": "Importing `GenerationMixin` and Whisper `return_language` pipeline behavior are unrelated regressions." }, { "left": "issue:44164", "right": "issue:44291", "accept": false, - "reason": "save/from_pretrained extra_state handling is unrelated to init_empty_weights/_is_hf_initialized errors." + "reason": "Different API breakages: `extra_state` handling in save/load versus an unexpected `_is_hf_initialized` argument in `init_empty_weights`." }, { "left": "issue:44402", "right": "issue:44488", "accept": false, - "reason": "Vocabulary-size mismatch and tokenizer/model loading failure are different issues." + "reason": "Tokenizer vocab-size mismatch and a separate model-loading failure are not the same bug." }, { "left": "issue:41720", "right": "issue:44155", "accept": false, - "reason": "CUDA assert during Qwen3 auto device mapping is unrelated to AudioFlamingo3 track leakage in batched inference." + "reason": "CUDA assert in Qwen3 auto device mapping is unrelated to AudioFlamingo3 track-to-track embedding/token leakage." }, { "left": "issue:39692", "right": "issue:44960", "accept": false, - "reason": "SigLIP2 documentation/runtime issues and GLM5 loading are unrelated." 
+ "reason": "Different models and different failure modes; one is a SigLIP2 docs/runtime issue, the other is a flash-attention generation bug." }, { "left": "issue:43335", "right": "issue:45663", "accept": false, - "reason": "SwitchTransformers sparse-layer config and Gemma-4 FSDP2 shared_kv_states KeyError are different model internals." + "reason": "Sparse-layer construction in SwitchTransformers and Gemma-4 FSDP2 shared_kv_states rebuilding are separate issues." }, { "left": "issue:43425", "right": "issue:43957", "accept": false, - "reason": "Torch 2.10 incompatibility and meta-device loading breakage are different compatibility regressions." + "reason": "Torch 2.10 incompatibility and meta-device loading breakage are distinct regressions." }, { - "left": "issue:43873", - "right": "issue:44387", + "left": "issue:42947", + "right": "issue:45704", "accept": false, - "reason": "Offloading with quantization and increased CUDA reserved memory under int4 quantization overlap conceptually but are not the same concrete bug." + "reason": "Gradient checkpointing with PEFT LoRA and an apex RMSNorm memory leak are unrelated." }, { - "left": "issue:42947", - "right": "issue:45704", + "left": "issue:43873", + "right": "issue:44387", "accept": false, - "reason": "Gradient checkpointing with PEFT LoRA is unrelated to apex.FusedRMSNorm memory leak." + "reason": "Both mention quantization/memory, but one is offloading behavior and the other is increased reserved CUDA memory under int4 quantization." }, { "left": "issue:44387", "right": "issue:44756", "accept": false, - "reason": "Both mention memory pressure, but one is int4 quantization reserved-memory growth and the other is mmap/OOM on Strix Halo." + "reason": "OOM from int4 reserved-memory growth is different from disabling mmap on Strix Halo to avoid OOM." }, { "left": "issue:43854", "right": "issue:45216", "accept": false, - "reason": "GLM-4.7-Flash loading in tests and Qwen3.5 save_pretrained regression are unrelated." 
+ "reason": "GLM-4.7 unit-test loading failure is unrelated to Qwen3.5 save_pretrained checkpoint corruption." }, { "left": "issue:44291", "right": "issue:44756", "accept": false, - "reason": "Unexpected _is_hf_initialized argument during meta init and Strix Halo mmap/OOM are unrelated." + "reason": "Unexpected `_is_hf_initialized` in meta initialization is unrelated to Strix Halo mmap OOM." } ] }, - "evaluator_result": null, - "evaluator_used": false, + "evaluator_result": { + "accept": true, + "feedback": "Grounded overall. The canonical issue choice and the two accepted soft pairs are conservative and supported by the titles: 45276/45335 are clearly the same resize_token_embeddings/output_embeddings regression, and 43404/43454 both describe lm_head weight-tying failures with similar symptoms. The remaining pairwise rejections are appropriately cautious, and the cluster summary correctly frames the rest as heterogeneous regressions rather than one duplicate family." + }, + "evaluator_used": true, "retried": false, - "accepted_nontrivial_soft_edge": false, + "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, @@ -1927,8 +2038,8 @@ ], "soft_pairs": [ "issue:44292|issue:44912", - "issue:44743|issue:44960", "issue:45684|issue:45698", + "issue:44743|issue:44960", "issue:45276|issue:45292", "issue:43957|issue:44756", "issue:42947|issue:43381", @@ -1963,20 +2074,20 @@ "issue:43856|issue:44756", "issue:39404|issue:44671" ], - "prepared_review_unit_hash": "6fed7dd65fa1b0106113d7e902a78e19e2e61f384ba4df15b1f2a201728e334a", + "prepared_review_unit_hash": "a689f282053f7d16620b28985709eb38ab5b58ccf675ea4e9caa7f9c379ee8c9", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, 
"item_count": 48, "soft_pair_count": 36, - "serialized_chars": 38395, + "serialized_chars": 38394, "estimated_input_tokens": 9599, "estimated_eval_tokens": 19454 }, @@ -1985,234 +2096,234 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is mostly heterogeneous Transformers regressions and model-specific loading bugs. The only clear duplicate chain is around `resize_token_embeddings` not propagating to model-specific embedding/output modules (`output_embeddings`, `embed_tokens_per_layer`, `decoder.embed_tokens`).", - "confidence": 0.58, - "canonical_issue_reason": "issue:45292 is the broadest, most reusable statement of the shared `resize_token_embeddings` regression, and it best covers the more specific variants in issue:45276 and issue:45335.", + "summary": "This cluster is mostly a heterogeneous set of Transformers v5 regressions around loading, custom-model plumbing, tokenizer/config behavior, and model-specific runtime bugs. The only clear duplicate chain is the resize_token_embeddings trio; most other soft pairs are too broad or target different code paths.", + "confidence": 0.67, + "canonical_issue_reason": "Issue 43611 is the broadest representative of the cluster\u2019s main theme: a Transformers v5 model-loading regression affecting custom-model behavior via `base_model_prefix`. 
It is more central than the narrower, model-specific reports.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45292 is the strongest representative issue because it names the shared API-level failure rather than one specific architecture, while the rest of the cluster is largely unrelated.", + "best_issue_reason": "43611 is the best single triage anchor in this set because it is a general v5 loading regression with broad applicability, whereas most other issues are narrower one-off model or runtime failures.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44292", "right": "issue:44912", "accept": false, - "reason": "Both are Qwen/quantization-related, but they describe different loading failures and code paths." + "reason": "Both are quantization/loading failures, but they involve different quantization formats and different failure modes; not the same bug." }, { - "left": "issue:44743", - "right": "issue:44960", + "left": "issue:45684", + "right": "issue:45698", "accept": false, - "reason": "Different models and different bugs: cache state reset vs a GLM5 issue." + "reason": "Both involve custom-model save/load flows, but one is a permissions issue and the other loads the wrong module; distinct underlying problems." }, { - "left": "issue:45684", - "right": "issue:45698", + "left": "issue:44743", + "right": "issue:44960", "accept": false, - "reason": "Both involve save/load custom modules, but one is a permissions propagation bug and the other is loading the wrong module." + "reason": "Different model/runtime bugs: recurrent cache state reset vs a GLM5-specific issue." }, { "left": "issue:45276", "right": "issue:45292", "accept": true, - "reason": "Same underlying `resize_token_embeddings` propagation bug; both report output/embedding modules not being updated." + "reason": "Same `resize_token_embeddings` regression; 45276 is a Gemma4-specific manifestation of the broader output_embeddings failure in 45292." 
}, { "left": "issue:43957", "right": "issue:44756", "accept": false, - "reason": "`torch.device(\"meta\")` loading failures are unrelated to the Strix Halo mmap OOM issue." + "reason": "Unrelated loading/meta-device bug versus a Strix Halo mmap/OOM issue." }, { "left": "issue:42947", "right": "issue:43381", "accept": false, - "reason": "Both mention gradient checkpointing, but one is LoRA ineffectiveness and the other is eval-mode incompatibility." + "reason": "Both mention gradient checkpointing, but one is about LoRA ineffective checkpointing and the other about eval-mode restrictions." }, { "left": "issue:43381", "right": "issue:44928", "accept": false, - "reason": "Completely different failures: eval-mode checkpointing vs RLHF NaN/SDPA fallback." + "reason": "Different failures: eval-mode checkpointing restriction vs Qwen3.5 RLHF NaNs from 3D position_ids/SDPA fallback." }, { "left": "issue:44164", "right": "issue:44756", "accept": false, - "reason": "`extra_state` save/load handling is unrelated to the mmap/OOM issue." + "reason": "save/from_pretrained extra_state handling is unrelated to the Strix Halo mmap/OOM problem." }, { "left": "issue:43646", "right": "issue:43708", "accept": false, - "reason": "Custom model initialization and resume-from-checkpoint step \u0917\u0923culation are different bugs." + "reason": "Custom model initialization breakage and Trainer resume/max_steps calculation are different code paths." }, { "left": "issue:45588", "right": "issue:45663", "accept": false, - "reason": "Both are attention/Gemma-related, but they describe different failures in different code paths." + "reason": "Flash-attention sink-less crash and Gemma-4 FSDP2 KeyError are separate model/integration bugs." }, { "left": "issue:44479", "right": "issue:44877", "accept": false, - "reason": "Video-input regression for Qwen models is unrelated to strict config loading for granite_speech." 
+ "reason": "A video-input regression for Qwen variants is not the same as granite_speech config strictness." }, { "left": "issue:44402", "right": "issue:44451", "accept": false, - "reason": "Tokenizer vocab-size mismatch vs a specific model loading failure; not the same bug." + "reason": "Tokenizer vocab-size mismatch and a specific model-loading failure are not the same underlying issue." }, { "left": "issue:45292", "right": "issue:45335", "accept": true, - "reason": "Same `resize_token_embeddings` regression, with `decoder.embed_tokens` as a model-specific manifestation of the broader output-embedding bug." + "reason": "Same `resize_token_embeddings` family bug: both report the resized embeddings not propagating to the model\u2019s target embedding/output layers." }, { "left": "issue:43645", "right": "issue:44164", "accept": false, - "reason": "Notebook custom-model initialization is unrelated to `extra_state` serialization handling." + "reason": "Notebook custom-model initialization and extra_state save/load failures are different regressions." }, { "left": "issue:43257", "right": "issue:44863", "accept": false, - "reason": "Different model families and different loading failures." + "reason": "Different model families and different loading failures; not the same bug." }, { "left": "issue:43828", "right": "issue:43957", "accept": false, - "reason": "Both are dtype/device-related, but one is autocast mismatch and the other is meta-device loading." + "reason": "Autocast dtype mismatch and meta-device loading failure are distinct runtime/loading problems." }, { "left": "issue:43425", "right": "issue:43828", "accept": false, - "reason": "Torch version incompatibility is not the same as a specific autocast dtype mismatch." + "reason": "Torch version incompatibility is too broad and does not match the specific autocast dtype mismatch bug." 
}, { "left": "issue:44589", "right": "issue:44756", "accept": false, - "reason": "Float8 storage lookup failure and mmap OOM are unrelated." + "reason": "Float8 storage lookup failure is unrelated to mmap/OOM behavior." }, { "left": "issue:39404", "right": "issue:45701", "accept": false, - "reason": "Whisper pipeline `return_language` regression is unrelated to a tokenizer-version behavior change." + "reason": "Whisper pipeline return_language regression is unrelated to a general tokenization-change report." }, { "left": "issue:44479", "right": "issue:45663", "accept": false, - "reason": "Qwen video regression and Gemma-4 FSDP2 KeyError are different bugs." + "reason": "Different areas: video input regression versus Gemma-4 training/FSDP2 state handling." }, { "left": "issue:43906", "right": "issue:44530", "accept": false, - "reason": "Isolated reproduction of an old issue vs a Qwen3.5 PagedAttentionCache crash; no shared bug." + "reason": "Isolated reproduction of another issue and a PagedAttentionCache crash are not the same bug." }, { "left": "issue:43931", "right": "issue:44863", "accept": false, - "reason": "Different model-loading failures with no clear shared root cause." + "reason": "Qwen3-VL shape mismatch and NemotronH checkpoint loading are different model-loading failures." }, { "left": "issue:43611", "right": "issue:43645", "accept": false, - "reason": "Both are v5 custom-model loading regressions, but the symptoms and likely code paths differ." + "reason": "Both are v5/custom-model regressions, but one is about `base_model_prefix` loading and the other is about notebook-defined custom model initialization." }, { "left": "issue:43901", "right": "issue:44509", "accept": false, - "reason": "Both are docs-related, but they cover different pipeline-task deprecations." + "reason": "Both are docs-related, but they refer to different pipeline/task deprecations and not the same fix." 
}, { "left": "issue:43425", "right": "issue:43716", "accept": false, - "reason": "Torch incompatibility and a Mistral-3 dtype mismatch are unrelated." + "reason": "Torch incompatibility is not the same as a Mistral-3 image preprocessor dtype mismatch." }, { "left": "issue:44530", "right": "issue:45084", "accept": false, - "reason": "Different error classes and unrelated subsystems." + "reason": "Different failures: PagedAttentionCache invalid group type vs compile-time non-template-node error." }, { "left": "issue:39404", "right": "issue:44912", "accept": false, - "reason": "Whisper pipeline behavior and Qwen quantization fallback are unrelated." + "reason": "Whisper pipeline language-return issue is unrelated to MXFP4 quantization fallback." }, { "left": "issue:43299", "right": "issue:43931", "accept": false, - "reason": "Both touch Qwen3-VL loading, but the reported failures differ enough that they do not clearly share one bug." + "reason": "Qwen3VL-MoE loading regression and Qwen3-VL weight-shape mismatch are related only by model family, not the same bug." }, { "left": "issue:44863", "right": "issue:45663", "accept": false, - "reason": "NemotronH checkpoint loading and Gemma-4 FSDP2 attention state are different problems." + "reason": "NemotronH checkpoint loading and Gemma-4 FSDP2 KeyError are different code-path failures." }, { "left": "issue:44479", "right": "issue:44805", "accept": false, - "reason": "Video regression vs tensor mask shape mismatch: different code paths." + "reason": "Video-input regression and a mask shape IndexError are unrelated." }, { "left": "issue:43257", "right": "issue:45663", "accept": false, - "reason": "Qwen3 MoE conversion/loading and Gemma-4 FSDP2 attention-state rebuilding are unrelated." + "reason": "Qwen3 MoE conversion/loading issue is unrelated to Gemma-4 FSDP2 shared_kv_states behavior." 
}, { "left": "issue:44265", "right": "issue:44898", "accept": false, - "reason": "`torch.export`/`torch_compilable_check` failure is unrelated to Perceiver interpolation behavior." + "reason": "torch.export/torch_compilable_check failure and Perceiver resolution/interpolation failure are different bugs." }, { "left": "issue:43994", "right": "issue:44079", "accept": false, - "reason": "SigLIP2 inference mismatch and `ModelOutput` key assignment are different bugs." + "reason": "Nonsensical Siglip2 outputs and ModelOutput key assignment are distinct issues." }, { "left": "issue:44671", "right": "issue:44991", "accept": false, - "reason": "CamemBERT prediction drift and tokenizer loading for est-roberta are unrelated." + "reason": "CamemBERT prediction regressions and tokenizer loading for est-roberta are different problems." }, { "left": "issue:43856", "right": "issue:44756", "accept": false, - "reason": "Qwen3 MoE training memory usage is unrelated to Strix Halo mmap OOM." + "reason": "Qwen3 MoE memory efficiency and Strix Halo mmap/OOM are not the same underlying bug." }, { "left": "issue:39404", "right": "issue:44671", "accept": false, - "reason": "Whisper pipeline behavior and CamemBERT MLM predictions are unrelated." + "reason": "Whisper pipeline language return and CamemBERT masked-LM prediction regressions are unrelated." } ] }, "evaluator_result": { "accept": true, - "feedback": "Grounded and suitably conservative overall. The resize_token_embeddings issues are correctly identified as the only clear duplicate chain, and the false-pair rejections do not overclaim shared root cause. Canonical issue choice is well supported by the packet." + "feedback": "The summary is grounded in the packet and stays suitably conservative. The only accepted soft-edge chain is the resize_token_embeddings family, which is well supported by the titles and similarity scores. 
The other pairwise rejections are cautious and plausible, and the chosen anchor/canonical issue is defensible as the broadest v5 loading regression in the set." }, "evaluator_used": true, "retried": false, @@ -2294,18 +2405,18 @@ "issue:43883|issue:44488", "issue:43957|issue:44164", "issue:43582|issue:43632", - "issue:44488|issue:45081", "issue:44164|issue:44479", + "issue:44488|issue:45081", "issue:40990|issue:43421", "issue:43611|issue:43644", "issue:44488|issue:44987", "issue:44206|issue:44220", "issue:43883|issue:44991", "issue:44811|issue:45584", - "issue:34567|issue:40444", "issue:43716|issue:45084", - "issue:43644|issue:43646", + "issue:34567|issue:40444", "issue:43643|issue:44530", + "issue:43644|issue:43646", "issue:43452|issue:43482", "issue:44451|issue:44987", "issue:36032|issue:42915", @@ -2314,24 +2425,24 @@ "issue:42915|issue:44530", "issue:44849|issue:45325", "issue:43404|issue:44466", - "issue:44987|issue:44991", "issue:36032|issue:43645", + "issue:44987|issue:44991", "issue:43262|issue:43377" ], - "prepared_review_unit_hash": "077a914ff50fa0d219fba6960cbee3004fa9af326550782ac7c9c7e17edb3374", + "prepared_review_unit_hash": "9ce1b3fb6c876777168b003ada4f81f6cda4475110afb221468782be82383e60", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 34, - "serialized_chars": 37985, + "serialized_chars": 37986, "estimated_input_tokens": 9497, "estimated_eval_tokens": 19250 }, @@ -2340,210 +2451,216 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a grab-bag of unrelated Transformers regression reports, with a loose theme around v5 model/tokenizer loading failures. 
I did not find any soft-edge pairs that clearly describe the same underlying bug/change.", - "confidence": 0.93, - "canonical_issue_reason": "issue:43611 is the broadest and most representative loading-regression report in the set; several nearby issues are also Transformers v5 init/load failures, but this one is the cleanest umbrella candidate.", + "summary": "This cluster is a loose bag of unrelated Transformers issues that only share broad surface keywords like loading, tokenizers, Whisper, or v5 regressions. I would not merge any of the soft pairs; the title-level overlap is not enough to indicate the same underlying bug or change.", + "confidence": 0.86, + "canonical_issue_reason": "issue:43611 is the broadest, most central regression-style report in the set: a generic Transformers v5 loading break tied to `base_model_prefix`, which makes it the best representative of the cluster\u2019s overall theme despite the cluster\u2019s mixed topics.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43611 is the best single representative issue for the cluster because it captures the general v5 loading regression theme without being overly model-specific.", + "best_issue_reason": "issue:43611 is the most generally applicable issue here and best captures the recurring v5 model-loading regression pattern seen in several nearby titles.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43425", "right": "issue:44918", "accept": false, - "reason": "Torch version compatibility and Qwen3.5/TRL embedding unpacking are different problems." + "reason": "Different failures: Torch version incompatibility vs Qwen3.5 embedding unpacking with TRL SFT; same broad area, not the same bug." }, { "left": "issue:43596", "right": "issue:44683", "accept": false, - "reason": "DeepSpeed ZeRO-3/BertModel index failure is unrelated to compiled flex_attention on torch>=2.9." 
+ "reason": "One is a DeepSpeed ZeRO3/BertModel indexing crash; the other is a torch>=2.9 flex_attention compilation failure. Different code paths." }, { "left": "issue:44589", "right": "issue:45084", "accept": false, - "reason": "Float8 storage lookup and template-node compilation are unrelated error classes." + "reason": "Float8 storage lookup error vs template-node compilation error; no evidence of a shared underlying defect." }, { "left": "issue:40990", "right": "issue:42947", "accept": false, - "reason": "Perplexity regression and LoRA gradient-checkpointing behavior are not the same bug." + "reason": "Perplexity quality issue on GPT-OSS is unrelated to gradient checkpointing/LoRA behavior." }, { "left": "issue:43425", "right": "issue:45685", "accept": false, - "reason": "Torch 2.10 incompatibility and an MPS histogram int error are unrelated." + "reason": "Torch 2.10 incompatibility is unrelated to an MPS histogram kernel missing for Int." }, { "left": "issue:36032", "right": "issue:43452", "accept": false, - "reason": "T5 tokenizer method conflict and GGUF loading breakage are different code paths." + "reason": "Tokenizer method-name conflict in T5 is unrelated to `gguf_file` loading failures for AutoTokenizer/AutoModel." }, { "left": "issue:44991", "right": "issue:45081", "accept": false, - "reason": "Different tokenizer-loading failures for different models; no clear shared root cause." + "reason": "Both affect tokenizer loading, but one is an EMBEDDIA RoBERTa tokenizer regression and the other is a Mistral regex-patching crash; different bugs." }, { "left": "issue:32090", "right": "issue:39404", "accept": false, - "reason": "Trainer GPU broadcast NoneType error is unrelated to Whisper return_language pipeline behavior." + "reason": "Trainer GPU broadcast NoneType error and Whisper `return_language` pipeline regression are unrelated." 
}, { "left": "issue:43883", "right": "issue:44488", "accept": false, - "reason": "Missing all_tied_weights_keys and failing to load a specific BERT model are unrelated." + "reason": "Both are model-loading failures, but the symptoms and model families differ; not the same underlying issue." }, { "left": "issue:43957", "right": "issue:44164", "accept": false, - "reason": "Meta-device loading regression and extra_state save/load handling are distinct issues." + "reason": "Meta-device loading breakage and extra_state save/load handling are different serialization/loading problems." }, { "left": "issue:43582", "right": "issue:43632", "accept": false, - "reason": "Apple Silicon allocator warmup TypeError is unrelated to the _is_hf_initialized flag regression." + "reason": "Apple Silicon allocator warmup TypeError is unrelated to the `_is_hf_initialized` flag regression." }, { - "left": "issue:44488", - "right": "issue:45081", + "left": "issue:44164", + "right": "issue:44479", "accept": false, - "reason": "Both are tokenizer/model loading problems, but for different models and different failure points." + "reason": "save/from_pretrained extra_state handling is not the same as a Qwen video-input regression." }, { - "left": "issue:44164", - "right": "issue:44479", + "left": "issue:44488", + "right": "issue:45081", "accept": false, - "reason": "extra_state serialization and Qwen video-input regression are unrelated." + "reason": "Both are loading-related, but the affected models and failure modes are different; no shared concrete code-path bug." }, { "left": "issue:40990", "right": "issue:43421", "accept": false, - "reason": "Perplexity on GPT-OSS and runtime special-token post-processing updates are unrelated." + "reason": "Perplexity regression and runtime special-token/post-processor update behavior are unrelated." 
}, { "left": "issue:43611", "right": "issue:43644", "accept": false, - "reason": "Base_model_prefix loading regression and junk-filled non-persistent buffers are different bugs." + "reason": "Both mention v5 model-loading behavior, but one is `base_model_prefix` loading and the other is non-persistent buffer initialization; different root causes." }, { "left": "issue:44488", "right": "issue:44987", "accept": false, - "reason": "Separate model-loading failures for different models; not the same concrete issue." + "reason": "Different model loading regressions: `cjvt/sleng-bert` vs `physical-intelligence/fast`." }, { "left": "issue:44206", "right": "issue:44220", "accept": false, - "reason": "Unsupported center argument in LasrFeatureExtractor and _torch_extract_fbank_features are related thematically but not clearly the same bug." + "reason": "`LasrFeatureExtractor` unsupported argument crash and `_torch_extract_fbank_features()` are related audio areas but not the same concrete failure." }, { "left": "issue:43883", "right": "issue:44991", "accept": false, - "reason": "Missing tied-weights attribute and tokenizer loading failure are unrelated." + "reason": "Missing `all_tied_weights_keys` during loading is not the same as tokenizer loading for EMBEDDIA/est-roberta." }, { "left": "issue:44811", "right": "issue:45584", "accept": false, - "reason": "Both involve Whisper special tokens, but the symptoms and code paths differ enough that they are not clearly the same bug." - }, - { - "left": "issue:34567", - "right": "issue:40444", - "accept": false, - "reason": "TrainerState token accounting and Qwen2.5-VL iterable-dataset multimodal failure are unrelated." + "reason": "Both are Whisper-related, but one is batch decoding skip-special-tokens behavior and the other is empty transcription after align_special_tokens; different bugs." 
}, { "left": "issue:43716", "right": "issue:45084", "accept": false, - "reason": "Mistral-3 dtype mismatch and non-template-node compilation are unrelated." + "reason": "Mistral image-preprocessor dtype mismatch and template compilation failure are unrelated." }, { - "left": "issue:43644", - "right": "issue:43646", + "left": "issue:34567", + "right": "issue:40444", "accept": false, - "reason": "Non-persistent buffer serialization and custom model initialization regressions are different failures." + "reason": "TrainerState token counting and Qwen2.5-VL iterable dataset multi-image finetuning are unrelated." }, { "left": "issue:43643", "right": "issue:44530", "accept": false, - "reason": "AutoConfig trust_remote_code field loss and PagedAttentionCache linear_attention crash are unrelated." + "reason": "`AutoConfig` missing fields with `trust_remote_code=True` is unrelated to Qwen3.5 PagedAttentionCache group-type crashes." + }, + { + "left": "issue:43644", + "right": "issue:43646", + "accept": false, + "reason": "Both are v5 custom-model regressions, but one is non-persistent buffer junk and the other is custom model initialization; too broad and not the same bug." }, { "left": "issue:43452", "right": "issue:43482", "accept": false, - "reason": "Both mention GGUF loading, but one is a broad from_pretrained break and the other is a specific model report without enough evidence of the same code-path." + "reason": "`gguf_file` loading breakage and Qwen2.5-GGUF v5 loading failure are related only at a high level; not enough to treat as the same defect." }, { "left": "issue:44451", "right": "issue:44987", "accept": false, - "reason": "Loading ScandiBERT and loading physical-intelligence/fast are separate model-specific failures." + "reason": "Model loading failure for `vesteinn/ScandiBERT` is unrelated to `physical-intelligence/fast` loading failure." 
}, { "left": "issue:36032", "right": "issue:42915", "accept": false, - "reason": "T5 tokenizer conflict and Qwen3Moe FP8 failure are unrelated." + "reason": "T5 tokenizer special-token conflict and Qwen3Moe FineGrainedFP8Config failure are unrelated." }, { "left": "issue:43317", "right": "issue:43482", "accept": false, - "reason": "Dequantized model offload failure and Qwen2.5-GGUF loading failure are distinct regressions." + "reason": "device_map/offload dequantized-model loading and Qwen2.5-GGUF loading failure are different loading paths and different bugs." }, { "left": "issue:43541", "right": "issue:45084", "accept": false, - "reason": "Torch dynamo grouped_mm tracing failure is unrelated to non-template-node compilation." + "reason": "Mixtral grouped_mm torch dynamo tracing error is unrelated to template-node compilation errors." }, { "left": "issue:42915", "right": "issue:44530", "accept": false, - "reason": "Qwen3Moe FP8 config failure and Qwen3.5 PagedAttentionCache crash are different bugs." + "reason": "Qwen3Moe FP8 config failure and PagedAttentionCache linear_attention crash are different Qwen issues with different failure modes." }, { "left": "issue:44849", "right": "issue:45325", "accept": false, - "reason": "Qwen3.5 hidden-states behavior and Qwen2.5-VL rope-index scaling are unrelated." + "reason": "Both are Qwen3.5 regressions, but one is output_hidden_states handling and the other is video rope-position scaling; not the same bug." }, { "left": "issue:43404", "right": "issue:44466", "accept": false, - "reason": "Mistral3 weight-tying bug and device-dependent serialization inconsistency are related in theme but not the same concrete failure." + "reason": "Both concern tied weights/lm_head, but one is a specific model not tying weights and the other is device-dependent serialization of lm_head.weight; related area, not the same concrete bug." 
+ }, + { + "left": "issue:36032", + "right": "issue:43645", + "accept": false, + "reason": "Tokenizer special-token method conflict is unrelated to custom model initialization in Jupyter notebooks." }, { "left": "issue:44987", "right": "issue:44991", "accept": false, - "reason": "Different model-loading regressions affecting different repositories/models." + "reason": "Both are loading failures, but they involve different models and different parts of the stack (model vs tokenizer)." }, { "left": "issue:43262", "right": "issue:43377", "accept": false, - "reason": "Audio chat-template sampling-rate default and MIMI padding-mask batching bug are unrelated." + "reason": "Audio processor chat-template sampling-rate default and MIMI encoder padding-mask batching are unrelated." } ] }, @@ -2628,8 +2745,8 @@ "issue:43377|issue:43819", "issue:44451|issue:45081", "issue:44220|issue:44683", - "issue:43404|issue:45127", "issue:44265|issue:44610", + "issue:43404|issue:45127", "issue:44987|issue:45081", "issue:42915|issue:45084", "issue:43632|issue:44756", @@ -2643,218 +2760,218 @@ "issue:43317|issue:43856", "issue:40990|issue:44811", "issue:44079|issue:45663", - "issue:43421|issue:44568", "issue:42175|issue:45542", + "issue:43421|issue:44568", "issue:43531|issue:44877" ], - "prepared_review_unit_hash": "14df59b0869d2fee19904f1fb1d582b0ca6707931208f8491b30f2e972f4be4e", + "prepared_review_unit_hash": "fc9d6ba6f1a358bd2278e135540c8147bb4e6e117f4bef7de2279c8c25ea8d3f", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 29, - "serialized_chars": 36385, - "estimated_input_tokens": 9097, - "estimated_eval_tokens": 18450 + "serialized_chars": 36393, + "estimated_input_tokens": 9099, + 
"estimated_eval_tokens": 18454 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly heterogeneous. I only see two clear duplicate subgroups: the Qwen3.5 save_pretrained checkpoint-key regression, and the Qwen2.5-VL / Qwen3-VL temporal-position-id video regression. The rest are broad-similarity matches across different bugs and should stay separate.", + "summary": "This cluster is mostly heterogeneous. I only see two likely duplicate pairs: the Qwen3.5 `save_pretrained` regression pair and the TensorFlow backend installation/package pair. The rest are separate bugs affecting different models or code paths.", "confidence": 0.78, - "canonical_issue_reason": "issue:45216 is the clearest representative: a concrete Qwen3.5 save_pretrained regression, and issue:45357 reads like a narrower follow-up on the same incorrect visual-encoder key handling.", + "canonical_issue_reason": "No single canonical issue cleanly covers the whole cluster because it splits into unrelated bug themes; if forced to pick one representative, issue 45216 is the clearest regression-style report in the strongest duplicate pair.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45216 is the best issue to anchor the cluster because it is concrete, regression-scoped, and has the strongest duplicate evidence among the items.", + "best_issue_reason": "Issue 45216 is the best standalone representative among the duplicate-like reports: it is broad, regression-focused, and its narrower follow-up (45357) maps to the same `save_pretrained` problem.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43646", "right": "issue:44534", "accept": false, - "reason": "Both are Transformers v5 regressions, but one is custom-model init and the other is buffer initialization; different code paths and symptoms." 
+ "reason": "Different failures: custom model init vs non-persistent buffer initialization." }, { "left": "issue:45084", "right": "issue:45507", "accept": false, - "reason": "Unrelated failures: template compilation versus an invalid method call in GraniteMoEHybrid." + "reason": "Unrelated code paths: template compilation error vs GraniteMoEHybrid method dispatch." }, { "left": "issue:43994", "right": "issue:44610", "accept": false, - "reason": "Different models and different bug types: bad outputs/perplexity versus a processor image-size mismatch." + "reason": "Different models and symptoms; one is bad outputs, the other is a processor/image-size mismatch." }, { "left": "issue:43645", "right": "issue:44534", "accept": false, - "reason": "Notebook custom-model initialization and non-persistent buffer junk are separate regression mechanisms." + "reason": "Jupyter custom-model init issue is distinct from the buffer junk regression." }, { "left": "issue:41553", "right": "issue:43582", "accept": false, - "reason": "Bad AutoTokenizer error message for Voxtral is unrelated to Apple Silicon allocator warmup TypeError." + "reason": "Bad AutoTokenizer error for Voxtral is unrelated to an Apple Silicon warmup TypeError." }, { "left": "issue:43322", "right": "issue:43540", "accept": false, - "reason": "Both are multimodal model-loading issues, but they affect different models and different failure modes." + "reason": "Both are multimodal/video-related, but they target different models and concrete failures." }, { "left": "issue:44403", "right": "issue:45081", "accept": false, - "reason": "Generic loading noise versus a specific Mistral tokenizer regex crash; not the same bug." + "reason": "Generic loading noise is not the same bug as the Mistral regex backend crash." 
}, { "left": "issue:45216", "right": "issue:45357", "accept": true, - "reason": "Both report the same Qwen3.5 save_pretrained regression producing incorrect checkpoint keys, with 45357 looking like a later, narrower manifestation." + "reason": "Same Qwen3.5 `save_pretrained` regression; 45357 is a narrower symptom of the same broken save path." }, { "left": "issue:43377", "right": "issue:43819", "accept": false, - "reason": "Audio batch-vs-single mismatch from padding-mask handling is not the same as DAC from_latents / STE mismatch." + "reason": "Different model families and different numerical mismatch causes." }, { "left": "issue:44451", "right": "issue:45081", "accept": false, - "reason": "ScandiBERT load failure and Mistral regex patch crash are unrelated tokenizer/model-loading problems." + "reason": "Loading a specific model is unrelated to the Mistral tokenizer backend crash." }, { "left": "issue:44220", "right": "issue:44683", "accept": false, - "reason": "Fbank extraction and compiled flex_attention are unrelated subsystems." + "reason": "Audio feature extraction and compiled flex_attention are separate subsystems." }, { - "left": "issue:43404", - "right": "issue:45127", + "left": "issue:44265", + "right": "issue:44610", "accept": false, - "reason": "One is a missing lm_head tie in Mistral3; the other is LoRA merge collapse with extended vocab. Similar theme, different bug." + "reason": "Export failure due to torch_compilable_check is unrelated to the OmDet processor size bug." }, { - "left": "issue:44265", - "right": "issue:44610", + "left": "issue:43404", + "right": "issue:45127", "accept": false, - "reason": "torch.export / torch_compilable_check is unrelated to the OmDet-Turbo processor size issue." + "reason": "Both mention weight tying, but they describe different models and different failure modes." 
}, { "left": "issue:44987", "right": "issue:45081", "accept": false, - "reason": "Loading physical-intelligence/fast and the Mistral tokenizer backend crash are different failure reports." + "reason": "Different model-loading regressions; no shared code-path evidence." }, { "left": "issue:42915", "right": "issue:45084", "accept": false, - "reason": "Qwen3Moe FP8 config failure is unrelated to template-node compilation." + "reason": "Qwen3Moe FP8 config failure is unrelated to the template-node compilation error." }, { "left": "issue:43632", "right": "issue:44756", "accept": false, - "reason": "_is_hf_initialized / model init behavior is unrelated to mmap OOM avoidance." + "reason": "Different bugs: `_is_hf_initialized` breakage vs mmap/OOM handling." }, { "left": "issue:44479", "right": "issue:45325", - "accept": true, - "reason": "Both point to the same Qwen VL temporal-position-id / rope-index regression affecting video or still-image paths in 5.3.0." + "accept": false, + "reason": "Related model family, but one is a broad video regression and the other is a still-image rope-index scaling bug." }, { "left": "issue:44062", "right": "issue:44987", "accept": false, - "reason": "AddedToken keyword conflict is unrelated to loading physical-intelligence/fast." + "reason": "AddedToken keyword conflict is unrelated to loading `physical-intelligence/fast`." }, { "left": "issue:43296", "right": "issue:43531", "accept": false, - "reason": "PaddleOCR-VL loading and Qwen3-MoE sliding_window behavior are different issues." + "reason": "Different model families and failure mechanisms." }, { "left": "issue:42915", "right": "issue:44291", "accept": false, - "reason": "FP8 Qwen3Moe failure and unexpected _is_hf_initialized in init_empty_weights are separate Transformer v5 regressions." + "reason": "FineGrainedFP8Config failure is not the same as the `_is_hf_initialized` init_empty_weights TypeError." 
}, { "left": "issue:43611", "right": "issue:44534", "accept": false, - "reason": "base_model_prefix loading breakage and non-persistent buffer junk are distinct initialization bugs." + "reason": "Base-model-prefix loading regression is distinct from non-persistent buffer corruption." }, { "left": "issue:36032", "right": "issue:43742", "accept": false, - "reason": "T5Tokenizer add_special_tokens conflict and MobileLLM-125M key error are unrelated tokenizer/model-loading bugs." + "reason": "Tokenizer method name conflict is unrelated to MobileLLM key errors." }, { "left": "issue:40444", "right": "issue:45325", "accept": false, - "reason": "Both touch Qwen2.5-VL, but one is multi-image IterableDataset training and the other is temporal position-id scaling." + "reason": "IterableDataset multi-image failure is not the same as the Qwen2.5-VL rope-index regression." }, { "left": "issue:43317", "right": "issue:43856", "accept": false, - "reason": "device_map/offload loading failure and Qwen3 MoE memory usage are not the same code-path issue." + "reason": "Dequantized offload/device-map bug is unrelated to Qwen3 MoE memory inefficiency." }, { "left": "issue:40990", "right": "issue:44811", "accept": false, - "reason": "High perplexity on gpt-oss and Whisper batch_decode skip_special_tokens behavior are unrelated." + "reason": "Perplexity anomaly and Whisper decode behavior are unrelated." }, { "left": "issue:44079", "right": "issue:45663", "accept": false, - "reason": "ModelOutput key assignment and Gemma-4 FSDP2 shared_kv_states rebuilding are different bugs." - }, - { - "left": "issue:43421", - "right": "issue:44568", - "accept": false, - "reason": "Runtime special-token post-processor updates and mdeberta add_special_tokens BOS/EOS regression are similar area, but not clearly the same underlying bug." + "reason": "ModelOutput key assignment and Gemma4 attention state rebuilding are different bugs." 
}, { "left": "issue:42175", "right": "issue:45542", + "accept": true, + "reason": "Both describe the same TensorFlow packaging/backend omission after install; 45542 is a narrower manifestation of the same missing-TF problem." + }, + { + "left": "issue:43421", + "right": "issue:44568", "accept": false, - "reason": "Both involve TensorFlow optional deps, but one is install extras packaging and the other is a tensorboard-without-TF runtime backend error." + "reason": "Related tokenizer-special-token area, but not enough evidence they are the same concrete bug." }, { "left": "issue:43531", "right": "issue:44877", "accept": false, - "reason": "Qwen3-MoE sliding_window and granite_speech config strictness are unrelated." + "reason": "Qwen3-MoE sliding-window behavior is unrelated to the granite_speech config load failure." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is grounded in the packet and stays mostly conservative. The two accepted pairs are plausibly the same bug family based on the titles, and the other soft-edge rejections avoid over-claiming duplicate status across clearly different models or failure modes." + "feedback": "The summary is grounded in the packet and stays conservative. The two accepted pairs are plausible duplicates from their titles alone: 45216/45357 both concern Qwen3.5 `save_pretrained`, and 42175/45542 both concern missing/undefined TensorFlow backend installation behavior. The remaining rejects are appropriately cautious given the evidence." 
}, "evaluator_used": true, "retried": false, @@ -2897,19 +3014,18 @@ "issue:43749", "issue:43792", "issue:43818", + "issue:43828", "issue:43881", "issue:43883", "issue:43994", "issue:44079", "issue:44265", "issue:44291", - "issue:44387", "issue:44403", "issue:44466", "issue:44488", "issue:44534", "issue:44589", - "issue:44617", "issue:44683", "issue:44829", "issue:44898", @@ -2934,260 +3050,257 @@ "issue:44829|issue:45479", "issue:43482|issue:45538", "issue:33666|issue:40444", - "issue:44403|issue:44991", "issue:43792|issue:44589", + "issue:44403|issue:44991", "issue:44079|issue:44534", "issue:43377|issue:45491", "issue:35707|issue:40444", "issue:43482|issue:43525", "issue:43749|issue:44079", - "issue:43881|issue:44488", "issue:44291|issue:44589", + "issue:43881|issue:44488", "issue:43644|issue:44079", "issue:40444|issue:45381", "issue:45325|issue:45381", "issue:44265|issue:45072", "issue:41720|issue:45081", "issue:43632|issue:45072", - "issue:43611|issue:43646", "issue:43531|issue:44291", + "issue:43611|issue:43646", "issue:42175|issue:43296", "issue:43994|issue:44898", "issue:43454|issue:44466", "issue:42915|issue:43482", "issue:43526|issue:43883", - "issue:44387|issue:44617" + "issue:43828|issue:44683" ], - "prepared_review_unit_hash": "2d109976159c33823f486d687d02b80cfbf395531e73e16c6ea3ded976b51dde", + "prepared_review_unit_hash": "e8ea2e32678e773748ab36fc95676192af28330d8609650f76e5133572d8eead", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { - "node_count": 48, - "item_count": 48, + "node_count": 47, + "item_count": 47, "soft_pair_count": 32, - "serialized_chars": 37355, - "estimated_input_tokens": 9339, - "estimated_eval_tokens": 18934 + "serialized_chars": 36813, + "estimated_input_tokens": 9204, + 
"estimated_eval_tokens": 18664 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a grab-bag of recent Transformers v5 regressions, with a few genuine near-duplicates around model initialization and Qwen2.5-VL position-id handling. Most soft pairs are only superficially similar and should stay separate.", - "confidence": 0.71, - "canonical_issue_reason": "issue:45020 is the broadest umbrella in the set: it captures the general class of recent Transformers-version regressions breaking model loading/behavior, which best represents the shared triage theme across many of these reports.", + "summary": "These are mostly distinct model- or release-specific regression reports. A few pairs are thematically close (especially v5 and Qwen2.5-VL issues), but none are strong enough to merge as the same underlying bug/change.", + "confidence": 0.84, + "canonical_issue_reason": "issue:45020 is the broadest open umbrella for recent-transformers regressions around remote_code/model loading, so it best fits as the cluster anchor among these mostly specific breakage reports.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45020 is the best single triage anchor because it is open, broad, and most likely to accumulate links from the surrounding v5 regression reports.", + "best_issue_reason": "issue:45020 is the most useful representative issue because it is broad, current, and open, making it the best landing point for related regression reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:42915", "right": "issue:44589", "accept": false, - "reason": "Both involve fp8/floats, but they report different failures and likely different code paths (Qwen3MoE FineGrainedFP8Config vs missing Float8 storage object)." 
+ "reason": "Both mention FP8/float8 problems, but one is a FineGrainedFP8Config failure and the other is a Float8 storage lookup error; these look like different bugs." }, { "left": "issue:43818", "right": "issue:45357", "accept": false, - "reason": "Different models and failures: Video-LLaVA tower/weight-sharing bug vs Qwen3.5 save_pretrained visual-key regression." + "reason": "Different models and different code paths: Video-LLaVA tower/weights vs Qwen3.5 save_pretrained visual-key handling." }, { "left": "issue:43632", "right": "issue:44534", "accept": false, - "reason": "Both are v5 regressions, but one is about `_is_hf_initialized` while the other is about non-persistent buffers; not the same bug." + "reason": "Both are transformers v5 regressions, but one concerns `_is_hf_initialized` and the other non-persistent buffer serialization; not the same bug." }, { "left": "issue:43425", "right": "issue:44403", "accept": false, - "reason": "Unrelated issues: Torch version compatibility vs noisy transformer loading output." + "reason": "Torch compatibility/versioning issue vs a generic loading-noise report; no concrete shared failure mode." }, { "left": "issue:36032", "right": "issue:41720", "accept": false, - "reason": "Tokenizer name conflict and Qwen3 auto device-map CUDA assert are unrelated." + "reason": "Tokenizer method-name conflict and a CUDA assert during auto device mapping are unrelated failures." }, { "left": "issue:44683", "right": "issue:45020", "accept": false, - "reason": "torch.compile/flex_attention failure is a different problem from remote_code model-loading regressions." + "reason": "One is a torch>=2.9 compiled flex_attention failure; the other is a broader remote_code loading regression. Different issues." }, { "left": "issue:44829", "right": "issue:45479", "accept": false, - "reason": "Different sequence-classification failures with different triggers; not the same underlying bug." 
+ "reason": "Both affect classification training, but one is tied to flash_attention_3 and the other to `num_labels=1` loss behavior; different root causes." }, { "left": "issue:43482", "right": "issue:45538", "accept": false, - "reason": "Qwen2.5-GGUF loading under v5 and CLIPTokenizer model_max_length are unrelated tokenizer/loading behaviors." + "reason": "GGUF loading under transformers v5 and CLIPTokenizer's absurd `model_max_length` are unrelated." }, { "left": "issue:33666", "right": "issue:40444", "accept": false, - "reason": "Both are Qwen2-VL fine-tuning issues, but one is multi-GPU training and the other is IterableDataset/multi-image batching." + "reason": "Different Qwen2-VL finetuning problems: multi-GPU training vs iterable-dataset multi-image prompting." }, { - "left": "issue:44403", - "right": "issue:44991", + "left": "issue:43792", + "right": "issue:44589", "accept": false, - "reason": "General loading noise vs a specific tokenizer-loading regression for EMBEDDIA/est-roberta." + "reason": "Whisper runtime failure and a Float8 storage error do not point to the same underlying bug." }, { - "left": "issue:43792", - "right": "issue:44589", + "left": "issue:44403", + "right": "issue:44991", "accept": false, - "reason": "Whisper runtime failure and Float8 storage lookup failure are different issues." + "reason": "Generic loading noise vs a specific tokenizer-loading regression for EMBEDDIA/est-roberta; not the same defect." }, { "left": "issue:44079", "right": "issue:44534", "accept": false, - "reason": "ModelOutput key assignment and non-persistent buffer junk are distinct internal-data regressions." + "reason": "ModelOutput key assignment and non-persistent buffer serialization are different internal behaviors." }, { "left": "issue:43377", "right": "issue:45491", "accept": false, - "reason": "Both mention batched/masked behavior, but the affected models and concrete bugs are different." 
+ "reason": "Both involve batching/padding effects, but one is MIMI encoder padding-mask mismatch and the other is Gemma3 sliding-window NaNs; different code paths." }, { "left": "issue:35707", "right": "issue:40444", "accept": false, - "reason": "Progressive generation with inputs_embeds/past_key_values is unrelated to Qwen2.5-VL multi-image IterableDataset finetuning." + "reason": "Progressive generation with `inputs_embeds`/`past_key_values` is unrelated to Qwen2.5-VL multi-image IterableDataset failures." }, { "left": "issue:43482", "right": "issue:43525", "accept": false, - "reason": "GGUF loading under v5 and missing pad_token_id on Llama4Config are different failures." + "reason": "GGUF loading failure and missing `pad_token_id` on `Llama4Config` are distinct model/load-path issues." }, { "left": "issue:43749", "right": "issue:44079", "accept": false, - "reason": "FSDP CPU RAM efficient loading and ModelOutput key handling are not the same bug." + "reason": "FSDP CPU RAM efficient loading is unrelated to ModelOutput key bookkeeping." }, { - "left": "issue:43881", - "right": "issue:44488", + "left": "issue:44291", + "right": "issue:44589", "accept": false, - "reason": "Different model-loading failures with no clear shared code path." + "reason": "`init_empty_weights` with unexpected `_is_hf_initialized` argument and a Float8 storage lookup error are separate problems." }, { - "left": "issue:44291", - "right": "issue:44589", + "left": "issue:43881", + "right": "issue:44488", "accept": false, - "reason": "Unexpected `_is_hf_initialized` argument and missing Float8 storage object are different regressions." + "reason": "Model load failure for glm-4v-9b vs tokenizer loading for cjvt/sleng-bert; no shared bug." }, { "left": "issue:43644", "right": "issue:44079", "accept": false, - "reason": "Both are v5 internal-behavior reports, but one is about buffers and the other about ModelOutput keys." 
+ "reason": "Non-persistent buffer junk and ModelOutput key assignment are unrelated regressions." }, { "left": "issue:40444", "right": "issue:45381", "accept": false, - "reason": "Multi-image iterable finetuning and Qwen2.5-VL video vision_position_ids are related only at a high level." + "reason": "Multi-image prompt finetuning and Qwen2.5-VL video position-id errors are different modalities and symptoms." }, { "left": "issue:45325", "right": "issue:45381", - "accept": true, - "reason": "Same Qwen2.5-VL temporal/vision position-id bug: both reports point to incorrect position_id computation in v5.3.0." + "accept": false, + "reason": "Both are Qwen2.5-VL position-id issues, but one is still-image temporal scaling and the other is video vision-position IDs; close, but not clearly the same bug." }, { "left": "issue:44265", "right": "issue:45072", "accept": false, - "reason": "torch.export/torch_compilable_check failure and bfloat16 dtype mismatch are unrelated." + "reason": "torch.export / torch_compilable_check failures and bfloat16 dtype mismatches are not the same code-path issue." }, { "left": "issue:41720", "right": "issue:45081", "accept": false, - "reason": "Qwen3 auto device mapping CUDA assert is unrelated to the Mistral regex patch crash." + "reason": "A800 auto device mapping CUDA asserts and Mistral tokenizer regex patch crashes are unrelated." }, { "left": "issue:43632", "right": "issue:45072", "accept": false, - "reason": "`_is_hf_initialized` breakage and bfloat16 dtype mismatches do not share the same concrete bug." - }, - { - "left": "issue:43611", - "right": "issue:43646", - "accept": true, - "reason": "Both describe the same v5 model-initialization/loading regression affecting custom models/base_model_prefix behavior." + "reason": "`_is_hf_initialized` regression and bfloat16 dtype mismatch are distinct v5-related bugs." 
}, { "left": "issue:43531", "right": "issue:44291", "accept": false, - "reason": "Qwen3-MoE sliding-window issues and init_empty_weights `_is_hf_initialized` errors are different code paths." + "reason": "Qwen3-MoE sliding-window behavior and `init_empty_weights` argument handling do not share the same root cause." + }, + { + "left": "issue:43611", + "right": "issue:43646", + "accept": false, + "reason": "Both are v5 loading/init regressions, but one is about `base_model_prefix` and the other custom model initialization; too different to treat as one bug." }, { "left": "issue:42175", "right": "issue:43296", "accept": false, - "reason": "Backend packaging omission and PaddleOCR-VL loading failure are unrelated." + "reason": "Packaging/back-end selection under pip install and PaddleOCR-VL loading in vLLM are unrelated." }, { "left": "issue:43994", "right": "issue:44898", "accept": false, - "reason": "Different vision-model failures: SIGLIP2 nonsensical outputs vs Perceiver non-default-resolution classification failure." + "reason": "Different vision-model failures: nonsensical SigLIP2 outputs vs Perceiver resolution/interpolation failure." }, { "left": "issue:43454", "right": "issue:44466", "accept": false, - "reason": "AyaVision lm_head tying failure and device-dependent serialization of tied weights are distinct problems." + "reason": "One is missing weight tying in AyaVisionConfig; the other is device-dependent serialization of tied weights. Related theme, but not the same bug." }, { "left": "issue:42915", "right": "issue:43482", "accept": false, - "reason": "Qwen3MoE FP8 failure and Qwen2.5-GGUF v5 loading failure are unrelated." + "reason": "FineGrainedFP8Config failure and Qwen2.5-GGUF loading failure are unrelated." }, { "left": "issue:43526", "right": "issue:43883", "accept": false, - "reason": "BeitImageProcessorFast label reduction bug and Molmo tied-weights attribute error are unrelated." 
+ "reason": "BeitImageProcessorFast label reduction and Molmo missing `all_tied_weights_keys` are different implementation bugs." }, { - "left": "issue:44387", - "right": "issue:44617", + "left": "issue:43828", + "right": "issue:44683", "accept": false, - "reason": "Int4 memory regression and Sam3Video OOM are both memory-related but not the same underlying bug." + "reason": "Autocast dtype mismatch in Phi-tiny-MoE and compiled flex_attention failure on torch>=2.9 are separate issues." } ] }, - "evaluator_result": { - "accept": true, - "feedback": "The summary stays grounded in the packet: it correctly treats the set as mostly unrelated v5 regressions with only a couple of plausible near-duplicates. The accepted soft pairs are reasonably conservative from the titles alone, and the chosen umbrella issue (45020) is a defensible broad anchor for the cluster." - }, - "evaluator_used": true, + "evaluator_result": null, + "evaluator_used": false, "retried": false, - "accepted_nontrivial_soft_edge": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -3228,21 +3341,21 @@ "issue:43749", "issue:43792", "issue:43819", - "issue:43828", "issue:43873", "issue:43881", "issue:43957", "issue:44186", "issue:44265", "issue:44291", + "issue:44387", "issue:44466", "issue:44534", "issue:44589", "issue:44610", + "issue:44617", "issue:44683", "issue:44877", "issue:44898", - "issue:44977", "issue:44991", "issue:45072", "issue:45325", @@ -3254,7 +3367,7 @@ "issue:45701" ], "soft_pairs": [ - "issue:43828|issue:44683", + "issue:44387|issue:44617", "issue:43525|issue:44589", "issue:43792|issue:43957", "issue:43881|issue:44991", @@ -3270,14 +3383,14 @@ "issue:44186|issue:44265", "issue:44610|issue:45325", "issue:35707|issue:41720", - "issue:43606|issue:43819", "issue:43454|issue:43550", + "issue:43606|issue:43819", "issue:43377|issue:44610", "issue:43377|issue:44898", "issue:43454|issue:45072", - "issue:44466|issue:45357", - 
"issue:43749|issue:44534", "issue:39692|issue:40444", + "issue:43749|issue:44534", + "issue:44466|issue:45357", "issue:43792|issue:44683", "issue:43454|issue:43720", "issue:43296|issue:44991", @@ -3285,245 +3398,235 @@ "issue:41720|issue:43322", "issue:45632|issue:45684", "issue:43526|issue:43881", - "issue:43404|issue:45357", - "issue:41720|issue:44977" + "issue:43404|issue:45357" ], - "prepared_review_unit_hash": "a3c08204e9219a529d9a164bf9eb1e0aa8cfcd127865a3c6135ed9e14cf2f362", + "prepared_review_unit_hash": "a3f019850be47be4d50b99b16ba25fc72bb988de21f306a90e943eba929fce66", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 33, - "serialized_chars": 37631, - "estimated_input_tokens": 9408, - "estimated_eval_tokens": 19072 + "soft_pair_count": 32, + "serialized_chars": 37325, + "estimated_input_tokens": 9332, + "estimated_eval_tokens": 18920 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a set of unrelated model-loading / processor / generation bugs. The only plausible duplicate pair is the two offloading+quantization loading failures; the rest are distinct code paths and should stay separate.", - "confidence": 0.63, - "canonical_issue_reason": "issue:43873 is the broadest and most central report in the cluster: it describes offloading not working with quantization, which subsumes the more specific device_map/offload loading failure.", + "summary": "This cluster is mostly a loose set of Transformer/model-loading regressions with only superficial title similarity. 
None of the soft pairs look like the same underlying bug or change, so I would not merge any of them as duplicates.", + "confidence": 0.95, + "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "issue:43873 is the best representative issue because it covers the shared offloading/quantization failure mode and is phrased as the umbrella bug rather than one narrow instance.", + "best_issue_reason": "issue:43632 is the broadest representative of the cluster because it describes a general Transformers v5 initialization/loading regression that could sit behind several downstream failures, but it is still too general to serve as a true duplicate anchor for the rest.", "best_pr_reason": null, "soft_edge_verdicts": [ { - "left": "issue:43828", - "right": "issue:44683", + "left": "issue:44387", + "right": "issue:44617", "accept": false, - "reason": "Different failures: autocast dtype mismatch vs compiled flex_attention failure on torch>=2.9." + "reason": "Both mention OOM, but one is int4 quantization CUDA reserved memory and the other is a Sam3Video load-time CUDA OOM; different model and failure mechanism." }, { "left": "issue:43525", "right": "issue:44589", "accept": false, - "reason": "Unrelated symptoms and code paths: missing pad_token_id vs missing Float8 storage object." + "reason": "Llama4Config missing pad_token_id is a config-attribute bug; Float8 storage lookup is a serialization/storage-type error. No shared code path." }, { "left": "issue:43792", "right": "issue:43957", "accept": false, - "reason": "Different problem areas: Whisper runtime failure vs meta-device loading regression." + "reason": "Whisper-large-v2 runtime failure and meta-device loading regressions are both loading-related, but they are distinct model/path problems." }, { "left": "issue:43881", "right": "issue:44991", "accept": false, - "reason": "GLM-4V loading failure and tokenizer loading for a different model are not the same bug." 
+ "reason": "glm-4v-9b loading failure and tokenizer loading for EMBEDDIA/est-roberta are unrelated models with different failure surfaces." }, { "left": "issue:43701", "right": "issue:45325", "accept": false, - "reason": "Resume-from-checkpoint key mismatch is unrelated to Qwen2.5-VL rope/position-id scaling." + "reason": "Checkpoint resume key mismatch is a training-state issue; Qwen2.5-VL rope index scaling is an inference/indexing bug." }, { "left": "issue:43262", "right": "issue:43746", "accept": false, - "reason": "Audio chat-template sampling-rate default bug is unrelated to GraniteSpeech PEFT checkpoint loading." + "reason": "Audio chat-template sampling-rate default and GraniteSpeech PEFT checkpoint loading are separate audio/model-loading issues." }, { "left": "issue:43317", "right": "issue:43873", - "accept": true, - "reason": "Both report the same offloading/quantization loading path failing under device_map/CPU-GPU offload; one is a specific instance of the broader bug." + "accept": false, + "reason": "Both involve offloading/quantization, but one is a specific dequantized-model load failure and the other is a broader offloading behavior report." }, { "left": "issue:43632", "right": "issue:45563", "accept": false, - "reason": "Global v5 initialization breakage is unrelated to a paged generate warning about num_return_sequences." + "reason": "_is_hf_initialized breaks model init/loading; stale num_return_sequences warning is a generate warning bug." }, { "left": "issue:40990", "right": "issue:45584", "accept": false, - "reason": "High perplexity on GPT-OSS and Whisper empty-transcription generation are unrelated model behaviors." + "reason": "High perplexity on gpt-oss-20b and Whisper empty-transcription failure are unrelated model behaviors." }, { "left": "issue:44291", "right": "issue:44877", "accept": false, - "reason": "init_empty_weights/_is_hf_initialized argument breakage is not the same as strict config blocking granite_speech loading." 
+ "reason": "Unexpected _is_hf_initialized argument during init_empty_weights and strict GraniteSpeech config rejection are different root causes." }, { "left": "issue:34634", "right": "issue:35707", "accept": false, - "reason": "Bark voice_preset issue and progressive generation with inputs_embeds/past_key_values are unrelated." + "reason": "BarkProcessor voice_preset and progressive generation with inputs_embeds/past_key_values are unrelated API/model issues." }, { "left": "issue:43606", "right": "issue:44265", "accept": false, - "reason": "CPU offload device mismatch for Bark is unrelated to torch.export failing on torch_compilable_check." + "reason": "Bark-small CPU offload device mismatch and torch.export failure with torch_compilable_check are different subsystems." }, { "left": "issue:43873", "right": "issue:45701", "accept": false, - "reason": "Quantization/offloading failure is unrelated to a tokenization version-change regression." + "reason": "Quantization/offloading behavior and tokenizer changes across versions are not the same bug." }, { "left": "issue:44186", "right": "issue:44265", "accept": false, - "reason": "LayoutLMv2 tokenizer crash is unrelated to torch.export compatibility." + "reason": "LayoutLMv2Tokenizer padding/truncation crashes are a tokenizer bug; torch.export with torch_compilable_check is an export-time failure." }, { "left": "issue:44610", "right": "issue:45325", "accept": false, - "reason": "Processor image-size mismatch is unrelated to Qwen2.5-VL temporal position-id scaling." + "reason": "OmDet-Turbo processor size mismatch and Qwen2.5-VL rope-index scaling affect different model components." }, { "left": "issue:35707", "right": "issue:41720", "accept": false, - "reason": "Progressive generation with cached embeddings is unrelated to Qwen3 auto device mapping cudaErrorAssert." + "reason": "inputs_embeds/past_key_values generation issue and Qwen3 auto-device-map cudaErrorAssert are unrelated failures." 
}, { - "left": "issue:43606", - "right": "issue:43819", + "left": "issue:43454", + "right": "issue:43550", "accept": false, - "reason": "Both touch audio-ish models, but one is CPU offload device mismatch and the other is DAC latent consistency; not the same bug." + "reason": "AyaVision lm_head weight tying and Bamba-9B-v2 torch.compile+SDPA are different models and code paths." }, { - "left": "issue:43454", - "right": "issue:43550", + "left": "issue:43606", + "right": "issue:43819", "accept": false, - "reason": "lm_head weight tying failure is unrelated to torch.compile+SDPA behavior." + "reason": "Bark-small offload device mismatch and DAC.from_latents mismatch are separate audio-model bugs." }, { "left": "issue:43377", "right": "issue:44610", "accept": false, - "reason": "Batched-vs-single encoder output mismatch is unrelated to OmDet-Turbo processor resolution mismatch." + "reason": "MIMI batched-vs-single padding-mask bug and OmDet-Turbo processor/model-size mismatch are unrelated." }, { "left": "issue:43377", "right": "issue:44898", "accept": false, - "reason": "Missing padding mask support in MIMI encoder is not the same as Perceiver non-default resolution handling." + "reason": "Missing padding-mask support in MIMI and Perceiver non-default-resolution failure are different model-specific issues." }, { "left": "issue:43454", "right": "issue:45072", "accept": false, - "reason": "Weight-tying/serialization issue is unrelated to bf16 dtype mismatches in SwitchTransformers/TimmWrapperModel." + "reason": "lm_head tying in AyaVision and dtype mismatches in SwitchTransformers/TimmWrapperModel are unrelated regressions." }, { - "left": "issue:44466", - "right": "issue:45357", + "left": "issue:39692", + "right": "issue:40444", "accept": false, - "reason": "General lm_head serialization inconsistency is unrelated to Qwen3.5 visual encoder key saving." 
+ "reason": "SigLIP2 docs example errors and Qwen2.5-VL IterableDataset training failure are different problems; one is documentation, the other training/input handling." }, { "left": "issue:43749", "right": "issue:44534", "accept": false, - "reason": "FSDP CPU RAM efficient loading breakage and non-persistent buffer junk are different initialization/loading bugs." + "reason": "FSDP CPU RAM efficient loading breakage and non-persistent buffers filled with junk are both v5-related, but the underlying bugs differ." }, { - "left": "issue:39692", - "right": "issue:40444", + "left": "issue:44466", + "right": "issue:45357", "accept": false, - "reason": "SigLIP2 doc example errors are unrelated to Qwen2.5-VL finetuning with multi-image iterable datasets." + "reason": "Both concern serialization/save_pretrained regressions, but one is lm_head.weight tying and the other is incorrect visual-encoder keys." }, { "left": "issue:43792", "right": "issue:44683", "accept": false, - "reason": "Whisper runtime failure is unrelated to compiled flex_attention on torch>=2.9." + "reason": "Whisper runtime failure and compiled flex_attention failure on torch>=2.9 are unrelated." }, { "left": "issue:43454", "right": "issue:43720", "accept": false, - "reason": "lm_head weight tying bug is unrelated to packed-weight unpacking during accelerate loading." + "reason": "AyaVision weight tying and BitNet packed-weight unpacking during accelerate loading are different load paths." }, { "left": "issue:43296", "right": "issue:44991", "accept": false, - "reason": "PaddleOCR-VL load failure in vLLM is unrelated to est-roberta tokenizer loading." + "reason": "PaddleOCR-VL load failure in vLLM and tokenizer loading failure for EMBEDDIA/est-roberta are not the same issue." }, { "left": "issue:43296", "right": "issue:44877", "accept": false, - "reason": "vLLM/PaddleOCR-VL loading failure is unrelated to strict granite_speech config validation." 
+ "reason": "PaddleOCR-VL model loading and strict GraniteSpeech config loading are distinct failures." }, { "left": "issue:41720", "right": "issue:43322", "accept": false, - "reason": "Qwen3 auto device mapping cuda assert and Llava Next segfault are different model-specific loading crashes." + "reason": "Qwen3 auto device mapping cudaErrorAssert and Llava Next segmentation fault are different model-loading failures with no shared code path." }, { "left": "issue:45632", "right": "issue:45684", "accept": false, - "reason": "Cache-path collisions for trust_remote_code and permission propagation in save_pretrained are unrelated." + "reason": "Cache-path collision for trust_remote_code and read-only permissions propagation in save_pretrained are separate filesystem issues." }, { "left": "issue:43526", "right": "issue:43881", "accept": false, - "reason": "BeitImageProcessorFast label reduction bug is unrelated to GLM-4V loading failure." + "reason": "BeitImageProcessorFast reduce_labels bug and glm-4v-9b loading failure are unrelated." }, { "left": "issue:43404", "right": "issue:45357", "accept": false, - "reason": "lm_head tying in Mistral3 is unrelated to incorrect visual encoder keys in Qwen3.5 save_pretrained." - }, - { - "left": "issue:41720", - "right": "issue:44977", - "accept": false, - "reason": "Auto device mapping crash on A800 and flash-attention generation failure are different Qwen3.5 issues." + "reason": "Mistral3 lm_head tying and Qwen3.5 visual-encoder key serialization are different model/config regressions." } ] }, - "evaluator_result": { - "accept": true, - "feedback": "The summary is grounded in the packet, and the only accepted soft pair (43317/43873) is a reasonable conservative duplicate call based on the titles. The remaining false verdicts are generally cautious enough given the available evidence." 
- }, - "evaluator_used": true, + "evaluator_result": null, + "evaluator_used": false, "retried": false, - "accepted_nontrivial_soft_edge": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -3579,6 +3682,7 @@ "issue:44898", "issue:44913", "issue:44918", + "issue:44977", "issue:44991", "issue:45059", "issue:45072", @@ -3589,6 +3693,7 @@ "issue:45704" ], "soft_pairs": [ + "issue:41720|issue:44977", "issue:44423|issue:45406", "issue:40990|issue:45701", "issue:44186|issue:45072", @@ -3597,8 +3702,8 @@ "issue:44387|issue:45704", "issue:42617|issue:42915", "issue:44877|issue:44991", - "issue:44479|issue:44918", "issue:43450|issue:45701", + "issue:44479|issue:44918", "issue:44610|issue:44898", "issue:43550|issue:43720", "issue:36331|issue:42915", @@ -3624,196 +3729,202 @@ "issue:43421|issue:45701", "issue:44492|issue:44509" ], - "prepared_review_unit_hash": "c35f09a929e6485b695a29fdfa30baf06a5f865f65f120ce847b2292999aa646", + "prepared_review_unit_hash": "afcb82aacfa15a5d3d0b91f4f4f792ec8127c090451cff972af8788896d79520", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { - "node_count": 47, - "item_count": 47, - "soft_pair_count": 34, - "serialized_chars": 37212, - "estimated_input_tokens": 9303, - "estimated_eval_tokens": 18862 + "node_count": 48, + "item_count": 48, + "soft_pair_count": 35, + "serialized_chars": 38063, + "estimated_input_tokens": 9516, + "estimated_eval_tokens": 19288 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is largely over-grouped by semantic similarity: it spans unrelated tokenizer, multimodal/serve, config, trainer, offloading, and memory 
regressions. None of the soft pairs look like true duplicates, so I would not merge any of them.", - "confidence": 0.9, - "canonical_issue_reason": "No strong canonical issue emerges because the cluster is heterogeneous. If one issue must anchor it, 43873 is the broadest infrastructure-level report (offloading/quantization), but it does not \u05d1\u05d0\u05de\u05ea unify the rest.", + "summary": "This cluster is heterogeneous: the soft pairs are mostly superficial keyword matches across unrelated model families, subsystems, and failure modes. None look like the same underlying bug/change, so there is no coherent duplicate set here.", + "confidence": 0.97, + "canonical_issue_reason": "No canonical issue: the items do not converge on one underlying defect; they span unrelated bugs in tokenization, model loading, quantization, serving, video processing, and training.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 43873 is the best available anchor because it is open, broader than the others, and sits in a recurring system area (offloading/quantization). Still, it is only a weak representative for this mixed cluster.", + "best_issue_reason": "No global best issue is suitable, because none of these issues is representative of a real duplicate cluster and several are clearly unrelated false positives.", "best_pr_reason": null, "soft_edge_verdicts": [ + { + "left": "issue:41720", + "right": "issue:44977", + "accept": false, + "reason": "Both are Qwen-related, but one is an auto-device-map CUDA assert and the other is a flash-attention generation problem; different bugs." + }, { "left": "issue:44423", "right": "issue:45406", "accept": false, - "reason": "Both involve serve/multimodal crashes, but they hit different code paths and different processor errors; not the same bug." + "reason": "Both involve serving multimodal models, but the failures are different AttributeErrors on different objects/code paths." 
}, { "left": "issue:40990", "right": "issue:45701", "accept": false, - "reason": "One is high perplexity on a specific model/dataset, the other is tokenization changing across versions; related area, different underlying issue." + "reason": "One is a perplexity complaint on a specific model/dataset; the other is a version-dependent tokenization change. Too broad and not the same bug." }, { "left": "issue:44186", "right": "issue:45072", "accept": false, - "reason": "Tokenizer crash vs dtype mismatch in a different model path; no shared concrete failure." + "reason": "Completely different subsystems: tokenizer NER/padding crash vs dtype mismatch in inference." }, { "left": "issue:43873", "right": "issue:44756", "accept": false, - "reason": "Both mention offloading/memory, but one is quantization offload behavior and the other is mmap OOM on Strix Halo; different root causes." + "reason": "Both mention memory/offload, but one is quantization offloading behavior and the other is mmap OOM on Strix Halo; not the same defect." }, { "left": "issue:43577", "right": "issue:43708", "accept": false, - "reason": "BLIP2 dtype loading bug and Trainer checkpoint max_steps regression are unrelated." + "reason": "One is a model dtype loading bug; the other is Trainer checkpoint resume step \u0a97\u0aa3culation. Unrelated." }, { "left": "issue:44387", "right": "issue:45704", "accept": false, - "reason": "Both are memory-related, but one is int4 CUDA reserved memory and the other is an apex RMSNorm leak in T5; not the same bug." + "reason": "Both are memory-related, but one concerns int4 quantization reserved memory and the other a T5/apex memory leak; different causes and fixes." }, { "left": "issue:42617", "right": "issue:42915", "accept": false, - "reason": "3d_parallel.py failure and Qwen3Moe FP8 failure are different model/runtime problems." + "reason": "Different models and failures: a 3d_parallel.py runtime issue vs Qwen3Moe FP8 loading failure." 
}, { "left": "issue:44877", "right": "issue:44991", "accept": false, - "reason": "Strict config loading and tokenizer loading failures are separate API regressions." + "reason": "Both are loading problems, but config strictness and tokenizer loading are distinct bugs." }, { - "left": "issue:44479", - "right": "issue:44918", + "left": "issue:43450", + "right": "issue:45701", "accept": false, - "reason": "Video-input regression in inference vs Qwen3.5 embedding unpacking in TRL training; different paths and symptoms." + "reason": "Video processor batching shape bug vs tokenization/version change; no shared code path." }, { - "left": "issue:43450", - "right": "issue:45701", + "left": "issue:44479", + "right": "issue:44918", "accept": false, - "reason": "Video batch-shape bug and tokenization-version change are unrelated." + "reason": "Both mention Qwen and multimodal/training, but one is a video-input regression and the other is an SFT embedding-unpacking failure." }, { "left": "issue:44610", "right": "issue:44898", "accept": false, - "reason": "Both are image preprocessing mismatches, but the models, expected sizes, and failure modes differ." + "reason": "Both are image-resolution mismatches, but they affect different models and different preprocessing/model expectations; not the same bug." }, { "left": "issue:43550", "right": "issue:43720", "accept": false, - "reason": "torch.compile/SDPA failure and packed-weight loading failure are distinct bugs." + "reason": "One is torch.compile with SDPA on Bamba; the other is accelerate loading/unpacking packed BitNet weights. Different code paths." }, { "left": "issue:36331", "right": "issue:42915", "accept": false, - "reason": "CustomTrainer kwargs regression and Qwen3Moe FP8 loading failure are unrelated." + "reason": "Trainer compute_loss API break vs Qwen3Moe FP8 load failure; unrelated." 
}, { "left": "issue:36331", "right": "issue:42617", "accept": false, - "reason": "Trainer loss-signature change and 3d_parallel.py failure are different issues." + "reason": "CustomTrainer keyword-arg regression vs 3d_parallel.py failure; unrelated." }, { "left": "issue:44186", "right": "issue:44610", "accept": false, - "reason": "Both are input-processing bugs, but they affect different processors and failure modes." + "reason": "Tokenizer crash on NER/padding vs OmDet-Turbo processor image-size mismatch; unrelated." }, { "left": "issue:43316", "right": "issue:44913", "accept": false, - "reason": "Config API discrepancy and rotary_pct reload loss are not the same underlying defect." + "reason": "Both are config/API consistency issues, but for different config classes and different state-reload behavior." }, { "left": "issue:44671", "right": "issue:44743", "accept": false, - "reason": "CamemBERT masked-LM regression and Qwen3.5 recurrent-state reset bug are unrelated model-specific issues." + "reason": "CamemBERT masked-LM prediction issue and Qwen3.5 recurrent-state reset are unrelated model-specific bugs." }, { "left": "issue:42915", "right": "issue:43317", "accept": false, - "reason": "FineGrainedFP8 loading failure and dequantized model offload loading failure are different code paths." + "reason": "Both involve loading/offload themes, but one is Qwen3Moe FP8 failure and the other dequantized model offload loading; not the same bug." }, { "left": "issue:43295", "right": "issue:45701", "accept": false, - "reason": "Processor/tokenizer regression and tokenization-version change are not the same bug." + "reason": "Regression around processor.tokenizer/images vs a general tokenization-version complaint; different issues." }, { "left": "issue:43550", "right": "issue:43606", "accept": false, - "reason": "Compile+SDPA issue and CPU offload device mismatch are unrelated." 
+ "reason": "Different models and failures: SDPA/torch.compile on Bamba vs CPU-offload device mismatch on Bark." }, { "left": "issue:41720", "right": "issue:42617", "accept": false, - "reason": "Auto device-map cudaErrorAssert and 3d_parallel.py failure are different runtime problems." + "reason": "Qwen auto device mapping CUDA assert and 3d_parallel.py failure do not share a concrete bug." }, { "left": "issue:43531", "right": "issue:43572", "accept": false, - "reason": "Sliding-window behavior in Qwen3-MoE and missing pad_token_idx in StableLmConfig are unrelated." + "reason": "Sliding-window behavior in Qwen3-MoE vs missing pad_token_idx in StableLmConfig; unrelated." }, { "left": "issue:41720", "right": "issue:45161", "accept": false, - "reason": "Both touch distributed inference on MoE models, but the concrete failures and affected paths differ." + "reason": "Both touch MoE/model-parallel topics, but one is a device-map CUDA assert and the other tensor-parallel support for GPT-OSS; different code paths." }, { "left": "issue:44292", "right": "issue:44479", "accept": false, - "reason": "NVFP4 model loading/running error and video-input regression are different issues." + "reason": "Different Qwen variants and different failures: NVFP4 runtime error vs video-input regression." }, { "left": "issue:43388", "right": "issue:45059", "accept": false, - "reason": "Metric-gather label truncation and SAM3 PCS text/box behavior are unrelated." + "reason": "Metric-gathering label truncation vs SAM3 text/bounding-box odd behavior; unrelated." }, { "left": "issue:44186", "right": "issue:44361", "accept": false, - "reason": "Two tokenizer failures, but on different tokenizers and different failure mechanisms." + "reason": "Different tokenizer/model bugs with no shared code path." }, { "left": "issue:43540", "right": "issue:43782", "accept": false, - "reason": "Video processing failure and from_pretrained weight_only error are not the same bug." 
+ "reason": "Video-processing ValueError in Qwen3OmniMoe vs Qwen3VL weight_only loading error; similar family, different bug." }, { "left": "issue:42915", @@ -3825,37 +3936,37 @@ "left": "issue:41720", "right": "issue:43873", "accept": false, - "reason": "Auto device mapping crash and quantization offloading issue are both infra-related but not the same defect." + "reason": "Auto device mapping CUDA assert vs quantization offloading behavior; too broad to be the same defect." }, { "left": "issue:36010", "right": "issue:42617", "accept": false, - "reason": "ImportError for GenerationMixin and 3d_parallel.py runtime failure have no common code path." + "reason": "ImportError for GenerationMixin vs 3d_parallel.py runtime issue; unrelated." }, { "left": "issue:45406", "right": "issue:45464", "accept": false, - "reason": "Serve crashes in different multimodal flows: missing _tokenizer vs streaming inference failure." + "reason": "Both are serving/runtime issues, but one is Gemma4Processor missing _tokenizer and the other is Qwen3.5 streaming chat/completions failure; different bugs." }, { "left": "issue:36331", "right": "issue:41720", "accept": false, - "reason": "Trainer compute_loss signature regression and device-map cudaErrorAssert are unrelated." + "reason": "Trainer compute_loss signature regression vs Qwen auto device-map CUDA assert; unrelated." }, { "left": "issue:43421", "right": "issue:45701", "accept": false, - "reason": "Runtime special-token post-processing update bug and tokenization-version change are different issues." + "reason": "Runtime special-token post-processor update issue vs general tokenization-version change; not the same bug." }, { "left": "issue:44492", "right": "issue:44509", "accept": false, - "reason": "A cache-strategy typo and stale docs about removed pipeline tasks are separate documentation/config issues." 
+ "reason": "A typo in cache strategy docs/code naming is not the same underlying issue as stale docs mentioning removed pipeline tasks." } ] }, @@ -3940,8 +4051,8 @@ "issue:43421|issue:43927", "issue:43606|issue:43653", "issue:34567|issue:35707", - "issue:42886|issue:44843", "issue:44062|issue:44451", + "issue:42886|issue:44843", "issue:34634|issue:41720", "issue:44451|issue:44991", "issue:43782|issue:45381", @@ -3958,217 +4069,187 @@ "issue:45081|issue:45356", "issue:43525|issue:43531" ], - "prepared_review_unit_hash": "1e084c2a9d3d5df268b261583074a68fdc8e5977a8f7b9ad5604c697d2a083d3", + "prepared_review_unit_hash": "c8a901a66f9db0d6b3f7cc0f6c24f66aac4c3dce2e50696a50a672280724d4fc", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 29, - "serialized_chars": 36086, - "estimated_input_tokens": 9022, - "estimated_eval_tokens": 18300 + "serialized_chars": 36083, + "estimated_input_tokens": 9021, + "estimated_eval_tokens": 18298 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly heterogeneous cluster with a few true duplicate/near-duplicate issue pairs. The only strong acceptances are the Qwen2.5-VL batched chat-template crash pair and the HF_HUB_OFFLINE/Mistral-regex tokenizer-loading regression pair; the rest are related-at-a-distance but not the same concrete bug.", - "confidence": 0.91, - "canonical_issue_reason": "No single canonical issue cleanly represents the whole cluster because the items span unrelated bugs. 
If forced to anchor the genuine duplicate pair, issue 44514 is the clearest representative: it is a near-verbatim match to 44545 with the same Qwen2_5_VLProcessor.apply_chat_template batched-padding crash.", + "summary": "Most pairs are clearly unrelated. I accepted two soft edges where the issues describe the same underlying failure, and rejected the rest as different bugs/features. No PRs were present in the input.", + "confidence": 0.76, + "canonical_issue_reason": "issue:44545 is the cleaner duplicate target for the Qwen2_5_VLProcessor batched padding=False crash: it has the exact symptom/title and more inbound references than 44514.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 44514 is the best issue anchor because it is the clearest exact-match duplicate in the set and describes a concrete, narrowly scoped crash path that is duplicated by 44545.", + "best_issue_reason": "issue:44843 is the strongest standalone issue in the set: it names the concrete offline failure path (`_patch_mistral_regex` / `model_info()` in `HF_HUB_OFFLINE` mode) and is directly actionable.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:33357", "right": "issue:35707", "accept": false, - "reason": "Different subsystems and failure modes: MacOS CLIP bus error vs progressive generation with inputs_embeds/past_key_values." + "reason": "Unrelated bugs: MacOS bus error with a CLIP model vs progressive generation with `inputs_embeds`/`past_key_values`." }, { "left": "issue:44514", "right": "issue:44545", "accept": true, - "reason": "Near-verbatim duplicate: both report the same Qwen2_5_VLProcessor.apply_chat_template batched-input crash when padding=False." + "reason": "Same bug: both report `Qwen2_5_VLProcessor.apply_chat_template` crashing on batched input when `padding=False`." }, { "left": "issue:30064", "right": "issue:34634", "accept": false, - "reason": "Unrelated bugs: void segmentation-map processing vs BarkProcessor voice_preset handling." 
+ "reason": "Different subsystems and failures: segmentation-map processing vs Bark `voice_preset` handling." }, { "left": "issue:41720", "right": "issue:43746", "accept": false, - "reason": "Different models and code paths: Qwen3 auto device mapping CUDA assert vs GraniteSpeech PEFT local checkpoint loading." + "reason": "Different code paths: auto device mapping CUDA assert vs PEFT/local-checkpoint loading for GraniteSpeech." }, { "left": "issue:43606", "right": "issue:43720", "accept": false, - "reason": "Both involve loading/offload, but the concrete failures differ: Bark CPU offload device mismatch vs BitNet packed-weights handling during accelerate loading." + "reason": "Both involve loading/offload, but the concrete failures differ: device mismatch vs packed-weight unpacking." }, { "left": "issue:43873", "right": "issue:45704", "accept": false, - "reason": "Not the same bug: quantization/offloading behavior vs a T5 apex.FusedRMSNorm memory leak." + "reason": "Quantization/offloading behavior is unrelated to the T5 Apex RMSNorm memory leak." }, { "left": "issue:43550", "right": "issue:43653", "accept": false, - "reason": "Different root causes: torch.compile/SDPA on Bamba vs tokenizer special-token registration for BigBird." + "reason": "Different problems: `torch.compile`/SDPA on Bamba vs tokenizer special-token decode output." }, { "left": "issue:44625", "right": "issue:45479", "accept": false, - "reason": "Related around num_labels, but one is config propagation and the other is degenerate single-label loss; not the same concrete bug." + "reason": "Both mention `num_labels`, but one is config propagation and the other is zero-loss classification behavior." }, { "left": "issue:44368", "right": "issue:45357", "accept": false, - "reason": "Same model family, but different issues: LoRA warning about tie_word_embeddings vs incorrect visual-encoder keys saved in Qwen3.5." 
+ "reason": "Unrelated: a tied-embeddings warning during LoRA fine-tuning vs incorrect visual-encoder keys on save/load." }, { "left": "issue:43421", "right": "issue:43927", "accept": false, - "reason": "Both are tokenizer/config related, but runtime post-processor refresh is not the same as save/load loss of custom token IDs causing IndexError." - }, - { - "left": "issue:42886", - "right": "issue:44843", - "accept": true, - "reason": "Same underlying offline tokenizer regression: unconditional _patch_mistral_regex/model_info() behavior breaks HF_HUB_OFFLINE cache loading." - }, - { - "left": "issue:43606", - "right": "issue:43653", - "accept": false, - "reason": "Different bugs in different models: Bark offload device mismatch vs BigBird mask-token special registration." - }, - { - "left": "issue:34567", - "right": "issue:35707", - "accept": false, - "reason": "TrainerState token-count update bug is unrelated to progressive generation with cached past_key_values." + "reason": "Different tokenization bugs: runtime post-processor updates vs saving/loading custom token IDs." }, { "left": "issue:44062", "right": "issue:44451", "accept": false, - "reason": "Both are loading-related, but the concrete failure signatures differ and do not show the same root cause." + "reason": "An `AddedToken` argument error is not the same as failing to load a specific model checkpoint." }, { - "left": "issue:34634", - "right": "issue:41720", - "accept": false, - "reason": "BarkProcessor voice_preset failure and Qwen3 auto device mapping CUDA assert are unrelated." + "left": "issue:42886", + "right": "issue:44843", + "accept": true, + "reason": "Same underlying offline tokenizer failure: both center on `HF_HUB_OFFLINE=1` breaking cache-based loading, and 44843 identifies the concrete `model_info()` call in the patch path." 
}, { - "left": "issue:44451", - "right": "issue:44991", + "left": "issue:43531", + "right": "issue:44589", "accept": false, - "reason": "Both concern model/tokenizer loading, but the affected models and failure mechanisms are different and not shown to share a code path." + "reason": "Unrelated failures: Qwen3-MoE `sliding_window` vs missing `Float8_e4m3fnStorage`." }, { - "left": "issue:43782", - "right": "issue:45381", + "left": "issue:43421", + "right": "issue:44186", "accept": false, - "reason": "Different Qwen multimodal bugs: weight_only loading error vs incorrect video vision_position_ids." + "reason": "Both are tokenizer-related, but one is about special-token post-processing and the other is a crash on NER/batched padding." }, { - "left": "issue:43421", - "right": "issue:43653", + "left": "issue:43994", + "right": "issue:44671", "accept": false, - "reason": "Same broad tokenizer area, but not the same concrete bug: runtime special-token post-processing vs empty decode from mask-token registration." + "reason": "Different models and symptoms: SigLIP2 pipeline output issues vs CamemBERT masked-LM predictions." }, { "left": "issue:44898", "right": "issue:45072", "accept": false, - "reason": "Unrelated image-resolution issue vs dtype mismatch in inference." + "reason": "Different failure modes: resolution/interpolation mismatch vs dtype mismatch in bfloat16 inference." }, { "left": "issue:44077", "right": "issue:44625", "accept": false, - "reason": "No overlap in concrete code path: patchtsmixer post_init policy vs Qwen3.5 num_labels propagation." + "reason": "No shared bug: `post_init` policy for PatchTSMixer vs Qwen3.5 `num_labels` propagation." }, { "left": "issue:44987", "right": "issue:45042", "accept": false, - "reason": "Different problems: loading a specific model vs PIL image processors incorrectly requiring torchvision." + "reason": "Different issues: loading `physical-intelligence/fast` vs PIL image processors wrongly requiring torchvision." 
}, { "left": "issue:44918", "right": "issue:44936", "accept": false, - "reason": "Different trainer failures: unpacking Qwen3.5 input embeddings in TRL SFT vs evaluate-after-train behavior." + "reason": "Both involve trainers, but one is about unpacking Qwen3.5 input embeddings with TRL and the other about `evaluate()` after `train()`." }, { "left": "issue:43531", "right": "issue:44589", "accept": false, - "reason": "Sliding-window model issue is unrelated to missing Float8 storage type errors." - }, - { - "left": "issue:43421", - "right": "issue:44186", - "accept": false, - "reason": "Different tokenizer bugs: runtime special-token post-processing vs LayoutLMv2 NER padding/truncation crashes." - }, - { - "left": "issue:43994", - "right": "issue:44671", - "accept": false, - "reason": "Different models and behaviors: SigLIP2 nonsensical pipeline outputs vs CamemBERT masked LM regression." + "reason": "Different bugs with no concrete overlap: model sliding-window config vs a float8 storage lookup error." }, { "left": "issue:43381", "right": "issue:43844", "accept": false, - "reason": "Eval-mode gradient checkpointing limitation is unrelated to ZeRO-3 gradient growth." + "reason": "Unrelated training/runtime behaviors: gradient checkpointing in eval mode vs gradient explosion with ZeRO-3." }, { "left": "issue:39692", "right": "issue:44610", "accept": false, - "reason": "Documentation example/model-processor mismatch is unrelated to OmDet-Turbo processor resolution mismatch." + "reason": "A documentation/example mismatch is not the same as a processor/model input-size mismatch." }, { "left": "issue:45081", "right": "issue:45356", "accept": false, - "reason": "Related Mistral-regex family, but the evidence suggests distinct bugs: one is an AttributeError during tokenizer loading, the other is a Kimi-K2.5 codec-handling regression with a misleading warning." 
+ "reason": "Related area (`fix_mistral_regex`), but the concrete failures differ: a crash on missing `backend_tokenizer` vs codec/regression/warning behavior in Kimi-K2.5." }, { "left": "issue:43525", "right": "issue:43531", "accept": false, - "reason": "Unrelated config attribute error vs sliding_window bug in Qwen3-MoE." + "reason": "Different root causes: missing `pad_token_id` on `Llama4Config` vs Qwen3-MoE `sliding_window` behavior." } ] }, "evaluator_result": { - "accept": false, - "feedback": "The cluster-level summary is mostly grounded, and the 44514\u219444545 and 42886\u219444843 edges look supportable from the titles. However, the accept on 45081\u219445356 is a bit too strong for the evidence provided: both touch _patch_mistral_regex/fix_mistral_regex, but one is an AttributeError crash in Mistral tokenizer loading while the other is a Kimi-K2.5 regression about codec handling and a misleading warning. That reads as related family, not clearly the same bug/near-duplicate. Because the soft-edge verdicts should stay conservative, I\u2019d reject this output." + "accept": true, + "feedback": "The analysis is grounded in the packet and the soft-edge decisions are conservative. The two accepted pairs are effectively duplicate symptom reports, while the rejected pairs are mostly clearly different bugs/features. The issue-only summary is also accurate, since no PRs appear in the input." 
}, "evaluator_used": true, - "retried": true, + "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null @@ -4242,9 +4323,9 @@ "issue:43381|issue:45704", "issue:45183|issue:45479", "issue:39692|issue:43873", + "issue:43746|issue:43873", "issue:44220|issue:44661", "issue:40444|issue:43746", - "issue:43746|issue:43873", "issue:43540|issue:44008", "issue:43716|issue:44008", "issue:43335|issue:43756", @@ -4265,208 +4346,208 @@ "issue:44448|issue:44671", "issue:43644|issue:43749" ], - "prepared_review_unit_hash": "40e4647c1813ba9acdc604dd1240f56463c6a7045c037da2257b84896c3b3f64", + "prepared_review_unit_hash": "90251e6fc359fbfe3b6237cdcd4c9dc41209d1dac58a78f3107b5a4b9788210d", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 29, - "serialized_chars": 36147, - "estimated_input_tokens": 9037, - "estimated_eval_tokens": 18330 + "serialized_chars": 36155, + "estimated_input_tokens": 9039, + "estimated_eval_tokens": 18334 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "These are mostly unrelated issue reports spanning tokenizer/config loading, multimodal preprocessing, dtype/device-map bugs, model-specific regressions, and a few generic runtime failures. 
The soft-similarity pairs share keywords or subsystem overlap, but they do not describe the same concrete bug or change, so none should be merged as duplicates.", - "confidence": 0.94, - "canonical_issue_reason": "issue:43749 has the broadest impact and strongest signal in this set (multiple discussion threads and inbound references) around a concrete broken loading path, making it the best representative issue if one must be chosen.", + "summary": "This cluster is a heterogeneous grab-bag of unrelated tokenizer, vision, quantization, loading, and regression reports. None of the soft pairs look like the same underlying bug/change, so I would not merge any of them as duplicates.", + "confidence": 0.97, + "canonical_issue_reason": "Issue 44843 is the best standalone representative because it is a concrete core-path AutoTokenizer regression with broad user impact and a clear reproduction; the rest are mostly model-specific or orthogonal bugs, so there is no strong cluster-wide duplicate canonical.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43749 is the most central/visible issue in the cluster and clearly describes a specific user-facing regression in loading behavior.", + "best_issue_reason": "Issue 44843 is the most generally impactful and self-contained bug in the set, and it is better suited as a representative issue than the many narrow model-specific reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43596", "right": "issue:44220", "accept": false, - "reason": "Different failures: BertModel + ZeRO-3 initialization vs audio feature extraction helper error." + "reason": "Different failure areas: DeepSpeed/BertModel zero-sized indexing vs audio fbank feature extraction." }, { "left": "issue:43618", "right": "issue:43697", "accept": false, - "reason": "CLIP attentions regression is unrelated to RTDetrV2 output drift." 
+ "reason": "Both are Transformers v5 regressions, but they affect different models and different code paths." }, { "left": "issue:41720", "right": "issue:45704", "accept": false, - "reason": "Qwen3 auto device mapping CUDA assert is unrelated to T5/apex RMSNorm memory leak." + "reason": "Unrelated problems: Qwen3 auto device mapping CUDA assert vs T5 RMSNorm memory leak." }, { "left": "issue:43746", "right": "issue:45381", "accept": false, - "reason": "GraniteSpeech PEFT local checkpoint loading differs from Qwen2.5-VL video position-id bug." + "reason": "Local PEFT checkpoint loading for GraniteSpeech is unrelated to Qwen2.5-VL video position-id errors." }, { "left": "issue:43381", "right": "issue:45704", "accept": false, - "reason": "Gradient checkpointing eval-mode restriction is not the same as T5 fused RMSNorm leak." + "reason": "Gradient checkpointing in eval mode and an apex RMSNorm leak are separate bugs." }, { "left": "issue:45183", "right": "issue:45479", "accept": false, - "reason": "Generic input validation error messaging is unrelated to single-label classification zero-loss behavior." + "reason": "Input validation/XOR messaging bug is not the same as the zero-loss classification bug." }, { "left": "issue:39692", "right": "issue:43873", "accept": false, - "reason": "SigLIP2 docs/model mismatch and quantization issue is not the same as quantization offloading behavior." + "reason": "Docs example mismatches/quantization errors are unrelated to offloading behavior with quantization." + }, + { + "left": "issue:43746", + "right": "issue:43873", + "accept": false, + "reason": "GraniteSpeech local checkpoint loading and quantized offloading failures are different issues." }, { "left": "issue:44220", "right": "issue:44661", "accept": false, - "reason": "Audio feature extraction bug is unrelated to add-new-model-like failure in tokenizer mapping names." 
+ "reason": "Audio feature extraction bug is unrelated to add-new-model-like failing inside TOKENIZER_MAPPING_NAMES." }, { "left": "issue:40444", "right": "issue:43746", "accept": false, - "reason": "Qwen2.5-VL iterable dataset/multi-image finetuning failure is unrelated to GraniteSpeech local checkpoint loading." - }, - { - "left": "issue:43746", - "right": "issue:43873", - "accept": false, - "reason": "PEFT checkpoint loading and quantization offloading are different code paths and different symptoms." + "reason": "Qwen2.5-VL multi-image iterable dataset failure is unrelated to GraniteSpeech checkpoint loading." }, { "left": "issue:43540", "right": "issue:44008", "accept": false, - "reason": "Qwen3OmniMoe video processing ValueError is unrelated to Gemma3n variable-name collision in forward()." + "reason": "Video-input processing for Qwen3OmniMoe and Gemma3n variable-name collision are different code-path failures." }, { "left": "issue:43716", "right": "issue:44008", "accept": false, - "reason": "Mistral-3 image preprocessor dtype mismatch is not the same as Gemma3n audio mask attribute error." + "reason": "Image-preprocessor dtype mismatch and audio_mel_mask attribute collision are unrelated model bugs." }, { "left": "issue:43335", "right": "issue:43756", "accept": false, - "reason": "Sparse-layer creation bug in SwitchTransformers is distinct from Smollm3 RoPE-layer mismatch." + "reason": "SwitchTransformers sparse-layer creation and Smollm3 RoPE-layer dropping are different configuration/model issues." }, { "left": "issue:43720", "right": "issue:43819", "accept": false, - "reason": "BitNet packed-weight loading bug is unrelated to DAC.from_latents / missing STE mismatch." + "reason": "Packed-weight unpacking during accelerate loading is unrelated to DAC.from_latents/STE mismatch." 
}, { "left": "issue:43653", "right": "issue:43720", "accept": false, - "reason": "BigBirdTokenizer special-token decode bug is unrelated to BitNet packed-weight unpacking during accelerate loading." + "reason": "BigBirdTokenizer special-token registration and BitNet packed-weight loading are different bugs." }, { "left": "issue:30064", "right": "issue:36010", "accept": false, - "reason": "Void segmentation map processing and GenerationMixin import failure are completely different issues." + "reason": "Void segmentation-map processing and a GenerationMixin import error are unrelated." }, { "left": "issue:44062", "right": "issue:44488", "accept": false, - "reason": "AddedToken special-argument conflict is unrelated to loading cjvt/sleng-bert." + "reason": "AddedToken special-argument conflict is not the same as failing to load cjvt/sleng-bert." }, { "left": "issue:44843", "right": "issue:45081", "accept": false, - "reason": "Offline model_info patch issue and tokenizer backend_tokenizer crash are different Mistral-regex failures." + "reason": "Both involve _patch_mistral_regex, but one is an offline model_info call and the other is a backend_tokenizer attribute crash; different root causes." }, { "left": "issue:43421", "right": "issue:44811", "accept": false, - "reason": "Runtime post-processor update for special tokens is not the same as Whisper batch_decode skip_special_tokens handling." + "reason": "Runtime special-token/post-processor syncing is unrelated to Whisper batch_decode ignoring skip_special_tokens." }, { "left": "issue:43322", "right": "issue:45685", "accept": false, - "reason": "Llava Next segmentation fault and MPS histogram Int error are unrelated runtime failures." + "reason": "Llava Next segfault and an MPS histogram op limitation are unrelated." }, { "left": "issue:43577", "right": "issue:43818", "accept": false, - "reason": "BLIP2 dtype propagation bug is unrelated to Video-LLaVA temporal-attention/weight-sharing defect." 
+ "reason": "Blip2 dtype propagation and Video-LLaVA temporal-attention/weight-sharing problems are different issues." }, { "left": "issue:44186", "right": "issue:44898", "accept": false, - "reason": "LayoutLMv2Tokenizer NER/padding crash is not Perceiver non-default resolution failure." + "reason": "LayoutLMv2 tokenizer padding/NER crash is unrelated to Perceiver interpolation failure." }, { "left": "issue:43550", "right": "issue:45072", "accept": false, - "reason": "Bamba SDPA torch.compile failure is a different model/path than SwitchTransformers and TimmWrapperModel bfloat16 mismatches." + "reason": "torch.compile with SDPA on Bamba-9B-v2 is unrelated to bf16 dtype mismatches in SwitchTransformers/TimmWrapperModel." }, { "left": "issue:43531", "right": "issue:43643", "accept": false, - "reason": "Qwen3-MoE sliding_window bug is unrelated to AutoConfig trust_remote_code missing fields." + "reason": "Qwen3-MoE sliding_window behavior and missing config fields from trust_remote_code are separate bugs." }, { "left": "issue:43643", "right": "issue:44877", "accept": false, - "reason": "Missing fields from AutoConfig with trust_remote_code is different from strict config blocking granite_speech loading." + "reason": "Missing fields from trust_remote_code and strict granite_speech config rejection are different config-loading failures." }, { "left": "issue:43550", "right": "issue:43927", "accept": false, - "reason": "Bamba SDPA compile bug is unrelated to DiaConfig losing custom token IDs on save/load." + "reason": "Bamba SDPA compile failure and DiaConfig token-ID persistence loss are unrelated." }, { "left": "issue:43540", "right": "issue:45381", "accept": false, - "reason": "Video-input processing failure in Qwen3OmniMoe is distinct from Qwen2.5-VL video vision_position_ids mismatch." + "reason": "These are different Qwen multimodal bugs with different models and failure modes." 
}, { "left": "issue:44448", "right": "issue:44671", "accept": false, - "reason": "Pegasus v4/v5 output drift is unrelated to CamemBERT masked-LM prediction regression." + "reason": "Different output regressions in Pegasus vs CamemBERT are not the same bug." }, { "left": "issue:43644", "right": "issue:43749", "accept": false, - "reason": "Non-persistent buffer junk in Transformers v5 is not the same as FSDP CPU RAM efficient loading breakage." + "reason": "Non-persistent buffer initialization junk and FSDP CPU RAM efficient loading are unrelated loading problems." } ] }, @@ -4548,16 +4629,16 @@ "issue:43825|issue:43827", "issue:44821|issue:45084", "issue:41720|issue:44991", - "issue:44485|issue:44509", "issue:44186|issue:44442", + "issue:44485|issue:44509", "issue:44671|issue:45701", "issue:30064|issue:36331", "issue:44805|issue:44936", "issue:43606|issue:45072", "issue:42175|issue:45081", "issue:43650|issue:43906", - "issue:45081|issue:45406", "issue:43782|issue:45325", + "issue:45081|issue:45406", "issue:44060|issue:44368", "issue:34634|issue:36010", "issue:44610|issue:45072", @@ -4568,212 +4649,209 @@ "issue:43526|issue:44554", "issue:43262|issue:43819" ], - "prepared_review_unit_hash": "399d3c76bd6649d7fcbde980289f5e4987a051d2a38adbdb09275ec2c53d1368", + "prepared_review_unit_hash": "52a60483563db2d382ffe5e1c330a824a7f9888d1a2916a36013965a252936ea", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 28, - "serialized_chars": 35710, - "estimated_input_tokens": 8928, - "estimated_eval_tokens": 18112 + "serialized_chars": 35719, + "estimated_input_tokens": 8930, + "estimated_eval_tokens": 18116 }, "overflow_policy": "truncate_then_skip", "trimmed": false, 
"aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Mostly unrelated issues; the only clear duplicate-style match is the v5 pipeline-removal docs/error-message cleanup pair.", - "confidence": 0.88, - "canonical_issue_reason": "issue:43825 is the best representative of the one real cluster theme: it is the concrete runtime-facing v5 pipeline-removal mismatch, and issue:43827 looks like the docs-only sibling of the same cleanup.", + "summary": "Mostly a heterogeneous set of Transformers regressions across unrelated models, tokenizers, processors, and trainer paths. The titles are thematically similar in places (especially v5/remote_code regressions), but none of the soft pairs looks like the same concrete bug/change.", + "confidence": 0.74, + "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "issue:43825 is the most actionable and specific item in the set, with the clearest duplicate relationship and strongest chance to anchor a merged fix.", + "best_issue_reason": "issue:45020 is the broadest umbrella for the repeated recent-version/remote_code regression reports, and it is open with the widest apparent user impact among the set.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43782", "right": "issue:44977", "accept": false, - "reason": "Different models and failures: a Qwen3VL weight_only load error versus a Qwen3.5 flash-attention generation bug." + "reason": "Different model families and failure modes: Qwen3VL weight-only loading vs Qwen3.5 flash-attention generation." }, { "left": "issue:44561", "right": "issue:45020", "accept": false, - "reason": "Both mention remote_code breakage, but one is a specific v5 API removal and the other is a broad umbrella report; not the same concrete bug." + "reason": "Both concern recent-version/remote_code breakage, but 44561 is a specific removed-helper failure; 45020 is a broader umbrella report, not the same bug." 
}, { "left": "issue:43653", "right": "issue:43819", "accept": false, - "reason": "Unrelated tokenizer special-token handling versus DAC latent reconstruction mismatch." + "reason": "Tokenizer special-token registration vs DAC latent/STE mismatch are unrelated code paths." }, { "left": "issue:43825", "right": "issue:43994", "accept": false, - "reason": "One is a v5 pipeline-removal error-message/docs mismatch; the other is a model output quality regression." + "reason": "One is an error-message/docs issue around removed translation tasks; the other is a model output correctness regression." }, { "left": "issue:43577", "right": "issue:43582", "accept": false, - "reason": "Different subsystems and symptoms: dtype propagation in BLIP2 loading versus an Apple Silicon allocator TypeError." + "reason": "BLIP2 dtype loading bug and Apple Silicon allocator TypeError are separate subsystems and symptoms." }, { "left": "issue:43550", "right": "issue:43819", "accept": false, - "reason": "Bamba torch.compile/SDPA failure is unrelated to the DAC forward/from_latents mismatch." + "reason": "torch.compile/SDPA failure in Bamba is unrelated to DAC.from_latents parity with the forward pass." }, { "left": "issue:43825", "right": "issue:43827", - "accept": true, - "reason": "Same underlying v5 pipeline-removal cleanup: runtime/error-message wording and docs still referencing removed pipeline tasks." + "accept": false, + "reason": "Runtime error-message wording and docs drift after v5 removals are related thematically but not the same underlying fix." }, { "left": "issue:44821", "right": "issue:45084", "accept": false, - "reason": "AutoImageProcessor URL loading and template compilation errors are unrelated code paths." + "reason": "AutoImageProcessor URL loading and template-node compilation are unrelated." 
}, { "left": "issue:41720", "right": "issue:44991", "accept": false, - "reason": "Different regressions: Qwen3 auto device mapping CUDA assert versus tokenizer loading for EMBEDDIA/est-roberta." + "reason": "Qwen3 device-mapping CUDA assert and tokenizer loading for EMBEDDIA/est-roberta are different failures." }, { - "left": "issue:44485", - "right": "issue:44509", + "left": "issue:44186", + "right": "issue:44442", "accept": false, - "reason": "GLM-5 RoPE implementation discussion is unrelated to docs still mentioning removed pipeline tasks." + "reason": "Both are tokenizer-related, but LayoutLMv2 NER/padding crashes and FastSpeech2Conformer loading are distinct code paths." }, { - "left": "issue:44186", - "right": "issue:44442", + "left": "issue:44485", + "right": "issue:44509", "accept": false, - "reason": "LayoutLMv2 NER/padding crash and FastSpeech2Conformer tokenizer loading failure are different tokenizer issues." + "reason": "GLM-5 RoPE implementation discussion is unrelated to docs still mentioning removed pipeline tasks." }, { "left": "issue:44671", "right": "issue:45701", "accept": false, - "reason": "CamemBERT masked-LM regression is not the same as a generic version-dependent tokenization change report." + "reason": "CamemBERT masked-LM regression and general tokenization changes are not the same bug." }, { "left": "issue:30064", "right": "issue:36331", "accept": false, - "reason": "Image processor void segmentation maps and CustomTrainer.compute_loss kwarg handling are unrelated." + "reason": "Image processor segmentation-map handling and CustomTrainer.compute_loss signature mismatch are unrelated." }, { "left": "issue:44805", "right": "issue:44936", "accept": false, - "reason": "Mask-shape IndexError and trainer train/evaluate lifecycle failure are different bugs." + "reason": "Mask-shape IndexError in one model path and trainer.evaluate() failure after training are different issues." 
}, { "left": "issue:43606", "right": "issue:45072", "accept": false, - "reason": "CPU offload device mismatch is unrelated to bfloat16 dtype mismatches." + "reason": "CPU offload device mismatch in Bark-small is unrelated to bfloat16 dtype mismatches in SwitchTransformers/TimmWrapperModel." }, { "left": "issue:42175", "right": "issue:45081", "accept": false, - "reason": "Packaging/back-end installation and Mistral regex patch crashes are unrelated." + "reason": "Package install backend selection and Mistral regex patching are different concerns." }, { "left": "issue:43650", "right": "issue:43906", "accept": false, - "reason": "A malformed/empty issue title is not the same as an isolated reproduction of another issue." + "reason": "A low-information title does not establish the same underlying reproducer as an isolated reproduction of issue 38071." }, { - "left": "issue:45081", - "right": "issue:45406", + "left": "issue:43782", + "right": "issue:45325", "accept": false, - "reason": "Both are tokenizer/processor loading issues, but they involve different missing attributes and different models." + "reason": "Different Qwen variants and different bugs: weight_only loading error vs RoPE position-id scaling." }, { - "left": "issue:43782", - "right": "issue:45325", + "left": "issue:45081", + "right": "issue:45406", "accept": false, - "reason": "Qwen3VL weight_only loading and Qwen2.5-VL rope-index scaling are different failure modes." + "reason": "Mistral tokenizer patching crash and Gemma4Processor missing _tokenizer are separate loading failures." }, { "left": "issue:44060", "right": "issue:44368", "accept": false, - "reason": "Both are about Qwen weight-tying warnings, but they describe different warning sources and different model behaviors." + "reason": "Both mention tied-weights warnings, but one is a false tie warning in Qwen3-Next and the other is a repeated LoRA config warning in Qwen3.5." 
}, { "left": "issue:34634", "right": "issue:36010", "accept": false, - "reason": "BarkProcessor voice_preset and GenerationMixin import errors are unrelated." + "reason": "BarkProcessor voice_preset and GenerationMixin import failure are unrelated." }, { "left": "issue:44610", "right": "issue:45072", "accept": false, - "reason": "Processor image-size mismatch and dtype mismatch in inference are different bugs." + "reason": "Processor output size mismatch for OmDet-Turbo and dtype mismatches in inference wrappers are different bugs." }, { "left": "issue:43749", "right": "issue:44291", "accept": false, - "reason": "FSDP CPU RAM efficient loading and init_empty_weights argument handling are separate loading-path issues." + "reason": "FSDP CPU RAM-efficient loading and init_empty_weights argument handling are separate loading mechanisms." }, { "left": "issue:43720", "right": "issue:45072", "accept": false, - "reason": "BitNet packed-weight unpacking under accelerate loading is unrelated to bfloat16 dtype mismatches." + "reason": "Packed-weight unpacking in accelerate loading is unrelated to bfloat16 dtype mismatches." }, { "left": "issue:43526", "right": "issue:43697", "accept": false, - "reason": "BeitImageProcessorFast label reduction and RTDetrV2 output differences are different image-processing/model regressions." + "reason": "BeitImageProcessorFast label reduction and RTDetrV2 output differences are different model/processor regressions." }, { "left": "issue:43582", "right": "issue:44857", "accept": false, - "reason": "AppleSilicon caching allocator TypeError and CUDA AMP crash are unrelated platform-specific failures." + "reason": "AppleSilicon caching allocator TypeError and LwDetrImageLoss AMP/CUDA crash are unrelated." }, { "left": "issue:43526", "right": "issue:44554", "accept": false, - "reason": "Label-array reduction bug and MPS attention correctness issue are unrelated." 
+ "reason": "Label reduction in an image processor and an MPS attention correctness issue are not the same bug." }, { "left": "issue:43262", "right": "issue:43819", "accept": false, - "reason": "Audio chat-template sampling-rate defaulting and DAC latent reconstruction mismatch are unrelated." + "reason": "Audio chat-template sampling-rate defaults and DAC latent reconstruction mismatch are unrelated." } ] }, - "evaluator_result": { - "accept": true, - "feedback": "Grounded and conservative overall. The only accepted soft edge, issue:43825 \u2194 issue:43827, is well-supported by the packet as the same v5 pipeline-removal cleanup theme (runtime wording plus docs references). The remaining rejections stay cautious and do not overclaim duplication beyond the evidence shown." - }, - "evaluator_used": true, + "evaluator_result": null, + "evaluator_used": false, "retried": false, - "accepted_nontrivial_soft_edge": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -4872,236 +4950,227 @@ "issue:43697|issue:45357", "issue:43329|issue:43450" ], - "prepared_review_unit_hash": "76ed711b7e3ba35c25b66980d46795fb5625e2281d66bb64986cda7b27a72fd9", + "prepared_review_unit_hash": "82ccb925142449a579afe4b3ae8514fd2f7c7e44328b847830011cc4b3ecd921", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 31, - "serialized_chars": 37130, - "estimated_input_tokens": 9283, - "estimated_eval_tokens": 18822 + "serialized_chars": 37133, + "estimated_input_tokens": 9284, + "estimated_eval_tokens": 18824 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Most 
candidate pairs are clearly different bugs despite some shared subsystems. The only plausible duplicate is the Qwen/VL video-input regression pair, where one issue gives the concrete `_get_num_multimodal_tokens` video-branch failure and the other reports the broader regression across Qwen VL variants.", - "confidence": 0.92, - "canonical_issue_reason": "issue:43329 is the most concrete root-cause report: it names the broken video branch in `_get_num_multimodal_tokens` and the undefined helpers, which makes it the best anchor for the shared Qwen video regression.", + "summary": "The soft pairs mostly span unrelated Transformer bugs across tokenization, multimodal/video, training, loading, and quantization. None are strong enough to treat as the same underlying fix. Issue 43329 is the best representative because it pinpoints a concrete failing code path.", + "confidence": 0.73, + "canonical_issue_reason": "issue:43329 is the most concrete, code-path-specific report here: it names `_get_num_multimodal_tokens`, the video branch, and the undefined symbols causing the failure.", "canonical_pr_reason": null, - "best_issue_reason": "issue:44479 is the clearest user-facing umbrella: it reports the regression across multiple Qwen VL variants, so it's the best triage entry point.", + "best_issue_reason": "issue:43329 is the strongest standalone representative of an actual bug in this set; the others are broader symptoms or different subsystems.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43329", "right": "issue:45600", "accept": false, - "reason": "Different bugs: multimodal video token counting vs stale auto_mappings config references." + "reason": "Different bugs: multimodal video token counting vs stale auto_mappings CI references." }, { "left": "issue:45084", "right": "issue:45538", "accept": false, - "reason": "Unrelated failures: template compilation error vs tokenizer max-length constant issue." 
+ "reason": "Unrelated: compile-time template-node error vs CLIP tokenizer max-length behavior." }, { "left": "issue:34634", "right": "issue:36331", "accept": false, - "reason": "Different code paths: BarkProcessor voice preset vs Trainer custom compute_loss signature." + "reason": "Different subsystems: BarkProcessor preset loading vs trainer loss signature mismatch." }, { "left": "issue:43653", "right": "issue:44912", "accept": false, - "reason": "Tokenizer special-token registration bug vs MXFP4 quantization/loading fallback bug." + "reason": "Special-token registration bug is unrelated to MXFP4 quantization fallback." }, { "left": "issue:44568", "right": "issue:45488", "accept": false, - "reason": "Both are tokenizer-related, but they affect different models and different mechanisms." + "reason": "Both are tokenizer regressions, but they affect different code paths and models." }, { "left": "issue:43452", "right": "issue:44991", "accept": false, - "reason": "Different loading bugs: gguf_file regression vs a specific tokenizer-loading failure for est-roberta." + "reason": "Different loading failures for different APIs/models; not the same defect." }, { "left": "issue:43638", "right": "issue:44936", "accept": false, - "reason": "Different training/evaluation failures with different triggers and symptoms." + "reason": "Separate trainer/deepspeed failure from post-train evaluate regression." }, { "left": "issue:43257", "right": "issue:43329", "accept": false, - "reason": "Different issues: Qwen3 MoE weight conversion/loading vs multimodal video token counting." + "reason": "Qwen3 MoE weight conversion and video token counting are distinct bugs." }, { "left": "issue:38175", "right": "issue:44610", "accept": false, - "reason": "Both are vision-model mismatches, but they are different models and different concrete bugs." + "reason": "Different vision-model issues: zero probabilities vs processor output shape mismatch." 
}, { "left": "issue:43454", "right": "issue:43577", "accept": false, - "reason": "Different model-init bugs: lm_head tying vs dtype propagation to qformer." + "reason": "lm_head weight tying bug is unrelated to BLIP2 dtype propagation." }, { "left": "issue:43854", "right": "issue:44977", "accept": false, - "reason": "Different model-specific flash-attention/load problems; same general area is not enough." + "reason": "Different model families and different failures; not one code path." }, { "left": "issue:43746", "right": "issue:44811", "accept": false, - "reason": "Local checkpoint/PEFT loading bug vs processor batch_decode skip_special_tokens bug." + "reason": "PEFT checkpoint loading and Whisper batch_decode special-token handling are unrelated." }, { "left": "issue:43257", "right": "issue:43696", "accept": false, - "reason": "Different failures: MoE weight conversion/loading vs CUDA OOM." + "reason": "Weight conversion bug is unrelated to an out-of-memory error." }, { "left": "issue:44589", "right": "issue:45042", "accept": false, - "reason": "Unrelated issues: Float8 storage lookup vs image processor torchvision dependency." + "reason": "Float8 storage lookup failure is unrelated to torchvision being required." }, { "left": "issue:39692", "right": "issue:44912", "accept": false, - "reason": "Documentation example/model-processor mismatch vs quantization fallback on a different model." + "reason": "Documentation/example errors do not match the gpt-oss quantization fallback bug." }, { "left": "issue:36010", "right": "issue:43645", "accept": false, - "reason": "Import-path regression and Jupyter custom-model initialization are different bugs." + "reason": "Importing GenerationMixin and Jupyter custom-model init breakage are different problems." }, { "left": "issue:44568", "right": "issue:45356", "accept": false, - "reason": "Different tokenizer regressions affecting different models and different failure modes." 
+ "reason": "Both concern tokenizers, but the failures are different and model-specific." }, { "left": "issue:43819", "right": "issue:43927", "accept": false, - "reason": "Different model behavior: DAC latent reconstruction mismatch vs Dia config token ID persistence." + "reason": "DAC STE mismatch and DiaConfig token-ID persistence are unrelated." }, { "left": "issue:43720", "right": "issue:44610", "accept": false, - "reason": "Different subsystems: BitNet packed-weight loading vs OmDet processor/model image-size mismatch." + "reason": "Packed-weight unpacking in accelerate is unrelated to OmDet processor sizing." }, { "left": "issue:43749", "right": "issue:44756", "accept": false, - "reason": "Both concern loading/memory, but one is FSDP CPU RAM-efficient loading and the other is mmap on Strix Halo." + "reason": "Both involve loading/memory, but the root causes are different." }, { "left": "issue:43550", "right": "issue:44912", "accept": false, - "reason": "Different bugs: torch.compile+SDPA failure vs quantization fallback." + "reason": "torch.compile SDPA failure is unrelated to MXFP4 quantization loading." }, { "left": "issue:43329", "right": "issue:44479", - "accept": true, - "reason": "Same underlying video-input regression in the Qwen/VL multimodal token path; 43329 identifies the concrete failing branch that plausibly explains the broader 44479 report." + "accept": false, + "reason": "Same broad video area, but one is undefined helper variables and the other is a broader regression." }, { "left": "issue:43262", "right": "issue:44811", "accept": false, - "reason": "Audio chat-template sampling-rate default vs Whisper batch_decode special-token handling are different bugs." + "reason": "Audio chat-template sampling rate bug differs from Whisper batch_decode behavior." }, { "left": "issue:43531", "right": "issue:45042", "accept": false, - "reason": "Different failures: Qwen3-MoE sliding_window behavior vs PIL image-processor torchvision dependency." 
+ "reason": "Qwen3-MoE sliding-window bug is unrelated to PIL backend torchvision requirements." }, { "left": "issue:36010", "right": "issue:36331", "accept": false, - "reason": "Different regression categories: import error vs custom-trainer method signature breakage." + "reason": "ImportError for GenerationMixin and custom compute_loss kwarg are unrelated." }, { "left": "issue:43257", "right": "issue:43931", "accept": false, - "reason": "Different load issues on different Qwen model families; not the same concrete bug." + "reason": "Same model family, but different failure modes and loading paths." }, { "left": "issue:34567", "right": "issue:40990", "accept": false, - "reason": "Trainer token-count tracking bug vs gpt-oss perplexity issue are unrelated." + "reason": "TrainerState token counting and GPT-OSS perplexity are unrelated." }, { "left": "issue:44849", "right": "issue:45701", "accept": false, - "reason": "Qwen3.5 hidden-state bug vs generic tokenization-version change are not the same issue." + "reason": "Qwen3.5 hidden-states bug and tokenization-version change are not the same issue." }, { "left": "issue:43257", "right": "issue:43756", "accept": false, - "reason": "Different Qwen3 MoE loading bug vs Smollm3 RoPE-layer mismatch." + "reason": "MoE weight conversion bug is unrelated to Smollm3 RoPE-layer dropping." }, { "left": "issue:43697", "right": "issue:45357", "accept": false, - "reason": "Different model regressions: RTDetrV2 output changes vs Qwen3.5 save_pretrained visual-key regression." - }, - { - "left": "issue:43720", - "right": "issue:44610", - "accept": false, - "reason": "Different subsystems and failure modes; no shared concrete code path." + "reason": "RTDetr output drift and Qwen3.5 visual-key saving are different bugs." }, { - "left": "issue:43746", - "right": "issue:44811", + "left": "issue:43329", + "right": "issue:43450", "accept": false, - "reason": "Local checkpoint loading for GraniteSpeech vs Whisper batch_decode are unrelated." 
+ "reason": "Both touch video processors, but one is token counting and the other is batched shape output." } ] }, - "evaluator_result": { - "accept": true, - "feedback": "The clustering is grounded in the packet and the soft-edge calls are generally conservative. The Qwen video pair (issue:43329 / issue:44479) is a plausible duplicate/umbrella match, and the other rejections are supported by clearly different titles and bug surfaces." - }, - "evaluator_used": true, + "evaluator_result": null, + "evaluator_used": false, "retried": false, - "accepted_nontrivial_soft_edge": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -5173,47 +5242,47 @@ "issue:41720|issue:43540", "issue:43577|issue:45072", "issue:43452|issue:43582", - "issue:43540|issue:44560", "issue:33357|issue:34567", + "issue:43540|issue:44560", "issue:43317|issue:45538", "issue:42175|issue:43824", - "issue:42175|issue:44938", "issue:44291|issue:45042", - "issue:43927|issue:44912", "issue:42915|issue:43742", + "issue:42175|issue:44938", + "issue:43927|issue:44912", "issue:42915|issue:45538", - "issue:44062|issue:44991", "issue:42915|issue:44821", + "issue:44062|issue:44991", "issue:43540|issue:44479", "issue:43526|issue:45479", "issue:42175|issue:44488", "issue:43906|issue:45381", "issue:38175|issue:44898", "issue:45137|issue:45405", - "issue:43749|issue:44164", "issue:44737|issue:45588", + "issue:43749|issue:44164", "issue:43756|issue:45325", "issue:28282|issue:34689", "issue:36010|issue:42175", - "issue:43643|issue:45375", "issue:28282|issue:29127", + "issue:43643|issue:45375", "issue:43493|issue:43994", "issue:43742|issue:44291" ], - "prepared_review_unit_hash": "b9a0d3129b7c4e783b4b63fd7c49fa59213918e74dfbe945a81f3385a38e9c7d", + "prepared_review_unit_hash": "3eed4d5c97e623344dc153291b2a5f6d0707370049d77732bbf0c78082e56c73", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - 
"estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 31, - "serialized_chars": 37116, + "serialized_chars": 37114, "estimated_input_tokens": 9279, "estimated_eval_tokens": 18814 }, @@ -5222,202 +5291,202 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is still mostly heterogeneous. With only titles/metadata, the Qwen video-related issues and the config-field-loss issues are better treated as theme-level similarities than confirmed duplicates; no soft edge is strong enough to merge conservatively.", - "confidence": 0.36, - "canonical_issue_reason": "issue:44479 is the most representative anchor for the Qwen video-input theme, but the packet does not justify treating it as the same underlying bug as another issue; it should be used as a cautious thematic anchor, not a proven duplicate root.", + "summary": "Mostly a heterogeneous cluster of Transformers bug reports spanning unrelated areas (model loading/imports, tokenizer/config issues, vision/video regressions, quantization, and training/runtime bugs). Only a couple of soft edges look like true duplicates; the rest are just broad topical similarity.", + "confidence": 0.68, + "canonical_issue_reason": "No single canonical issue fits the whole cluster cleanly; the items split into several unrelated bug families. 
If forced, the Qwen video-regression reports are the closest to a reusable anchor, but the cluster is not a true duplicate set.", "canonical_pr_reason": null, - "best_issue_reason": "issue:44479 is the best global issue representative because it is a concrete, specific regression report with a clear version scope and it sits in the strongest apparent subtheme, but the evidence here is not enough to collapse the subtheme into a confirmed duplicate set.", + "best_issue_reason": "issue:44479 is the most representative concrete regression report in the set: it is specific, user-facing, and broad enough to capture the strongest Qwen video-input subcluster. It is still not a good umbrella for the entire heterogeneous cluster.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44987", "right": "issue:45538", "accept": false, - "reason": "Different bugs: a model-loading failure for one model vs a tokenizer max-length complaint for CLIPTokenizer." + "reason": "Different failures: one is a model-loading regression for a specific repo, the other is a tokenizer max-length constant issue." }, { "left": "issue:44479", "right": "issue:45381", - "accept": false, - "reason": "Both are Qwen video-related regressions, but the packet only gives titles and does not establish the same underlying bug or a shared fix path." + "accept": true, + "reason": "Both describe the same Qwen video-input regression around incorrect position IDs / video handling in transformers 5.3.0." }, { "left": "issue:41720", "right": "issue:43540", "accept": false, - "reason": "Both involve Qwen and runtime failures, but the symptoms and code paths differ materially: device-mapping cuda assert vs video-input ValueError." + "reason": "A device-mapping CUDA assert on A800 is not the same bug as a Qwen3OmniMoe video-input ValueError." 
}, { "left": "issue:43577", "right": "issue:45072", "accept": false, - "reason": "Both mention dtype problems, but they affect different models and behaviors; not the same concrete bug." + "reason": "Both mention dtype mismatches, but they affect different models and different code paths; not the same concrete bug." }, { "left": "issue:43452", "right": "issue:43582", "accept": false, - "reason": "Tokenizer/model loading for gguf_file is unrelated to the Apple Silicon allocator warmup TypeError." + "reason": "GGUF loading / from_pretrained behavior and an Apple Silicon allocator warmup TypeError are unrelated." }, { - "left": "issue:43540", - "right": "issue:44560", + "left": "issue:33357", + "right": "issue:34567", "accept": false, - "reason": "Both are video-input issues, but the titles indicate different Qwen model families and different failures, so this is not enough to merge." + "reason": "CLIP/Mac bus error and a TrainerState token counter not updating are unrelated runtime behaviors." }, { - "left": "issue:33357", - "right": "issue:34567", + "left": "issue:43540", + "right": "issue:44560", "accept": false, - "reason": "Completely different problems: MacOS CLIP bus error vs TrainerState token counter not updating." + "reason": "Both are video-related, but the model families and reported failure modes differ too much to treat as the same bug." }, { "left": "issue:43317", "right": "issue:45538", "accept": false, - "reason": "Dequantized model offload/load failure is unrelated to CLIPTokenizer max-length behavior." + "reason": "Dequantized model offload failure is unrelated to CLIPTokenizer's model_max_length behavior." }, { "left": "issue:42175", "right": "issue:43824", "accept": false, - "reason": "Packaging/backend availability issue vs missing Qwen class import; different subsystems and failure modes." + "reason": "Packaging/backend omission for TensorFlow is unrelated to an ImportError for a specific Qwen class." 
}, { - "left": "issue:42175", - "right": "issue:44938", + "left": "issue:44291", + "right": "issue:45042", "accept": false, - "reason": "Packaging/backend availability issue vs Python 3.14 load failure; not the same bug." + "reason": "init_empty_weights/_is_hf_initialized and PIL backend torchvision requirements are different code paths." }, { - "left": "issue:44291", - "right": "issue:45042", + "left": "issue:42915", + "right": "issue:43742", + "accept": false, + "reason": "Qwen3Moe FP8 config failure and MobileLLM config-key loading error are unrelated." + }, + { + "left": "issue:42175", + "right": "issue:44938", "accept": false, - "reason": "Different code paths: init_empty_weights argument handling vs PIL image processor torchvision dependency regression." + "reason": "Missing TensorFlow backend from installation and Python 3.14 loading failure are not the same bug." }, { "left": "issue:43927", "right": "issue:44912", "accept": false, - "reason": "Config save/load token-ID loss is unrelated to MXFP4 quantization falling back to bf16." + "reason": "A save/load config-token-ID bug is unrelated to MXFP4 quantization falling back to bf16." }, { "left": "issue:42915", - "right": "issue:43742", + "right": "issue:45538", "accept": false, - "reason": "Qwen3Moe FP8 loading failure vs MobileLLM key error; unrelated model-loading bugs." + "reason": "Qwen3Moe FP8 loading and CLIPTokenizer max length have no concrete overlap." }, { "left": "issue:42915", - "right": "issue:45538", + "right": "issue:44821", "accept": false, - "reason": "Different areas entirely: Qwen3Moe FP8 loading vs CLIPTokenizer max-length." + "reason": "An FP8 model-loading bug is unrelated to loading AutoImageProcessor from a URL." }, { "left": "issue:44062", "right": "issue:44991", "accept": false, - "reason": "Both are tokenizer-loading failures, but the concrete errors and affected models differ too much to treat as one bug." 
- }, - { - "left": "issue:42915", - "right": "issue:44821", - "accept": false, - "reason": "FP8 config failure vs AutoImageProcessor URL loading; unrelated." + "reason": "AddedToken special-argument handling and tokenizer loading for EMBEDDIA/est-roberta are different failures." }, { "left": "issue:43540", "right": "issue:44479", "accept": false, - "reason": "Related Qwen/video theme, but different model families and different error types; not enough evidence for the same underlying bug." + "reason": "Both involve Qwen video inputs, but the reported models and symptoms differ; not enough to call them the same bug." }, { "left": "issue:43526", "right": "issue:45479", "accept": false, - "reason": "ImageProcessorFast label reduction bug vs sequence-classification zero-loss bug; different functionality." + "reason": "BEiT Fast label reduction and sequence-classification zero-loss behavior are unrelated." }, { "left": "issue:42175", "right": "issue:44488", "accept": false, - "reason": "Packaging/backend availability issue vs a specific model not loading; not the same root cause." + "reason": "A backend-install issue is not the same as a model-specific loading failure." }, { "left": "issue:43906", "right": "issue:45381", "accept": false, - "reason": "An isolated reproduction reference vs a specific Qwen2.5-VL video-position regression; no direct match." + "reason": "An isolated reproduction of another issue is too vague to match the specific Qwen2.5-VL video-position bug." }, { "left": "issue:38175", "right": "issue:44898", "accept": false, - "reason": "SigLIP2 zero-probabilities vs Perceiver interpolation failure are different model bugs." + "reason": "SigLIP2 zero probabilities and a Perceiver resolution bug are different models and different defects." }, { "left": "issue:45137", "right": "issue:45405", "accept": false, - "reason": "DeepSpeed ZeRO3 deque error is unrelated to the PEFT version bump." 
+ "reason": "DeepSpeed ZeRO3 deque errors and a PEFT version bump are unrelated." }, { - "left": "issue:43749", - "right": "issue:44164", + "left": "issue:44737", + "right": "issue:45588", "accept": false, - "reason": "FSDP CPU RAM efficient loading regression vs save/from_pretrained extra_state handling; different code paths." + "reason": "XLNet CPU tensor placement and flash_attention s_aux handling are different bugs." }, { - "left": "issue:44737", - "right": "issue:45588", + "left": "issue:43749", + "right": "issue:44164", "accept": false, - "reason": "XLNet CPU placement bug vs flash-attention crash on sink-less models; unrelated." + "reason": "FSDP CPU RAM efficient loading and extra_state save/load handling are not the same issue." }, { "left": "issue:43756", "right": "issue:45325", "accept": false, - "reason": "Different rope/position-id problems in different models; not the same underlying bug." + "reason": "Both mention RoPE/position logic, but they concern different models and different incorrect behaviors." }, { "left": "issue:28282", "right": "issue:34689", "accept": false, - "reason": "PyTorch-missing ImportError vs Llama 3.2 Vision loading regression; unrelated." + "reason": "A missing PyTorch dependency error is unrelated to a specific Llama 3.2 Vision model-loading regression." }, { "left": "issue:36010", "right": "issue:42175", "accept": false, - "reason": "GenerationMixin import failure is unrelated to the tensorflow backend packaging issue." + "reason": "GenerationMixin import failure and TensorFlow backend packaging are unrelated." }, { "left": "issue:43643", "right": "issue:45375", "accept": false, - "reason": "Both concern config fields, but the packet does not establish the same preservation bug; one is trust_remote_code missing fields, the other is a specific config field dropped by strict parsing." 
- }, - { - "left": "issue:28282", - "right": "issue:29127", - "accept": false, - "reason": "ImportError due to missing PyTorch vs LayoutLMv3 error-message clarity; unrelated." + "reason": "Generic AutoConfig field-loss with trust_remote_code and a specific config field dropped by @strict are not the same concrete bug." }, { "left": "issue:43493", "right": "issue:43994", + "accept": true, + "reason": "Both are about SigLIP2 producing wrong/nonsensical outputs, and the titles point to the same underlying implementation discrepancy." + }, + { + "left": "issue:43742", + "right": "issue:44291", "accept": false, - "reason": "Both are SigLIP2-related, but the packet does not prove the same underlying defect from titles alone." + "reason": "MobileLLM loading failure and init_empty_weights argument mismatch are unrelated." } ] }, "evaluator_result": { - "accept": false, - "feedback": "The summary and verdicts are a bit too strong for the packet. In particular, the accepted pair issue:44479 \u2194 issue:45381 is not grounded enough: the packet only gives titles, and while both are Qwen video-input regressions, it does not justify claiming they are the same bug or that they \u201ccould be fixed together.\u201d That makes the canonical anchor/duplicate-subcluster claim too assertive. The 43643 \u2194 45375 accept is also somewhat speculative, but the qwen accept is enough to reject this as not conservative enough." + "accept": true, + "feedback": "The summary is grounded in the packet and stays appropriately conservative about the cluster being heterogeneous. The two accepted soft edges are plausible duplicate pairs from the titles, and the rejected pairs are mostly cautious rather than overstated. No clear evidence of overclaiming." 
}, "evaluator_used": true, - "retried": true, - "accepted_nontrivial_soft_edge": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, @@ -5499,224 +5568,209 @@ "issue:43742|issue:44589", "issue:43531|issue:44821", "issue:44514|issue:45290", - "issue:43452|issue:44843", "issue:43452|issue:43577", + "issue:43452|issue:44843", "issue:44625|issue:44877", "issue:43688|issue:45440", "issue:41762|issue:44936", - "issue:43653|issue:44568", - "issue:43295|issue:45356", "issue:43720|issue:44898", + "issue:43295|issue:45356", + "issue:43653|issue:44568", "issue:43525|issue:43643", "issue:43819|issue:44811", "issue:44466|issue:45005" ], - "prepared_review_unit_hash": "fc6e53ca6d87b7252597aeaa4d6fac1ae11c452984b95774da4542a30c206606", + "prepared_review_unit_hash": "7aaba8370e6ac5b7574ac8a1bef706a7aa06ad21c3236a8775e0ca1e9de382c9", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 27, - "serialized_chars": 35532, - "estimated_input_tokens": 8883, - "estimated_eval_tokens": 18022 + "serialized_chars": 35537, + "estimated_input_tokens": 8885, + "estimated_eval_tokens": 18026 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "These issues are mostly unrelated despite broad overlap around model loading, tokenizers, and training APIs. 
The only strong duplicate-looking pair is the v5 tied-weights serialization regression.", - "confidence": 0.86, - "canonical_issue_reason": "Use issue 44466 as the canonical issue: it states the broader tied-weights/lm_head serialization regression clearly, while 45005 is a narrower translation-model manifestation of the same problem.", + "summary": "This cluster is highly heterogeneous: the soft-similarity pairs mostly share only broad Transformer themes (tokenizers, loading, processors, training) but not the same concrete bug. I found no safe duplicate merges among the proposed edges. The broadest/highest-activity issue is FSDP_CPU_RAM_EFFICIENT_LOADING, but it is not actually a duplicate center for the rest of the cluster.", + "confidence": 0.93, + "canonical_issue_reason": "issue:43749 has the strongest discussion/inbound activity and the broadest impact, so it is the best representative issue in an otherwise unrelated set.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 44466 is the best representative because it describes the underlying bug in a general, reproducible way and covers the broader failure mode.", + "best_issue_reason": "issue:43749 is the most prominent issue here by activity and scope; it is the safest global representative even though the surrounding items are not true duplicates.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43582", "right": "issue:43782", "accept": false, - "reason": "Different failures: Apple Silicon allocator warmup TypeError vs Qwen3-VL weight_only loading error. No shared code path." + "reason": "Different failures in different loading paths; Apple Silicon allocator warmup TypeError is unrelated to Qwen3VL weight_only loading." }, { "left": "issue:44062", "right": "issue:45081", "accept": false, - "reason": "Both touch tokenizers, but one is an AddedToken constructor bug and the other is a Mistral regex/backend_tokenizer crash. Different root causes." 
+ "reason": "Both are tokenizer-related, but one is an AddedToken special-arg bug and the other is a Mistral regex/backend_tokenizer crash." }, { "left": "issue:34567", "right": "issue:36331", "accept": false, - "reason": "Trainer state counter not updating and custom compute_loss signature break are separate trainer API issues." + "reason": "Trainer metrics bookkeeping vs a changed compute_loss signature are separate bugs." }, { "left": "issue:43452", "right": "issue:44488", "accept": false, - "reason": "Both are loading problems, but one is about gguf_file handling and the other is a specific model/config load failure. Not the same bug." + "reason": "gguf_file/from_pretrained breakage is a different loading issue than the cjvt/sleng-bert model load failure." }, { "left": "issue:44484", "right": "issue:44704", "accept": false, - "reason": "save_pretrained shard-size behavior and AutoProcessor cached_file kwargs forwarding are unrelated." + "reason": "max_shard_size behavior and AutoProcessor cached_file kwargs passthrough are unrelated." }, { "left": "issue:44291", "right": "issue:44821", "accept": false, - "reason": "init_empty_weights/_is_hf_initialized argument issue is unrelated to AutoImageProcessor URL loading." + "reason": "init_empty_weights/_is_hf_initialized and AutoImageProcessor-from-URL are different code paths and failures." }, { "left": "issue:44792", "right": "issue:44964", "accept": false, - "reason": "Different multimodal model failures with different entry points; not enough evidence of one shared bug." + "reason": "Both involve multimodal models, but the failing models and concrete code paths differ." }, { "left": "issue:43643", "right": "issue:44561", "accept": false, - "reason": "One is missing fields in AutoConfig with trust_remote_code; the other is a missing helper in v5 breaking remote-code models. Different fixes." 
+ "reason": "Both mention trust_remote_code, but one is missing config fields and the other is removal of is_torch_fx_available." }, { "left": "issue:45405", "right": "issue:45600", "accept": false, - "reason": "Dependency-version bump and removed-config auto_mappings CI failure are unrelated maintenance issues." + "reason": "A PEFT version pin issue and removed Sam3LiteText auto-mapping references are unrelated." }, { "left": "issue:43697", "right": "issue:45216", "accept": false, - "reason": "RTDetr output drift and Qwen3.5 save_pretrained checkpoint corruption are different regressions." + "reason": "Different regressions: output drift in RTDetrV2 vs save_pretrained checkpoint serialization for Qwen3.5." }, { "left": "issue:43688", "right": "issue:43749", "accept": false, - "reason": "Auxiliary-loss normalization and FSDP CPU RAM-efficient loading breakage are different training-path bugs." + "reason": "Auxiliary loss normalization and FSDP CPU RAM efficient loading are unrelated bugs." }, { "left": "issue:45042", "right": "issue:45081", "accept": false, - "reason": "Image processor torchvision dependency regression and Mistral regex crash are unrelated subsystems." + "reason": "Dependency requirement regression in image processors is not the same as the Mistral regex crash." }, { "left": "issue:44514", "right": "issue:45381", "accept": false, - "reason": "Chat-template batching crash and Qwen2.5-VL video position-id bug are different problems." + "reason": "Both are Qwen2.5-VL issues, but one is apply_chat_template batching/padding and the other is video vision_position_ids." }, { "left": "issue:43742", "right": "issue:44589", "accept": false, - "reason": "Key error when loading a model and missing Float8 storage lookup are different deserialization/load failures." + "reason": "KeyError loading MobileLLM-125M and Float8 storage lookup failure are different loading bugs." 
}, { "left": "issue:43531", "right": "issue:44821", "accept": false, - "reason": "Sliding-window behavior in Qwen3-MoE is unrelated to loading AutoImageProcessor from URL." + "reason": "Qwen3-MoE sliding_window behavior and AutoImageProcessor URL loading are unrelated." }, { "left": "issue:44514", "right": "issue:45290", "accept": false, - "reason": "Both involve apply_chat_template, but one is batching/padding and the other is assistant tool-call messages with empty content. Not clearly the same underlying bug." + "reason": "Same function name, but different inputs and failure modes; not the same concrete chat-template bug." }, { "left": "issue:43452", - "right": "issue:44843", + "right": "issue:43577", "accept": false, - "reason": "gguf_file loading regression and offline model_info() call in _patch_mistral_regex are separate tokenizer-loading issues." + "reason": "gguf_file loading breakage and Blip2 dtype propagation are separate issues." }, { "left": "issue:43452", - "right": "issue:43577", + "right": "issue:44843", "accept": false, - "reason": "gguf_file loading breakage and Blip2 dtype propagation are unrelated." + "reason": "gguf_file/from_pretrained breakage is unrelated to the offline Mistral regex model_info() call." }, { "left": "issue:44625", "right": "issue:44877", "accept": false, - "reason": "Qwen3.5 num_labels propagation and granite_speech strict-config loading are different config issues." + "reason": "Qwen3.5 num_labels propagation and granite_speech strict config loading are different config bugs." }, { - "left": "issue:43688", - "right": "issue:45440", - "accept": false, - "reason": "Aux-loss normalization bug and DeepSeekV3MoE divergence from remote implementation are not the same concrete defect." - }, - { - "left": "issue:41762", - "right": "issue:44936", - "accept": false, - "reason": "Gemma3 ZeRO-3 load IndexError and trainer.evaluate() after train() failure are unrelated." 
- }, - { - "left": "issue:43653", - "right": "issue:44568", + "left": "issue:43720", + "right": "issue:44898", "accept": false, - "reason": "Both concern special tokens, but one is BigBird mask-token registration and the other is mdeberta add_special_tokens behavior. Different tokenizer-specific bugs." + "reason": "BitNet packed-weight loading and Perceiver image resolution handling do not share the same bug." }, { "left": "issue:43295", "right": "issue:45356", "accept": false, - "reason": "Processor.tokenizer/images regression and Kimi-K2.5 codec/fix_mistral_regex warning regression are different loading/tokenizer problems." + "reason": "processor.tokenizer/image input regression and Kimi-K2.5 codec/fix_mistral_regex regression are different tokenizer issues." }, { - "left": "issue:43720", - "right": "issue:44898", + "left": "issue:43653", + "right": "issue:44568", "accept": false, - "reason": "BitNet packed-weight loading and Perceiver interpolation failure are unrelated model-specific regressions." + "reason": "Both involve special tokens, but they affect different tokenizers and different symptoms." }, { "left": "issue:43525", "right": "issue:43643", "accept": false, - "reason": "Missing pad_token_id on a config and missing fields from trust_remote_code AutoConfig are distinct config-shape problems." + "reason": "Missing pad_token_id on Llama4Config is unrelated to trust_remote_code config field loss." }, { "left": "issue:43819", "right": "issue:44811", "accept": false, - "reason": "DAC.from_latents vs forward mismatch and Whisper batch_decode skip_special_tokens behavior are unrelated." + "reason": "DAC latent reconstruction mismatch and Whisper batch_decode skip_special_tokens are unrelated." }, { "left": "issue:44466", "right": "issue:45005", - "accept": true, - "reason": "Both describe the same v5 tied-weights serialization bug around lm_head.weight/save_pretrained; 45005 is just a translation-model-specific instance." 
+ "accept": false, + "reason": "Both mention tied weights, but one is device-dependent lm_head serialization and the other is translation-model save issues; not the same concrete bug." }, { - "left": "issue:43589", - "right": "issue:44561", + "left": "issue:43749", + "right": "issue:44589", "accept": false, - "reason": "Not enough relation; Float8 storage lookup failure is unrelated to the removal of is_torch_fx_available." + "reason": "FSDP CPU RAM efficient loading and Float8 storage lookup are different serialization/loading failures." } ] }, - "evaluator_result": { - "accept": true, - "feedback": "The analysis is well grounded in the packet and stays mostly conservative. The only accepted pair, 44466/45005, is plausibly the same tied-weights serialization regression. The rejected pairs are generally framed cautiously as distinct issues without overstating shared evidence. The summary matches the evidence available." - }, - "evaluator_used": true, + "evaluator_result": null, + "evaluator_used": false, "retried": false, - "accepted_nontrivial_soft_edge": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -5817,20 +5871,20 @@ "issue:43334|issue:43572", "issue:32090|issue:33666" ], - "prepared_review_unit_hash": "200cd44a223ffee01aff40799117fdc4c3e680322467224adcbf40c34acdcdfb", + "prepared_review_unit_hash": "73a0eba4b02b728f156a6faed1cd7d75337919175b9622b568c0299655a00fa2", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 33, - "serialized_chars": 37743, + "serialized_chars": 37744, "estimated_input_tokens": 9436, "estimated_eval_tokens": 19128 }, @@ -5839,210 +5893,210 @@ "aggressively_trimmed": false, "split": true, 
"analyst_result": { - "summary": "The set is highly heterogeneous: most items are unrelated model-, tokenizer-, training-, or version-specific regressions. There is no strong duplicate cluster here, so the best we can do is pick a broad regression ticket as a loose representative.", - "confidence": 0.18, - "canonical_issue_reason": "issue:43646 is the broadest umbrella in the group: a Transformers 5.0 initialization regression that matches the general version-breakage theme better than the narrower model-specific reports.", + "summary": "This cluster is mostly a false positive: the issues share only superficial regression/model-loading language, but they cover distinct bugs across tokenization, multimodal processing, training, device mapping, and config handling. None of the soft pairs look like the same underlying issue.", + "confidence": 0.99, + "canonical_issue_reason": "No clear canonical duplicate exists; the items are too heterogeneous to collapse onto one underlying bug.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43646 is the most suitable anchor only because it is a concrete, broadly worded regression ticket; it is still not a true duplicate of most other issues in this set.", + "best_issue_reason": "If one issue must stand in for the cluster, issue 43646 is the broadest umbrella regression ('Transformers 5.0.0 breaks custom model initialization'), but it is not a duplicate of the others.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:24643", "right": "issue:29127", "accept": false, - "reason": "Different bugs: DeepSpeed training weight shape error vs. LayoutLMv3 box-content validation/error messaging." + "reason": "DeepSpeed weight-shape training error vs LayoutLMv3 box-content validation; unrelated failure modes." 
}, { "left": "issue:44464", "right": "issue:44743", "accept": false, - "reason": "Both touch generation, but one is chunked/compiled-forward inconsistency and the other is Qwen3.5 cache state reset; not the same concrete bug." + "reason": "Both mention generation behavior, but one is chunked generation with compiled forward and the other is recurrent-state reset in modular_qwen3_5; different code paths." }, { "left": "issue:41720", "right": "issue:43906", "accept": false, - "reason": "Unrelated failures: Qwen3 auto device mapping cuda assert vs. an isolated reproduction of a different issue." + "reason": "Qwen3 auto device-mapping cuda assert vs an isolated reproduction of a separate issue; no shared concrete bug." }, { "left": "issue:43577", "right": "issue:43697", "accept": false, - "reason": "Different model paths and symptoms: Blip2 dtype loading bug vs. RTDetrV2 output divergence in v5." + "reason": "Blip2 dtype staying float32 vs RTDetrV2 output divergence; different model families and defects." }, { "left": "issue:38175", "right": "issue:43295", "accept": false, - "reason": "Different regressions: SigLIP2 zero probabilities vs. processor/tokenizer regression for custom model code." + "reason": "Siglip2 zero-probabilities issue vs v4.57.5 custom processor/tokenizer regression; not the same path." }, { "left": "issue:43688", "right": "issue:45704", "accept": false, - "reason": "Different subsystems entirely: auxiliary loss normalization vs. T5 Apex RMSNorm memory leak." + "reason": "Aux-loss normalization bug in MoE models vs T5/apex RMSNorm memory leak; unrelated." }, { "left": "issue:44843", "right": "issue:44991", "accept": false, - "reason": "Both are tokenizer-loading regressions, but one is an offline model_info call in regex patching and the other is a specific tokenizer loading failure; root causes differ." + "reason": "HF Hub offline/tokenizer patch issue vs tokenizer loading regression for a specific model; different root cause." 
}, { "left": "issue:43646", "right": "issue:43749", "accept": false, - "reason": "Different issues: custom model initialization breakage vs. FSDP CPU RAM efficient loading regression." + "reason": "Generic custom-model initialization regression vs FSDP CPU RAM-efficient loading breakage; not the same fix." }, { "left": "issue:44361", "right": "issue:44442", "accept": false, - "reason": "Both are tokenizer/model loading failures, but they affect different tokenizer classes and failure modes." + "reason": "MLukeTokenizer task AttributeError vs AutoTokenizer failing to load FastSpeech2ConformerTokenizer; separate tokenizer bugs." }, { "left": "issue:43688", "right": "issue:45701", "accept": false, - "reason": "One is auxiliary-loss normalization; the other is a broad tokenization change report. Not the same bug." + "reason": "Auxiliary-loss normalization bug vs general tokenization changes across versions; no shared implementation defect." }, { "left": "issue:43653", "right": "issue:44361", "accept": false, - "reason": "Different tokenizer defects: BigBird special-token registration vs. MLuke tokenizer AttributeError." + "reason": "BigBirdTokenizer special-token registration bug vs MLukeTokenizer task AttributeError; different tokenizer subsystems." }, { "left": "issue:44479", "right": "issue:44936", "accept": false, - "reason": "Video-input regression for Qwen VL models vs. trainer evaluate-after-train failure; unrelated code paths." + "reason": "Video-input regression for VLMs vs trainer.evaluate() failing after trainer.train(); unrelated." }, { "left": "issue:28282", "right": "issue:32090", "accept": false, - "reason": "Different layers: missing PyTorch import error vs. Trainer broadcast TypeError with NoneType." + "reason": "Missing PyTorch import error vs Trainer _gpu_broadcast_one NoneType error; different environment/runtime problems." 
}, { "left": "issue:44977", "right": "issue:45356", "accept": false, - "reason": "Different regressions: flash-attention generation issue vs. tokenizer codec/regression and warning behavior." + "reason": "Qwen3.5 flash-attention generation bug vs Kimi-K2.5 tokenizer codec/regex regression; distinct model and layer." }, { "left": "issue:28282", "right": "issue:33357", "accept": false, - "reason": "Unrelated: environment import error vs. MacOS bus error with a community CLIP model." + "reason": "PyTorch-not-found ImportError vs MacOS bus error on CLIP community model; unrelated failures." }, { "left": "issue:30064", "right": "issue:33666", "accept": false, - "reason": "Different areas: void segmentation map processing vs. Qwen2-VL multi-GPU training." + "reason": "Void segmentation-map image processor bug vs Qwen2-VL multi-GPU training request; not the same issue." }, { "left": "issue:28282", "right": "issue:35141", "accept": false, - "reason": "Different bugs: missing PyTorch dependency vs. output embedding reinitialization after resizing embeddings." + "reason": "PyTorch missing import vs output-embedding reinitialized after resize_token_embeddings; different domains." }, { "left": "issue:43122", "right": "issue:43906", "accept": false, - "reason": "Tokenization/version-change report vs. isolated reproduction of another issue; not the same defect." + "reason": "Tokenizer version change behavior vs reproduction of a different issue; no concrete overlap." }, { "left": "issue:43577", "right": "issue:45588", "accept": false, - "reason": "Different bugs: dtype not propagated in Blip2 vs. flash_attention crash on s_aux=None." + "reason": "Blip2 dtype propagation bug vs flash_attention integration crash on s_aux=None; different model/integration paths." }, { "left": "issue:43577", "right": "issue:44554", "accept": false, - "reason": "Different issues: Blip2 dtype loading vs. MPS attention correctness when head dims differ." 
+ "reason": "Blip2 dtype issue vs MPS attention correctness when value-head dim differs; not the same code-path problem." }, { "left": "issue:43653", "right": "issue:43927", "accept": false, - "reason": "Different bugs: BigBird special-token handling vs. DiaConfig custom token IDs lost on save/load." + "reason": "BigBird mask token special-token registration vs DiaConfig losing custom token IDs; both token-related but distinct bugs." }, { "left": "issue:43531", "right": "issue:43742", "accept": false, - "reason": "Different failures: Qwen3-MoE sliding_window handling vs. MobileLLM-125M key error on load." + "reason": "Qwen3-MoE sliding_window issue vs MobileLLM loading key error; different model-loading/runtime defects." }, { "left": "issue:43329", "right": "issue:45325", "accept": false, - "reason": "Both involve multimodal/video code, but one is undefined helper usage in token counting and the other is temporal position scaling in rope index." + "reason": "Undefined video-patch helpers in multimodal token counting vs Qwen2.5-VL rope-index temporal scaling bug; different logic." }, { "left": "issue:44464", "right": "issue:44977", "accept": false, - "reason": "Different generation regressions: compiled-forward chunking vs. flash-attention behavior in Qwen3.5." + "reason": "Chunked generation inconsistency with compiled forward vs Qwen3.5 flash-attention generation bug; not the same failure." }, { "left": "issue:44479", "right": "issue:44625", "accept": false, - "reason": "Different bugs: video regression in multiple VLMs vs. num_labels propagation in Qwen3.5 config." + "reason": "Video input regression for several VLMs vs Qwen3.5 num_labels propagation bug; unrelated multimodal vs config issue." }, { "left": "issue:43452", "right": "issue:44451", "accept": false, - "reason": "Different issues: gguf_file breaks two from_pretrained entry points vs. a specific tokenizer load failure for a model repo." 
+ "reason": "gguf_file tokenizer/model loading breakage vs a specific tokenizer load failure for ScandiBERT; separate loading paths." }, { "left": "issue:43550", "right": "issue:44361", "accept": false, - "reason": "Different code paths: torch.compile/SDPA failure vs. tokenizer AttributeError." + "reason": "Bamba torch.compile/SDPA failure vs MLukeTokenizer AttributeError; unrelated model execution and tokenizer issues." }, { "left": "issue:43504", "right": "issue:43720", "accept": false, - "reason": "Different bugs: legacy field during Beit loading vs. unpacking packed weights during accelerate loading." + "reason": "Beit legacy-field load failure vs BitNet packed-weight unpacking during accelerate loading; different model-loading mechanisms." }, { "left": "issue:43504", "right": "issue:43550", "accept": false, - "reason": "Different model-loading and runtime-compile bugs; no shared concrete code path." + "reason": "Beit pretrained-load issue vs Bamba torch.compile/SDPA failure; no shared underlying bug." }, { "left": "issue:32090", "right": "issue:34567", "accept": false, - "reason": "Different Trainer problems: NoneType broadcast failure vs. num_input_tokens_seen not updating." + "reason": "Trainer _gpu_broadcast_one NoneType error vs TrainerState num_input_tokens_seen not updating; different trainer behaviors." }, { "left": "issue:43504", "right": "issue:44610", "accept": false, - "reason": "Different bugs: Beit legacy field load failure vs. OmDet-Turbo processor/model input-size mismatch." + "reason": "Beit legacy field load failure vs OmDet-Turbo processor/model input-size mismatch; unrelated." }, { "left": "issue:43334", "right": "issue:43572", "accept": false, - "reason": "Both are config-field regressions, but they affect different models and different missing fields." + "reason": "Qwen3-VL missing pad_token_id on text config vs StableLmConfig missing pad_token_idx after 5.0; similar theme but different model/config fields and fixes." 
}, { "left": "issue:32090", "right": "issue:33666", "accept": false, - "reason": "Different training issues: broadcast TypeError vs. Qwen2-VL multi-GPU training." + "reason": "NoneType broadcast error in Trainer vs Qwen2-VL multi-GPU training request; different areas entirely." } ] }, @@ -6136,8 +6190,8 @@ "issue:39692|issue:43493", "issue:43720|issue:44361", "issue:43540|issue:44977", - "issue:43756|issue:44479", "issue:33666|issue:36331", + "issue:43756|issue:44479", "issue:45200|issue:45229", "issue:44625|issue:45584", "issue:43824|issue:44589", @@ -6147,214 +6201,214 @@ "issue:39692|issue:43295", "issue:33357|issue:33666" ], - "prepared_review_unit_hash": "388a2b484695b446f08731c54da1c72e968fac28f580465bfd4c7b2ff9b9af53", + "prepared_review_unit_hash": "5aa2bf896172acaeb5598d7615155775165f4a7700c7d14beeaf266a97b0c504", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 30, - "serialized_chars": 36733, - "estimated_input_tokens": 9184, - "estimated_eval_tokens": 18624 + "serialized_chars": 36738, + "estimated_input_tokens": 9185, + "estimated_eval_tokens": 18626 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is not a duplicate set; it mixes many unrelated issues across model families, configs, training, serving, and serialization. 
None of the soft edges are strong enough to merge.", - "confidence": 0.95, - "canonical_issue_reason": "No single canonical issue fits the cluster: the items describe distinct bugs and regressions in different subsystems and model families, so there is no clear duplicate anchor.", + "summary": "This cluster is mostly a grab bag of unrelated model-specific regressions and API issues. A few pairs share a broad theme (loading/config regressions), but none look like the same concrete bug or change closely enough to merge as duplicates.", + "confidence": 0.66, + "canonical_issue_reason": "Issue 43646 is the broadest, most representative regression report in the set (v5 custom model initialization breakage), so it is the best single issue to anchor the cluster even though the rest are largely unrelated.", "canonical_pr_reason": null, - "best_issue_reason": "No issue is a good global representative because the cluster is heterogeneous rather than one underlying bug.", + "best_issue_reason": "43646 is the strongest standalone issue for triage because it describes a general, version-wide init/load regression rather than a model-specific symptom.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43525", "right": "issue:43572", "accept": false, - "reason": "Both involve missing padding-related config fields after v5, but they affect different model classes and different attributes (`pad_token_id` vs `pad_token_idx`)." + "reason": "Both are config attribute regressions, but they affect different model families and different missing fields; not the same underlying bug." }, { "left": "issue:43278", "right": "issue:43688", "accept": false, - "reason": "Different failure modes: dtype drift in evaluate vs auxiliary-loss normalization in specific MoE models." + "reason": "Different failure modes: dtype drift in evaluation vs auxiliary-loss normalization; only a loose training-related theme." 
}, { "left": "issue:41720", "right": "issue:44464", "accept": false, - "reason": "Unrelated code paths: auto device mapping CUDA assert versus compiled-forward chunked generation inconsistency." + "reason": "Unrelated code paths and symptoms: device-mapping CUDA assert vs compiled-forward chunked generation mismatch." }, { "left": "issue:44849", "right": "issue:45357", "accept": false, - "reason": "Both are Qwen3.5 regressions, but one is about hidden states output and the other about save_pretrained visual encoder keys." + "reason": "One is an output_hidden_states bug, the other is bad save_pretrained key serialization; not the same defect." }, { "left": "issue:43756", "right": "issue:44913", "accept": false, - "reason": "Both mention rotary/RoPE settings, but one is a SmolLM3 architecture issue and the other is a GPTNeoX config reload bug." + "reason": "Both mention RoPE/config behavior, but one is a model architecture omission and the other is a config reload bug." }, { "left": "issue:43493", "right": "issue:43825", "accept": false, - "reason": "SigLIP2 implementation mismatch and pipeline translation-task messaging are unrelated bugs." + "reason": "Different subsystems entirely: SigLIP2 implementation discrepancy vs pipeline task-support messaging." }, { "left": "issue:43742", "right": "issue:44821", "accept": false, - "reason": "Different loading failures: a specific model key error versus AutoImageProcessor URL loading support." + "reason": "Different loading failures with different triggers; no shared concrete code path." }, { "left": "issue:43540", "right": "issue:43716", "accept": false, - "reason": "Both are multimodal input issues, but one is video processing in Qwen3OmniMoe and the other is image-preprocessor dtype mismatch in Mistral-3." + "reason": "Both are multimodal issues, but one is video processing and the other is image-preprocessor dtype mismatch." 
}, { "left": "issue:43262", "right": "issue:45084", "accept": false, - "reason": "Audio chat-template sampling rate behavior is unrelated to template compilation errors." + "reason": "Audio sampling-rate default bug vs template-compilation error; unrelated behavior." }, { "left": "issue:33666", "right": "issue:36010", "accept": false, - "reason": "Multi-GPU training for Qwen2-VL is unrelated to the GenerationMixin import regression." + "reason": "Qwen2-VL multi-GPU training and a GenerationMixin import error are unrelated." }, { "left": "issue:43720", "right": "issue:43927", "accept": false, - "reason": "Packed-weight unpacking during accelerate loading is a different issue from losing custom token IDs on save/load." + "reason": "Packed-weight loading bug vs config token-id persistence bug; different objects and failure points." }, { "left": "issue:43606", "right": "issue:43927", "accept": false, - "reason": "CPU offload device mismatch is unrelated to DiaConfig token ID persistence." + "reason": "CPU offload device mismatch is unrelated to DiaConfig token ID loss." }, { "left": "issue:43257", "right": "issue:43299", "accept": false, - "reason": "Both concern Qwen3 MoE loading, but one is accelerate+deepspeed weight conversion and the other is a broader v5.0 dev loading break." + "reason": "Both involve Qwen3 MoE loading, but one is accelerate/deepspeed weight conversion and the other is a v5 model-loading regression; not clearly the same bug." }, { "left": "issue:43278", "right": "issue:43844", "accept": false, - "reason": "Different numerical/training problems: evaluation dtype change versus ZeRO-3 gradient growth." + "reason": "Different training issues with different symptoms; no evidence of one shared root cause." }, { "left": "issue:44488", "right": "issue:44843", "accept": false, - "reason": "Loading a specific model is not the same underlying bug as unconditional Hub metadata access breaking offline mode." 
+ "reason": "Specific model load failure vs offline-mode hub call bug; only loosely related through tokenizer loading." }, { "left": "issue:43493", "right": "issue:43844", "accept": false, - "reason": "Implementation mismatch in SigLIP2 is unrelated to DeepSpeed gradient instability." + "reason": "SigLIP2 implementation discrepancy and ZeRO-3 gradient inflation are unrelated." }, { "left": "issue:44964", "right": "issue:45405", "accept": false, - "reason": "Model loading failure and a version requirement bump are not the same bug." + "reason": "Model-loading failure vs dependency-version bump; not the same underlying issue." }, { "left": "issue:39692", "right": "issue:43493", "accept": false, - "reason": "Doc-example errors are distinct from a model implementation discrepancy with the JAX reference." + "reason": "Doc-example errors and implementation mismatch are different problem types, even if both touch SigLIP2." }, { "left": "issue:43720", "right": "issue:44361", "accept": false, - "reason": "Different loader failures in different components: packed BitNet weights versus MLukeTokenizer attribute handling." + "reason": "Different tokenizer/model-loading failures with no shared concrete code path." }, { "left": "issue:43540", "right": "issue:44977", "accept": false, - "reason": "Both are Qwen3.5-family issues, but one is video-input validation and the other is flash-attention generation behavior." + "reason": "Video-input processing bug vs flash-attention generation bug; unrelated." }, { - "left": "issue:43756", - "right": "issue:44479", + "left": "issue:33666", + "right": "issue:36331", "accept": false, - "reason": "RoPE-layer documentation/model-structure issue is unrelated to the Qwen video-input regression." + "reason": "Multi-GPU training and trainer compute_loss signature mismatch are different bugs." 
}, { - "left": "issue:33666", - "right": "issue:36331", + "left": "issue:43756", + "right": "issue:44479", "accept": false, - "reason": "Training setup for Qwen2-VL is unrelated to a Trainer API signature mismatch." + "reason": "Different Qwen/Smollm model regressions with different mechanics and failure modes." }, { "left": "issue:45200", "right": "issue:45229", "accept": false, - "reason": "Defaulting mm_token_type_ids to zeros and multi-GPU inference OOM are unrelated." + "reason": "One is a missing default for text-only fine-tuning, the other is GPU OOM in multi-GPU inference; not duplicates." }, { "left": "issue:44625", "right": "issue:45584", "accept": false, - "reason": "Config propagation for Qwen3.5 labels is unrelated to Whisper empty-transcription generation failure." + "reason": "Different model families and different bugs: num_labels propagation vs Whisper empty-transcription generation." }, { "left": "issue:43824", "right": "issue:44589", "accept": false, - "reason": "Importing a missing class and failing to find Float8 storage are different version/runtime issues." + "reason": "ImportError for a specific model class and missing Float8 storage are unrelated." }, { "left": "issue:43646", "right": "issue:44466", "accept": false, - "reason": "Both involve v5 behavior, but custom model initialization and lm_head serialization are different code paths and bugs." + "reason": "Both are v5 regressions, but one is custom model initialization and the other is tied-weight serialization depending on device." }, { "left": "issue:44423", "right": "issue:44734", "accept": false, - "reason": "Serving crashes in different endpoints and failure modes: multimodal `.to()` handling versus KV-cache continuation indexing." + "reason": "Both hit serve paths, but the concrete crashes and code paths differ." }, { "left": "issue:44589", "right": "issue:45520", "accept": false, - "reason": "Float8 storage lookup and Python 3.13 flash_attn import key errors are unrelated." 
+ "reason": "Float8 storage lookup failure is unrelated to the flash_attn import_utils KeyError on Python 3.13." }, { "left": "issue:39692", "right": "issue:43295", "accept": false, - "reason": "A documentation example bug is not the same as a regression in custom model code using processor.tokenizer." + "reason": "Documentation example problems and a processor.tokenizer regression are distinct issues." }, { "left": "issue:33357", "right": "issue:33666", "accept": false, - "reason": "MacOS bus error with CLIP is unrelated to Qwen2-VL multi-GPU training." + "reason": "MacOS CLIP bus error and Qwen2-VL multi-GPU training are unrelated." } ] }, @@ -6432,22 +6486,22 @@ "issue:44521|issue:45290", "issue:43504|issue:43653", "issue:43927|issue:44361", - "issue:43994|issue:44493", "issue:43575|issue:45405", + "issue:43994|issue:44493", "issue:44792|issue:45405", "issue:44464|issue:45584", "issue:43278|issue:43701", "issue:43606|issue:44112", - "issue:44568|issue:45701", "issue:29942|issue:30333", "issue:44554|issue:45446", + "issue:44568|issue:45701", "issue:15354|issue:38175", "issue:43844|issue:44964", "issue:43540|issue:45200", "issue:45084|issue:45520", - "issue:39401|issue:44568", "issue:43582|issue:44977", "issue:43232|issue:44849", + "issue:39401|issue:44568", "issue:45072|issue:45198", "issue:37428|issue:43957", "issue:44315|issue:44849", @@ -6455,20 +6509,20 @@ "issue:43334|issue:43525", "issue:45200|issue:45325" ], - "prepared_review_unit_hash": "99f87a597a6426f04244d45185d76d12399c100e1f875f2ba7b30eeb753f6e09", + "prepared_review_unit_hash": "8e7769aa2f211f6a408c5273bf11086792e3936b6010139a1b9d98c10a364e6a", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 27, - 
"serialized_chars": 35289, + "serialized_chars": 35292, "estimated_input_tokens": 8823, "estimated_eval_tokens": 17902 }, @@ -6477,8 +6531,8 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This is a heterogeneous grab-bag of unrelated Transformers issues; the soft pairs mostly share only broad keywords like tokenization, loading, or generation, not the same underlying bug. No pair looks safe to merge as a duplicate, and there is no strong single canonical issue for the cluster.", - "confidence": 0.94, + "summary": "These items are a grab bag of unrelated Transformers bug reports across different models, tokenizers, generation paths, and CI regressions. The soft pairs mostly share broad vocabulary like \"pad_token\" or \"loading\" but do not describe the same concrete bug/change, so none should be merged as duplicates.", + "confidence": 0.95, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, @@ -6488,163 +6542,163 @@ "left": "issue:43572", "right": "issue:44291", "accept": false, - "reason": "Both are Transformers 5.x regressions, but one is StableLmConfig padding metadata and the other is init_empty_weights handling; different code paths and fixes." + "reason": "Both involve loading/config initialization, but one is about missing `pad_token_idx` after a 5.0 update and the other is an `init_empty_weights`/`_is_hf_initialized` argument regression; different failure modes and code paths." }, { "left": "issue:43504", "right": "issue:43606", "accept": false, - "reason": "Different bug classes: BEiT pretrained loading with a legacy field vs bark-small CPU offload device mismatch." + "reason": "Different models and bugs: BEiT pretrained preset loading with a legacy field vs. `suno/bark-small` device mismatch under CPU offload." 
}, { "left": "issue:44521", "right": "issue:45290", "accept": false, - "reason": "Both touch apply_chat_template, but one is wrong assistant mask generation for multimodal inputs and the other is a crash on tool-call messages with no content." + "reason": "Both mention chat templating, but one is about all-zero assistant masks for multimodal inputs and the other is a crash on assistant tool-call messages without content; not the same defect." }, { "left": "issue:43504", "right": "issue:43653", "accept": false, - "reason": "Unrelated failures: model loading legacy-field cleanup vs BigBirdTokenizer special-token registration/empty decode." + "reason": "One is a BEiT model-loading legacy-field issue; the other is a BigBird tokenizer special-token registration/empty decode bug." }, { "left": "issue:43927", "right": "issue:44361", "accept": false, - "reason": "Different components and symptoms: DiaConfig losing custom token IDs vs MLukeTokenizer attribute errors on tasks." + "reason": "Both are config/token-related, but `DiaConfig` loses custom token IDs on save/load, while `MLukeTokenizer` raises an AttributeError on tasks; distinct artifacts and symptoms." }, { - "left": "issue:43994", - "right": "issue:44493", + "left": "issue:43575", + "right": "issue:45405", "accept": false, - "reason": "Different regressions: Siglip2 producing bad outputs vs a position-id key warning/error affecting many models." + "reason": "OOM during tensor-parallel loading of a specific model is unrelated to a package-version pin on `MIN_PEFT_VERSION`." }, { - "left": "issue:43575", - "right": "issue:45405", + "left": "issue:43994", + "right": "issue:44493", "accept": false, - "reason": "No shared underlying bug; one is tensor-parallel OOM, the other is a dependency version bump for PEFT." + "reason": "`siglip2` nonsensical outputs with AutoModel/pipeline is not the same as the generic unexpected position-id key issue affecting many models." 
}, { "left": "issue:44792", "right": "issue:45405", "accept": false, - "reason": "A failing janus test is unrelated to an unreleased PEFT minimum-version change." + "reason": "A janus test failure is unrelated to a PEFT minimum-version bump." }, { "left": "issue:44464", "right": "issue:45584", "accept": false, - "reason": "Different generation-path bugs in different models: compiled-forward chunking vs Whisper empty transcription after token alignment." + "reason": "Chunked generation inconsistency with compiled forward is a generation/compilation bug; empty transcription after `align_special_tokens` is a Whisper-specific generation edge case." }, { "left": "issue:43278", "right": "issue:43701", "accept": false, - "reason": "One is a BF16-to-FP32 dtype change during eval, the other a resume_from_checkpoint key mismatch; no common fix." + "reason": "BF16-to-FP32 dtype change in evaluation is unrelated to a `resume_from_checkpoint` key mismatch." }, { "left": "issue:43606", "right": "issue:44112", "accept": false, - "reason": "Runtime CPU offload device mismatch is not the same as a stale CI test override in GraniteSpeech." - }, - { - "left": "issue:44568", - "right": "issue:45701", - "accept": false, - "reason": "The second issue is a broad version-dependent tokenization complaint, not evidence of the same specific BOS/EOS bug." + "reason": "Different CI failures in different models: CPU offload device mismatch vs. stale device override test in GraniteSpeech." }, { "left": "issue:29942", "right": "issue:30333", "accept": false, - "reason": "Completely unrelated: Flash Attention 2 test failures vs MLFlow reporting a job as still running after failure." + "reason": "Flash Attention 2 test failures and MLFlow reporting of failed jobs are unrelated." }, { "left": "issue:44554", "right": "issue:45446", "accept": false, - "reason": "Distinct areas: MPS attention correctness with mismatched head dims vs a PyTorch version check in flex_attention import logic." 
+ "reason": "An MPS attention correctness issue is not the same as a PyTorch version-check bug for `AuxRequest` import in flex_attention." + }, + { + "left": "issue:44568", + "right": "issue:45701", + "accept": false, + "reason": "`add_special_tokens=True` not adding BOS/EOS for one tokenizer is different from a broad version-dependent tokenization change report." }, { "left": "issue:15354", "right": "issue:38175", "accept": false, - "reason": "TorchScript GeneratorExp export failure and Siglip2 zero-probability outputs are unrelated bugs." + "reason": "GeneratorExp unsupported in `torch.jit.script` is unrelated to zero probabilities in `siglip2-base-patch16-224`." }, { "left": "issue:43844", "right": "issue:44964", "accept": false, - "reason": "Different problems: ZeRO-3 gradient growth during training vs failure loading Phi-4 multimodal models." + "reason": "DeepSpeed ZeRO-3 gradient growth and loading `microsoft/Phi-4-multimodal-instruct` are separate problems with different code paths." }, { "left": "issue:43540", "right": "issue:45200", "accept": false, - "reason": "Different multimodal input bugs: Qwen3OmniMoe video processing vs Gemma 4 text-only mm_token_type_ids defaults." + "reason": "Video-input processing failure in Qwen3OmniMoe is unrelated to Gemma 4 requiring `mm_token_type_ids` to default to zeros." }, { "left": "issue:45084", "right": "issue:45520", "accept": false, - "reason": "No shared code path: template compilation TypeError vs flash_attn import KeyError on Python 3.13." - }, - { - "left": "issue:39401", - "right": "issue:44568", - "accept": false, - "reason": "Both are tokenizer issues, but one is offset_mapping for Qwen3 and the other is add_special_tokens not adding BOS/EOS for mdeberta-v3." + "reason": "Template-node compilation error and Python 3.13 `flash_attn` import KeyError are unrelated failures." 
}, { "left": "issue:43582", "right": "issue:44977", "accept": false, - "reason": "Different runtime/model issues: AppleSilicon allocator warmup TypeError vs Qwen3.5 flash-attention generation regression." + "reason": "Apple Silicon allocator warmup TypeError is not the same bug as Qwen3.5 flash-attention generation issues." }, { "left": "issue:43232", "right": "issue:44849", "accept": false, - "reason": "Separate generation bugs: sync_gpus/_update_model_kwargs_for_generation vs Qwen3.5 output_hidden_states behavior." + "reason": "`_update_model_kwargs_for_generation` after `sync_gpus` concerns generation state handling; the Qwen3.5 issue is about `output_hidden_states=True` behavior." + }, + { + "left": "issue:39401", + "right": "issue:44568", + "accept": false, + "reason": "Wrong `offset_mapping` in Qwen3 tokenizer is a tokenizer-span bug, not the BOS/EOS insertion bug in mDeBERTa." }, { "left": "issue:45072", "right": "issue:45198", "accept": false, - "reason": "Different subsystems and failures: bfloat16 dtype mismatches vs Wav2Vec2 save_pretrained/tokenization problems." + "reason": "Dtype mismatches in CI for SwitchTransformers/TimmWrapperModel are unrelated to Wav2Vec2 save/tokenization failures." }, { "left": "issue:37428", "right": "issue:43957", "accept": false, - "reason": "Import-time flash-attn symbol breakage is unrelated to meta-device model loading regressions." + "reason": "ImportError for `_flash_supports_window_size` and meta-device loading breakage are different regressions affecting different subsystems." }, { "left": "issue:44315", "right": "issue:44849", "accept": false, - "reason": "Liger Kernel not being applied in model_init is unrelated to Qwen3.5 hidden-state generation behavior." + "reason": "`model_init` not applying Liger Kernel is a training-setup issue, not the same as Qwen3.5 hidden-state output behavior." 
}, { "left": "issue:44554", "right": "issue:45356", "accept": false, - "reason": "Different issues: MPS attention correctness vs Kimi-K2.5 tokenizer codec handling/regression." + "reason": "Different attention/tokenization concerns: MPS attention correctness vs. Kimi-K2.5 tokenizer codec handling and warning message regression." }, { "left": "issue:43334", "right": "issue:43525", "accept": false, - "reason": "Both mention missing pad_token_id, but they are model-specific config errors in different classes and are not the same underlying bug." + "reason": "Qwen3-VL missing `pad_token_id` and Llama4 missing `pad_token_id` are similar symptoms but different configs/models and likely separate fixes." }, { "left": "issue:45200", "right": "issue:45325", "accept": false, - "reason": "Unrelated multimodal bugs: Gemma 4 token-type IDs vs Qwen2.5-VL rope-index scaling for still images." + "reason": "Gemma 4 multimodal token type IDs defaulting to zeros is unrelated to Qwen2.5-VL rope-index scaling for still images." 
} ] }, @@ -6721,8 +6775,8 @@ "issue:43295|issue:44610", "issue:44466|issue:45127", "issue:44898|issue:45412", - "issue:44792|issue:45563", "issue:43493|issue:43701", + "issue:44792|issue:45563", "issue:37428|issue:43824", "issue:43388|issue:43526", "issue:43873|issue:44464", @@ -6736,35 +6790,35 @@ "issue:38175|issue:45412", "issue:43295|issue:43873", "issue:42915|issue:43824", - "issue:43295|issue:43994", "issue:44442|issue:45198", + "issue:43295|issue:43994", "issue:43827|issue:43901", "issue:38175|issue:43493", "issue:43278|issue:43856", "issue:44206|issue:45584", "issue:43653|issue:44112", - "issue:44462|issue:44561", - "issue:45127|issue:45245", "issue:43450|issue:43493", + "issue:45127|issue:45245", + "issue:44462|issue:44561", "issue:44561|issue:44779", "issue:43819|issue:45083", "issue:45290|issue:45381", "issue:43450|issue:43575" ], - "prepared_review_unit_hash": "0ca9fbe24d0caa4a60284d1e14a35eb9fae0e40983dd8c9fc8936ac8f471f5c6", + "prepared_review_unit_hash": "1fd012d66153b0084bb0eaee880342cfc57d061ff8991fb33265d59b1549833a", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 32, - "serialized_chars": 37338, + "serialized_chars": 37340, "estimated_input_tokens": 9335, "estimated_eval_tokens": 18926 }, @@ -6773,214 +6827,217 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This is a mostly heterogeneous cluster of regressions, API breakages, model-specific bugs, and docs/test noise. 
Only the two SigLIP2 reports look like the same underlying issue; the rest are not true duplicates.", - "confidence": 0.66, - "canonical_issue_reason": "issue:43493 is the clearest root-cause report in the only true duplicate pair: it describes the SigLIP2 HF-vs-JAX implementation discrepancy directly, while issue:38175 reads more like a symptom report.", + "summary": "This cluster is a loose grab-bag of mostly unrelated bug reports with only superficial overlap around Transformers v5 regressions, tokenizers, and multimodal processors. None of the soft pairs look like the same concrete underlying bug/change, so I would not merge any of them.", + "confidence": 0.19, + "canonical_issue_reason": "issue:43295 is the broadest, most reusable regression report in the set: it describes a v4.57.5 API break affecting custom model/processor code, which makes it a reasonable duplicate bucket for adjacent processor-regression reports.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43295 is the most central issue in the overall cluster, with the broadest regression framing and the strongest soft-link connectivity to other nearby API-breakage reports.", + "best_issue_reason": "issue:43295 is the most generally framed, high-impact issue here and the best candidate to serve as a duplicate target for nearby API/regression complaints.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43295", "right": "issue:44610", "accept": false, - "reason": "Processor/tokenizer regression vs. a feature-extractor input-size mismatch; related area, but different bug and code path." + "reason": "Both are processor-related regressions, but one is a custom code/API break and the other is a wrong image size for OmDet-Turbo; different bugs and fixes." }, { "left": "issue:44466", "right": "issue:45127", "accept": false, - "reason": "Inconsistent lm_head serialization/tied-weights behavior vs. LoRA-merge collapse with extended vocabulary; not the same failure." 
+ "reason": "One is device-dependent serialization of tied weights; the other is LoRA merge collapse with extended vocab. Different code paths." }, { "left": "issue:44898", "right": "issue:45412", "accept": false, - "reason": "Perceiver interpolation failure vs. RT-DETR memory leak; unrelated symptoms and code paths." + "reason": "Perceiver interpolation failure and RT-DETR memory leak are unrelated model bugs." }, { - "left": "issue:44792", - "right": "issue:45563", + "left": "issue:43493", + "right": "issue:43701", "accept": false, - "reason": "janus image-generation test failure vs. stale paged-generate warning; different issue types." + "reason": "SigLIP2 output discrepancy and resume-from-checkpoint key mismatch are not the same underlying issue." }, { - "left": "issue:43493", - "right": "issue:43701", + "left": "issue:44792", + "right": "issue:45563", "accept": false, - "reason": "SigLIP2 output discrepancy vs. resume_from_checkpoint key mismatch; distinct regressions." + "reason": "A Janus image-generation test failure and a stale generate() warning are unrelated." }, { "left": "issue:37428", "right": "issue:43824", "accept": false, - "reason": "Both are import errors, but they involve different missing symbols and different modules/models." + "reason": "Both are ImportErrors, but for different symbols in different modules; no shared concrete code-path bug is evident." }, { "left": "issue:43388", "right": "issue:43526", "accept": false, - "reason": "Both mention label handling, but one is gather_for_metrics truncation and the other is BeitImageProcessorFast reduce_labels behavior." + "reason": "Both involve labels, but one drops tuple elements in gather_for_metrics while the other truncates labels in BeitImageProcessorFast." }, { "left": "issue:43873", "right": "issue:44464", "accept": false, - "reason": "Quantization/offloading problem vs. compiled-forward chunked generation inconsistency; not the same bug." 
+ "reason": "Quantization/offloading behavior and compiled-forward chunked generation are different bugs." }, { "left": "issue:43572", "right": "issue:44987", "accept": false, - "reason": "Missing pad_token_idx in config vs. model-loading failure for a specific checkpoint; unrelated." + "reason": "Missing pad_token_idx in StableLmConfig and loading physical-intelligence/fast are unrelated failures." }, { "left": "issue:43844", "right": "issue:44936", "accept": false, - "reason": "DeepSpeed ZeRO-3 gradient explosion vs. trainer.evaluate() after train; different training lifecycle issues." + "reason": "ZeRO-3 gradient growth and trainer.evaluate() failure after train are different training/runtime problems." }, { "left": "issue:43572", "right": "issue:43957", "accept": false, - "reason": "Config field regression vs. meta-device model-loading breakage; separate problems." + "reason": "Config regression for StableLm and meta-device loading breakage are distinct issues." }, { "left": "issue:43824", "right": "issue:43975", "accept": false, - "reason": "Qwen2.5-VL import failure vs. DeepSeek detokenization regression; different model families and failures." + "reason": "Model import/export error and DeepSeek detokenization regression are unrelated." }, { "left": "issue:44514", "right": "issue:44521", "accept": false, - "reason": "Both concern apply_chat_template, but one crashes on batched padding=False input while the other yields empty assistant masks; related but not the same concrete bug." + "reason": "Both touch apply_chat_template, but one is a batched padding=False crash and the other returns all-zero assistant masks; not the same concrete bug." }, { "left": "issue:44360", "right": "issue:44561", "accept": false, - "reason": "DSA/ReLU discussion vs. removal of is_torch_fx_available breaking trust_remote_code models; unrelated." + "reason": "A missing ReLU in DSA indexer and removal of is_torch_fx_available are unrelated." 
}, { "left": "issue:43295", "right": "issue:43450", "accept": false, - "reason": "Processor.tokenizer/image handling regression vs. batched video-shape bug; both in processors, but different modalities and code paths." + "reason": "Processor.tokenizer regression and batched video-shape bug affect different paths and fixes." }, { "left": "issue:38175", "right": "issue:45412", "accept": false, - "reason": "SigLIP2 zero-probability outputs vs. RT-DETR GC/memory leak; unrelated." + "reason": "SigLIP2 zero probabilities and RT-DETR memory release are unrelated model issues." }, { "left": "issue:43295", "right": "issue:43873", "accept": false, - "reason": "Custom processor regression vs. quantization/offloading bug; different subsystems." + "reason": "Custom processor API breakage and quantization/offloading behavior are different bugs." }, { "left": "issue:42915", "right": "issue:43824", "accept": false, - "reason": "Qwen3MoE FP8 config failure vs. Qwen2.5-VL import failure; different models and symptoms." + "reason": "Qwen3Moe FP8 failure and Qwen2_5_VL import error are unrelated." }, { - "left": "issue:43295", - "right": "issue:43994", + "left": "issue:44442", + "right": "issue:45198", "accept": false, - "reason": "Processor/tokenizer regression vs. SigLIP2 nonsensical outputs; different affected areas." + "reason": "FastSpeech2ConformerTokenizer loading and Wav2Vec2 save/tokenization failures are different speech-tokenizer bugs." }, { - "left": "issue:44442", - "right": "issue:45198", + "left": "issue:43819", + "right": "issue:45083", "accept": false, - "reason": "AutoTokenizer loading failure for one tokenizer vs. save_pretrained/tokenization failure for another audio model; not a duplicate." + "reason": "DAC latent/STE mismatch and qwen3_omni_moe helper behavior are unrelated." + }, + { + "left": "issue:43295", + "right": "issue:43994", + "accept": false, + "reason": "Processor.tokenizer regression and SigLIP2 nonsensical outputs are not the same bug." 
}, { "left": "issue:43827", "right": "issue:43901", "accept": false, - "reason": "Both are docs inconsistencies, but they refer to different deprecated arguments and different pipeline docs." + "reason": "One is docs still mentioning pipeline(), the other is docs still mentioning return_all_scores; related only at a documentation level, not the same change." }, { "left": "issue:38175", "right": "issue:43493", - "accept": true, - "reason": "Both point to the same SigLIP2 model-implementation bug; the zero-probability report is consistent with the broader HF-vs-JAX discrepancy." + "accept": false, + "reason": "Zero probabilities in SigLIP2 and HF-vs-JAX discrepancy are different manifestations and likely different fixes." }, { "left": "issue:43278", "right": "issue:43856", "accept": false, - "reason": "Embedding dtype drift in eval vs. memory inefficiency in Qwen3 MoE training; distinct issues." + "reason": "Embedding dtype drift in evaluation and Qwen3 MoE memory inefficiency are unrelated." }, { "left": "issue:44206", "right": "issue:45584", "accept": false, - "reason": "Feature extractor center-arg crash vs. Whisper empty-transcription generation failure; different code paths." + "reason": "Unsupported center argument in LaserFeatureExtractor and Whisper empty-transcription failure are different code paths." }, { "left": "issue:43653", "right": "issue:44112", "accept": false, - "reason": "BigBirdTokenizer special-token registration bug vs. a GraniteSpeech CI device-override test; unrelated." + "reason": "BigBirdTokenizer special-token registration and GraniteSpeech CI flakiness are unrelated." }, { - "left": "issue:44462", - "right": "issue:44561", + "left": "issue:43450", + "right": "issue:43493", "accept": false, - "reason": "AutoTokenizer ignoring tokenizer.json vs. removal of is_torch_fx_available breaking remote code; separate regressions." + "reason": "Batched video processor shape bug and SigLIP2 implementation discrepancy are different issues." 
}, { "left": "issue:45127", "right": "issue:45245", "accept": false, - "reason": "LoRA merge / tied-weights collapse vs. category-count limit runtime error; unrelated." + "reason": "LoRA merge collapse and category-count limit are unrelated." }, { - "left": "issue:43450", - "right": "issue:43493", + "left": "issue:44462", + "right": "issue:44561", "accept": false, - "reason": "Video processor batched-shape bug vs. SigLIP2 implementation discrepancy; different subsystems." + "reason": "AutoTokenizer ignoring tokenizer.json and removal of is_torch_fx_available are different regressions." }, { "left": "issue:44561", "right": "issue:44779", "accept": false, - "reason": "Remote-code breakage from removed helper vs. DeepSeek tokenizer regression; distinct issues." + "reason": "trust_remote_code breakage from a removed helper and DeepSeek tokenizer output changes are unrelated." }, { "left": "issue:43819", "right": "issue:45083", "accept": false, - "reason": "DAC forward/from_latents mismatch vs. Qwen3 Omni MoE feature-length helper behavior; not the same bug." + "reason": "Duplicate of the earlier pairing: DAC.from_latents mismatch and qwen3_omni_moe helper behavior are not the same bug." }, { "left": "issue:45290", "right": "issue:45381", "accept": false, - "reason": "Chat-template tool-call crash vs. Qwen2.5-VL video vision_position_ids bug; separate multimodal regressions." + "reason": "Chat-template crash with tool calls and wrong video vision_position_ids are different multimodal bugs." }, { "left": "issue:43450", "right": "issue:43575", "accept": false, - "reason": "Batched video processor shape issue vs. tensor-parallel OOM on model load; unrelated." + "reason": "Video batch-shape bug and Qwen2-57B TP OOM are unrelated." } ] }, - "evaluator_result": { - "accept": true, - "feedback": "The cluster summary is grounded in the packet, and the soft-edge calls are generally conservative. 
The only accepted duplicate pair, issue:38175 \u2194 issue:43493, is well supported by the SigLIP2 titles, while the rest are reasonably treated as related but distinct." - }, - "evaluator_used": true, + "evaluator_result": null, + "evaluator_used": false, "retried": false, - "accepted_nontrivial_soft_edge": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -7047,12 +7104,12 @@ ], "soft_pairs": [ "issue:37428|issue:43425", - "issue:43819|issue:44112", "issue:44610|issue:44857", + "issue:43819|issue:44112", "issue:44857|issue:45593", "issue:43278|issue:43825", - "issue:45200|issue:45588", "issue:43262|issue:43450", + "issue:45200|issue:45588", "issue:43606|issue:45083", "issue:45310|issue:45478", "issue:44448|issue:44743", @@ -7066,259 +7123,266 @@ "issue:43335|issue:43653", "issue:29127|issue:32090", "issue:44292|issue:45520", - "issue:43335|issue:45083", "issue:44448|issue:44779", + "issue:43335|issue:45083", "issue:44855|issue:45083", "issue:39692|issue:44493", "issue:43335|issue:43504", - "issue:38175|issue:44493", "issue:43550|issue:44112", - "issue:44568|issue:45491", + "issue:38175|issue:44493", "issue:43723|issue:44554", "issue:44977|issue:45229", - "issue:40444|issue:43526", + "issue:44568|issue:45491", "issue:43335|issue:45072", + "issue:40444|issue:43526", "issue:43335|issue:45593", - "issue:43720|issue:44112" + "issue:43720|issue:44112", + "issue:44448|issue:44977" ], - "prepared_review_unit_hash": "ecdd49ce1a0682c54be375b38bfddb1d2ef97ef70827be4aff934b013ad283b6", + "prepared_review_unit_hash": "afe43b7a3fa0947d1c04b55f7eef29233597301c101d106f9fef07373a4a78d1", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 47, "item_count": 47, - 
"soft_pair_count": 34, - "serialized_chars": 37450, - "estimated_input_tokens": 9363, - "estimated_eval_tokens": 18982 + "soft_pair_count": 35, + "serialized_chars": 37750, + "estimated_input_tokens": 9438, + "estimated_eval_tokens": 19132 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a heterogeneous bundle of unrelated issues. The only clear duplicate pair is the repeated Qwen3.5 MoE `from_pretrained` failure reported in issues 45310 and 45478; the rest are different bugs or regressions and should not be merged together.", + "summary": "Most pairs are related only at a broad subsystem level and should not be merged. The only clear duplicate in this cluster is the repeated Qwen3.5 MoE `from_pretrained` regression (`issue:45310` / `issue:45478`).", "confidence": 0.94, - "canonical_issue_reason": "issue:45310 is the best canonical issue because it is the earlier report of the same Qwen3.5 MoE `from_pretrained` error, and issue:45478 is a near-identical follow-up duplicate.", + "canonical_issue_reason": "`issue:45310` is the earliest report of the same Qwen3.5 MoE `from_pretrained` failure and appears to be the primary discussion thread for that bug.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45310 is the strongest representative issue in this set, but only for the 45310/45478 duplicate pair; it should not be treated as a canonical for the whole cluster since the remaining items are unrelated.", + "best_issue_reason": "`issue:45310` is the best canonical issue candidate because it is the first/central report of the exact same error as `issue:45478`.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:37428", "right": "issue:43425", "accept": false, - "reason": "Different problems: one is a missing `_flash_supports_window_size` import, the other is a broad Torch 2.10 incompatibility report." 
+ "reason": "Both are import/compatibility complaints, but they point to different failures: a missing flash-attention symbol vs broad Torch 2.10 incompatibility." }, { - "left": "issue:43819", - "right": "issue:44112", + "left": "issue:44610", + "right": "issue:44857", "accept": false, - "reason": "Unrelated failures: DAC latent reconstruction mismatch vs a GraniteSpeech CI device-override test issue." + "reason": "Different concrete bugs: OmDet-Turbo input-size mismatch versus LwDetrImageLoss AMP/CUDA crash." }, { - "left": "issue:44610", - "right": "issue:44857", + "left": "issue:43819", + "right": "issue:44112", "accept": false, - "reason": "Different code paths: OmDet-Turbo processor input sizing vs LwDetrImageLoss AMP/CUDA crash." + "reason": "Unrelated code paths: DAC latent reconstruction mismatch vs a GraniteSpeech CI device-override test." }, { "left": "issue:44857", "right": "issue:45593", "accept": false, - "reason": "One is an AMP/CUDA crash, the other is missing auxiliary losses when denoising is off; not the same bug." + "reason": "Both touch detection training, but one is an AMP/CUDA loss crash and the other is missing auxiliary losses when denoising is off." }, { "left": "issue:43278", "right": "issue:43825", "accept": false, - "reason": "Different symptoms and components: dtype mismatch during evaluate vs a pipeline error message about unsupported translation tasks." + "reason": "Different problems: dtype drift in evaluation versus a pipeline error-message regression about translation support." }, { - "left": "issue:45200", - "right": "issue:45588", + "left": "issue:43262", + "right": "issue:43450", "accept": false, - "reason": "Different issues in multimodal/flash-attention handling; no shared concrete failing code path." + "reason": "Audio chat-template sampling-rate handling is not the same bug as batched video processor shape handling." 
}, { - "left": "issue:43262", - "right": "issue:43450", + "left": "issue:45200", + "right": "issue:45588", "accept": false, - "reason": "Audio chat-template sampling-rate default vs batched video processor output shapes are unrelated." + "reason": "Different models and failures: Gemma 4 token-type defaults vs flash-attention crash on sink-less models." }, { "left": "issue:43606", "right": "issue:45083", "accept": false, - "reason": "CPU offload device mismatch in bark-small is not the same as the qwen3_omni_moe feature-length helper behavior." + "reason": "CPU offload device mismatch in Bark is unrelated to the qwen3_omni_moe feature-length helper behavior." }, { "left": "issue:45310", "right": "issue:45478", "accept": true, - "reason": "Same underlying bug: both report `transformers>=5.4.0` Qwen3.5 MoE `from_pretrained` loading failure with essentially identical title/body." + "reason": "Same titled Qwen3.5 MoE `from_pretrained` error with the same transformers>=5.4.0 regression." }, { "left": "issue:44448", "right": "issue:44743", "accept": false, - "reason": "Different regressions: Pegasus v4/v5 output drift vs recurrent-state reset in modular Qwen3.5 code with cache." + "reason": "Pegasus v4/v5 output drift and qwen3_5 recurrent-state reset are distinct correctness issues." }, { "left": "issue:29942", "right": "issue:32090", "accept": false, - "reason": "Flash Attention 2 test failures and a Trainer `_gpu_broadcast_one` NoneType error are unrelated." + "reason": "Flash Attention 2 test failures and a Trainer `_gpu_broadcast_one` NoneType error are different bugs." }, { "left": "issue:37428", "right": "issue:44351", "accept": false, - "reason": "Both are import errors, but for different symbols (`_flash_supports_window_size` vs `HybridCache`) and likely different fixes." + "reason": "Both are import errors, but they involve different missing symbols and different root causes." 
}, { "left": "issue:43335", "right": "issue:43550", "accept": false, - "reason": "SwitchTransformers sparse-layer configuration bug is unrelated to Bamba-9B-v2 torch.compile/SDPA failure." + "reason": "SwitchTransformers sparse-layer construction and Bamba torch.compile/SDPA failure are unrelated code-paths." }, { "left": "issue:39692", "right": "issue:43526", "accept": false, - "reason": "SigLIP2 docs/model mismatch and BeitImageProcessorFast `reduce_labels` returning one label are different bugs." + "reason": "SigLIP2 docs/quantization problems are not the same as BeitImageProcessorFast `reduce_labels` returning only one label." }, { "left": "issue:43611", "right": "issue:43931", "accept": false, - "reason": "Base-model-prefix loading breakage and Qwen3-VL weight-shape mismatch are distinct load failures." + "reason": "General v5 `base_model_prefix` loading breakage is different from Qwen3-VL weight-shape mismatch." }, { "left": "issue:43819", "right": "issue:44493", "accept": false, - "reason": "DAC forward/latent mismatch is unrelated to the generic `UNEXPECTED Key with position id` issue." + "reason": "DAC latent reconstruction mismatch and unexpected position-id keys are separate model behaviors." }, { "left": "issue:45200", "right": "issue:45381", "accept": false, - "reason": "Different multimodal bugs: missing default `mm_token_type_ids` vs wrong `vision_position_ids` for Qwen2.5-VL video input." + "reason": "Both are Gemma/Qwen-VL-related, but one is `mm_token_type_ids` defaults and the other is video `vision_position_ids`." }, { "left": "issue:43335", "right": "issue:43653", "accept": false, - "reason": "SwitchTransformers config bug and BigBirdTokenizer mask-token registration are unrelated." + "reason": "Sparse-layer config bug and BigBird mask-token registration are different tokenizer/model issues." 
}, { "left": "issue:29127", "right": "issue:32090", "accept": false, - "reason": "LayoutLMv3 error-message clarity issue is not the same as the Trainer `_gpu_broadcast_one` NoneType crash." + "reason": "LayoutLMv3 box-content error messaging is unrelated to the Trainer GPU broadcast NoneType failure." }, { "left": "issue:44292", "right": "issue:45520", "accept": false, - "reason": "Qwen-3-8B-NVFP4 runtime error and Python 3.13 `flash_attn` import KeyError are different failures." + "reason": "Qwen-3-8B-NVFP4 runtime failure and Python 3.13 `flash_attn` import lookup are different problems." }, { - "left": "issue:43335", - "right": "issue:45083", + "left": "issue:44448", + "right": "issue:44779", "accept": false, - "reason": "Sparse-layer config bug in SwitchTransformers is unrelated to the qwen3_omni_moe feature-extraction helper behavior." + "reason": "Pegasus output differences and Deepseek tokenizer regressions are separate model/tokenizer issues." }, { - "left": "issue:44448", - "right": "issue:44779", + "left": "issue:43335", + "right": "issue:45083", "accept": false, - "reason": "Pegasus output drift between v4/v5 is unrelated to DeepSeek tokenizer changes in v5." + "reason": "SwitchTransformers sparse-layer config and qwen3_omni_moe feature-length helper behavior do not share the same bug." }, { "left": "issue:44855", "right": "issue:45083", "accept": false, - "reason": "Python 3.13 `IndentationError` from `@torch.jit.script` parsing is a different problem than qwen3_omni_moe feature-length behavior." + "reason": "Python 3.13 `@torch.jit.script` parsing failure is unrelated to the qwen3_omni_moe helper function." }, { "left": "issue:39692", "right": "issue:44493", "accept": false, - "reason": "SigLIP2 documentation errors do not match the generic position-id key warning issue." + "reason": "SigLIP2 docs/quantization errors are not the same as widespread unexpected position-id key warnings." 
}, { "left": "issue:43335", "right": "issue:43504", "accept": false, - "reason": "SwitchTransformers sparse-layer creation bug is unrelated to BEiT pretrained preset loading with a legacy field." - }, - { - "left": "issue:38175", - "right": "issue:44493", - "accept": false, - "reason": "Zero probabilities in SigLIP2 and unexpected position-id keys are different symptoms with different roots." + "reason": "Sparse-layer creation and BEiT semantic-segmentation legacy-field loading are different bugs." }, { "left": "issue:43550", "right": "issue:44112", "accept": false, - "reason": "Bamba torch.compile/SDPA issue is unrelated to GraniteSpeech CI stale device override behavior." + "reason": "Bamba torch.compile/SDPA failure and GraniteSpeech stale CI device override are unrelated." }, { - "left": "issue:44568", - "right": "issue:45491", + "left": "issue:38175", + "right": "issue:44493", "accept": false, - "reason": "Tokenizer special-token behavior and Gemma3 NaN embeddings under mixed-length batching are different bugs." + "reason": "Unexpected zero probabilities in SigLIP2 and unexpected position-id keys are different symptoms with different causes." }, { "left": "issue:43723", "right": "issue:44554", "accept": false, - "reason": "Tokenizer loading in v5 and MPS attention correctness with mismatched head dims are unrelated." + "reason": "Tokenizer loading in v5 is unrelated to an MPS attention correctness issue." }, { "left": "issue:44977", "right": "issue:45229", "accept": false, - "reason": "Qwen3.5 flash-attention generation issue and Gemma4 multi-GPU OOM are different failure modes." + "reason": "Qwen3.5 flash-attention generation issues and Gemma4 multi-GPU CUDA OOM are not the same failure." }, { - "left": "issue:40444", - "right": "issue:43526", + "left": "issue:44568", + "right": "issue:45491", "accept": false, - "reason": "Qwen2.5-VL iterable-dataset multi-image finetuning failure is unrelated to BeitImageProcessorFast `reduce_labels`." 
+ "reason": "mDeBERTa special-token handling and Gemma3 NaN embeddings from sliding-window attention are different bugs." }, { "left": "issue:43335", "right": "issue:45072", "accept": false, - "reason": "SwitchTransformers sparse-layer bug is not the same as the bfloat16 dtype mismatch in SwitchTransformers/TimmWrapperModel inference." + "reason": "SwitchTransformers sparse-layer config and dtype mismatches in bfloat16 inference are unrelated." + }, + { + "left": "issue:40444", + "right": "issue:43526", + "accept": false, + "reason": "Qwen2.5-VL iterable-dataset multi-image finetuning failure is not the same as BeitImageProcessorFast label reduction." }, { "left": "issue:43335", "right": "issue:45593", "accept": false, - "reason": "SwitchTransformers config bug and D-FINE auxiliary-loss behavior are unrelated." + "reason": "SwitchTransformers sparse-layer creation and D-FINE auxiliary-loss suppression are distinct issues." }, { "left": "issue:43720", "right": "issue:44112", "accept": false, - "reason": "BitNet packed-weight unpacking during accelerate loading is not the same as the GraniteSpeech CI device override failure." + "reason": "BitNet packed-weight unpacking during accelerate loading is unrelated to the GraniteSpeech CI device-override failure." + }, + { + "left": "issue:44448", + "right": "issue:44977", + "accept": false, + "reason": "Pegasus v4/v5 output drift and Qwen3.5 flash-attention generation are different model-specific regressions." } ] }, "evaluator_result": { "accept": true, - "feedback": "The analysis is well grounded in the packet and stays conservative. The only clearly supported duplicate pair is issue:45310/45478, and the summary correctly treats the rest as heterogeneous rather than trying to force cluster-wide canonicality. The soft-edge verdicts are appropriately cautious and do not overstate evidence." 
+ "feedback": "The canonical match is well grounded: `issue:45310` and `issue:45478` have the same title, same version context, and `45310` is earlier. The soft-edge verdicts are generally conservative and supported by the packet metadata, with reasons that mostly rely on clearly different titles/codes paths rather than assuming deeper evidence. I don\u2019t see an overstatement that would warrant rejection." }, "evaluator_used": true, "retried": false, @@ -7389,7 +7453,6 @@ ], "soft_pairs": [ "issue:43066|issue:44462", - "issue:44448|issue:44977", "issue:34689|issue:35141", "issue:44977|issue:45563", "issue:43927|issue:45698", @@ -7403,238 +7466,238 @@ "issue:43412|issue:45588", "issue:43550|issue:45083", "issue:43873|issue:44493", - "issue:43873|issue:45229", "issue:29942|issue:35141", + "issue:43873|issue:45229", "issue:43278|issue:43329", "issue:45372|issue:45459", "issue:45372|issue:45406", "issue:32090|issue:33357", "issue:41720|issue:45229", "issue:43065|issue:43994", - "issue:43065|issue:45412", "issue:44448|issue:44466", + "issue:43065|issue:45412", "issue:44451|issue:45520", "issue:25251|issue:32090", "issue:43653|issue:44743", - "issue:43421|issue:45491", "issue:44857|issue:45588", + "issue:43421|issue:45491", "issue:44186|issue:45198", "issue:43994|issue:45412" ], - "prepared_review_unit_hash": "4df963f96764a8402df8c6e6deba2fcd9b56703c392328e8ba276a92cc3278ca", + "prepared_review_unit_hash": "1f53ae72546a6296445f14374aab2a46e7f8f765b42b2089ba9c98d72d20e5fa", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 47, "item_count": 47, - "soft_pair_count": 32, - "serialized_chars": 36809, - "estimated_input_tokens": 9203, - "estimated_eval_tokens": 18662 + "soft_pair_count": 31, + "serialized_chars": 
36509, + "estimated_input_tokens": 9128, + "estimated_eval_tokens": 18512 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is heterogeneous: the items span unrelated bugs in tokenization, model loading, generation, offloading, dtype handling, and export. No soft pair looks like the same underlying issue, so all candidate duplicate links should be rejected.", - "confidence": 0.98, - "canonical_issue_reason": "No canonical issue stands out because these reports are not one duplicate family; they cover different subsystems and failure modes, so consolidating them into a single issue would be misleading.", + "summary": "The set is heterogeneous and does not form a real duplicate cluster. The soft edges are mostly broad thematic similarities (tokenizers, flash-attn, save/load, dtype/offload), but they do not look like the same underlying bug or change.", + "confidence": 0.86, + "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "No single issue is a good global representative for this cluster; the most similar titles still describe different bugs, not one shared code-path regression.", + "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43066", "right": "issue:44462", "accept": false, - "reason": "Different tokenizer bugs: decoder type handling in v5 vs ignoring tokenizer.json on load." - }, - { - "left": "issue:44448", - "right": "issue:44977", - "accept": false, - "reason": "Different models and symptoms: Pegasus output drift vs Qwen3.5 flash-attention generation failure." + "reason": "Both touch tokenizer loading, but one is a v5 decoder-type issue and the other is AutoTokenizer ignoring tokenizer.json; different root causes." 
}, { "left": "issue:34689", "right": "issue:35141", "accept": false, - "reason": "One is a model-loading regression for Llama 3.2 Vision; the other is post_init reinitializing resized embeddings." + "reason": "Different failures: model loading for Llama 3.2 vision vs output embedding reinit after resize in post_init." }, { "left": "issue:44977", "right": "issue:45563", "accept": false, - "reason": "Flash-attention generation bug vs a stale warning in paged generate(); not the same code path." + "reason": "Generation with flash-attention vs a stale paged-generate warning; not the same bug." }, { "left": "issue:43927", "right": "issue:45698", "accept": false, - "reason": "Both involve save/load behavior, but one is custom token IDs and the other is loading the wrong custom module." + "reason": "Both involve save/load behavior, but one loses custom token IDs and the other loads the wrong custom module." }, { "left": "issue:45372", "right": "issue:45588", "accept": false, - "reason": "Gemma4 processor import failure vs flash_attention.py crashing on s_aux=None; unrelated modules." + "reason": "Different subsystems and symptoms: Gemma 4 processor import/loading vs flash_attention.py crashing on s_aux=None." }, { "left": "issue:37428", "right": "issue:44987", "accept": false, - "reason": "Missing flash-support helper import vs a specific model load failure for physical-intelligence/fast." + "reason": "Shared flash-attn context, but one is a missing internal import and the other is a model load failure." }, { "left": "issue:43606", "right": "issue:44857", "accept": false, - "reason": "CPU offload device mismatch is unrelated to the float16 AMP CUDA crash in LwDetrImageLoss." + "reason": "Both are device/dtype crashes, but they affect different models and code paths." }, { "left": "issue:44857", "right": "issue:45072", "accept": false, - "reason": "Both mention dtype issues, but they affect different models and different failure modes." 
+ "reason": "Both mention dtype mismatches, but the affected models and failure mechanics are unrelated." }, { "left": "issue:43701", "right": "issue:45083", "accept": false, - "reason": "Checkpoint key mismatch is unrelated to the qwen3_omni_moe helper-function bug." + "reason": "Checkpoint key mismatch is unrelated to the qwen3_omni_moe feature-extract length helper bug." }, { "left": "issue:43976", "right": "issue:45440", "accept": false, - "reason": "Python version compatibility regression vs native DeepseekV3MoE divergence; different problem classes." + "reason": "Environment compatibility issue vs model implementation divergence; not the same underlying defect." }, { "left": "issue:45245", "right": "issue:45563", "accept": false, - "reason": "Category-count limit error is not the same as the stale num_return_sequences warning." + "reason": "Unrelated bugs: category-count runtime limit vs a stale generate warning." }, { "left": "issue:43412", "right": "issue:45588", "accept": false, - "reason": "Export/tracing guard and erfinv support issue vs an attention-path AttributeError; unrelated." + "reason": "Export-time guard/erfinv problem is unrelated to the sink-less flash_attention AttributeError." }, { "left": "issue:43550", "right": "issue:45083", "accept": false, - "reason": "torch.compile/SDPA failure in Bamba is unrelated to qwen3_omni_moe length computation." + "reason": "Different code paths: torch.compile+SDPA on Bamba vs a helper-function behavior issue." }, { "left": "issue:43873", "right": "issue:44493", "accept": false, - "reason": "Quantization/offloading behavior is a different bug from unexpected position-id keys." + "reason": "Quantization/offload behavior and unexpected position-id keys are different bugs." }, { - "left": "issue:43873", - "right": "issue:45229", + "left": "issue:29942", + "right": "issue:35141", "accept": false, - "reason": "Offloading/quantization issue vs Gemma4 multi-GPU OOM; not the same defect." 
+ "reason": "FlashAttention test failures and embedding resize reinitialization are unrelated regressions." }, { - "left": "issue:29942", - "right": "issue:35141", + "left": "issue:43873", + "right": "issue:45229", "accept": false, - "reason": "Flash Attention 2 test failures are unrelated to embedding reinitialization after resize." + "reason": "Both are memory-related at a high level, but offloading/quantization and multi-GPU OOM are distinct problems." }, { "left": "issue:43278", "right": "issue:43329", "accept": false, - "reason": "Embedding dtype drift in eval vs undefined variables in video multimodal token counting." + "reason": "Training/evaluate dtype drift is unrelated to undefined video-token helper variables." }, { "left": "issue:45372", "right": "issue:45459", "accept": false, - "reason": "Gemma4 processor import breakage is unrelated to tokenizer error masking when protobuf is absent." + "reason": "Both are tokenizer/processor loading adjacent, but one is a missing dependency import and the other is exception masking." }, { "left": "issue:45372", "right": "issue:45406", "accept": false, - "reason": "ImportError for ReasoningEffort is a different failure than Gemma4Processor missing _tokenizer in serve." + "reason": "Both affect Gemma 4 processor loading, but they fail at different points with different causes." }, { "left": "issue:32090", "right": "issue:33357", "accept": false, - "reason": "Trainer _gpu_broadcast_one NoneType error vs MacOS bus error in CLIP loading; different code paths." + "reason": "Trainer distributed broadcast error and MacOS bus error are unrelated." }, { "left": "issue:41720", "right": "issue:45229", "accept": false, - "reason": "Qwen3 auto device-mapping cudaErrorAssert is not the same as Gemma4 multi-GPU OOM." + "reason": "Both are CUDA/multi-GPU failures, but one is a cudaErrorAssert and the other is OOM." 
}, { "left": "issue:43065", "right": "issue:43994", "accept": false, - "reason": "Dummy Conv2d in Sam3PixelDecoder is unrelated to ScandiBERT loading failure." + "reason": "Different model-specific bugs: dummy Conv2d in Sam3PixelDecoder vs nonsensical outputs in Siglip2." }, { - "left": "issue:43065", - "right": "issue:45412", + "left": "issue:44448", + "right": "issue:44466", "accept": false, - "reason": "A placeholder Conv2d bug is unrelated to RT-DETR memory not being released." + "reason": "Both are v4/v5 regressions, but one changes generation output and the other is serialization of lm_head.weight." }, { - "left": "issue:44448", - "right": "issue:44466", + "left": "issue:43065", + "right": "issue:45412", "accept": false, - "reason": "Output drift between v4/v5 Pegasus is not the same as lm_head.weight serialization depending on device." + "reason": "One is a placeholder module issue, the other is a memory leak on RT-DETR; unrelated." }, { "left": "issue:44451", "right": "issue:45520", "accept": false, - "reason": "ScandiBERT loading failure is unrelated to the Python 3.13 flash_attn KeyError." + "reason": "Model load failure for ScandiBERT vs Python 3.13 flash_attn import KeyError are different problems." }, { "left": "issue:25251", "right": "issue:32090", "accept": false, - "reason": "Pipeline top_k nesting issue is unrelated to the Trainer broadcast TypeError." + "reason": "Pipeline output shape regression and a trainer broadcast type error are unrelated." }, { "left": "issue:43653", "right": "issue:44743", "accept": false, - "reason": "BigBirdTokenizer special-token decoding bug is unrelated to qwen3_5 recurrent-state reset behavior." + "reason": "Tokenizer special-token registration bug vs recurrent cache state reset bug; different subsystems." + }, + { + "left": "issue:44857", + "right": "issue:45588", + "accept": false, + "reason": "Both are runtime crashes, but one is AMP/dtype-related and the other is a flash_attention auxiliary-state None crash." 
}, { "left": "issue:43421", "right": "issue:45491", "accept": false, - "reason": "Runtime post-processor update bug is unrelated to Gemma3 NaN embeddings with sliding-window attention." + "reason": "Special-token post-processor updates and Gemma3 NaN embeddings are unrelated." }, { "left": "issue:44186", "right": "issue:45198", "accept": false, - "reason": "LayoutLMv2 tokenizer padding/truncation crash and Wav2Vec2 save_pretrained/tokenization failure are different bugs." + "reason": "Different tokenizer/model families and different failure modes." }, { "left": "issue:43994", "right": "issue:45412", "accept": false, - "reason": "ScandiBERT loading failure is unrelated to RT-DETR memory-leak behavior." + "reason": "Inference correctness regression and a model memory leak are not the same bug." } ] }, @@ -7717,12 +7780,12 @@ "issue:44871|issue:44977", "issue:43257|issue:43278", "issue:44610|issue:45412", - "issue:45440|issue:45593", "issue:29942|issue:30064", "issue:43824|issue:44279", "issue:45103|issue:45372", - "issue:33357|issue:39692", + "issue:45440|issue:45593", "issue:29127|issue:34567", + "issue:33357|issue:39692", "issue:32090|issue:33290", "issue:43334|issue:43531", "issue:45072|issue:45229", @@ -7738,20 +7801,20 @@ "issue:43232|issue:43388", "issue:43761|issue:44079" ], - "prepared_review_unit_hash": "e41c9e1678e739bca6841a1f292400d3f14262fbbd77952f8a6d385b819a2c5b", + "prepared_review_unit_hash": "c06e97cb6588923f6962a1fbc7af4e35cfee6a9fbb66eccdec22620b83e0bf37", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 29, - "serialized_chars": 36448, + "serialized_chars": 36446, "estimated_input_tokens": 9112, "estimated_eval_tokens": 18480 }, @@ -7760,186 
+7823,186 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster looks noisy and mostly heterogeneous: tokenizers, processors, generation, quantization, evaluation, and trainer bugs are all mixed together. None of the soft-edge pairs look like true duplicates, so all soft edges are rejected.", - "confidence": 0.24, - "canonical_issue_reason": "Issue 36010 is the broadest, most core-library regression here (a central import failure in the generation API) and is clearer as a representative issue than the many model-specific reports.", + "summary": "This cluster is highly heterogeneous: the soft pairs span unrelated bugs in tokenizers, generation, training, vision processors, model loading, and docs. I rejected every soft edge because none look like the same underlying defect or fixable in one PR.", + "confidence": 0.98, + "canonical_issue_reason": "No single canonical issue stands out; the items are not duplicates and do not share one underlying bug or code path.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 36010 has the strongest combination of breadth, clarity, and likely downstream impact, making it the best single issue representative of this set.", + "best_issue_reason": "No global best issue either: the cluster is too mixed to nominate one issue as a representative duplicate target.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:42915", "right": "issue:42994", "accept": false, - "reason": "Both are quantization-related, but one is an FP8 config failure and the other is quantized-model saving; different bugs." + "reason": "Different problems: FineGrainedFP8Config failure in Qwen3MoE vs quantized model saving." }, { "left": "issue:43278", "right": "issue:43381", "accept": false, - "reason": "BF16/FP32 eval dtype drift and a gradient-checkpointing eval restriction are separate code-path issues." 
+ "reason": "Embedding dtype drift in eval is unrelated to gradient checkpointing being disallowed in eval mode." }, { "left": "issue:43756", "right": "issue:45440", "accept": false, - "reason": "Both concern model parity, but they affect different models and different mechanisms, so not the same bug." + "reason": "Smollm3 RoPE-layer mismatch and DeepseekV3 implementation divergence are different model-specific issues." }, { "left": "issue:44361", "right": "issue:45198", "accept": false, - "reason": "Different tokenizer failures in different models; too broad to be duplicates." + "reason": "MLukeTokenizer task AttributeError and Wav2Vec2 save/tokenization failures affect different tokenization paths." }, { "left": "issue:44206", "right": "issue:44479", "accept": false, - "reason": "Processor center-arg crash and video-input regression are unrelated processor bugs." + "reason": "Unsupported `center` arg in LasrFeatureExtractor is unrelated to Qwen video-input regression." }, { "left": "issue:44265", "right": "issue:45198", "accept": false, - "reason": "torch.export/torch_compilable_check failure is unrelated to save_pretrained/tokenization failure." + "reason": "`torch.export` with `torch_compilable_check` is a different failure mode than Wav2Vec2 save_pretrained/tokenization." }, { "left": "issue:44871", "right": "issue:44977", "accept": false, - "reason": "Eos-token config mismatch and flash-attention generation failure are distinct bugs." + "reason": "Gemma eos_token_id mismatch is unrelated to Qwen3.5 flash-attention generation issues." }, { "left": "issue:43257", "right": "issue:43278", "accept": false, - "reason": "Qwen3 MOE weight conversion and embedding dtype changes are different issues." + "reason": "MoE weight conversion under accelerate+deepspeed is not the same as eval-time embedding dtype changes." 
}, { "left": "issue:44610", "right": "issue:45412", "accept": false, - "reason": "Input-size mismatch and a memory-leak/deallocation problem are not the same underlying bug." - }, - { - "left": "issue:45440", - "right": "issue:45593", - "accept": false, - "reason": "DeepSeek implementation divergence and missing auxiliary losses are separate model/training issues." + "reason": "Processor input-size mismatch and RT-DETR memory not being released are different bugs." }, { "left": "issue:29942", "right": "issue:30064", "accept": false, - "reason": "Failing Flash Attention tests and void segmentation map processing target different failures." + "reason": "Flash Attention 2 test failures and void segmentation-map processing are unrelated." }, { "left": "issue:43824", "right": "issue:44279", "accept": false, - "reason": "A specific Qwen2.5-VL import error is not the same as a generic dependency issue." + "reason": "Missing `Qwen2_5_VLForConditionalGeneration` import is not the same as a generic dependency issue." }, { "left": "issue:45103", "right": "issue:45372", "accept": false, - "reason": "Auto-docstring annotation handling and a missing ReasoningEffort import are unrelated." + "reason": "Auto-docstring annotation handling and mistral_common ReasoningEffort import failures are unrelated." + }, + { + "left": "issue:45440", + "right": "issue:45593", + "accept": false, + "reason": "DeepseekV3 implementation divergence does not match D-FINE auxiliary-loss handling." }, { - "left": "issue:33357", - "right": "issue:39692", + "left": "issue:29127", + "right": "issue:34567", "accept": false, - "reason": "MacOS bus error loading CLIP and SigLIP2 docs/example errors are different failure modes." + "reason": "LayoutLMv3 error clarity and TrainerState token-count tracking are different concerns." 
}, { - "left": "issue:29127", - "right": "issue:34567", + "left": "issue:33357", + "right": "issue:39692", "accept": false, - "reason": "LayoutLMv3 error messaging and TrainerState token counting are unrelated." + "reason": "MacOS CLIP bus error and SigLIP2 documentation mistakes are not the same bug." }, { "left": "issue:32090", "right": "issue:33290", "accept": false, - "reason": "NoneType broadcast errors and deepspeed OOM with Adafactor are different trainer/optimizer problems." + "reason": "`_gpu_broadcast_one` NoneType error in Trainer is unrelated to Adafactor OOM in deepspeed." }, { "left": "issue:43334", "right": "issue:43531", "accept": false, - "reason": "Missing pad_token_id on Qwen3-VL and sliding-window behavior in Qwen3-MoE are separate model bugs." + "reason": "Qwen3-VL pad_token_id load error and Qwen3-MoE sliding_window behavior are separate model issues." }, { "left": "issue:45072", "right": "issue:45229", "accept": false, - "reason": "bfloat16 dtype mismatches and multi-GPU CUDA OOM are not the same underlying issue." + "reason": "dtype mismatches in inference are unrelated to Gemma4 multi-GPU OOM." }, { "left": "issue:43526", "right": "issue:44186", "accept": false, - "reason": "BeitImageProcessorFast label reduction and LayoutLMv2Tokenizer NER/padding crashes are unrelated." + "reason": "BeitImageProcessorFast label reduction bug and LayoutLMv2Tokenizer padding/truncation crash differ in scope and code path." }, { "left": "issue:43653", "right": "issue:44442", "accept": false, - "reason": "BigBirdTokenizer special-token registration and AutoTokenizer loading FastSpeech2ConformerTokenizer are different tokenizer bugs." + "reason": "BigBirdTokenizer special-token registration and AutoTokenizer loading FastSpeech2ConformerTokenizer are distinct tokenizer bugs." 
}, { "left": "issue:43381", "right": "issue:43701", "accept": false, - "reason": "Eval-mode checkpointing limitation and resume_from_checkpoint key mismatch do not share a concrete code path." + "reason": "Gradient checkpointing in eval mode and resume_from_checkpoint key mismatch are unrelated." }, { "left": "issue:44464", "right": "issue:45491", "accept": false, - "reason": "Compiled-forward chunked generation inconsistency and Gemma3 NaN embeddings are distinct generation/attention issues." + "reason": "Compiled-forward chunked generation inconsistency is not the same as Gemma3 NaN embeddings from sliding-window padding." }, { "left": "issue:44977", "right": "issue:45245", "accept": false, - "reason": "Flash-attention generation breakage and a category-count runtime limit are unrelated." + "reason": "Qwen3.5 flash-attention generation and category-count overflow are different failures." }, { "left": "issue:29942", "right": "issue:36010", "accept": false, - "reason": "Flash Attention test failures and GenerationMixin import failure are different regressions." + "reason": "Flash Attention 2 CI failures are unrelated to the GenerationMixin import change." }, { "left": "issue:29127", "right": "issue:33357", "accept": false, - "reason": "LayoutLMv3 error clarity and CLIP bus error on macOS are not duplicates." + "reason": "LayoutLMv3 error-message clarity and CLIP MacOS bus error do not share a code path." }, { "left": "issue:42915", "right": "issue:43931", "accept": false, - "reason": "FP8 config failure in Qwen3MoE and Qwen3-VL weight-shape mismatch are separate model-loading bugs." + "reason": "Qwen3MoE FP8 config and Qwen3-VL weight-shape mismatch are different model-loading bugs." }, { "left": "issue:29127", "right": "issue:35141", "accept": false, - "reason": "LayoutLMv3 error messaging and embedding reinitialization after resize are unrelated." + "reason": "Improving a LayoutLMv3 error message is unrelated to embedding reinitialization after resize." 
}, { "left": "issue:43232", "right": "issue:43388", "accept": false, - "reason": "Generation kwargs update after sync_gpus and metric-gather label dropping are different trainer internals." + "reason": "Generation kwargs sync issue and metric-gathering label truncation are separate trainer paths." }, { "left": "issue:43761", "right": "issue:44079", "accept": false, - "reason": "CLIPVisionModel hidden_states handling and ModelOutput key assignment are separate issues." + "reason": "CLIPVisionModel hidden_states regression and ModelOutput key assignment are different layers of the stack." } ] }, @@ -8023,228 +8086,221 @@ "issue:44743|issue:44811", "issue:43452|issue:45588", "issue:43316|issue:43525", - "issue:43761|issue:44188", - "issue:44479|issue:45290", "issue:43646|issue:44077", + "issue:44479|issue:45290", + "issue:43761|issue:44188", "issue:45588|issue:45600", "issue:44977|issue:45584", - "issue:43931|issue:45216", "issue:43575|issue:43701", + "issue:43931|issue:45216", "issue:42907|issue:45698", "issue:39692|issue:45491", - "issue:43994|issue:44206", "issue:43575|issue:44387", + "issue:43994|issue:44206", "issue:43873|issue:45588", "issue:34689|issue:43611", "issue:43673|issue:44743", "issue:44442|issue:44898", "issue:33290|issue:34567", "issue:43493|issue:43827", - "issue:41628|issue:45372", - "issue:44077|issue:44479" + "issue:41628|issue:45372" ], - "prepared_review_unit_hash": "2ad312bdab7ffaa700a8c2c08312469345e5b334135c687cf615c8118e53d7ba", + "prepared_review_unit_hash": "f43e9c893ed6354abeb45208208a10adb6e8b0d7ccb0a72bc64646abf945ae3b", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, - "soft_pair_count": 29, - "serialized_chars": 36464, - "estimated_input_tokens": 
9116, - "estimated_eval_tokens": 18488 + "soft_pair_count": 28, + "serialized_chars": 36167, + "estimated_input_tokens": 9042, + "estimated_eval_tokens": 18340 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This is a noisy similarity cluster, not a duplicate set: every soft pair describes a different bug, regression, or docs issue, so all soft edges should be rejected.", - "confidence": 0.91, - "canonical_issue_reason": "No canonical issue stands out; the items span unrelated bugs across audio, vision, tokenizer loading, quantization, checkpointing, and model internals, so they should not be collapsed into one duplicate set.", + "summary": "This cluster is highly heterogeneous: it mixes unrelated Transformers bugs around tokenizers, vision/audio processors, model loading, flash-attention, quantization, and save/load regressions. None of the soft pairs look like the same underlying defect, so no duplicates are accepted.", + "confidence": 0.92, + "canonical_issue_reason": "No true duplicate canonical exists for the whole cluster; if one issue must represent it, issue 43873 is the broadest and most active recent regression report, but it is not a duplicate of the others.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45584 is the strongest standalone report: it is open, concrete, and describes a specific Whisper generation regression after `align_special_tokens` with a clear failure mode.", + "best_issue_reason": "Issue 43873 is the best overall representative because it is open, broad, and has the most cluster-like activity/inbound references; it is still only a representative, not a merge point for the other issues.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43262", "right": "issue:44206", "accept": false, - "reason": "Different subsystems and failures: audio chat-template sampling-rate default vs a feature-extractor `center`-argument 
crash." + "reason": "Audio chat-template sampling-rate bug vs. LasrFeatureExtractor center-arg crash; different components and failure modes." }, { "left": "issue:43441", "right": "issue:45588", "accept": false, - "reason": "Both mention flash attention, but they are different bugs in different code paths/models: Ministral-3 FA failure vs `s_aux=None` AttributeError in flash_attention integration." + "reason": "Both mention flash attention, but one is a Ministral/Transformers v5 RC generation bug and the other is an AttributeError on sink-less models; not the same code path." }, { "left": "issue:43526", "right": "issue:44857", "accept": false, - "reason": "Unrelated bugs: `reduce_labels` returning one label in BEiT fast processor vs LwDetrImageLoss crashing under float16 AMP on CUDA." + "reason": "BeitImageProcessorFast label reduction bug vs. LwDetrImageLoss AMP crash; unrelated processor vs. loss issues." }, { "left": "issue:45003", "right": "issue:45440", "accept": false, - "reason": "These are different model/framework issues: unsafe `sys.modules` access in modeling_utils vs DeepSeekV3MoE divergence from the remote implementation." + "reason": "sys.modules access in modeling_utils is unrelated to DeepseekV3MoE divergence from remote implementation." }, { "left": "issue:43723", "right": "issue:45479", "accept": false, - "reason": "Tokenizer loading regression vs a sequence-classification loss bug; no shared code-path." + "reason": "Tokenizer loading regression vs. zero-loss sequence-classification bug; different APIs and root causes." }, { "left": "issue:44442", "right": "issue:45072", "accept": false, - "reason": "FastSpeech2Conformer tokenizer loading failure vs bfloat16 dtype mismatches in inference; different problems." + "reason": "FastSpeech2ConformerTokenizer loading failure vs. bfloat16 dtype mismatch in model inference; not the same bug." 
}, { "left": "issue:43526", "right": "issue:44610", "accept": false, - "reason": "BEiT label reduction bug and OmDet-Turbo processor image-size mismatch are unrelated." + "reason": "Both are processor-related, but one is reduce_labels returning a single label and the other is an image size mismatch in OmDet-Turbo." }, { "left": "issue:44743", "right": "issue:44811", "accept": false, - "reason": "Recurrent state reset with cache vs Whisper `batch_decode()` ignoring `skip_special_tokens`; distinct behaviors." + "reason": "Qwen3.5 recurrent-state reset in cached generation is unrelated to Whisper batch_decode skipping special tokens." }, { "left": "issue:43452", "right": "issue:45588", "accept": false, - "reason": "GGUF tokenizer/model loading breakage is not the same as the flash-attention `s_aux` AttributeError." + "reason": "gguf_file breaks tokenizer/model loading, whereas this is a flash-attention AttributeError on s_aux=None; different subsystems." }, { "left": "issue:43316", "right": "issue:43525", "accept": false, - "reason": "API discrepancy in `Gemma3TextConfig` vs missing `pad_token_id` on `Llama4Config`; different config regressions." + "reason": "Two API discrepancies, but for different configs and missing fields; not one shared bug." }, { - "left": "issue:43761", - "right": "issue:44188", + "left": "issue:43646", + "right": "issue:44077", "accept": false, - "reason": "CLIPVisionModel hidden-states regression and torch.compile attention-kernel divergence are separate bugs." + "reason": "General custom model init regression vs. patchtsmixer post_init API restriction; similar theme, different concrete issue." }, { "left": "issue:44479", "right": "issue:45290", "accept": false, - "reason": "Qwen video-input regression vs chat-template tool-call crash; different input pipelines and failure modes." + "reason": "Video-input regression for Qwen models vs. apply_chat_template crash on tool-call assistant messages; unrelated code paths." 
}, { - "left": "issue:43646", - "right": "issue:44077", + "left": "issue:43761", + "right": "issue:44188", "accept": false, - "reason": "General custom model initialization regression is not the same as forbidding optional `post_init` in patchtsmixer." + "reason": "CLIPVisionModel hidden_states regression vs. torch.compile attention-kernel divergence; distinct defects." }, { "left": "issue:45588", "right": "issue:45600", "accept": false, - "reason": "Flash-attention `AttributeError` and stale `auto_mappings.py` references are unrelated CI/runtime issues." + "reason": "Flash-attention crash on sink-less models vs. auto_mappings.py referencing removed configs; different failure causes." }, { "left": "issue:44977", "right": "issue:45584", "accept": false, - "reason": "Qwen3.5 flash-attention generation bug vs Whisper empty-transcription failure; different models and code paths." + "reason": "Qwen3.5 flash-attention generation bug vs. Whisper empty-transcription generation failure after align_special_tokens; not the same model or path." }, { - "left": "issue:43931", - "right": "issue:45216", + "left": "issue:43575", + "right": "issue:43701", "accept": false, - "reason": "Model weight-shape mismatch on load vs incorrect checkpoint saving in Qwen3.5; related theme, but not the same underlying bug." + "reason": "Tensor-parallel load OOM vs. resume_from_checkpoint key mismatch; unrelated training/loading problems." }, { - "left": "issue:43575", - "right": "issue:43701", + "left": "issue:43931", + "right": "issue:45216", "accept": false, - "reason": "TP load OOM and resume-from-checkpoint key mismatch are distinct training-time issues." + "reason": "Qwen3-VL shape mismatch on load vs. Qwen3.5 save_pretrained checkpoint corruption; load-time vs save-time issues." }, { "left": "issue:42907", "right": "issue:45698", "accept": false, - "reason": "Saving dequantized Ministral/Devstral models is not the same as loading the wrong custom module after `save_pretrained`." 
+ "reason": "Dequantized save failure vs. wrong custom module loaded after save_pretrained; both involve persistence, but not the same bug." }, { "left": "issue:39692", "right": "issue:45491", "accept": false, - "reason": "SigLIP2 docs-example errors and Gemma3 NaN embeddings are unrelated model bugs." + "reason": "SigLIP2 docs-example errors vs. Gemma3 NaN embeddings with mixed-length batching; unrelated models and symptoms." }, { - "left": "issue:43994", - "right": "issue:44206", + "left": "issue:43575", + "right": "issue:44387", "accept": false, - "reason": "SigLIP2 nonsensical outputs vs a feature extractor crash from unsupported `center`; different layers of the stack." + "reason": "Both end in OOM, but one is tensor-parallel loading and the other is increased reserved memory under int4 quantization; different root causes." }, { - "left": "issue:43575", - "right": "issue:44387", + "left": "issue:43994", + "right": "issue:44206", "accept": false, - "reason": "Both are OOMs, but one is TP model loading and the other is int4 quantization reserved-memory growth; not the same concrete bug." + "reason": "SigLIP2 nonsensical outputs vs. LasrFeatureExtractor crash on unsupported center arg; not the same underlying defect." }, { "left": "issue:43873", "right": "issue:45588", "accept": false, - "reason": "Quantization/offloading behavior and a flash-attention AttributeError are different problems." + "reason": "Quantization/offloading behavior issue vs. flash-attention AttributeError; no concrete shared code path." }, { "left": "issue:34689", "right": "issue:43611", "accept": false, - "reason": "Llama 3.2 vision loading breakage vs base_model_prefix handling regression; too different to merge." + "reason": "Both are model-loading regressions, but one is a Llama 3.2 vision model breakage and the other is a base_model_prefix-related v5 loading regression." 
}, { "left": "issue:43673", "right": "issue:44743", "accept": false, - "reason": "Both involve cache/state, but one is missing GenerationMixin cache in v5 chunked_prefill and the other is recurrent-state reset; separate bugs." + "reason": "Missing GenerationMixin cache during chunked_prefill vs. recurrent state reset in modular_qwen3_5; distinct generation bugs." }, { "left": "issue:44442", "right": "issue:44898", "accept": false, - "reason": "Tokenizer loading failure vs Perceiver non-default resolution failure; no shared underlying defect." + "reason": "Tokenizer loading failure vs. Perceiver non-default-resolution image-classification failure; different model families and APIs." }, { "left": "issue:33290", "right": "issue:34567", "accept": false, - "reason": "AdaFactor/DeepSpeed OOM is unrelated to `num_input_tokens_seen` not updating." + "reason": "DeepSpeed adafactor OOM vs. TrainerState num_input_tokens_seen not updating; unrelated training subsystems." }, { "left": "issue:43493", "right": "issue:43827", "accept": false, - "reason": "SigLIP2 HF-vs-JAX implementation discrepancy is unrelated to docs still referencing `pipeline()`." + "reason": "SigLIP2 implementation mismatch vs. docs still referencing pipeline() after removals; model behavior vs documentation." }, { "left": "issue:41628", "right": "issue:45372", "accept": false, - "reason": "Missing `AutoImageProcessor` import and `ReasoningEffort` import failure in Gemma 4 processor loading are different import regressions." - }, - { - "left": "issue:44077", - "right": "issue:44479", - "accept": false, - "reason": "`post_init` policy for patchtsmixer has nothing to do with Qwen video-input regression." + "reason": "AutoImageProcessor import failure vs. ReasoningEffort import failure breaking Gemma 4 processor loading; different import paths and packages." 
} ] }, @@ -8298,6 +8354,7 @@ "issue:43856", "issue:43873", "issue:43931", + "issue:44077", "issue:44162", "issue:44190", "issue:44442", @@ -8318,6 +8375,7 @@ ], "soft_pairs": [ "issue:43381|issue:43873", + "issue:44077|issue:44479", "issue:33290|issue:34689", "issue:33290|issue:35141", "issue:43643|issue:44913", @@ -8337,211 +8395,217 @@ "issue:41950|issue:43475", "issue:43316|issue:43716", "issue:43388|issue:43688", - "issue:16998|issue:22355", "issue:43381|issue:43742", + "issue:16998|issue:22355", "issue:25251|issue:28282", "issue:43295|issue:43825", - "issue:43761|issue:44479", "issue:43065|issue:44617", + "issue:43761|issue:44479", "issue:44871|issue:45584", "issue:45127|issue:45200" ], - "prepared_review_unit_hash": "ec5d8858a0c1fde3cd0c7dea8af68b64fa03749b52702516ef51a2b44399736a", + "prepared_review_unit_hash": "d2b7d1500c9632866b5191fac037f301dde312f975b52aaa9f05dcafa5bde905", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { - "node_count": 47, - "item_count": 47, - "soft_pair_count": 28, - "serialized_chars": 35447, - "estimated_input_tokens": 8862, - "estimated_eval_tokens": 17980 + "node_count": 48, + "item_count": 48, + "soft_pair_count": 29, + "serialized_chars": 36315, + "estimated_input_tokens": 9079, + "estimated_eval_tokens": 18414 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is highly heterogeneous: the items span unrelated bugs across model loading, configs, pipelines, distributed training, tokenizer behavior, and model-specific regressions. 
None of the soft pairs look like true duplicates, so all candidate merges should be rejected.", - "confidence": 0.97, - "canonical_issue_reason": "No single canonical issue: the cluster is a grab-bag of unrelated regressions and feature/behavior bugs, so nothing cleanly represents the whole set.", + "summary": "This cluster is heterogeneous: the soft pairs mostly share only broad Transformers subsystems or model families, not the same concrete bug/change. I would not merge any of the soft edges.", + "confidence": 0.9, + "canonical_issue_reason": "issue:43749 is the broadest, most high-traffic regression in the set (many comments and inbound refs) and is the best anchor if a single issue must represent the cluster, though the items are not true duplicates.", "canonical_pr_reason": null, - "best_issue_reason": "No issue cleanly subsumes the others; the closest candidates are still model- or subsystem-specific and not representative of the cluster as a whole.", + "best_issue_reason": "issue:43749 has the strongest signs of being the most impactful and widely discussed issue in this list, so it is the best general issue candidate.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43381", "right": "issue:43873", "accept": false, - "reason": "Gradient checkpointing in eval mode and offloading with quantization are different training-path bugs." + "reason": "Different bugs: eval-mode gradient checkpointing vs quantization/offloading behavior." + }, + { + "left": "issue:44077", + "right": "issue:44479", + "accept": false, + "reason": "Different model/tooling problems; no shared code-path or symptom." }, { "left": "issue:33290", "right": "issue:34689", "accept": false, - "reason": "AdaFactor/deepspeed OOM is unrelated to the Llama 3.2 Vision model-loading regression." + "reason": "Deepspeed Adafactor OOM is unrelated to Llama 3.2 model-loading regression." 
}, { "left": "issue:33290", "right": "issue:35141", "accept": false, - "reason": "Optimizer OOM and embedding reinitialization after resize are distinct bugs in different code paths." + "reason": "Training OOM vs embedding reinit bug; unrelated failure modes." }, { "left": "issue:43643", "right": "issue:44913", "accept": false, - "reason": "Remote-code config field loss and GPTNeoX rotary_pct reload mismatch are separate config-handling issues." + "reason": "Remote-code config field loss is unrelated to GPTNeoX rotary_pct reload behavior." }, { "left": "issue:44190", "right": "issue:44496", "accept": false, - "reason": "Local dataset loading failure in an example script is unrelated to an unrecognized model/config error." + "reason": "Dataset-loading script issue vs model auto-detection/config error; different surfaces." }, { "left": "issue:43749", "right": "issue:43931", "accept": false, - "reason": "FSDP CPU RAM efficient loading and Qwen3-VL weight-shape mismatch are different loading failures." + "reason": "Different loading regressions: FSDP CPU RAM efficient loading vs shape-mismatch on Qwen3-VL." }, { "left": "issue:43720", "right": "issue:44442", "accept": false, - "reason": "BitNet packed-weight loading and FastSpeech2Conformer tokenizer loading are unrelated code paths." + "reason": "BitNet accelerate-loading packing bug is unrelated to tokenizer class loading." }, { "left": "issue:43873", "right": "issue:44871", "accept": false, - "reason": "Quantization offloading and Gemma eos_token_id inconsistency are unrelated bugs." + "reason": "Quantization/offloading issue vs Gemma eos_token configuration mismatch." }, { "left": "issue:33290", "right": "issue:45656", "accept": false, - "reason": "Both involve deepspeed/optimizer behavior, but OOM from AdaFactor is not the same as double optimizer-step invocation." + "reason": "Both involve deepspeed/optimizer, but the concrete bug is different (OOM vs double optimizer step)." 
}, { "left": "issue:43316", "right": "issue:44877", "accept": false, - "reason": "Gemma3TextConfig API mismatch and strict granite_speech config loading are different config-schema problems." + "reason": "API discrepancy in Gemma3TextConfig is not the same as strict config rejection for granite_speech." }, { "left": "issue:44162", "right": "issue:44792", "accept": false, - "reason": "ESM2 breakage and a janus image-generation test failure are unrelated model-specific issues." + "reason": "ESM2 model breakage and janus image-generation test failure are unrelated." }, { "left": "issue:34689", "right": "issue:36010", "accept": false, - "reason": "Llama 3.2 Vision loading regression and missing GenerationMixin import are separate import/load failures." + "reason": "Model-loading regression and GenerationMixin import error are different import paths and fixes." }, { "left": "issue:44855", "right": "issue:45137", "accept": false, - "reason": "TorchScript parsing on Python 3.13 and DeepSpeed ZeRO3 deque errors do not share an underlying bug." + "reason": "Python 3.13 TorchScript parse error is unrelated to DeepSpeed ZeRO3 deque underflow." }, { "left": "issue:43540", "right": "issue:44514", "accept": false, - "reason": "Qwen3OmniMoe video-input processing and Qwen2_5_VL batched chat-template padding are different failures." + "reason": "Both are Qwen video-related, but one is video input processing and the other is batched chat-template padding behavior." }, { "left": "issue:43381", "right": "issue:43856", "accept": false, - "reason": "Gradient checkpointing eval-mode behavior and Qwen3 MoE memory usage are not the same bug." + "reason": "Gradient-checkpointing eval-mode bug is unrelated to Qwen3 MoE memory usage." }, { "left": "issue:42175", "right": "issue:43316", "accept": false, - "reason": "Missing TensorFlow in a pip extra and a Gemma3TextConfig API discrepancy are unrelated." 
+ "reason": "Backend packaging dependency issue is unrelated to Gemma3TextConfig API inconsistency." }, { "left": "issue:43404", "right": "issue:43441", "accept": false, - "reason": "Weight tying in Mistral3ForConditionalGeneration and FlashAttention failure in Ministral-3 are different concrete problems." + "reason": "Mistral3 lm_head tying bug and Ministral FlashAttention failure are distinct code paths." }, { "left": "issue:41950", "right": "issue:43475", "accept": false, - "reason": "Video-classification pipeline processor selection and Sam3VisionEncoderOutput missing attribute are unrelated." + "reason": "Video-classification pipeline processor lookup is unrelated to Sam3VisionEncoderOutput missing attribute." }, { "left": "issue:43316", "right": "issue:43716", "accept": false, - "reason": "Gemma3TextConfig API mismatch and Mistral-3 image-preprocessor dtype mismatch are different model-specific bugs." + "reason": "Config API mismatch and image-preprocessor dtype mismatch are different problems." }, { "left": "issue:43388", "right": "issue:43688", "accept": false, - "reason": "Last-batch label truncation in gather_for_metrics and auxiliary-loss normalization are separate training issues." + "reason": "Metrics label-truncation bug is unrelated to auxiliary-loss normalization." }, { - "left": "issue:16998", - "right": "issue:22355", + "left": "issue:43381", + "right": "issue:43742", "accept": false, - "reason": "DeBERTa-V3 model_max_length discussion and missing transformers.onnx module are unrelated." + "reason": "Gradient-checkpointing eval-mode issue is unrelated to MobileLLM key error on load." }, { - "left": "issue:43381", - "right": "issue:43742", + "left": "issue:16998", + "right": "issue:22355", "accept": false, - "reason": "Gradient checkpointing eval-mode limitation is unrelated to MobileLLM-125M loading key errors." + "reason": "Model max length question and missing transformers.onnx module are unrelated." 
}, { "left": "issue:25251", "right": "issue:28282", "accept": false, - "reason": "Pipeline top_k nesting behavior and AutoModel PyTorch import errors are unrelated." + "reason": "Pipeline top_k output-shape bug is unrelated to missing PyTorch import." }, { "left": "issue:43295", "right": "issue:43825", "accept": false, - "reason": "Processor/tokenizer regression and a v5 translation error-message issue are different API regressions." + "reason": "Custom processor/tokenizer regression is unrelated to the v5 translation-task error message." }, { - "left": "issue:43761", - "right": "issue:44479", + "left": "issue:43065", + "right": "issue:44617", "accept": false, - "reason": "CLIPVisionModel hidden_states regression and Qwen video-input regression do not share the same failure mode." + "reason": "Dummy Conv2d implementation question is unrelated to Sam3Video OOM." }, { - "left": "issue:43065", - "right": "issue:44617", + "left": "issue:43761", + "right": "issue:44479", "accept": false, - "reason": "Sam3PixelDecoder dummy Conv2d and Sam3Video CUDA OOM are different Sam3 issues." + "reason": "Hidden-states regression in CLIPVisionModel is unrelated to Qwen video-input regression." }, { "left": "issue:44871", "right": "issue:45584", "accept": false, - "reason": "Gemma eos_token_id inconsistency and Whisper empty-transcription failure are unrelated tokenization/generation bugs." + "reason": "EOS-token config inconsistency is unrelated to Whisper empty-transcription generation failure." }, { "left": "issue:45127", "right": "issue:45200", "accept": false, - "reason": "LoRA merge collapse with tied embeddings and Gemma 4 mm_token_type_ids defaults are different model/config problems." + "reason": "LoRA merge collapse with tied embeddings is unrelated to Gemma 4 mm_token_type_ids defaults." 
} ] }, @@ -8616,15 +8680,15 @@ ], "soft_pairs": [ "issue:44077|issue:44534", - "issue:44734|issue:45406", "issue:44936|issue:45698", + "issue:44734|issue:45406", "issue:43295|issue:44871", "issue:43550|issue:43701", "issue:43606|issue:43701", "issue:43646|issue:44913", "issue:43452|issue:44496", - "issue:43931|issue:44060", "issue:43761|issue:44242", + "issue:43931|issue:44060", "issue:43575|issue:43742", "issue:44617|issue:45584", "issue:45563|issue:45584", @@ -8646,14 +8710,14 @@ "issue:44792|issue:45600", "issue:45127|issue:45357" ], - "prepared_review_unit_hash": "bb6fd536b1973c01c5da1e46bddb08436a119d15305a9c1ad3cb15853acf843c", + "prepared_review_unit_hash": "dd11201ce7ea6765bf191ef8b2fd6b72b0af9db110fc6a561e5700dfe8074836", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, @@ -8668,186 +8732,192 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly heterogeneous: many issues mention loading, initialization, or multimodal regressions, but they point to different code paths and symptoms. I found no true duplicate pair among the soft edges, so all soft links should stay rejected.", - "confidence": 0.96, - "canonical_issue_reason": "No exact canonical duplicate emerges; issue 43646 is the broadest representative of the initialization/loading regression subset, but it is not a duplicate of the others.", + "summary": "This cluster is heterogeneous: the soft pairs mostly share broad Transformers/model-loading keywords but describe different failures, models, or code paths. 
I don\u2019t see a safe canonical issue or any PR candidate here.", + "confidence": 0.97, + "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "Issue 43646 is the best single representative for the cluster only because it is broadly about model initialization/loading regressions; however, the cluster is too mixed for a real canonical duplicate.", + "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44077", "right": "issue:44534", "accept": false, - "reason": "Both relate to v5/post-init behavior, but one is about patchtsmixer allowing optional post_init and the other about junk non-persistent buffers; different bugs." + "reason": "Different bugs: optional post_init API change vs non-persistent buffer corruption." }, { - "left": "issue:44734", - "right": "issue:45406", + "left": "issue:44936", + "right": "issue:45698", "accept": false, - "reason": "Distinct crashes in different areas: KV-cache continuation indexing in serve vs Gemma4Processor missing _tokenizer." + "reason": "Different code paths: train/evaluate state bug vs wrong custom module loaded after save_pretrained." }, { - "left": "issue:44936", - "right": "issue:45698", + "left": "issue:44734", + "right": "issue:45406", "accept": false, - "reason": "Trainer lifecycle failure after train() is unrelated to from_pretrained loading the wrong custom module after save_pretrained." + "reason": "One is KV-cache indexing in serve responses; the other is a missing _tokenizer attribute on Gemma4Processor." }, { "left": "issue:43295", "right": "issue:44871", "accept": false, - "reason": "Multimodal processor/tokenizer regression and eos_token_id mismatch are different model/config issues." + "reason": "Different regressions: processor.tokenizer access/images handling vs eos_token_id mismatch." 
}, { "left": "issue:43550", "right": "issue:43701", "accept": false, - "reason": "torch.compile+SDPA failure for Bamba is unrelated to resume_from_checkpoint key mismatch." + "reason": "Unrelated failures: torch.compile+SDPA on Bamba vs resume_from_checkpoint key mismatch." }, { "left": "issue:43606", "right": "issue:43701", "accept": false, - "reason": "CPU offload device mismatch is a different failure mode from checkpoint key mismatch." + "reason": "Device-mismatch with CPU offload is unrelated to checkpoint key mapping." }, { "left": "issue:43646", "right": "issue:44913", "accept": false, - "reason": "Both involve model config/loading, but one is custom model initialization and the other is rotary_pct not persisting on reload." + "reason": "Custom model initialization breakage is not the same as GPTNeoXConfig rotary_pct reload drift." }, { "left": "issue:43452", "right": "issue:44496", "accept": false, - "reason": "Both are loading-related, but gguf_file breaks a specific AutoTokenizer/AutoModel path while the other lacks a model_type in config.json." + "reason": "gguf_file loading issue is different from an unrecognized-model config.json error." }, { - "left": "issue:43931", - "right": "issue:44060", + "left": "issue:43761", + "right": "issue:44242", "accept": false, - "reason": "Weight shape mismatch in Qwen3-VL is not the same bug as the incorrect tied-weights warning in Qwen3-Next." + "reason": "CLIP hidden_states regression and MoE load-balancing loss omission are different issues." }, { - "left": "issue:43761", - "right": "issue:44242", + "left": "issue:43931", + "right": "issue:44060", "accept": false, - "reason": "Hidden_states=None in CLIPVisionModel.forward is unrelated to missing load-balancing loss when output_router_logits=False." + "reason": "Weight-shape mismatch on model load is not the same as a bad tied-weights warning." 
}, { "left": "issue:43575", "right": "issue:43742", "accept": false, - "reason": "TP OOM on a 57B model and a key error loading MobileLLM-125M are unrelated load failures." + "reason": "tp OOM on Qwen2-57B is unrelated to a MobileLLM loading KeyError." }, { "left": "issue:44617", "right": "issue:45584", "accept": false, - "reason": "CUDA OOM in Sam3Video is unrelated to Whisper empty-transcription failure after align_special_tokens." + "reason": "CUDA OOM in Sam3Video is unrelated to Whisper empty-transcription generation failure." }, { "left": "issue:45563", "right": "issue:45584", "accept": false, - "reason": "A stale warning for num_return_sequences is not the same as an empty-transcription generation crash." + "reason": "A stale warning about num_return_sequences is not the same as a generation crash." }, { "left": "issue:42617", "right": "issue:43575", "accept": false, - "reason": "Failed 3d_parallel.py execution and tp OOM are different problems with different code paths." + "reason": "3d_parallel.py runtime failure and tensor-parallel OOM are distinct problems." }, { "left": "issue:43906", "right": "issue:45290", "accept": false, - "reason": "An isolated reproduction of an older issue is not the same as the assistant tool-call crash in apply_chat_template(tokenize=True)." + "reason": "A reproduction of an older issue is not enough to show the same apply_chat_template/tool-call crash." }, { "left": "issue:44521", "right": "issue:45381", "accept": false, - "reason": "Both are multimodal regressions, but assistant_masks being all zero is different from wrong vision_position_ids for video inputs." + "reason": "Both are multimodal, but assistant_masks being zero is a different bug from vision_position_ids being wrong." }, { "left": "issue:44479", "right": "issue:44734", "accept": false, - "reason": "Video-input regression in Qwen VL models is unrelated to serve KV-cache continuation tensor indexing." 
+ "reason": "Video-input regression in Qwen VL models is unrelated to KV-cache tensor indexing in serve." }, { "left": "issue:44933", "right": "issue:45362", "accept": false, - "reason": "Missing import from image_utils and Qwen3.5 chat crash are unrelated." + "reason": "Missing image_utils import and Qwen3.5 chat crash are unrelated failures." }, { "left": "issue:32090", "right": "issue:35141", "accept": false, - "reason": "Trainer GPU broadcast TypeError and embedding reinitialization in post_init are different bugs." + "reason": "Trainer broadcast TypeError is not the same as output embedding reinitialization after resize." }, { "left": "issue:41720", "right": "issue:43575", "accept": false, - "reason": "Auto device mapping cudaErrorAssert and TP OOM are both resource-related but not the same underlying issue." + "reason": "Auto device-mapping cudaErrorAssert on A800 is a different symptom than tp OOM." }, { "left": "issue:43065", "right": "issue:44610", "accept": false, - "reason": "Dummy Conv2d in Sam3PixelDecoder is unrelated to processor output size mismatch for OmDet-Turbo." + "reason": "Dummy Conv2d in Sam3PixelDecoder and OmDet-Turbo size mismatch are different model/processor bugs." }, { "left": "issue:44464", "right": "issue:44871", "accept": false, - "reason": "Compiled-forward chunked generation inconsistency is not the same as an eos_token_id configuration mismatch." + "reason": "Chunked generation with compiled forward is unrelated to eos_token_id config inconsistency." + }, + { + "left": "issue:44464", + "right": "issue:45563", + "accept": false, + "reason": "Inconsistent compiled generation outputs and a stale warning are not the same bug." }, { "left": "issue:43827", "right": "issue:43994", "accept": false, - "reason": "Docs still referencing pipeline() is a documentation cleanup issue, not the same as nonsensical model outputs." + "reason": "Docs still referencing pipeline() is unrelated to SigLIP2 producing nonsensical outputs." 
}, { "left": "issue:44792", "right": "issue:44977", "accept": false, - "reason": "Janus image-generation test failure and Qwen3.5 flash-attention generation bug are different models and failure modes." + "reason": "Janus image-generation test failure does not match Qwen3.5 flash-attention generation issues." }, { "left": "issue:44534", "right": "issue:44913", "accept": false, - "reason": "Non-persistent buffer junk and rotary_pct reset on reload both touch persistence, but they are not the same concrete bug." + "reason": "Non-persistent buffer junk in v5 is unrelated to GPTNeoX rotary_pct reload behavior." }, { "left": "issue:41720", "right": "issue:45245", "accept": false, - "reason": "cudaErrorAssert on A800 is unrelated to the category-count limit error." + "reason": "Qwen3 auto device mapping crash and category-count limit error are different failures." }, { "left": "issue:44190", "right": "issue:44964", "accept": false, - "reason": "Local dataset loading in a training script is unrelated to loading a specific multimodal model." + "reason": "Local dataset loading in a trainer script is unrelated to loading Phi-4-multimodal-instruct." }, { "left": "issue:44792", "right": "issue:45600", "accept": false, - "reason": "A failing janus generation test is unrelated to auto_mappings.py referencing removed Sam3LiteText configs." + "reason": "A janus test failure is not the same as CI breakage from removed Sam3LiteText configs." }, { "left": "issue:45127", "right": "issue:45357", "accept": false, - "reason": "LoRA merge collapse with extended vocab and incorrect visual encoder keys saved are different save/merge regressions." + "reason": "LoRA merge collapse with tied embeddings is unrelated to incorrect visual-encoder keys on save_pretrained." 
} ] }, @@ -8927,8 +8997,8 @@ "issue:43335|issue:44855", "issue:43976|issue:44855", "issue:44077|issue:44079", - "issue:43550|issue:44190", "issue:43644|issue:44913", + "issue:43550|issue:44190", "issue:43540|issue:44734", "issue:43650|issue:44220", "issue:44112|issue:45600", @@ -8939,33 +9009,33 @@ "issue:43479|issue:45375", "issue:44734|issue:45632", "issue:36246|issue:41628", - "issue:43404|issue:45216", "issue:43295|issue:45276", + "issue:43404|issue:45216", "issue:45245|issue:45584", "issue:36246|issue:38617", "issue:44016|issue:44220", "issue:44561|issue:45632", "issue:44186|issue:44734", - "issue:44683|issue:45446", - "issue:38175|issue:43065", "issue:42175|issue:44279", + "issue:38175|issue:43065", + "issue:44683|issue:45446", "issue:45406|issue:45632", "issue:44466|issue:44913" ], - "prepared_review_unit_hash": "0336dbca82552ae41b3801f1a5fca9f4cc0ef0a2e5ef320b8141f4e0b9514e0c", + "prepared_review_unit_hash": "a87b4e1cf8ccdd44767a454b4e6327dfe42944d022fc0708e4f71b3e4e71daaf", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 31, - "serialized_chars": 36640, + "serialized_chars": 36639, "estimated_input_tokens": 9160, "estimated_eval_tokens": 18576 }, @@ -8974,204 +9044,198 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The soft links are overwhelmingly false positives: they span unrelated import errors, config/serialization regressions, model-specific bugs, pipeline issues, and docs/example failures. 
None of the candidate pairs look like the same underlying defect or change.", - "confidence": 0.93, + "summary": "The cluster is heterogeneous: the soft-similarity pairs mostly share broad transformer/regression vocabulary but do not describe the same concrete bug or change. No single issue stands out as a reliable duplicate canonical.", + "confidence": 0.95, "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "issue:41628 is the strongest standalone candidate to surface as a representative user-facing regression: it is a concrete, high-activity import failure with broad impact. That said, the set does not form a true duplicate cluster.", + "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44938", "right": "issue:45542", "accept": false, - "reason": "Unrelated failures: Python import/load issue vs missing TensorFlow backend when only tensorboard is installed." + "reason": "Both mention runtime/backend failures, but one is a Python import/load problem and the other is a TensorFlow backend packaging issue; different code paths." }, { "left": "issue:43388", "right": "issue:44079", "accept": false, - "reason": "Both involve internals around output handling, but one is metric-gathering label truncation and the other is ModelOutput key assignment; different code paths." + "reason": "One is a metrics label-truncation bug and the other is a ModelOutput key-assignment bug; unrelated internals." }, { "left": "issue:43761", "right": "issue:44743", "accept": false, - "reason": "Different models and bugs: CLIPVisionModel hidden_states regression vs Qwen recurrent-state reset with cache." + "reason": "CLIPVision hidden-state regression and Qwen recurrent-state reset are different model-specific failures." }, { "left": "issue:44855", "right": "issue:45588", "accept": false, - "reason": "Completely different problems: Python 3.13 JIT parse/indentation issue vs flash-attention crashing on s_aux=None." 
+ "reason": "Python import parsing error vs flash-attention auxiliary-state AttributeError; no shared underlying defect." }, { "left": "issue:43335", "right": "issue:44855", "accept": false, - "reason": "SwitchTransformers config construction bug is unrelated to the DebertaV2 Python 3.13 import/parser issue." + "reason": "SwitchTransformers layer construction bug is unrelated to the DebertaV2 Python 3.13 import parsing issue." }, { "left": "issue:43976", "right": "issue:44855", "accept": false, - "reason": "A packaging/Python-version compatibility report does not match the specific DebertaV2 @torch.jit.script parsing failure." + "reason": "General Python-version compatibility complaint vs a specific IndentationError on import; not the same bug." }, { "left": "issue:44077", "right": "issue:44079", "accept": false, - "reason": "Both are model-output/config validation themed, but one is patchtsmixer post_init policy and the other is None-key assignment in ModelOutput." + "reason": "One concerns post_init validation policy, the other ModelOutput key handling; different utilities and fixes." }, { - "left": "issue:43550", - "right": "issue:44190", + "left": "issue:43644", + "right": "issue:44913", "accept": false, - "reason": "Torch.compile+SDPA failure in Bamba is unrelated to the image-classification script failing to load a local dataset." + "reason": "Non-persistent buffer initialization and GPTNeoX config reload persistence are separate regressions." }, { - "left": "issue:43644", - "right": "issue:44913", + "left": "issue:43550", + "right": "issue:44190", "accept": false, - "reason": "Both are persistence-related, but one is buffer initialization junk and the other is GPTNeoX rotary_pct not reloading; not the same bug." + "reason": "torch.compile/SDPA failure in Bamba is unrelated to local dataset loading in an image-classification script." 
}, { "left": "issue:43540", "right": "issue:44734", "accept": false, - "reason": "Video-input ValueError in Qwen3OmniMoe is unrelated to serve KV-cache continuation tensor indexing." + "reason": "Video-input processing in Qwen3OmniMoe and KV-cache indexing in transformers serve are different failures." }, { "left": "issue:43650", "right": "issue:44220", "accept": false, - "reason": "A noisy generic issue title does not match the concrete audio feature-extraction bug." + "reason": "The first issue is non-actionable/empty content, the second is an audio feature extraction bug; no duplicate relationship." }, { "left": "issue:44112", "right": "issue:45600", "accept": false, - "reason": "CI failures in different areas: GraniteSpeech stale device override test vs auto_mappings referencing removed Sam3LiteText configs." + "reason": "A stale CI test in GraniteSpeech and a broken auto_mappings entry for Sam3LiteText are separate maintenance issues." }, { "left": "issue:43116", "right": "issue:44190", "accept": false, - "reason": "Both mention example scripts, but one is multi-label classification output behavior and the other is local dataset loading for image classification." + "reason": "Multi-label classification example output bug and local dataset loading failure are different scripts and failure modes." }, { "left": "issue:45276", "right": "issue:45584", "accept": false, - "reason": "Gemma4 embedding resize behavior and Whisper empty-transcription handling are unrelated." + "reason": "Resize-token-embeddings propagation bug in Gemma4 is unrelated to Whisper empty-transcription generation after token alignment." }, { "left": "issue:39692", "right": "issue:44190", "accept": false, - "reason": "Both touch examples/docs, but one is SigLIP2 documentation errors and the other is a local dataset loading problem." + "reason": "Documentation example errors with SigLIP2 are not the same as a no-trainer image-classification dataset-loading problem." 
}, { "left": "issue:41950", "right": "issue:44016", "accept": false, - "reason": "Video-classification processor mismatch is unrelated to a notebook syntax error." + "reason": "Video-classification pipeline processor lookup and a notebook syntax error are not the same underlying issue." }, { "left": "issue:43479", "right": "issue:45375", "accept": false, - "reason": "Both are config-field issues, but they affect different multimodal model families and different missing fields." + "reason": "Both are multimodal config bugs, but they affect different model families and different missing/defaulted fields." }, { "left": "issue:44734", "right": "issue:45632", "accept": false, - "reason": "Serve KV-cache indexing and trust_remote_code cache-path collisions are different subsystems and defects." + "reason": "Serve KV-cache continuation indexing and trust_remote_code cache-path collisions are unrelated server/import behaviors." }, { "left": "issue:36246", "right": "issue:41628", "accept": false, - "reason": "Both are import errors, but one is a specific missing Qwen2_5_VLImageProcessor symbol and the other is top-level AutoImageProcessor availability." + "reason": "Both are import errors, but one is a missing model-specific image processor export and the other is a missing top-level AutoImageProcessor export." }, { - "left": "issue:43404", - "right": "issue:45216", + "left": "issue:43295", + "right": "issue:45276", "accept": false, - "reason": "Tied lm_head weights in Mistral3 versus incorrect Qwen3.5 checkpoint saving are different model/regression bugs." + "reason": "Processor/tokenizer regression in v4.57.5 and Gemma4 embedding-resize propagation are different code paths." }, { - "left": "issue:43295", - "right": "issue:45276", + "left": "issue:43404", + "right": "issue:45216", "accept": false, - "reason": "Processor/tokenizer regression and Gemma4 resize_token_embeddings behavior do not share the same concrete code path." 
+ "reason": "Mistral3 tied-weight serialization and Qwen3.5 save_pretrained corruption are both checkpointing-related but not the same concrete defect." }, { "left": "issue:45245", "right": "issue:45584", "accept": false, - "reason": "Category-cardinality runtime error is unrelated to Whisper's empty-transcription failure." + "reason": "Category-cardinality error and Whisper empty-transcription behavior are unrelated model/runtime failures." }, { "left": "issue:36246", "right": "issue:38617", "accept": false, - "reason": "Different missing imports in different modules; no shared underlying code change is evident." + "reason": "Different missing symbols from different modules; these are separate import breakages." }, { "left": "issue:44016", "right": "issue:44220", "accept": false, - "reason": "Notebook syntax error and audio feature extraction bug are unrelated." + "reason": "A notebook syntax error is unrelated to the fbank feature extraction bug." }, { "left": "issue:44561", "right": "issue:45632", "accept": false, - "reason": "Removal of a helper breaking trust_remote_code models is unrelated to local cache-path collisions." + "reason": "Removal of an internal API affecting trust_remote_code and a local-model cache collision are different regressions." }, { "left": "issue:44186", "right": "issue:44734", "accept": false, - "reason": "Tokenizer crashes on NER padding/truncation are not the same as serve KV-cache continuation indexing failures." + "reason": "Tokenizer padding/truncation crash and serve KV-cache tensor indexing are different subsystems." }, { - "left": "issue:44683", - "right": "issue:45446", + "left": "issue:42175", + "right": "issue:44279", "accept": false, - "reason": "Both mention flex_attention/PyTorch versions, but one is compiled flex_attention failing on new torch and the other is an incorrect version gate for AuxRequest import." 
+ "reason": "TensorFlow backend installation issue and a generic dependency issue are too vague and not the same underlying bug." }, { "left": "issue:38175", "right": "issue:43065", "accept": false, - "reason": "SigLIP2 zero-probabilities and a dummy Conv2d in Sam3PixelDecoder are unrelated model bugs." + "reason": "SigLIP2 zero probabilities and Sam3PixelDecoder dummy Conv2d are unrelated model bugs." }, { - "left": "issue:42175", - "right": "issue:44279", + "left": "issue:44683", + "right": "issue:45446", "accept": false, - "reason": "A specific missing TensorFlow dependency from a torch install is not the same as a vague transformers dependency issue." + "reason": "Compiled flex_attention failure on new torch and an incorrect version check for AuxRequest import are separate flex_attention problems." }, { "left": "issue:45406", "right": "issue:45632", "accept": false, - "reason": "Gemma4Processor _tokenizer crash and trust_remote_code cache collision are unrelated." - }, - { - "left": "issue:45588", - "right": "issue:45600", - "accept": false, - "reason": "Flash-attention s_aux=None crash and removed Sam3LiteText config references breaking CI are different failures." + "reason": "Gemma4Processor missing _tokenizer in serve and trust_remote_code cache-path collisions are unrelated failures." }, { "left": "issue:44466", "right": "issue:44913", "accept": false, - "reason": "Both concern state persistence, but one is lm_head.weight serialization and the other is GPTNeoX rotary_pct reload behavior." + "reason": "Serialization of tied lm_head weights and GPTNeoX rotary_pct reload are distinct configuration/serialization bugs." 
} ] }, @@ -9249,233 +9313,233 @@ "issue:42907|issue:43441", "issue:44016|issue:44062", "issue:44964|issue:45600", - "issue:43299|issue:45216", "issue:43295|issue:45335", + "issue:43299|issue:45216", "issue:43976|issue:44987", "issue:43673|issue:44336", "issue:44112|issue:44964", "issue:43824|issue:44933", "issue:43531|issue:45600", - "issue:43976|issue:44279", "issue:44496|issue:45042", + "issue:43976|issue:44279", "issue:43854|issue:44964", "issue:43526|issue:43901", "issue:45335|issue:45584", "issue:43299|issue:45357", "issue:44075|issue:44368", "issue:44521|issue:45325", - "issue:43577|issue:45216", "issue:43381|issue:45593", "issue:44351|issue:45278", + "issue:43577|issue:45216", "issue:43976|issue:45405", - "issue:41950|issue:45020", "issue:43901|issue:44496", + "issue:41950|issue:45020", "issue:44242|issue:45200", "issue:43479|issue:43525", "issue:44462|issue:45081" ], - "prepared_review_unit_hash": "c7ea46c1a54a2e80ddce8ffefe3f500e239089a6f2e3ef54520abd7bf7889388", + "prepared_review_unit_hash": "fbacfe924c3393a2e28278f83448a387e9f8529c918368b62cad824c44a38a0d", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 29, - "serialized_chars": 35784, - "estimated_input_tokens": 8946, - "estimated_eval_tokens": 18148 + "serialized_chars": 35785, + "estimated_input_tokens": 8947, + "estimated_eval_tokens": 18150 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a collection of related-but-distinct Transformers regression reports, many around model loading, config handling, tokenizers, and multimodal support. 
There are no clear exact duplicates among the soft pairs, so all candidate edges are rejected. The broadest representative issue is the remote_code/model-loading regression report.", - "confidence": 0.78, - "canonical_issue_reason": "issue:45020 is the broadest, most representative open report in the set: it describes a recent-version regression affecting model loading with remote_code, which is a common umbrella for several adjacent loading/config failures here.", + "summary": "The cluster is heterogeneous: most items are separate Transformers regressions around loading, configs, imports, or generation, but not the same underlying bug. I rejected every soft edge as a non-duplicate. If one issue must serve as the umbrella representative, issue 45020 is the broadest and most central to the version-regression/loading theme.", + "confidence": 0.73, + "canonical_issue_reason": "Issue 45020 is the best umbrella representative because it is broad, open-ended, and closest to the recurring theme of recent Transformers regressions breaking model loading/remote_code behavior. That said, the set is not a true duplicate cluster.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45020 best captures the shared theme of recent-version loading regressions without being too narrowly tied to one model family or one specific symptom.", + "best_issue_reason": "Issue 45020 is the strongest global issue candidate: it has the broadest scope and best matches the common regression theme seen across several other reports, even though the individual bugs are distinct.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44246", "right": "issue:45003", "accept": false, - "reason": "Import slowdown and unsafe sys.modules access are related to import-time behavior, but they are not the same concrete bug or fix path." + "reason": "Import slowness vs unsafe sys.modules access; related area, but different failure modes." 
}, { "left": "issue:43643", "right": "issue:44496", "accept": false, - "reason": "Both involve config/model loading, but one is missing fields with trust_remote_code and the other is an unrecognized model / missing model_type case." + "reason": "Both involve config/model loading, but one is missing fields under trust_remote_code and the other is model-type resolution." }, { "left": "issue:42907", "right": "issue:43441", "accept": false, - "reason": "Same model family, different failures: saving dequantized models vs FlashAttention load/runtime behavior." + "reason": "Same model family, but save_pretrained on dequantized checkpoints vs a FlashAttention load/runtime failure." }, { "left": "issue:44016", "right": "issue:44062", "accept": false, - "reason": "A notebook syntax error and a tokenizer AddedToken argument error are unrelated." + "reason": "Notebook syntax error vs AddedToken keyword duplication; unrelated bugs." }, { "left": "issue:44964", "right": "issue:45600", "accept": false, - "reason": "Phi-4 multimodal loading and stale auto_mappings references to Sam3LiteText are different breakages." + "reason": "Phi-4 multimodal loading issue vs auto_mappings referencing removed configs; different code paths." }, { - "left": "issue:43299", - "right": "issue:45216", + "left": "issue:43295", + "right": "issue:45335", "accept": false, - "reason": "Both are Qwen-related, but one is a model-loading failure and the other is an incorrect save_pretrained checkpoint issue." + "reason": "Processor/tokenizer regression vs t5gemma embedding resize bug." }, { - "left": "issue:43295", - "right": "issue:45335", + "left": "issue:43299", + "right": "issue:45216", "accept": false, - "reason": "Processor/tokenizer regression in multimodal code and a t5gemma embedding-resize bug are different code paths." + "reason": "Both are Qwen-related, but one is loading failure and the other is incorrect save_pretrained checkpoint contents." 
}, { "left": "issue:43976", "right": "issue:44987", "accept": false, - "reason": "A Python version compatibility issue is not the same bug as a model-loading failure for one specific repository." + "reason": "Python/version compatibility issue vs a specific model-loading failure." }, { "left": "issue:43673", "right": "issue:44336", "accept": false, - "reason": "Generation cache behavior and ANSI output formatting are unrelated." + "reason": "Generation cache regression vs ANSI formatting in loading_report; unrelated." }, { "left": "issue:44112", "right": "issue:44964", "accept": false, - "reason": "A CI test flake in GraniteSpeech is unrelated to Phi-4 multimodal loading." + "reason": "CI test/device override failure vs model loading failure; not the same bug." }, { "left": "issue:43824", "right": "issue:44933", "accept": false, - "reason": "Two import errors, but for different symbols and likely different causes; not the same fix." + "reason": "Different missing imports from different modules and different APIs." }, { "left": "issue:43531", "right": "issue:45600", "accept": false, - "reason": "Qwen3-MoE sliding-window behavior and removed config references are unrelated." + "reason": "MoE sliding_window bug vs removed config references in auto_mappings." }, { - "left": "issue:43976", - "right": "issue:44279", + "left": "issue:44496", + "right": "issue:45042", "accept": false, - "reason": "A Python support/version issue and a vague dependency problem are too different to treat as the same bug." + "reason": "Model/config resolution failure vs image processor torchvision dependency regression." }, { - "left": "issue:44496", - "right": "issue:45042", + "left": "issue:43976", + "right": "issue:44279", "accept": false, - "reason": "Model config recognition and PIL/torchvision dependency handling are different issues." + "reason": "Python support/version issue vs a generic dependency complaint." 
}, { "left": "issue:43854", "right": "issue:44964", "accept": false, - "reason": "Both are model-loading failures, but for different models and symptom types." + "reason": "Different models and likely different root causes for the loading failure." }, { "left": "issue:43526", "right": "issue:43901", "accept": false, - "reason": "A BeitImageProcessorFast bug and a docs/behavior mismatch in TextClassificationPipeline do not share a concrete fix path." + "reason": "ImageProcessorFast label reduction bug vs docs/behavior mismatch for return_all_scores." }, { "left": "issue:45335", "right": "issue:45584", "accept": false, - "reason": "Embedding resize behavior and Whisper empty-transcription generation are unrelated." + "reason": "Embedding resize bug vs Whisper empty-transcription generation bug." }, { "left": "issue:43299", "right": "issue:45357", "accept": false, - "reason": "Both concern Qwen3.5, but one is load-time breakage and the other is incorrect save_pretrained output keys." + "reason": "Both involve Qwen3.5, but one is loading and the other is incorrect visual-encoder key saving." }, { "left": "issue:44075", "right": "issue:44368", "accept": false, - "reason": "Optimizer argument handling and a warning about tie_word_embeddings are unrelated." + "reason": "Optimizer argument handling vs LoRA warning in Qwen3.5; unrelated." }, { "left": "issue:44521", "right": "issue:45325", "accept": false, - "reason": "Both are multimodal, but assistant mask generation and rope index scaling are different concrete bugs." - }, - { - "left": "issue:43577", - "right": "issue:45216", - "accept": false, - "reason": "BLIP2 dtype propagation and Qwen3.5 checkpoint saving are unrelated." + "reason": "Assistant mask generation bug vs rope/position-id scaling bug." }, { "left": "issue:43381", "right": "issue:45593", "accept": false, - "reason": "Gradient checkpointing in eval mode and D-FINE auxiliary loss behavior are unrelated." 
+ "reason": "Gradient checkpointing eval-mode restriction vs D-FINE auxiliary-loss omission." }, { "left": "issue:44351", "right": "issue:45278", "accept": false, - "reason": "The latter is a broad umbrella for many import errors, but the pair does not identify the same concrete import failure." + "reason": "One specific HybridCache import error vs a broad collection of import regressions; not enough evidence of one fix." }, { - "left": "issue:43976", - "right": "issue:45405", + "left": "issue:43577", + "right": "issue:45216", "accept": false, - "reason": "Python support compatibility and an unreleased PEFT minimum version are different release-management issues." + "reason": "BLIP2 dtype propagation bug vs Qwen3.5 save_pretrained checkpoint bug." }, { - "left": "issue:41950", - "right": "issue:45020", + "left": "issue:43976", + "right": "issue:45405", "accept": false, - "reason": "Both mention model-loading/pipeline behavior, but image processor lookup in a video pipeline is a different bug from remote_code model loading regressions." + "reason": "Python compatibility issue vs unreleased peft minimum-version bump." }, { "left": "issue:43901", "right": "issue:44496", "accept": false, - "reason": "A documentation/behavior mismatch and an unrecognized model/config issue are not the same underlying bug." + "reason": "Docs mismatch vs model recognition failure; different bugs." + }, + { + "left": "issue:41950", + "right": "issue:45020", + "accept": false, + "reason": "Video-classification processor lookup bug vs remote_code breakage; only loosely related by model loading." }, { "left": "issue:44242", "right": "issue:45200", "accept": false, - "reason": "Load-balancing loss computation and mm_token_type_ids defaults are separate model-training concerns." + "reason": "MoE load-balancing loss omission vs mm_token_type_ids defaulting; different training-path bugs." 
}, { "left": "issue:43479", "right": "issue:43525", "accept": false, - "reason": "Phi4MultimodalConfig default initialization and Llama4Config pad_token_id absence are unrelated config bugs." + "reason": "Both are config-related, but they affect different models and different missing/default fields." }, { "left": "issue:44462", "right": "issue:45081", "accept": false, - "reason": "Tokenizer.json precedence and a Mistral regex patch crash are different tokenizer-loading problems." + "reason": "Tokenizer.json precedence issue vs Mistral regex patch crash; distinct tokenizer-loading bugs." } ] }, @@ -9560,9 +9624,9 @@ "issue:43295|issue:44779", "issue:38175|issue:44617", "issue:44016|issue:44291", + "issue:43450|issue:44279", "issue:44261|issue:44263", "issue:43723|issue:44162", - "issue:43450|issue:44279", "issue:42994|issue:43317", "issue:43065|issue:44898", "issue:43441|issue:44683", @@ -9571,216 +9635,216 @@ "issue:43262|issue:44521", "issue:44623|issue:45397", "issue:44623|issue:44749", - "issue:43673|issue:43761", "issue:43784|issue:45542", + "issue:43673|issue:43761", "issue:44671|issue:44779", "issue:42175|issue:43976", "issue:43122|issue:43650", "issue:44987|issue:45542", "issue:43653|issue:45593" ], - "prepared_review_unit_hash": "d9725c8c877b54420f59f9685a27a17f9861fc869b61c2800a999a94cdf5205a", + "prepared_review_unit_hash": "94758f60afa7dba9f63aceae22719ea2335bd5a5b88076861cff68382ef3c34f", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 29, - "serialized_chars": 36273, - "estimated_input_tokens": 9069, - "estimated_eval_tokens": 18394 + "serialized_chars": 36269, + "estimated_input_tokens": 9068, + "estimated_eval_tokens": 18392 }, "overflow_policy": 
"truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is noisy: most items are unrelated v5 regressions across tokenizer loading, processors, quantization, vision, audio, and docs. None of the suggested soft edges look like true duplicates.", - "confidence": 0.91, - "canonical_issue_reason": "No single canonical issue is strong here; the set mixes several different bug classes and model families rather than one underlying defect.", + "summary": "This cluster is heterogeneous and does not contain any convincing duplicate pairs. The soft edges mostly link unrelated model-loading, tokenizer, processor, quantization, attention, or documentation issues. No PRs are present.", + "confidence": 0.96, + "canonical_issue_reason": "Issue 44521 is the best representative because it is a concrete, user-facing regression in a common multimodal API (`apply_chat_template`) and is still open; the other issues are mostly isolated model- or environment-specific reports.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43723 is the broadest representative of the recurring tokenizer-loading regression theme in v5, but it still does not unify the whole cluster.", + "best_issue_reason": "Issue 44521 is the strongest standalone issue to anchor the cluster: it describes a clear bug, has multiple inbound references, and affects a broadly used multimodal chat-template path.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44683", "right": "issue:45542", "accept": false, - "reason": "Unrelated domains: flex_attention on torch>=2.9 vs TensorFlow backend/package installation." + "reason": "Flex-attention compilation on torch>=2.9 and missing TensorFlow backend from install extras are unrelated failures." 
}, { "left": "issue:44496", "right": "issue:44877", "accept": false, - "reason": "Both involve model/config loading, but the failures are different: unrecognized model type vs strict config rejection for granite_speech." + "reason": "One is an unrecognized model/config loading error; the other is strict config rejection for granite_speech. Different code paths and causes." }, { "left": "issue:44297", "right": "issue:44462", "accept": false, - "reason": "Both touch tokenizer serialization/loading, but one is save_pretrained tokenizer_class mismatch and the other is AutoTokenizer ignoring tokenizer.json." + "reason": "Tokenizer save metadata mismatch and AutoTokenizer ignoring tokenizer.json are different serialization vs loading bugs." }, { "left": "issue:44279", "right": "issue:45405", "accept": false, - "reason": "Dependency complaints, but about different packages and failures; not the same bug." + "reason": "A generic dependency issue is not the same as bumping MIN_PEFT_VERSION to an unreleased release." }, { "left": "issue:43299", "right": "issue:43854", "accept": false, - "reason": "Different models and different loading failures; only a broad 'model loading broke in v5' theme is shared." + "reason": "These are model-specific loading problems for different models, not the same underlying bug." }, { "left": "issue:41950", "right": "issue:43650", "accept": false, - "reason": "Completely unrelated: video-classification processor bug vs an unrelated placeholder issue." + "reason": "Video pipeline processor lookup and a vague 'ADD THE DATA' issue are not the same concrete problem." }, { "left": "issue:44462", "right": "issue:44779", "accept": false, - "reason": "Tokenizer-related, but one is repository tokenizer.json precedence and the other is Deepseek tokenization correctness; not the same concrete path." + "reason": "Both involve tokenizers, but one is about repository tokenizer.json being ignored and the other is a Deepseek tokenization regression." 
}, { "left": "issue:39692", "right": "issue:43901", "accept": false, - "reason": "Both are docs-related, but one is SigLIP2 example errors and the other is a TextClassificationPipeline docs mismatch." + "reason": "Both are documentation-related, but they concern different examples and different API mismatches." }, { "left": "issue:43295", "right": "issue:44779", "accept": false, - "reason": "Different regressions: processor.tokenizer/image passing breakage vs Deepseek tokenizer output changes." + "reason": "Custom model processor/tokenizer regression and Deepseek tokenizer incorrect output are distinct bugs." }, { "left": "issue:38175", "right": "issue:44617", "accept": false, - "reason": "Different subsystems and symptoms: zero probabilities on SigLIP2 vs CUDA OOM in Sam3Video." + "reason": "Unexpected zero probabilities in SigLIP2 and Sam3Video CUDA OOM are unrelated symptoms and subsystems." }, { "left": "issue:44016", "right": "issue:44291", "accept": false, - "reason": "Notebook syntax error vs init_empty_weights argument/type error; unrelated code paths." + "reason": "Notebook syntax error and init_empty_weights argument handling are unrelated." + }, + { + "left": "issue:43450", + "right": "issue:44279", + "accept": false, + "reason": "Batched video processor shape handling is unrelated to a dependency issue." }, { "left": "issue:44261", "right": "issue:44263", "accept": false, - "reason": "Both are model-specific correctness bugs, but they affect different components and failure modes." + "reason": "These touch different code paths: MLA layernorm precision vs GlmMoeDsaIndexer torch.split behavior." }, { "left": "issue:43723", "right": "issue:44162", "accept": false, - "reason": "Tokenizer loading regression vs ESM2 model breakage; too broad to be duplicates." - }, - { - "left": "issue:43450", - "right": "issue:44279", - "accept": false, - "reason": "Video processor batching shape bug vs dependency issue; unrelated." 
+ "reason": "AutoTokenizer loading failure and ESM2 being broken are different model/loading issues." }, { "left": "issue:42994", "right": "issue:43317", "accept": false, - "reason": "Both involve quantization/dequantization, but one is saving and the other is device_map offload loading; not the same bug." + "reason": "Quantized model saving failure and dequantized model loading with device_map offload are opposite stages and not the same bug." }, { "left": "issue:43065", "right": "issue:44898", "accept": false, - "reason": "Different model components: dummy Conv2d in Sam3PixelDecoder vs Perceiver interpolation failure." + "reason": "Dummy Conv2d in Sam3PixelDecoder and Perceiver non-default resolution failure are different model-specific defects." }, { "left": "issue:43441", "right": "issue:44683", "accept": false, - "reason": "Both mention attention kernels, but the concrete failures are different code paths and different triggering conditions." + "reason": "Both mention attention, but Ministral-3 FlashAttention failure and torch>=2.9 flex_attention compilation failure are not the same concrete code-path problem." }, { "left": "issue:43650", "right": "issue:45290", "accept": false, - "reason": "Completely unrelated issues." + "reason": "Unrelated issues: one is not a real technical report, the other is an apply_chat_template crash on tool-call messages." }, { "left": "issue:44466", "right": "issue:45276", "accept": false, - "reason": "Both touch Gemma 4 embeddings, but one is serialization of lm_head.weight and the other is resize_token_embeddings not propagating to outputs." + "reason": "Serialization inconsistency for lm_head.weight and resize_token_embeddings not updating embeddings are different behaviors." }, { "left": "issue:43262", "right": "issue:44521", "accept": false, - "reason": "Both use apply_chat_template, but audio sampling-rate defaults and multimodal assistant masks are different bugs." 
+ "reason": "Both involve apply_chat_template, but one is audio sample-rate defaulting and the other is multimodal assistant mask generation." }, { "left": "issue:44623", "right": "issue:45397", "accept": false, - "reason": "processor.save_pretrained missing files vs gemma-4 zero3 from_pretrained; save/load failures but not the same defect." + "reason": "processor.save_pretrained missing files and gemma-4 zero3 from_pretrained are different load/save failures." }, { "left": "issue:44623", "right": "issue:44749", "accept": false, - "reason": "Missing processor files vs performance regression in filtering after upgrade; unrelated." + "reason": "Missing processor save files and Chinese tokenizer/filtering slowdown are unrelated." }, { - "left": "issue:43673", - "right": "issue:43761", + "left": "issue:43784", + "right": "issue:45542", "accept": false, - "reason": "Generation cache/chunked_prefill issue vs CLIPVisionModel hidden_states regression; different models and APIs." + "reason": "Sentence-transformers import NameError and missing TensorFlow backend due to pip extras are unrelated environment/import issues." }, { - "left": "issue:43784", - "right": "issue:45542", + "left": "issue:43673", + "right": "issue:43761", "accept": false, - "reason": "Import NameError in sentence-transformers integration vs TensorFlow backend installation issue." + "reason": "Chunked-prefill cache handling and CLIPVisionModel hidden_states propagation are different model/runtime bugs." }, { "left": "issue:44671", "right": "issue:44779", "accept": false, - "reason": "Both are v5 regressions affecting NLP outputs, but one is CamemBERT masked LM predictions and the other is Deepseek tokenizer behavior." + "reason": "CamemBERT masked LM prediction regressions and Deepseek tokenizer regressions affect different models and layers." }, { "left": "issue:42175", "right": "issue:43976", "accept": false, - "reason": "Backend packaging issue vs Python-version compatibility issue; not duplicates." 
+ "reason": "Missing TensorFlow backend in a torch install and Python version incompatibility in Transformers 5.1.0 are unrelated dependency/environment issues." }, { "left": "issue:43122", "right": "issue:43650", "accept": false, - "reason": "Tokenizer behavior regression vs unrelated placeholder issue." + "reason": "Tokenizer output changes across versions and 'ADD THE DATA' are not the same issue." }, { "left": "issue:44987", "right": "issue:45542", "accept": false, - "reason": "Model loading failure vs TensorFlow installation/backend issue; unrelated." + "reason": "Model loading failure in transformers>=5.1.0 and missing TensorFlow backend from install extras are unrelated." }, { "left": "issue:43653", "right": "issue:45593", "accept": false, - "reason": "BigBirdTokenizer special-token bug vs D-FINE auxiliary-loss behavior; unrelated." + "reason": "BigBirdTokenizer special-token registration and D-FINE auxiliary-loss behavior are unrelated model areas." } ] }, @@ -9857,12 +9921,12 @@ "issue:43901|issue:43994", "issue:44361|issue:45593", "issue:43824|issue:43976", - "issue:44297|issue:45397", "issue:43475|issue:44016", + "issue:44297|issue:45397", "issue:43525|issue:43881", "issue:45561|issue:45563", - "issue:43352|issue:45250", "issue:44589|issue:45362", + "issue:43352|issue:45250", "issue:44188|issue:44336", "issue:41628|issue:43976", "issue:43295|issue:44617", @@ -9885,14 +9949,14 @@ "issue:44162|issue:45200", "issue:44242|issue:44857" ], - "prepared_review_unit_hash": "351a01c8be83cb7059d32306ef1c7ae50421a07d66f58637ff8e69f87de47b17", + "prepared_review_unit_hash": "dd78eee91f823a0d7865dff04c4720e6e161b93603e7f484daaa65e9e1639c26", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, @@ -9907,192 
+9971,192 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This is a heterogeneous cluster of unrelated issues: import/export regressions, model-loading failures, pipeline behavior bugs, documentation problems, and testing/runtime edge cases. None of the proposed soft pairs looks like the same underlying bug or change.", - "confidence": 0.96, - "canonical_issue_reason": "Issue 45003 is the broadest open bug in the set and has a concrete implementation focus in core code (`modeling_utils` / `sys.modules`). The other issues are mostly narrow, model-specific, or unrelated regressions, so 45003 is the least arbitrary cluster anchor.", + "summary": "This cluster is mostly false-positive similarity: it mixes unrelated import/export errors, model-loading regressions, pipeline bugs, docs/test issues, and performance complaints. None of the soft-linked pairs look like the same underlying defect tightly enough to merge.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue is safe here; the items are too heterogeneous to collapse into one duplicate bucket.", "canonical_pr_reason": null, - "best_issue_reason": "45003 is the best representative issue because it is open, implementation-oriented, and broader than the many one-off model/import/doc issues. It is still not a true duplicate of the others.", + "best_issue_reason": "No clear best issue for the whole cluster; at most there are a few small topical groups, but they do not form one duplicate chain.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43901", "right": "issue:43994", "accept": false, - "reason": "Docs wording about `return_all_scores` is unrelated to a SigLIP2 model/pipeline correctness problem." + "reason": "Docs mismatch for return_all_scores is unrelated to a SigLIP2 model output-quality bug." 
}, { "left": "issue:44361", "right": "issue:45593", "accept": false, - "reason": "Tokenizer AttributeError on tasks and D-FINE auxiliary-loss behavior affect different code paths and model families." + "reason": "MLukeTokenizer task AttributeError and D-FINE auxiliary-loss behavior are different models and different failures." }, { "left": "issue:43824", "right": "issue:43976", "accept": false, - "reason": "Missing `Qwen2_5_VLForConditionalGeneration` export is a specific symbol issue, not the Python-version compatibility problem described by 43976." + "reason": "A missing Qwen2.5-VL export and a Python version compatibility issue are unrelated import problems." }, { - "left": "issue:44297", - "right": "issue:45397", + "left": "issue:43475", + "right": "issue:44016", "accept": false, - "reason": "Tokenizer config mismatch in `save_pretrained` is unrelated to Gemma-4 `from_pretrained`/zero3 loading." + "reason": "SAM3 video attribute error is unrelated to a notebook syntax error." }, { - "left": "issue:43475", - "right": "issue:44016", + "left": "issue:44297", + "right": "issue:45397", "accept": false, - "reason": "A SAM3 vision encoder attribute error is unrelated to a notebook syntax error." + "reason": "Tokenizer_class metadata mismatch is not the same bug as gemma-4 zero3 from_pretrained failure." }, { "left": "issue:43525", "right": "issue:43881", "accept": false, - "reason": "Missing `pad_token_id` on `Llama4Config` is a different loading bug than the GLM-4V-9B failure." + "reason": "Both are model-loading failures, but for different model families and likely different code paths." }, { "left": "issue:45561", "right": "issue:45563", "accept": false, - "reason": "Pytest xdist file-race behavior is unrelated to a stale `num_return_sequences` warning in paged generation." + "reason": "A pytest-xdist file race and a stale generation warning are unrelated testing/runtime issues." 
}, { - "left": "issue:43352", - "right": "issue:45250", + "left": "issue:44589", + "right": "issue:45362", "accept": false, - "reason": "One is a model-specific unsupported feature error; the other is just the generic Flash Attention 2.0 topic." + "reason": "Float8 storage lookup failure is unrelated to Qwen3.5 chat crashes." }, { - "left": "issue:44589", - "right": "issue:45362", + "left": "issue:43352", + "right": "issue:45250", "accept": false, - "reason": "Float8 storage deserialization failure is unrelated to the Qwen3.5 chat crash." + "reason": "A specific model lacking Flash Attention 2 support is not the same as the general Flash Attention topic." }, { "left": "issue:44188", "right": "issue:44336", "accept": false, - "reason": "Attention-kernel divergence under `torch.compile` is unrelated to ANSI-code output in `loading_report`." + "reason": "torch.compile kernel divergence and stray ANSI codes in loading_report are unrelated." }, { "left": "issue:41628", "right": "issue:43976", "accept": false, - "reason": "`AutoImageProcessor` import failure is a different missing-symbol issue than the Python-version breakage in 43976." + "reason": "AutoImageProcessor import failure and Python version support regression do not share the same root cause." }, { "left": "issue:43295", "right": "issue:44617", "accept": false, - "reason": "Processor/tokenizer regression in custom model code is unrelated to SAM3 video CUDA OOM." + "reason": "processor/tokenizer regression is unrelated to a CUDA OOM in SAM3 Video." }, { "left": "issue:42491", "right": "issue:43299", "accept": false, - "reason": "Both mention Qwen3 MoE, but one is a LoRA compatibility problem and the other is a model-loading regression; they are not clearly the same fix." + "reason": "Both mention Qwen3 MoE, but one is a LoRA compatibility break and the other is a VL loading regression; too different to merge." 
}, { "left": "issue:44336", "right": "issue:44373", "accept": false, - "reason": "ANSI terminal output and a wrong docstring for `position_ids` are unrelated." + "reason": "ANSI output in a utility and a wrong docstring are not duplicates." }, { "left": "issue:41950", "right": "issue:44220", "accept": false, - "reason": "A video-classification pipeline bug and `_torch_extract_fbank_features()` are different pipeline/code-path issues." + "reason": "video-classification using image processors is unrelated to _torch_extract_fbank_features()." }, { "left": "issue:43901", "right": "issue:44246", "accept": false, - "reason": "A pipeline docs mismatch is unrelated to intermittent import-time slowness." + "reason": "A docs/behavior mismatch is unrelated to intermittent import latency." }, { "left": "issue:44242", "right": "issue:45593", "accept": false, - "reason": "Missing load-balancing loss with `output_router_logits=False` is a different training-loss bug than D-FINE auxiliary-loss handling." + "reason": "Missing load-balancing loss and missing auxiliary losses are both loss-related, but for different models and flags." }, { "left": "issue:44295", "right": "issue:45397", "accept": false, - "reason": "Reading `position_ids` after buffer registration is unrelated to Gemma-4 zero3 loading." + "reason": "position_ids buffer access and gemma-4 zero3 loading are separate bugs." }, { "left": "issue:44462", "right": "issue:45356", "accept": false, - "reason": "Ignoring `tokenizer.json` is a tokenizer-loading bug, while 45356 is a codec/regex regression for Kimi-K2.5." + "reason": "AutoTokenizer ignoring tokenizer.json is not the same as the Kimi-K2.5 codec regression." }, { "left": "issue:44661", "right": "issue:45542", "accept": false, - "reason": "`TOKENIZER_MAPPING_NAMES` handling and undefined TensorFlow backend detection are unrelated." + "reason": "TOKENIZER_MAPPING_NAMES handling and missing TensorFlow backend setup are unrelated." 
}, { "left": "issue:45003", "right": "issue:45561", "accept": false, - "reason": "Unsafe `sys.modules` access in core modeling code is unrelated to the xdist captured-info race." + "reason": "Unsafe sys.modules access and an xdist captured_info race are different failure modes." }, { "left": "issue:43531", "right": "issue:45362", "accept": false, - "reason": "Qwen3-MoE sliding-window behavior and Qwen3.5 chat crashes are different model/runtime bugs." + "reason": "Qwen3-MoE sliding_window behavior and Qwen3.5 chat crashes are not the same defect." }, { "left": "issue:44246", "right": "issue:44496", "accept": false, - "reason": "Import slowness and unrecognized-model config loading are unrelated." + "reason": "Import slowness and unrecognized model config are unrelated." }, { "left": "issue:36246", "right": "issue:42757", "accept": false, - "reason": "Different missing import symbols (`Qwen2_5_VLImageProcessor` vs `is_offline_mode`) with no shared code-path shown." + "reason": "Qwen2.5-VL image processor import and huggingface_hub is_offline_mode import are different API export issues." }, { "left": "issue:36246", "right": "issue:44351", "accept": false, - "reason": "Missing `Qwen2_5_VLImageProcessor` and missing `HybridCache` are separate export/import failures." + "reason": "Different missing symbols from different modules; same symptom, not same bug." }, { "left": "issue:43673", "right": "issue:44561", "accept": false, - "reason": "Chunked-prefill cache handling and removal of `is_torch_fx_available` affect different runtime paths." + "reason": "Chunked prefill cache handling and removal of is_torch_fx_available are unrelated regressions." }, { "left": "issue:44336", "right": "issue:44485", "accept": false, - "reason": "ANSI code emission is unrelated to GLM-5 RoPE implementation." + "reason": "ANSI escaping in loading_report is unrelated to GLM-5 RoPE implementation discussion." 
}, { "left": "issue:43526", "right": "issue:45003", "accept": false, - "reason": "A BEiT image-processor label bug is unrelated to core modeling sys.modules access." + "reason": "BeitImageProcessorFast label reduction and sys.modules access are unrelated code paths." }, { "left": "issue:44162", "right": "issue:45200", "accept": false, - "reason": "ESM2 breakage and Gemma-4 `mm_token_type_ids` defaults are different model-specific regressions." + "reason": "ESM2 breakage and Gemma 4 mm_token_type_ids defaulting are separate model bugs." }, { "left": "issue:44242", "right": "issue:44857", "accept": false, - "reason": "Load-balancing loss omission is unrelated to LwDetrImageLoss AMP crashes." + "reason": "Missing router loss and LwDetrImageLoss AMP crash are not the same issue." } ] }, @@ -10174,12 +10238,12 @@ "issue:44877|issue:45375", "issue:44987|issue:45459", "issue:42886|issue:45229", - "issue:41628|issue:41950", "issue:44779|issue:45292", + "issue:41628|issue:41950", "issue:45292|issue:45584", "issue:43065|issue:43408", - "issue:43232|issue:44336", "issue:45292|issue:45701", + "issue:43232|issue:44336", "issue:43901|issue:45003", "issue:44995|issue:45200", "issue:43257|issue:45137", @@ -10187,28 +10251,28 @@ "issue:44496|issue:45003", "issue:43475|issue:43540", "issue:43653|issue:44162", + "issue:43262|issue:44246", "issue:44995|issue:45588", "issue:44368|issue:45563", - "issue:43262|issue:44246", "issue:44368|issue:44977", - "issue:43673|issue:44734", "issue:44466|issue:45335", + "issue:43673|issue:44734", "issue:45397|issue:45656" ], - "prepared_review_unit_hash": "fe0fdc116d6a004fdd9dc12508a956159014ee18625f52f0c6f1a6f1e8dc466a", + "prepared_review_unit_hash": "7d7774c6553aa55c4c8c3f0f09a10b23bdb9a3d289db3215e6d727d91230e469", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + 
"estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 28, - "serialized_chars": 36192, + "serialized_chars": 36189, "estimated_input_tokens": 9048, "estimated_eval_tokens": 18352 }, @@ -10217,180 +10281,180 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The cluster is mostly heterogeneous, with only loose thematic overlap around Transformers v5 regressions, tokenizers, and embedding/tied-weight behavior. I do not see any soft pair that is clearly the same underlying bug or change, so I rejected all candidate duplicates.", - "confidence": 0.39, - "canonical_issue_reason": "Issue 45292 is the clearest standalone bug report: it names a concrete, reproducible API regression in `resize_token_embeddings` and is phrased broadly enough to serve as a duplicate target for closely related embedding-resize reports.", + "summary": "This cluster does not look like a true duplicate set: the issues span many unrelated Transformers regressions (tokenizers, generation/cache, model configs, DeepSpeed, SAM3, Gemma, Qwen, etc.). I\u2019d treat them as separate bugs rather than one merged triage bucket.", + "confidence": 0.27, + "canonical_issue_reason": "If one issue must represent the group, issue #44987 is the strongest standalone report: it is open, concrete, user-facing, and clearly tied to a version-regression during model loading. However, the set is too diverse for a confident single canonical issue.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 45292 is the best issue candidate in this set because it describes a specific code-path failure cleanly and is more reusable as a triage target than the more model-specific or symptom-heavy reports.", + "best_issue_reason": "Issue #44987 is the best overall triage candidate because it describes a reproducible failure on a specific package/version boundary and is still open for action. 
It is more concrete than the broader warning/config reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44246", "right": "issue:44871", "accept": false, - "reason": "Import performance and Gemma EOS configuration are unrelated bugs." + "reason": "Unrelated problems: import latency versus Gemma eos-token configuration mismatch. No shared code path or concrete bug." }, { "left": "issue:36246", "right": "issue:43596", "accept": false, - "reason": "Different failures: an import error for a vision image processor vs a DeepSpeed/Bert forward crash." + "reason": "Different failures in different subsystems: Qwen2.5-VL import error versus DeepSpeed ZeRO3/BertModel indexing error." }, { "left": "issue:44295", "right": "issue:44749", "accept": false, - "reason": "Registering `position_ids` as a buffer is unrelated to the reported tokenizer slowdown after upgrade." + "reason": "One is about position_ids buffer access, the other about tokenizer/filtering slowdown after upgrade. Not the same bug." }, { "left": "issue:43066", "right": "issue:44779", "accept": false, - "reason": "Both are tokenizer-related, but one is a wrong decoder-type warning and the other is incorrect Deepseek tokenization; not the same concrete bug." + "reason": "Both concern tokenizers in v5, but one is a wrong decoder-type warning and the other is incorrect DeepSeek tokenization results. Too broad to merge." }, { "left": "issue:43441", "right": "issue:43540", "accept": false, - "reason": "FlashAttention failure for Ministral-3 is unrelated to Qwen3OmniMoe video input processing." + "reason": "Different model families and failures: Ministral flash-attention issue versus Qwen3OmniMoe video input validation error." }, { "left": "issue:44877", "right": "issue:45375", "accept": false, - "reason": "Both involve config handling, but they describe different model fields and opposite failure modes, so they are not the same bug." 
+ "reason": "Both involve config strictness, but they target different models and different missing fields; not the same underlying bug." }, { "left": "issue:44987", "right": "issue:45459", "accept": false, - "reason": "Model-loading failure for `physical-intelligence/fast` is unrelated to protobuf-masked tokenizer errors." - }, - { - "left": "issue:42886", - "right": "issue:45229", - "accept": false, - "reason": "Offline cache loading and Gemma4 CUDA OOM are unrelated." + "reason": "Different failure modes: model loading regression versus protobuf-related tokenizer error masking. No common code-path bug shown." }, { - "left": "issue:41628", - "right": "issue:41950", + "left": "issue:42886", + "right": "issue:45229", "accept": false, - "reason": "Missing `AutoImageProcessor` import and a video-classification pipeline misdetecting processors are different issues." + "reason": "Offline tokenizer cache loading and Gemma4 multi-GPU OOM are unrelated." }, { "left": "issue:44779", "right": "issue:45292", "accept": false, - "reason": "Tokenizer correctness regression and embedding resize not updating output embeddings are different code paths." + "reason": "Tokenizer correctness regression versus resize_token_embeddings not updating output embeddings; separate issues." + }, + { + "left": "issue:41628", + "right": "issue:41950", + "accept": false, + "reason": "Importing AutoImageProcessor from transformers and a video-classification pipeline using image processors are related only at a broad API level, not the same concrete bug." }, { "left": "issue:45292", "right": "issue:45584", "accept": false, - "reason": "Embedding resize behavior is unrelated to Whisper empty-transcription generation failure." + "reason": "Embedding-resize behavior and Whisper empty-transcription generation failure are unrelated." }, { "left": "issue:43065", "right": "issue:43408", "accept": false, - "reason": "SAM3 dummy Conv2d implementation and SAM3 model-type warning are not the same bug." 
+ "reason": "Both mention SAM3, but one is a dummy Conv2d implementation detail and the other is a model-type warning. Different bugs." }, { - "left": "issue:43232", - "right": "issue:44336", + "left": "issue:45292", + "right": "issue:45701", "accept": false, - "reason": "Generation kwargs/sync-gpus logic has nothing to do with ANSI code emission in loading reports." + "reason": "Resize-token-embedding behavior and version-dependent tokenization changes do not point to the same defect." }, { - "left": "issue:45292", - "right": "issue:45701", + "left": "issue:43232", + "right": "issue:44336", "accept": false, - "reason": "Embedding resizing and version-dependent tokenization changes are different regressions." + "reason": "Generation kwargs syncing and ANSI escape codes in loading_report are unrelated." }, { "left": "issue:43901", "right": "issue:45003", "accept": false, - "reason": "A docs mismatch for `return_all_scores` is unrelated to unsafe `sys.modules` access." + "reason": "Docs mismatch for text classification versus unsafe sys.modules access in modeling_utils. Different layers and different problems." }, { "left": "issue:44995", "right": "issue:45200", "accept": false, - "reason": "A second-forward cache crash in GlmMoeDsa is unrelated to Gemma4 token type defaults." + "reason": "GlmMoeDsa stale indexer cache on second forward pass is unrelated to Gemma4 text-only fine-tuning token-type defaults." }, { "left": "issue:43257", "right": "issue:45137", "accept": false, - "reason": "Both touch DeepSpeed, but one is MOE weight conversion and the other is a ZeRO3 deque error; not the same bug." + "reason": "Both involve DeepSpeed/ZeRO3, but one is Qwen3 MOE weight conversion and the other is a deque underflow error. Not the same bug." }, { "left": "issue:44060", "right": "issue:44077", "accept": false, - "reason": "Tied-weights warning in Qwen3-Next and `post_init` allowance in patchtsmixer are unrelated." 
+ "reason": "Different model-family warnings: tied-weights warning versus a patchtsmixer post_init validation change." }, { "left": "issue:44496", "right": "issue:45003", "accept": false, - "reason": "Model config loading failure and unsafe `sys.modules` access are not the same underlying issue." + "reason": "Unrecognized model/config loading failure and unsafe sys.modules access are not the same underlying issue." }, { "left": "issue:43475", "right": "issue:43540", "accept": false, - "reason": "Missing `fpn_position_embeddings` in SAM3 output is unrelated to Qwen3OmniMoe video processing." + "reason": "SAM3 vision encoder output attribute error versus Qwen3OmniMoe video-input ValueError. Different code paths." }, { "left": "issue:43653", "right": "issue:44162", "accept": false, - "reason": "BigBirdTokenizer special-token registration and ESM2 breakage are different tokenizer/model problems." + "reason": "BigBirdTokenizer special-token registration issue versus ESM2 broken report. Not the same bug." + }, + { + "left": "issue:43262", + "right": "issue:44246", + "accept": false, + "reason": "Audio chat-template sampling-rate default and import performance are unrelated." }, { "left": "issue:44995", "right": "issue:45588", "accept": false, - "reason": "Stale indexer cache crash and FlashAttention `s_aux=None` crash are different failures." + "reason": "Different failures in different code paths: stale cache on second forward pass versus flash-attention AttributeError on s_aux=None." }, { "left": "issue:44368", "right": "issue:45563", "accept": false, - "reason": "A tied-word-embeddings warning and a stale `num_return_sequences` warning are not the same bug." - }, - { - "left": "issue:43262", - "right": "issue:44246", - "accept": false, - "reason": "Audio chat-template sampling-rate defaults and import latency are unrelated." + "reason": "Both are warnings, but one is tied-word-embeddings config warnings and the other is stale num_return_sequences warning. 
Not the same defect." }, { "left": "issue:44368", "right": "issue:44977", "accept": false, - "reason": "A tied-embedding warning is not the same as a generation failure with FlashAttention." + "reason": "Qwen3.5 flash-attention generation failure is not the same as a tied-weights warning emitted during fine-tuning." }, { - "left": "issue:43673", - "right": "issue:44734", + "left": "issue:44466", + "right": "issue:45335", "accept": false, - "reason": "Generation cache missing during chunked prefill and a KV-cache indexing crash in transformers serve are different code paths." + "reason": "Both involve embedding weights, but one is serialization inconsistency and the other is resize_token_embeddings not propagating to decoder.embed_tokens. Separate bugs." }, { - "left": "issue:44466", - "right": "issue:45335", + "left": "issue:43673", + "right": "issue:44734", "accept": false, - "reason": "Both concern weights/embeddings, but one is serialization inconsistency and the other is resize propagation; they are not clearly the same bug." + "reason": "Generation cache issues in different contexts (chunked_prefill vs transformers serve KV continuation) without evidence of the same code-path bug." }, { "left": "issue:45397", "right": "issue:45656", "accept": false, - "reason": "Gemma4 Zero3 loading and duplicate optimizer stepping under DeepSpeed are unrelated." + "reason": "Gemma4 zero3 from_pretrained and optimizer step being called twice with deepspeed are distinct training/runtime issues." 
} ] }, @@ -10466,8 +10530,8 @@ "soft_pairs": [ "issue:44933|issue:45459", "issue:44869|issue:45356", - "issue:43931|issue:44368", "issue:45137|issue:45600", + "issue:43931|issue:44368", "issue:43232|issue:43673", "issue:43673|issue:45278", "issue:44297|issue:44623", @@ -10496,20 +10560,20 @@ "issue:43232|issue:44188", "issue:42222|issue:45200" ], - "prepared_review_unit_hash": "52199f130e253451c8936cfbb0c44da0f5db32a34afedf23058c0eb30f7158e2", + "prepared_review_unit_hash": "7ab605b108461052637d9c48a6b6e88052730bbf6b4b698ff8b4ea919d34a1d9", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 31, - "serialized_chars": 36717, + "serialized_chars": 36720, "estimated_input_tokens": 9180, "estimated_eval_tokens": 18616 }, @@ -10518,208 +10582,205 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly heterogeneous Transformers breakages that share broad upgrade/import/model-loading themes, not one single bug. The clear duplicate pair in the soft-edge list is the repeated Qwen3ForCausalLM VRAM leak report (42673/43704), and the packet also contains another exact-title duplicate pair, 45310/45478, for the Qwen3.5 MoE from_pretrained error. 
Outside those, the issues are mostly only loosely related by subsystem or model family.", - "confidence": 0.88, - "canonical_issue_reason": "issue:45278 is still the best umbrella issue for the cluster because it is open and broadly representative of the upgrade/import-regression theme, but it should be treated as an umbrella, not as evidence that the whole set is duplicate-heavy.", + "summary": "This cluster is a mix of related-but-distinct Transformers v5 regressions and model-specific breakages (imports, loading, generation, tokenizer, quantization, and save/load issues). I don\u2019t see any safe duplicate merges, and there are no PRs to evaluate.", + "confidence": 0.74, + "canonical_issue_reason": "issue:45278 is the best umbrella representative: it is broad, recent, open, and explicitly covers the v5 upgrade/import-regression theme that many of the other reports orbit around.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45278 is the strongest global issue candidate because it is open and broad enough to anchor triage across several upgrade-related reports, while the more specific items are better treated as separate bugs or duplicate subclusters.", + "best_issue_reason": "issue:45278 is the most suitable cluster representative because it captures the widest user-facing symptom set without overfitting to one model family or one narrow code path.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44933", "right": "issue:45459", "accept": false, - "reason": "Both touch tokenizer/import failures, but the concrete paths differ: image_utils import vs. protobuf-related tokenizer error masking." + "reason": "Both involve import-related failures, but one is a missing import from image_utils while the other is tokenizer error masking when protobuf is absent; different bug paths." }, { "left": "issue:44869", "right": "issue:45356", "accept": false, - "reason": "Different model areas and symptoms: Whisper timestamp decoding crash vs. 
Kimi tokenizer codec/regression handling." + "reason": "Both are tokenizer-adjacent, but Whisper timestamp decoding crash and Kimi-K2.5 codec/fix_mistral_regex regression are separate code paths and symptoms." }, { - "left": "issue:43931", - "right": "issue:44368", + "left": "issue:45137", + "right": "issue:45600", "accept": false, - "reason": "Loading-time shape mismatch for a VL model and a tie_word_embeddings warning are not the same bug." + "reason": "DeepSpeed ZeRO3 deque underflow and auto_mappings CI breakage are unrelated failures; same general release period only." }, { - "left": "issue:45137", - "right": "issue:45600", + "left": "issue:43931", + "right": "issue:44368", "accept": false, - "reason": "DeepSpeed ZeRO3 deque error and stale auto_mappings config references are distinct code-path failures." + "reason": "Qwen3-VL weight-shape mismatch is a model-loading error, while the other is a warning about tie_word_embeddings during LoRA fine-tuning." }, { "left": "issue:43232", "right": "issue:43673", "accept": false, - "reason": "Both are generation-related, but one is a sync_gpus kwargs update issue and the other is a missing cache path during chunked prefill." + "reason": "These both mention generation internals, but sync_gpus state handling and missing GenerationMixin cache in chunked prefill are different bugs." }, { "left": "issue:43673", "right": "issue:45278", "accept": false, - "reason": "A specific generation cache bug is not the same concrete bug as a broad report about many import errors after upgrading." + "reason": "Missing generation cache in chunked prefill is not the same as broad import errors after upgrading Transformers." }, { "left": "issue:44297", "right": "issue:44623", "accept": false, - "reason": "Tokenizer metadata saving and processor file saving are different save_pretrained failure modes." 
+ "reason": "Tokenizer.save_pretrained metadata mismatch and processor.save_pretrained missing files are both save-related, but they affect different serialization layers." }, { "left": "issue:43646", "right": "issue:44162", "accept": false, - "reason": "trust_remote_code field loss and ESM2 being broken are unrelated bugs." + "reason": "Custom model initialization breakage under v5 and ESM2 being broken are both migration regressions, but not the same underlying failure." }, { "left": "issue:44361", "right": "issue:44749", "accept": false, - "reason": "Tokenizer AttributeError on tasks and a performance regression after upgrading are not the same underlying issue." + "reason": "MLukeTokenizer AttributeError and Chinese slowdown after upgrading to 5.3.0 are unrelated; one is a hard error, the other is performance regression." }, { "left": "issue:44623", "right": "issue:45656", "accept": false, - "reason": "Processor save_pretrained file loss and deepspeed optimizer double-step are unrelated." + "reason": "processor.save_pretrained missing files and deepspeed optimizer step called twice are unrelated subsystems and symptoms." }, { "left": "issue:28282", "right": "issue:30990", "accept": false, - "reason": "Missing PyTorch ImportError and Sentence Transformers getting stuck loading are different problems." + "reason": "AutoModel missing PyTorch is an environment/import error, while Sentence Transformers getting stuck loading is a runtime hang; not the same bug." }, { "left": "issue:44749", "right": "issue:45310", "accept": false, - "reason": "A data filtering slowdown and a Qwen3.5 MoE from_pretrained failure are not the same bug." + "reason": "General slowdown after upgrade and Qwen3.5 Moe from_pretrained error are different regressions with different observable failures." }, { "left": "issue:42994", "right": "issue:43482", "accept": false, - "reason": "Quantized model saving failure and Qwen2.5-GGUF loading failure are separate serialization/loading issues." 
+ "reason": "Quantized model saving failure and Qwen2.5-GGUF loading failure are opposite directions of the save/load pipeline and do not share a concrete bug." }, { "left": "issue:42371", "right": "issue:45092", "accept": false, - "reason": "TF32 API guidance and InternVL2 meta-initialization incompatibility are unrelated." + "reason": "TF32 settings warning and old InternVL2 remote-code/meta-init incompatibility are unrelated." }, { "left": "issue:44038", "right": "issue:44623", "accept": false, - "reason": "Qwen3-VL-Moe/transformers-v5 incompatibility and processor.save_pretrained missing files are different failures." + "reason": "Qwen3-VL-Moe v5 bug and missing processor save files touch different model/component paths; not a single fixable defect." }, { "left": "issue:44749", "right": "issue:45656", "accept": false, - "reason": "Upgrade-related slowdown and deepspeed optimizer step duplication are different failure modes." + "reason": "Performance slowdown after upgrade and optimizer double-step under deepspeed are distinct training-loop issues." }, { "left": "issue:42673", "right": "issue:43704", - "accept": true, - "reason": "Exact same title and same symptom: Qwen3ForCausalLM VRAM leak in multiple dataloader threads, so these are the same underlying bug." + "accept": false, + "reason": "Same title text, but the later report is a separate instance and the evidence here does not show a single shared root cause or fixable code path." }, { "left": "issue:43475", "right": "issue:43643", "accept": false, - "reason": "A missing attribute on Sam3VisionEncoderOutput is not the same as missing fields in AutoConfig under trust_remote_code." + "reason": "SAM 3 video missing fpn_position_embeddings and trust_remote_code AutoConfig missing fields are both attribute problems, but in different models and initialization flows." }, { "left": "issue:45230", "right": "issue:45397", "accept": false, - "reason": "Generic bug report vs. 
a specific Gemma-4 ZeRO3 from_pretrained issue; no shared concrete code path." + "reason": "Generic bug report and gemma-4 ZeRO3 from_pretrained failure do not describe the same underlying issue." }, { "left": "issue:45375", "right": "issue:45478", "accept": false, - "reason": "Both involve Qwen3.5 MoE/config loading, but one is a specific missing-field report and the other is a broader from_pretrained error; not enough evidence they are the same bug." + "reason": "Qwen3_5MoeVisionConfig missing a config field and Qwen3.5 Moe from_pretrained error overlap on model family, but the reported failures and fix scope differ." }, { "left": "issue:43475", "right": "issue:45381", "accept": false, - "reason": "SAM 3 video missing attribute and qwen2.5-vl video position-id regression are different model bugs." + "reason": "SAM 3 video output attribute error and qwen2.5-vl vision_position_ids regression are unrelated model-specific bugs." }, { "left": "issue:41628", "right": "issue:44933", "accept": false, - "reason": "Two import errors, but from different modules and different root causes; too broad to merge." + "reason": "AutoImageProcessor import failure and image_utils nonexistent import are both import issues, but they concern different symbols and likely different breakages." }, { "left": "issue:45310", "right": "issue:45375", "accept": false, - "reason": "The former is a model-loading error report; the latter is a specific config-field omission. They may be related, but not clearly the same concrete bug." + "reason": "Both involve Qwen3.5/Moe, but one is a from_pretrained error under transformers>=5.4.0 and the other is a missing config field silently dropped by @strict." }, { "left": "issue:45081", "right": "issue:45103", "accept": false, - "reason": "Tokenizer regex patch crash and auto-docstring annotation handling crash are unrelated." + "reason": "Mistral regex patch crash and auto_docstring annotation handling crash are unrelated utility regressions." 
}, { "left": "issue:42994", "right": "issue:43525", "accept": false, - "reason": "Quantized save_pretrained failure and Llama4Config missing pad_token_id are distinct issues." + "reason": "Quantized model saving failure and Llama4Config missing pad_token_id are different parts of the model lifecycle." }, { "left": "issue:43475", "right": "issue:44661", "accept": false, - "reason": "SAM 3 output attribute failure and add-new-model-like tokenizer mapping failure are different code paths." + "reason": "SAM 3 video output attribute error and add-new-model-like failure in TOKENIZER_MAPPING_NAMES are unrelated model-registration issues." }, { "left": "issue:45362", "right": "issue:45375", "accept": false, - "reason": "Qwen3.5-35B chat crash and a Qwen3_5MoeVisionConfig field being dropped are not the same bug." + "reason": "Qwen3.5-35B chat crash and missing deepstack_visual_indexes are both Qwen-family issues, but not the same bug or code path." }, { "left": "issue:42831", "right": "issue:44222", "accept": false, - "reason": "FineGrainedFP8 accuracy issues and FP8 save_pretrained issues are related only at a high level, not the same bug." + "reason": "FineGrainedFP8 accuracy regression and FP8 save_pretrained for moe are both FP8-related, but one is numerical behavior and the other is serialization." }, { "left": "issue:43873", "right": "issue:44945", "accept": false, - "reason": "Quantization offloading and incorrect output under pipeline parallelism are different problems." + "reason": "Quantization offloading and incorrect output with pipeline parallelism are different distributed-inference problems." }, { "left": "issue:43232", "right": "issue:44188", "accept": false, - "reason": "Generation kwargs sync bug and diverging attention kernels under torch.compile are unrelated." + "reason": "Generation kwargs update after sync_gpus and attention-kernel divergence under torch.compile are different generation/compile bugs." 
}, { "left": "issue:42222", "right": "issue:45200", "accept": false, - "reason": "VitPose model breakage and Gemma 4 mm_token_type_ids defaults are different model-specific issues." + "reason": "SAM 3 vitpose breakage and Gemma 4 mm_token_type_ids defaulting are unrelated model-specific issues." } ] }, - "evaluator_result": { - "accept": false, - "feedback": "The summary is not fully grounded in the packet: it says the only clear duplicate pair is the Qwen3ForCausalLM VRAM-leak report, but the packet also contains two issues with the exact same title, `issue:45310` and `issue:45478` (`[BUG] transformers>=5.4.0, Qwen3.5 Moe from_pretrained error`). That omission makes the duplicate/umbrella assessment too strong. Several soft-edge false verdicts are conservative, but the high-level summary overstates the uniqueness of the VRAM-leak pair." - }, - "evaluator_used": true, - "retried": true, - "accepted_nontrivial_soft_edge": true, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -10791,11 +10852,11 @@ "issue:42994|issue:44164", "issue:44038|issue:45310", "issue:43526|issue:45561", - "issue:43673|issue:44188", - "issue:44496|issue:45561", "issue:36010|issue:41628", - "issue:43504|issue:45561", + "issue:44496|issue:45561", + "issue:43673|issue:44188", "issue:43377|issue:44242", + "issue:43504|issue:45561", "issue:43232|issue:44792", "issue:42898|issue:44568", "issue:30990|issue:35141", @@ -10808,8 +10869,8 @@ "issue:44623|issue:45310", "issue:43784|issue:43824", "issue:43723|issue:44568", - "issue:44060|issue:45125", "issue:43278|issue:45137", + "issue:44060|issue:45125", "issue:42898|issue:45356", "issue:44734|issue:45103", "issue:43874|issue:45092", @@ -10818,202 +10879,214 @@ "issue:42898|issue:44448", "issue:41628|issue:45459" ], - "prepared_review_unit_hash": "78fc8464d3e7c20c947f8558813743b43c6421db49c7e89129c3d34454a4986b", + 
"prepared_review_unit_hash": "67c65a1377f5048a4986df966a930298283a49ea9145a92b5e04ba330b544ecf", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 31, - "serialized_chars": 36970, - "estimated_input_tokens": 9243, - "estimated_eval_tokens": 18742 + "serialized_chars": 36977, + "estimated_input_tokens": 9245, + "estimated_eval_tokens": 18746 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "These are mostly separate Transformers regression reports spanning import/export issues, tokenizer behavior changes, model-specific load/save bugs, and generation/runtime edge cases. The soft pairs share broad themes, but none look like the same concrete underlying defect.", - "confidence": 0.86, - "canonical_issue_reason": null, + "summary": "This cluster is mostly a loose similarity bucket, not a true duplicate set: the issues span unrelated import errors, tokenizer regressions, model-loading bugs, training/runtime failures, and test infra problems. 
I found no soft pair that looks like the same underlying bug or change.", + "confidence": 0.93, + "canonical_issue_reason": "issue:42898 is the broadest and most central of the v5 tokenizer-behavior regression reports, so it is the best umbrella representative for this mostly tokenizer/regression-themed subset.", "canonical_pr_reason": null, - "best_issue_reason": null, + "best_issue_reason": "issue:42898 is the most representative issue in the group by scope and recurrence of nearby v5-tokenization similarities, even though it is not a duplicate of the other reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:41628", "right": "issue:44351", "accept": false, - "reason": "Both are import failures, but they concern different missing symbols (`AutoImageProcessor` vs `HybridCache`) and likely different export paths." + "reason": "Both are import errors, but for different missing symbols (`AutoImageProcessor` vs `HybridCache`) and different failure modes." }, { "left": "issue:44222", "right": "issue:45310", "accept": false, - "reason": "One is FP8 `save_pretrained` for MoE models, the other is a `from_pretrained` load error for Qwen3.5 MoE; different phases and failure modes." + "reason": "Both involve Qwen3.5/MoE serialization/loading, but one is FP8 `save_pretrained` and the other is a `from_pretrained` load error; not the same bug." }, { "left": "issue:42994", "right": "issue:44164", "accept": false, - "reason": "Both involve saving/loading, but one is quantized model saving and the other is `extra_state` handling in save/from_pretrained plumbing." + "reason": "Both touch save/load paths, but one is quantized model saving and the other is `extra_state` handling; different concrete issues." }, { "left": "issue:44038", "right": "issue:45310", "accept": false, - "reason": "Both mention Qwen/MoE, but the model families and concrete breakage differ (`Qwen3-VL-Moe` vs `Qwen3.5 Moe` from_pretrained error)." 
+ "reason": "Same broad model family, but different versions and different failure surfaces (`Qwen3-VL-Moe` vs `Qwen3.5 Moe` load error)." }, { "left": "issue:43526", "right": "issue:45561", "accept": false, - "reason": "These are unrelated: one is a BEiT label reduction bug, the other is a pytest-xdist race in captured test output." + "reason": "Completely unrelated: BEiT label reduction bug versus a pytest-xdist captured-file race." }, { - "left": "issue:43673", - "right": "issue:44188", + "left": "issue:36010", + "right": "issue:41628", "accept": false, - "reason": "Both touch generation/runtime behavior, but one is missing cache support in chunked prefill while the other is attention-kernel divergence under `torch.compile`." + "reason": "Both are import failures, but they concern different API symbols and separate regressions." }, { "left": "issue:44496", "right": "issue:45561", "accept": false, - "reason": "An unrecognized model config error is unrelated to a test-worker race on `captured_info.txt`." + "reason": "Unrecognized model/config loading has nothing to do with a pytest-xdist file race." }, { - "left": "issue:36010", - "right": "issue:41628", + "left": "issue:43673", + "right": "issue:44188", "accept": false, - "reason": "Both are import errors, but they are for different APIs and modules, so they are not the same bug." + "reason": "Both are generation/runtime bugs, but one is missing generation cache in chunked prefill and the other is divergent attention kernels under `torch.compile`." }, { - "left": "issue:43504", - "right": "issue:45561", + "left": "issue:43377", + "right": "issue:44242", "accept": false, - "reason": "A legacy-field load failure in BEiT segmentation is unrelated to the xdist file race." + "reason": "Different model subsystems and different bugs: padding-mask correctness in MIMI vs missing load-balancing loss gating." 
}, { - "left": "issue:43377", - "right": "issue:44242", + "left": "issue:43504", + "right": "issue:45561", "accept": false, - "reason": "Different model behaviors: missing padding-mask support in MIMI encoder vs gating/load-balancing loss omission when router logits are off." + "reason": "Model preset loading with a legacy field is unrelated to the test-worker file race." }, { "left": "issue:43232", "right": "issue:44792", "accept": false, - "reason": "One is a generation kwargs update bug after `sync_gpus`; the other is a Janus image-generation test failure." + "reason": "Different code paths: generation-kwargs update after `sync_gpus` versus a Janus image-generation test failure." }, { "left": "issue:42898", "right": "issue:44568", "accept": false, - "reason": "Both are tokenizer regressions in v5, but one is `clean_up_tokenization_spaces` behavior and the other is `add_special_tokens` not inserting BOS/EOS." + "reason": "Both are tokenizer regressions in v5, but they are different concrete bugs: cleanup-space behavior versus missing BOS/EOS insertion for a specific tokenizer." }, { "left": "issue:30990", "right": "issue:35141", "accept": false, - "reason": "A Sentence Transformers loading hang and a token-embedding reinitialization bug are unrelated code paths." + "reason": "Unrelated issues: sentence-transformers loading hang versus token-embedding reinitialization after resize." }, { "left": "issue:43479", "right": "issue:44079", "accept": false, - "reason": "Both mention `None` handling, but one is multimodal config initialization and the other is `ModelOutput` key assignment logic." + "reason": "One is multimodal config initialization from `None`; the other is `ModelOutput` key assignment when values were previously `None`." }, { "left": "issue:43479", "right": "issue:44869", "accept": false, - "reason": "Different subsystems: multimodal config defaults vs Whisper word-timestamp decoding crash." 
+ "reason": "Different model areas and symptoms: multimodal config defaults vs Whisper timestamp decode crash." }, { "left": "issue:43976", "right": "issue:44933", "accept": false, - "reason": "A Python-version compatibility issue is unrelated to a missing `image_utils` import." + "reason": "Python version compatibility and a missing image_utils import are unrelated bugs." }, { "left": "issue:42175", "right": "issue:44933", "accept": false, - "reason": "Package extras/backend composition and a missing import are distinct problems." + "reason": "Backend dependency installation does not match a nonexistent import from `image_utils`." }, { "left": "issue:44964", "right": "issue:45137", "accept": false, - "reason": "Loading a specific Phi-4 multimodal checkpoint and a DeepSpeed ZeRO3 deque underflow are unrelated." + "reason": "Phi-4 multimodal loading failure and DeepSpeed ZeRO3 deque underflow are unrelated." }, { "left": "issue:44188", "right": "issue:44373", "accept": false, - "reason": "A runtime attention-kernel branching bug is not the same as a docstring mismatch." + "reason": "A torch.compile attention-kernel divergence bug is unrelated to a docstring issue." }, { "left": "issue:44623", "right": "issue:45310", "accept": false, - "reason": "Processor `save_pretrained` missing files and Qwen3.5 MoE `from_pretrained` failure are different serialization directions and targets." + "reason": "Missing files in `processor.save_pretrained` is a different save-path problem than a Qwen3.5 MoE load error." }, { "left": "issue:43784", "right": "issue:43824", "accept": false, - "reason": "Both are import errors, but one is `nn` missing during sentence-transformers import and the other is a missing Qwen2.5-VL class export." + "reason": "Both are import errors, but for different missing names in different packages (`nn` vs `Qwen2_5_VLForConditionalGeneration`)." 
}, { "left": "issue:43723", "right": "issue:44568", "accept": false, - "reason": "Both involve tokenizers, but one is tokenizer loading via `AutoTokenizer.from_pretrained` and the other is special-token insertion semantics." + "reason": "Generic tokenizer loading failure and a specific BOS/EOS tokenization regression are not the same bug." }, { - "left": "issue:44060", - "right": "issue:45125", + "left": "issue:43278", + "right": "issue:45137", "accept": false, - "reason": "Both are Qwen3-era model issues, but one is an incorrect tied-weights warning and the other is a missing `_tp_plan` for tensor parallelism." + "reason": "Embedding dtype drift during evaluation is unrelated to a ZeRO3 deque error." }, { - "left": "issue:43278", - "right": "issue:45137", + "left": "issue:44060", + "right": "issue:45125", "accept": false, - "reason": "An embedding dtype drift between train/eval is unrelated to a ZeRO3 deque pop error." + "reason": "Incorrect tied-weights warning logic and missing tensor-parallel plan metadata are different model implementation bugs." }, { "left": "issue:42898", "right": "issue:45356", "accept": false, - "reason": "Different tokenizer regressions: cleanup-space behavior versus Kimi codec/regex handling." + "reason": "Both are tokenizer regressions in nearby versions, but one concerns cleanup-space behavior and the other codec handling/warning behavior in a specific tokenizer." + }, + { + "left": "issue:44734", + "right": "issue:45103", + "accept": false, + "reason": "Different layers entirely: KV-cache continuation crash versus an auto-docstring AttributeError with future annotations." + }, + { + "left": "issue:43874", + "right": "issue:45092", + "accept": false, + "reason": "Both are multimodal/model-init issues, but they affect different models and different failure mechanisms." }, { "left": "issue:30333", "right": "issue:33290", "accept": false, - "reason": "MLFlow reporting on failed training jobs and a deepspeed OOM are unrelated." 
+ "reason": "MLflow reporting after training failure and DeepSpeed OOM are distinct training-time problems." }, { "left": "issue:42898", "right": "issue:44448", "accept": false, - "reason": "One is a tokenizer cleanup option change, the other is different model outputs for Pegasus between v4 and v5." + "reason": "Both relate to v5 output regressions, but Pegasus v4/v5 output mismatch is not the same concrete tokenizer cleanup bug." }, { "left": "issue:41628", "right": "issue:45459", "accept": false, - "reason": "A missing exported class and tokenizer error masking via protobuf exception handling are different import/error-handling bugs." + "reason": "Different import-related failures: missing top-level export versus exception masking when protobuf is absent." } ] }, @@ -11102,8 +11175,8 @@ "issue:44297|issue:45310", "issue:44222|issue:44484", "issue:30333|issue:36331", - "issue:44263|issue:45092", "issue:43723|issue:44977", + "issue:44263|issue:45092", "issue:44483|issue:45406", "issue:30333|issue:34689", "issue:28282|issue:30333", @@ -11111,8 +11184,8 @@ "issue:44295|issue:44623", "issue:44373|issue:44485", "issue:44261|issue:45092", - "issue:43723|issue:44871", "issue:44514|issue:45103", + "issue:43723|issue:44871", "issue:43723|issue:44991", "issue:45092|issue:45357", "issue:43723|issue:44442", @@ -11126,256 +11199,262 @@ "issue:42757|issue:45278", "issue:43756|issue:44060" ], - "prepared_review_unit_hash": "42d2da78a2984a97224d7c2df82c299fe0dfebdea278f4dcf5524e6672417374", + "prepared_review_unit_hash": "aa86882df512c92b1b789055e5a65928337238463578b94675d3f7928ffec2f0", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 38, - "serialized_chars": 38851, - 
"estimated_input_tokens": 9713, - "estimated_eval_tokens": 19682 + "serialized_chars": 38848, + "estimated_input_tokens": 9712, + "estimated_eval_tokens": 19680 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is highly heterogeneous and mostly contains separate bug reports around imports, tokenizers, config serialization, and model-specific regressions. No soft-edge pair looks like the same underlying bug/change, so all candidate links should be rejected.", + "summary": "This cluster is heterogeneous: most items are separate Transformers regressions, import errors, tokenizer/load/save issues, and a few unrelated model-specific bugs. None of the soft edges look like true duplicate pairs; no PRs are present.", "confidence": 0.84, - "canonical_issue_reason": "issue:45278 is the broadest umbrella in the set: it describes widespread import errors after a Transformers version upgrade, which is the closest fit to the cluster\u2019s repeated import/load-regression theme.", + "canonical_issue_reason": "Issue 43723 is the broadest, most central report in the set: a concrete v5 tokenizer-loading regression that best represents the recurring tokenizer/load failure theme.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45278 is the best representative issue because it is the most general, user-facing, and cluster-like report; it can plausibly anchor the broader import/error regressions in this set.", + "best_issue_reason": "43723 is the most useful representative issue because it has a clear repro, a specific regression description, and sits closest to the few other tokenizer-related reports without being too narrow.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44295", "right": "issue:45656", "accept": false, - "reason": "Different bugs: position_ids buffer access vs optimizer step being called twice under DeepSpeed." 
+ "reason": "Unrelated bugs: buffer read/access error vs optimizer stepping twice under deepspeed." }, { "left": "issue:42898", "right": "issue:43295", "accept": false, - "reason": "Tokenizer cleanup behavior change is unrelated to processor.tokenizer/image-passing regression." + "reason": "Different regression surfaces: tokenization cleanup behavior vs processor.tokenizer/image handling." }, { "left": "issue:44222", "right": "issue:44297", "accept": false, - "reason": "Both involve save_pretrained, but one is FP8 MoE saving and the other is tokenizer metadata mismatch." + "reason": "FP8 save_pretrained/moe issue is unrelated to tokenizer_class metadata mismatch on save." }, { "left": "issue:44297", "right": "issue:45656", "accept": false, - "reason": "Tokenizer save metadata bug is unrelated to DeepSpeed optimizer-step duplication." + "reason": "Tokenizer config mismatch and deepspeed optimizer behavior are unrelated." }, { "left": "issue:43479", "right": "issue:43644", "accept": false, - "reason": "Multimodal config defaults and non-persistent buffer junk are different initialization/storage problems." + "reason": "Different initialization bugs: multimodal config defaults vs non-persistent buffer filling." }, { "left": "issue:44442", "right": "issue:45459", "accept": false, - "reason": "AutoTokenizer failing on a specific tokenizer is different from protobuf exception masking during tokenizer decode." + "reason": "Tokenizer class loading failure and protobuf exception masking have different root causes." }, { "left": "issue:42831", "right": "issue:44623", "accept": false, - "reason": "FineGrainedFP8 accuracy and processor.save_pretrained missing files are unrelated." + "reason": "FP8 accuracy regression is unrelated to processor.save_pretrained missing files." }, { "left": "issue:44222", "right": "issue:44623", "accept": false, - "reason": "Both touch saving, but FP8 MoE save failure is not the same as missing processor files." 
+ "reason": "Both involve saving, but one is FP8/moe model saving and the other is processor file persistence." }, { "left": "issue:44206", "right": "issue:45103", "accept": false, - "reason": "Unsupported feature argument in LasrFeatureExtractor is unrelated to auto-docstring annotation parsing." + "reason": "Feature extractor crash from unsupported arg is unrelated to auto_docstring annotations failure." }, { "left": "issue:41628", "right": "issue:42757", "accept": false, - "reason": "Both are import errors, but they are for different symbols and different packages." + "reason": "Both are import errors, but they are for different missing symbols with different fixes." }, { "left": "issue:22355", "right": "issue:28282", "accept": false, - "reason": "Missing transformers.onnx module and missing PyTorch are separate dependency/import failures." + "reason": "Missing transformers.onnx and missing PyTorch are separate environment/import problems." }, { "left": "issue:44263", "right": "issue:44360", "accept": false, - "reason": "Same broad DSA area, but torch.split return handling is not the same as a missing ReLU in the indexer." + "reason": "Same subsystem, but different DSA indexer bugs with different concrete failures." }, { "left": "issue:44297", "right": "issue:45310", "accept": false, - "reason": "Tokenizer class metadata mismatch is unrelated to Qwen3.5 from_pretrained loading error." + "reason": "Tokenizer_class mismatch on save is not the same bug as Qwen3.5-Moe from_pretrained failure." }, { "left": "issue:44222", "right": "issue:44484", "accept": false, - "reason": "FP8 MoE save failure is not the same issue as max_shard_size default behavior." + "reason": "FP8/moe save failure and max_shard_size question are unrelated save_pretrained topics." }, { "left": "issue:30333", "right": "issue:36331", "accept": false, - "reason": "MLflow reporting on failed jobs is unrelated to the trainer compute_loss signature change." 
+ "reason": "MLflow reporting and trainer loss signature errors are unrelated training issues." }, { - "left": "issue:44263", - "right": "issue:45092", + "left": "issue:43723", + "right": "issue:44977", "accept": false, - "reason": "DSA indexer split semantics and remote-code/meta-init incompatibility are different code paths." + "reason": "Generic tokenizer-load regression and flash-attention generation failure are different code paths." }, { - "left": "issue:43723", - "right": "issue:44977", + "left": "issue:44263", + "right": "issue:45092", "accept": false, - "reason": "Tokenizer loading regression and flash-attention generation failure are different symptoms and fixes." + "reason": "DSA indexer torch.split behavior is unrelated to InternVL2 meta-init incompatibility." }, { "left": "issue:44483", "right": "issue:45406", "accept": false, - "reason": "Chat-completions request rejection is unrelated to Gemma4Processor missing _tokenizer." + "reason": "Chat/completions request rejection and Gemma4Processor _tokenizer attribute error are different serving bugs." }, { "left": "issue:30333", "right": "issue:34689", "accept": false, - "reason": "MLflow job reporting and Llama model loading regression are unrelated." + "reason": "MLFlow reporting and Llama 3.2 vision model-loading regression are unrelated." }, { "left": "issue:28282", "right": "issue:30333", "accept": false, - "reason": "Missing PyTorch import error is unrelated to MLflow not reporting failed training jobs." + "reason": "AutoModel missing PyTorch and MLFlow job status reporting are unrelated." }, { "left": "issue:30333", "right": "issue:35141", "accept": false, - "reason": "Training-job reporting and embedding reinitialization after resize are unrelated bugs." + "reason": "Training-job reporting and embedding reinitialization bug are unrelated." 
}, { "left": "issue:44295", "right": "issue:44623", "accept": false, - "reason": "Position_ids buffer read error is unrelated to processor save_pretrained file omission." + "reason": "Reading position_ids from a buffer and processor save_pretrained missing files are unrelated." }, { "left": "issue:44373", "right": "issue:44485", "accept": false, - "reason": "Wrong docstring for position_ids is documentation-only and not the same as GLM-5 RoPE behavior." + "reason": "Docstring issue for position_ids is unrelated to GLM-5 RoPE implementation." }, { "left": "issue:44261", "right": "issue:45092", "accept": false, - "reason": "MLA layernorm epsilon precision issue is unrelated to InternVL2 meta-initialization incompatibility." + "reason": "Layernorm eps precision issue is unrelated to old InternVL2 meta-init compatibility." }, { - "left": "issue:43723", - "right": "issue:44871", + "left": "issue:44514", + "right": "issue:45103", "accept": false, - "reason": "Tokenizer loading regression and eos_token_id configuration mismatch are different model/tokenizer issues." + "reason": "Batched apply_chat_template crash is unrelated to auto_docstring future-annotations crash." }, { - "left": "issue:44514", - "right": "issue:45103", + "left": "issue:43723", + "right": "issue:44871", "accept": false, - "reason": "Batched chat-template crash is unrelated to annotation-handling in auto-docstring." + "reason": "Both are transformer loading/save-adjacent, but one is tokenizer loading and the other is eos_token_id configuration." }, { "left": "issue:43723", "right": "issue:44991", "accept": false, - "reason": "Both are tokenizer-load failures, but they concern different models and likely different failure modes." + "reason": "Both mention tokenizer loading, but they are different model-specific failures and not the same bug." 
}, { "left": "issue:45092", "right": "issue:45357", "accept": false, - "reason": "Remote-code meta-init incompatibility is different from incorrect visual-encoder keys on save_pretrained." + "reason": "Different Qwen/InternVL regressions with different failure modes and code paths." + }, + { + "left": "issue:43723", + "right": "issue:44442", + "accept": false, + "reason": "Generic tokenizer-load regression and FastSpeech2ConformerTokenizer failure are different concrete issues." }, { "left": "issue:43479", "right": "issue:44913", "accept": false, - "reason": "Config defaults for multimodal models and GPTNeoX rotary_pct reload behavior are separate config bugs." + "reason": "Multimodal config defaulting and GPTNeoX rotary_pct reload are unrelated config bugs." }, { "left": "issue:30990", "right": "issue:38175", "accept": false, - "reason": "SentenceTransformers loading stall and SigLIP2 zero probabilities are unrelated model-specific problems." + "reason": "Sentence Transformers loading hang and SigLIP2 zero-probability issue are unrelated." }, { "left": "issue:42886", "right": "issue:42994", "accept": false, - "reason": "Offline tokenizer cache loading and quantized model saving are different subsystems." + "reason": "Offline tokenizer cache loading and quantized model saving are different failure modes." }, { "left": "issue:41628", "right": "issue:43824", "accept": false, - "reason": "Both are import errors, but they concern different classes and likely different missing exports." + "reason": "Different missing imports for different models; not the same underlying export bug." }, { "left": "issue:42831", "right": "issue:45310", "accept": false, - "reason": "FineGrainedFP8 accuracy issue is unrelated to Qwen3.5 MoE from_pretrained errors." + "reason": "FineGrainedFP8 accuracy and Qwen3.5-Moe from_pretrained error are unrelated." 
}, { "left": "issue:42831", "right": "issue:44038", "accept": false, - "reason": "FP8 accuracy problems and Qwen3-VL-Moe loading bugs are not the same underlying issue." + "reason": "FP8 accuracy regression and Qwen3-VL-Moe bug are distinct issues." }, { "left": "issue:44263", "right": "issue:44373", "accept": false, - "reason": "A torch.split return-value bug is unrelated to a wrong docstring for position_ids." + "reason": "A DSA return-value bug and a docstring bug are not duplicates." }, { "left": "issue:42757", "right": "issue:45278", "accept": false, - "reason": "Missing is_offline_mode from huggingface_hub is a specific dependency mismatch, not the same as broad import errors after upgrading Transformers." + "reason": "Missing is_offline_mode is one import error; the other is a broad upgrade report with many import failures." }, { "left": "issue:43756", "right": "issue:44060", "accept": false, - "reason": "Smollm3 RoPE-layer dropping and Qwen3-Next tied-weights warning are distinct architecture/configuration bugs." + "reason": "Smollm3 RoPE-layer bug and Qwen3-Next tied-weights warning are unrelated model-specific changes." 
} ] }, @@ -11456,8 +11535,8 @@ "issue:44117|issue:44704", "issue:30990|issue:33357", "issue:43976|issue:44945", - "issue:42907|issue:44945", "issue:44704|issue:45290", + "issue:42907|issue:44945", "issue:43122|issue:44514", "issue:44295|issue:44297", "issue:43232|issue:44743", @@ -11482,14 +11561,14 @@ "issue:42907|issue:43452", "issue:44038|issue:44222" ], - "prepared_review_unit_hash": "1883f0137650b61085dcd3b6734d1605d3dea7cab5b786fc87016bf0209c2729", + "prepared_review_unit_hash": "afe4d0dcae6791e96074eb5bbc4b7b6adc2ec030c07b76822342ac80d246bb27", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 47, @@ -11504,210 +11583,198 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "No duplicate cluster here: the soft pairs mostly connect unrelated regressions, API changes, and model-specific bugs rather than the same underlying issue.", - "confidence": 0.97, - "canonical_issue_reason": "No single canonical issue; the cluster is heterogeneous and the soft pairs are all false positives.", + "summary": "This set is heterogeneous: most items are unrelated Transformers bugs or model-specific regressions. 
None of the suggested soft pairs look like the same underlying issue, so there is no clear duplicate cluster here.", + "confidence": 0.87, + "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "issue:45278 is the broadest user-facing report in the set (multiple import errors after upgrading), so it is the best umbrella issue if one must be chosen.", + "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43232", "right": "issue:45593", "accept": false, - "reason": "Different bugs: generation kwargs after sync_gpus vs D-FINE auxiliary-loss handling." + "reason": "Generation state sync bug vs D-FINE auxiliary-loss behavior; different models and code paths." }, { "left": "issue:43576", "right": "issue:43784", "accept": false, - "reason": "CLI env command breakage is unrelated to a sentence-transformers import NameError." + "reason": "Broken `transformers env` CLI is unrelated to a `sentence-transformers` import NameError." }, { "left": "issue:43122", "right": "issue:44521", "accept": false, - "reason": "Tokenizer output regression vs multimodal assistant_mask generation; different code paths." + "reason": "Tokenizer version drift vs multimodal chat-template mask generation; not the same bug." }, { "left": "issue:42886", "right": "issue:43066", "accept": false, - "reason": "Offline cache loading failure and wrong decoder type are separate issues." + "reason": "Offline cache loading failure vs tokenizer decoder type mismatch in v5; different failures." }, { "left": "issue:42371", "right": "issue:44263", "accept": false, - "reason": "TF32 settings warning is unrelated to torch.split return-value handling." + "reason": "TF32 API guidance and a GLM indexer split-value issue are unrelated." }, { "left": "issue:44117", "right": "issue:44704", "accept": false, - "reason": "Both involve loading logic, but one is mapping-name None handling and the other is cached_file kwargs propagation." 
+ "reason": "`TOKENIZER_MAPPING_NAMES`/`from_pretrained` logic is different from `AutoProcessor` kwargs propagation." }, { "left": "issue:30990", "right": "issue:33357", "accept": false, - "reason": "Sentence-Transformers loading hang and Mac bus error on CLIP are different failures." + "reason": "Sentence-Transformers loading hang vs Mac bus error with CLIP; distinct symptoms and causes." }, { "left": "issue:43976", "right": "issue:44945", "accept": false, - "reason": "Python version support breakage is not the same as pipeline parallelism producing wrong outputs." + "reason": "Python-version compatibility regression is unrelated to pipeline-parallel output correctness." }, { - "left": "issue:42907", - "right": "issue:44945", + "left": "issue:44704", + "right": "issue:45290", "accept": false, - "reason": "Dequantized save failure and incorrect PP outputs are unrelated." + "reason": "Processor cached-file kwargs bug is unrelated to chat-template crashes with tool-call messages." }, { - "left": "issue:44704", - "right": "issue:45290", + "left": "issue:42907", + "right": "issue:44945", "accept": false, - "reason": "Processor cached_file kwargs and chat-template crashes on tool calls are different APIs." + "reason": "Dequantized model save failure is not the same as pipeline-parallel generation corruption." }, { "left": "issue:43122", "right": "issue:44514", "accept": false, - "reason": "Tokenization regression and batched apply_chat_template crash are distinct bugs." + "reason": "Both involve tokenization, but one is version-to-version tokenization drift and the other is a batched chat-template crash." }, { "left": "issue:44295", "right": "issue:44297", "accept": false, - "reason": "Position_ids buffer access and tokenizer_class mismatch on save_pretrained are different serialization problems." + "reason": "Position-id buffer read error vs tokenizer-class metadata mismatch; separate save/load issues." 
}, { "left": "issue:43232", "right": "issue:44743", "accept": false, - "reason": "Generation update after sync_gpus is unrelated to Qwen3.5 recurrent-state reset with cache." + "reason": "Generation kwargs update after GPU sync is not the same as recurrent-state reset in modular Qwen3.5." }, { "left": "issue:39401", "right": "issue:44488", "accept": false, - "reason": "Qwen3 offset_mapping error and sleng-bert load failure are unrelated tokenizer/model-loading issues." + "reason": "Wrong offset mapping for Qwen3 tokenizer is unrelated to loading `cjvt/sleng-bert`." }, { "left": "issue:43408", "right": "issue:43475", "accept": false, - "reason": "Model-type warning and missing attribute in SAM 3 Video are not the same bug." + "reason": "Model-type warning vs missing `fpn_position_embeddings` attribute; related family, but not the same defect." }, { "left": "issue:43232", "right": "issue:45245", "accept": false, - "reason": "Generation kwargs bug and category-count runtime error are unrelated." + "reason": "Generation keyword handling and category-count runtime limits are unrelated." }, { "left": "issue:42907", "right": "issue:43937", "accept": false, - "reason": "Model save failure for dequantized Ministrals/Devstrals does not match invalid GenerationConfig." + "reason": "Saving dequantized models is unrelated to invalid `GenerationConfig` for GLM-5." }, { "left": "issue:41628", "right": "issue:45278", "accept": false, - "reason": "Both mention imports, but AutoImageProcessor missing is not the same concrete regression as the broad import-error report." + "reason": "A specific missing import is not the same as a broad collection of import errors after upgrade." }, { "left": "issue:43784", "right": "issue:45600", "accept": false, - "reason": "Sentence-transformers import NameError and auto_mappings CI break from removed configs are different import failures." 
+ "reason": "Import NameError in sentence-transformers is not the same as stale `auto_mappings.py` references." }, { "left": "issue:44360", "right": "issue:44485", "accept": false, - "reason": "DSA indexer ReLU discussion and GLM-5 RoPE implementation are unrelated model internals." + "reason": "DSA indexer missing ReLU is unrelated to GLM-5 RoPE implementation." }, { "left": "issue:39401", "right": "issue:44521", "accept": false, - "reason": "Wrong offset_mapping and zero assistant_masks are both tokenization-adjacent but not the same underlying bug." + "reason": "Tokenizer offset mapping bug is unrelated to multimodal assistant-mask generation." }, { "left": "issue:44261", "right": "issue:44373", "accept": false, - "reason": "Numerical precision issue in MLA q_a_layernorm is unrelated to a docstring correction." + "reason": "A precision/config issue in MLA is unrelated to a docstring correction." }, { "left": "issue:44625", "right": "issue:44829", "accept": false, - "reason": "num_labels propagation in Qwen3.5 is unrelated to flash_attention_3 degenerate training." + "reason": "`num_labels` propagation in Qwen3.5 is unrelated to flash-attention-3 training degeneration." }, { "left": "issue:43116", "right": "issue:45200", "accept": false, - "reason": "Example-script multi-label bug and Gemma 4 mm_token_type_ids defaulting are different training/fine-tuning issues." + "reason": "Empty multi-label classification results in an example script is unrelated to Gemma 4 multimodal token-type defaults." }, { "left": "issue:43122", "right": "issue:44779", "accept": false, - "reason": "Both are tokenizer regressions, but they involve different models and different failure modes." + "reason": "Different tokenization across versions is not the same as DeepSeek tokenizer incorrect results." }, { "left": "issue:29127", "right": "issue:30990", "accept": false, - "reason": "LayoutLMv3 error-message clarity and Sentence-Transformers loading hang are unrelated." 
+ "reason": "LayoutLMv3 error messaging and a loading hang have no common underlying bug." }, { "left": "issue:42371", "right": "issue:42831", "accept": false, - "reason": "TF32 behavior settings and FineGrainedFP8 accuracy are different numerical-stability topics." + "reason": "TF32 control API changes are unrelated to FineGrainedFP8 accuracy issues." }, { "left": "issue:44038", "right": "issue:44297", "accept": false, - "reason": "Qwen3-VL-Moe v5 bug and tokenizer_class mismatch on save are unrelated." - }, - { - "left": "issue:42907", - "right": "issue:44964", - "accept": false, - "reason": "Saving dequantized Ministrals/Devstrals and loading Phi-4 multimodal are different model-loading failures." - }, - { - "left": "issue:44038", - "right": "issue:44295", - "accept": false, - "reason": "Qwen3-VL-Moe regression and position_ids buffer read error do not share the same code path." + "reason": "Qwen3-VL-Moe bug and tokenizer save metadata mismatch are different subsystems." }, { "left": "issue:36296", "right": "issue:43116", "accept": false, - "reason": "Tensor-parallel training bug and empty multi-label classification results are unrelated." + "reason": "Tensor-parallel training bug and multi-label example-script failure are unrelated." }, { "left": "issue:42907", - "right": "issue:43452", + "right": "issue:44964", "accept": false, - "reason": "GGUF from_pretrained breakage and failing to save dequantized models are different format-handling bugs." + "reason": "Saving dequantized Ministrals/Devstrals is unrelated to loading Phi-4 multimodal instruct." }, { "left": "issue:44038", "right": "issue:44222", "accept": false, - "reason": "Both mention MoE, but one is a v5 model bug and the other is FP8 save_pretrained behavior." + "reason": "Qwen3-VL-Moe bug and FP8 `save_pretrained` issue are different model/serialization problems." 
} ] }, @@ -11788,8 +11855,8 @@ "issue:43122|issue:44977", "issue:43278|issue:44945", "issue:43784|issue:45446", - "issue:43937|issue:44485", "issue:38617|issue:45278", + "issue:43937|issue:44485", "issue:42222|issue:43441", "issue:42673|issue:45072", "issue:43064|issue:43673", @@ -11798,8 +11865,8 @@ "issue:43116|issue:43493", "issue:44075|issue:44936", "issue:44038|issue:45478", - "issue:44222|issue:45478", "issue:43976|issue:45446", + "issue:44222|issue:45478", "issue:43116|issue:43825", "issue:43937|issue:45563", "issue:33453|issue:39401", @@ -11811,20 +11878,20 @@ "issue:43937|issue:44977", "issue:45341|issue:45561" ], - "prepared_review_unit_hash": "77e24539e91bbae2c7d72f8b591ea983e10a0309f6a8c13ebacfbfe924774449", + "prepared_review_unit_hash": "4f9f7727cea5bf907e223b232598c62be9dd51ae9afbe55e718ec94a617e5b17", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 30, - "serialized_chars": 35672, + "serialized_chars": 35671, "estimated_input_tokens": 8918, "estimated_eval_tokens": 18092 }, @@ -11833,192 +11900,192 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is not a true duplicate set; it mixes unrelated Transformers v5 regressions, model-specific bugs, import failures, and docs/API issues. 
The closest cluster anchor is the broad import-regression report, but most pairs should stay separate.", - "confidence": 0.92, - "canonical_issue_reason": "issue:45278 is the broadest umbrella report here: it describes multiple import errors after upgrading from 4.57.0 to 5.5.0, which makes it a better anchor than the narrower model- or API-specific issues.", + "summary": "This cluster is a loose mix of Transformers v5 regression reports (imports, tokenization, model loading, training, and docs), but the paired items are not the same concrete bug/change. I would not merge any soft edges; treat them as separate issues.", + "confidence": 0.84, + "canonical_issue_reason": "issue:45278 is the broadest umbrella in the set ('Many import errors after update from 4.57.0 to 5.5.0') and best represents the shared migration-regression theme.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45278 is the best representative issue for this loose group because it captures the widest user-facing regression pattern, while the other issues are mostly specific to one model, one symbol, or one code path.", + "best_issue_reason": "issue:45278 is the best global representative because it is the most general, high-level issue and subsumes the import-error subgroup better than the more specific reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43824", "right": "issue:44351", "accept": false, - "reason": "Both are import failures, but they are different missing symbols (`Qwen2_5_VLForConditionalGeneration` vs `HybridCache`) with different likely causes and fixes." + "reason": "Both are import errors, but they involve different missing symbols and different code paths." }, { "left": "issue:45216", "right": "issue:45478", "accept": false, - "reason": "Both concern Qwen3.5/Moe save-or-load regressions, but one is about incorrect saved checkpoints and the other is a from_pretrained load error; not the same concrete bug." 
+ "reason": "Both concern Qwen3.5 save/load behavior, but one is save_pretrained corruption and the other is from_pretrained failure; not the same bug." }, { "left": "issue:43992", "right": "issue:44704", "accept": false, - "reason": "`embed_tokens.weight` missing in `UMT5Encoder.from_pretrained` is a model-loading state issue, while `AutoProcessor.from_pretrained` not passing kwargs is a processor/file-resolution bug." + "reason": "Unrelated areas: UMT5 weight loading vs AutoProcessor cached_file kwargs." }, { "left": "issue:42757", "right": "issue:44351", "accept": false, - "reason": "These are distinct import errors from different modules and symbols; they do not point to one shared underlying regression." + "reason": "Different missing imports from different libraries/modules; only the symptom overlaps." }, { "left": "issue:42907", "right": "issue:44222", "accept": false, - "reason": "One is about saving dequantized Ministral/Devstral models, the other about FP8 save_pretrained behavior for moe models; same broad save path, but not the same bug." + "reason": "Both involve saving quantized models, but the model families and failure modes differ." }, { "left": "issue:43122", "right": "issue:44977", "accept": false, - "reason": "Both mention tokenization differences, but one is a version-to-version tokenizer behavior change and the other is a Qwen3.5 flash-attention generation problem." + "reason": "Both are tokenizer regressions, but for different models and likely different tokenizer code paths." }, { "left": "issue:43278", "right": "issue:44945", "accept": false, - "reason": "Embedding dtype mismatch in evaluate and incorrect output with pipeline parallelism are separate training/eval runtime issues with different code paths." + "reason": "Dtype mismatch during eval is unrelated to incorrect output under pipeline parallelism." 
}, { "left": "issue:43784", "right": "issue:45446", "accept": false, - "reason": "One is a missing `nn` import in sentence-transformers integration, the other is an incorrect PyTorch version gate for AuxRequest in flex_attention; unrelated failures." + "reason": "Import-time NameError in sentence-transformers is unrelated to the flex_attention version check bug." }, { - "left": "issue:43937", - "right": "issue:44485", + "left": "issue:38617", + "right": "issue:45278", "accept": false, - "reason": "A generation-config validation error for GLM-5 is not the same as a RoPE implementation discussion/bug; they affect different parts of the model stack." + "reason": "38617 is one specific import failure, while 45278 is a broad umbrella; not the same concrete issue." }, { - "left": "issue:38617", - "right": "issue:45278", + "left": "issue:43937", + "right": "issue:44485", "accept": false, - "reason": "Both are import-related, but one is a missing `layer_type_validation` symbol and the other is a broad upgrade report with multiple import errors; not one concrete bug." + "reason": "GLM-5 generation config validation and RoPE implementation are different bugs." }, { "left": "issue:42222", "right": "issue:43441", "accept": false, - "reason": "Vitpose model breakage and Ministral-3 flash-attention failures are different model-specific regressions with different execution paths." + "reason": "Different models and different breakages: vitpose import breakage vs FlashAttention failure." }, { "left": "issue:42673", "right": "issue:45072", "accept": false, - "reason": "A VRAM leak in multi-threaded Qwen3ForCausalLM use is unrelated to dtype mismatches in SwitchTransformers/TimmWrapperModel bfloat16 inference." + "reason": "VRAM leak in Qwen3ForCausalLM does not match dtype mismatch in bfloat16 inference." 
}, { "left": "issue:43064", "right": "issue:43673", "accept": false, - "reason": "FSDP2/PEFT optimizer-state corruption and missing GenerationMixin cache during chunked prefill are different subsystems and failure modes." + "reason": "Training/optimizer state corruption under FSDP2+PEFT is unrelated to missing generation cache in chunked prefill." }, { "left": "issue:22355", "right": "issue:29127", "accept": false, - "reason": "`transformers.onnx` import failure and LayoutLMv3 box-validation messaging are entirely unrelated." + "reason": "A missing module import and a LayoutLMv3 error-message issue are unrelated." }, { "left": "issue:42371", "right": "issue:43874", "accept": false, - "reason": "TF32 behavior settings and a missing `get_number_of_image_patches` method in Glm46VImageProcessorFast are unrelated issues." + "reason": "TF32 API guidance and GLM46V image-patch counting are different issues." }, { "left": "issue:43116", "right": "issue:43493", "accept": false, - "reason": "Multi-label classification example output emptiness and SigLIP2 HF-vs-JAX discrepancy are different problems in different model/example paths." + "reason": "Example-script classification output and SigLIP2 implementation discrepancy are unrelated." }, { "left": "issue:44075", "right": "issue:44936", "accept": false, - "reason": "SGD argument handling and `trainer.evaluate()` failing after `trainer.train()` are separate trainer/optimizer behaviors." + "reason": "Unused SGD args and train-then-evaluate failure are different training bugs." }, { "left": "issue:44038", "right": "issue:45478", "accept": false, - "reason": "Qwen3-VL-Moe support in Transformers 5.0 and Qwen3.5 Moe from_pretrained errors are related by model family, but not the same concrete bug or fix." + "reason": "Both hit Qwen MoE models, but the model/version and failure modes differ enough that this is not one bug." 
}, { - "left": "issue:44222", - "right": "issue:45478", + "left": "issue:43976", + "right": "issue:45446", "accept": false, - "reason": "FP8 save_pretrained for moe and Qwen3.5 Moe from_pretrained load errors affect different code paths; they are not mergeable into one PR." + "reason": "Python version compatibility and a PyTorch import version check are unrelated." }, { - "left": "issue:43976", - "right": "issue:45446", + "left": "issue:44222", + "right": "issue:45478", "accept": false, - "reason": "Python 3.9/3.10 compatibility claims for Transformers 5.1.0 and a flex_attention import/version check bug are unrelated." + "reason": "FP8 save_pretrained issues for moe models are not the same as Qwen3.5 from_pretrained errors." }, { "left": "issue:43116", "right": "issue:43825", "accept": false, - "reason": "A classification example script returning empty multi-label results is not the same as a pipeline() error message incorrectly advertising translation support." + "reason": "Classification example output and pipeline translation error messaging are unrelated." }, { "left": "issue:43937", "right": "issue:45563", "accept": false, - "reason": "GLM-5 generation-config validation and a stale warning in paged generate() are different generation-path issues." + "reason": "GLM-5 config validation does not match a stale warning in paged generate." }, { "left": "issue:33453", "right": "issue:39401", "accept": false, - "reason": "Tokenizer loading regression and Qwen3 offset_mapping errors are both tokenizer-related but refer to different regressions and likely different fixes." + "reason": "Both are tokenizer regressions, but the reported behavior and models differ." }, { "left": "issue:45083", "right": "issue:45446", "accept": false, - "reason": "A helper-function behavior issue in qwen3_omni_moe is unrelated to the AuxRequest import/version-check bug in flex_attention." 
+ "reason": "Feature-extraction helper behavior in qwen3_omni_moe is unrelated to the AuxRequest import check." }, { "left": "issue:39401", "right": "issue:44779", "accept": false, - "reason": "Both are tokenizer bugs, but one is Qwen3 offset_mapping and the other is Deepseek incorrect tokenization; different models and likely different root causes." + "reason": "Both are tokenizer correctness regressions, but on different models and likely different root causes." }, { "left": "issue:43931", "right": "issue:44945", "accept": false, - "reason": "Qwen3-VL-30B weight-shape mismatch and incorrect output under pipeline parallelism are different model-loading vs runtime-parallelism problems." + "reason": "Weight-shape mismatch during model loading is unrelated to pipeline-parallel output corruption." }, { "left": "issue:44117", "right": "issue:45588", "accept": false, - "reason": "TOKENIZER_MAPPING_NAMES returning None and a flash_attention crash on `s_aux=None` are unrelated implementation bugs." + "reason": "Tokenizer mapping fallback and flash_attention AttributeError are unrelated code paths." }, { "left": "issue:43827", "right": "issue:45446", "accept": false, - "reason": "Docs still referencing pipeline() after removals and an incorrect PyTorch version check are not the same underlying bug or change." + "reason": "Docs still referencing pipeline() and a flex_attention import check are not the same issue." }, { "left": "issue:43937", "right": "issue:44977", "accept": false, - "reason": "GLM-5 generation config validation and Qwen3.5 flash-attention generation failure are separate model-specific generation bugs." + "reason": "Different model families and different failure modes; only both are v5 regressions." }, { "left": "issue:45341", "right": "issue:45561", "accept": false, - "reason": "A testing_utils bug and an xdist race on captured_info.txt are both test-infra related, but they are not the same concrete race or code path." 
+ "reason": "A general testing_utils bug and an xdist race on captured_info.txt are not the same concrete defect." } ] }, @@ -12115,28 +12182,28 @@ "issue:44075|issue:45656", "issue:42617|issue:45341", "issue:43012|issue:43577", - "issue:43673|issue:45341", "issue:43066|issue:43122", + "issue:43673|issue:45341", "issue:44484|issue:45478", "issue:44410|issue:45478", "issue:43874|issue:44261", - "issue:43097|issue:44493", - "issue:42222|issue:45459" + "issue:42222|issue:45459", + "issue:43097|issue:44493" ], - "prepared_review_unit_hash": "a76d19cbc6d7ee461bdbb3f6a22fbfc57fe17ed5a35cb3d3d8b41ec8962191c6", + "prepared_review_unit_hash": "1de26b1ace99fe7eeda176802cf714c5e145e674982ae79185f17a9027dd7f9b", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 30, - "serialized_chars": 36526, + "serialized_chars": 36525, "estimated_input_tokens": 9132, "estimated_eval_tokens": 18520 }, @@ -12145,192 +12212,192 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "The set is highly heterogeneous: it mixes import-regression reports, tokenizer behavior bugs, model/config loading errors, backend/version checks, and unrelated feature/optimizer issues. No soft pair looks like a true duplicate; the apparent overlaps are only topical, not the same underlying bug.", - "confidence": 0.77, - "canonical_issue_reason": "issue:45278 is the broadest umbrella among the import-regression reports (multiple import errors after upgrading), so it is the best representative issue if one must be chosen.", + "summary": "This set is a mixed bag of unrelated Transformers issues. 
The soft-similarity pairs are mostly same-subsystem, not same-bug duplicates, so I would not merge any of them.", + "confidence": 0.73, + "canonical_issue_reason": "issue:45278 is the broadest umbrella in the set: an open report about widespread import errors after upgrading to 5.5.0, which makes it the best candidate to anchor any import-regression subset.", "canonical_pr_reason": null, - "best_issue_reason": "issue:45278 has the widest scope and is most likely to subsume nearby import-failure reports, making it the best global issue representative in this mixed cluster.", + "best_issue_reason": "issue:45278 is the strongest global representative among these items because it describes a wide regression rather than a single narrow symptom.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44291", "right": "issue:45071", "accept": false, - "reason": "Different failures: unexpected `_is_hf_initialized` argument during init_empty_weights vs PreTrainedConfig type-checking regression." + "reason": "Both are v5-related breakages, but one is an unexpected model-loading argument and the other is a config type-checking issue." }, { "left": "issue:45405", "right": "issue:45446", "accept": false, - "reason": "Unrelated backend/version problems: PEFT minimum version bump vs PyTorch version gating for flex_attention AuxRequest." + "reason": "Completely different problems: a PEFT version pin mismatch vs a PyTorch version guard for flex_attention." }, { "left": "issue:42673", "right": "issue:43906", "accept": false, - "reason": "Both mention Qwen3-related behavior, but one is a VRAM leak in dataloader threads and the other is a reproduction of a separate prior issue." + "reason": "One is a VRAM leak in dataloader threads; the other is just a reproduction of a separate issue." 
}, { "left": "issue:43450", "right": "issue:43986", "accept": false, - "reason": "Different subsystems: batched video processor output shape vs AutoProcessor crash when torchvision is missing." + "reason": "Video processor shape handling and AutoProcessor dependency-crash behavior are different bugs." }, { "left": "issue:39401", "right": "issue:43421", "accept": false, - "reason": "Tokenizer offset-mapping bug is not the same as runtime post-processor updates after special token modification." + "reason": "Different tokenizer internals: offset_mapping correctness vs runtime special-token/post-processor synchronization." }, { "left": "issue:44829", "right": "issue:45216", "accept": false, - "reason": "Different concrete problems: flash_attention_3 causes degenerate training, while Qwen3.5 save_pretrained produces an incorrect checkpoint." + "reason": "Unrelated failures: flash_attention_3 training degeneration vs save_pretrained checkpoint corruption." }, { "left": "issue:38617", "right": "issue:41628", "accept": false, - "reason": "Both are import errors, but they concern different missing symbols and code paths." + "reason": "Both are import errors, but they are for different symbols and different code paths." }, { "left": "issue:44410", "right": "issue:45310", "accept": false, - "reason": "Different model families and failures: missing projections in qwen3next vs Qwen3.5 MoE from_pretrained error." + "reason": "Different model families and different failures; no shared code-path bug is evident." }, { "left": "issue:44117", "right": "issue:45081", "accept": false, - "reason": "Tokenizer mapping None handling is separate from the Mistral regex patch crash in backend_tokenizer access." + "reason": "One is a generic from_pretrained mapping assumption; the other is a Mistral regex patch crash." }, { "left": "issue:44484", "right": "issue:45310", "accept": false, - "reason": "One is a save_pretrained shard-size question; the other is a Qwen3.5 MoE loading regression." 
+ "reason": "Save_pretrained shard sizing and Qwen3.5 MoE from_pretrained loading are unrelated." }, { "left": "issue:42371", "right": "issue:44261", "accept": false, - "reason": "TF32 control API concern is unrelated to MLA q_a_layernorm missing rms_norm_eps precision mismatch." + "reason": "TF32 API guidance and an rms_norm_eps precision mismatch are not the same bug." }, { "left": "issue:42222", "right": "issue:45542", "accept": false, - "reason": "Different domains: vitpose model breakage vs missing TensorFlow backend when only tensorboard is installed." + "reason": "A broken vitpose model implementation is unrelated to a TensorFlow backend installation issue." }, { "left": "issue:43408", "right": "issue:43742", "accept": false, - "reason": "Different warnings/errors: sam3_video vs sam3_tracker type mismatch is not the same as MobileLLM loading key error." + "reason": "Model-type mismatch warning vs key error while loading a different model family." }, { "left": "issue:43937", "right": "issue:44496", "accept": false, - "reason": "GenerationConfig validation for GLM-5 is unrelated to unrecognized model/config loading for Olmo-Hybrid-Instruct." + "reason": "Both are loading/config errors, but one is invalid GenerationConfig and the other is missing model_type." }, { "left": "issue:42673", "right": "issue:43582", "accept": false, - "reason": "Both are runtime issues, but VRAM leak in threads is not the same as Apple Silicon warmup TypeError." + "reason": "VRAM leak in multi-threading and Apple Silicon allocator TypeError are different failures." }, { "left": "issue:42673", "right": "issue:43577", "accept": false, - "reason": "VRAM leak in threaded use is different from BLIP2 dtype fields staying float32 after loading." + "reason": "Different symptom and code path: memory leak vs dtype propagation bug in BLIP2 loading." 
}, { "left": "issue:42175", "right": "issue:42222", "accept": false, - "reason": "TensorFlow backend packaging issue is unrelated to vitpose model breakage." + "reason": "Package backend dependency issue and vitpose model breakage are unrelated." }, { "left": "issue:38617", "right": "issue:44220", "accept": false, - "reason": "Different import/feature failures: missing layer_type_validation symbol vs audio fbank extraction issue." + "reason": "ImportError for a missing symbol and an audio feature extraction bug are unrelated." }, { "left": "issue:44933", "right": "issue:45278", "accept": false, - "reason": "Same broad import-error theme, but 44933 is a specific missing import from image_utils while 45278 is a broader version-upgrade import breakage report." + "reason": "45278 is a broad import-regression umbrella, but 44933 is one specific missing import; not enough evidence they are the same exact bug." }, { "left": "issue:44483", "right": "issue:45464", "accept": false, - "reason": "Chat/completions API request rejection is not the same as streaming inference failure on Qwen3.5-0.8B." + "reason": "Both touch chat/completions, but one is API request acceptance and the other is streaming inference on a specific model." }, { "left": "issue:44075", "right": "issue:45656", "accept": false, - "reason": "SGD optimizer args ignored and optimizer.step called twice are different optimizer bugs." + "reason": "Different optimizer issues: ignored SGD args vs double optimizer.step under deepspeed." }, { "left": "issue:42617", "right": "issue:45341", "accept": false, - "reason": "3d_parallel.py runtime failure is unrelated to a testing_utils bug." + "reason": "A runtime launcher failure and a minor testing_utils bug do not match." }, { "left": "issue:43012", "right": "issue:43577", "accept": false, - "reason": "PyTorch warning on compiling bfloat16 models is different from BLIP2 dtype fields remaining float32." 
+ "reason": "Both mention precision/dtype, but one is a compile-time warning and the other is incorrect model dtype loading." }, { - "left": "issue:43673", - "right": "issue:45341", + "left": "issue:43066", + "right": "issue:43122", "accept": false, - "reason": "Generation cache missing in v5.0.0 during chunked_prefill is unrelated to a testing_utils bug." + "reason": "Both are tokenizer regressions in v5, but one is decoder-type metadata and the other is changed tokenization output." }, { - "left": "issue:43066", - "right": "issue:43122", + "left": "issue:43673", + "right": "issue:45341", "accept": false, - "reason": "Both are tokenization-related, but one is decoder-type selection in v5 and the other is a concrete tokenization difference across versions." + "reason": "Generation cache behavior and a testing_utils bug are unrelated." }, { "left": "issue:44484", "right": "issue:45478", "accept": false, - "reason": "Different Qwen3.5 MoE loading bug vs unrelated save_pretrained max_shard_size question." + "reason": "save_pretrained shard-size behavior is unrelated to a Qwen3.5 MoE loading error." }, { "left": "issue:44410", "right": "issue:45478", "accept": false, - "reason": "Missing qkv/gate projections in qwen3next is not the same as the Qwen3.5 MoE from_pretrained error." + "reason": "Different model architectures and different missing-component failures." }, { "left": "issue:43874", "right": "issue:44261", "accept": false, - "reason": "Missing get_number_of_image_patches in GLM46V is separate from MLA q_a_layernorm rms_norm_eps precision error." + "reason": "A missing image-patch helper and a layernorm epsilon precision issue are unrelated." }, { - "left": "issue:43097", - "right": "issue:44493", + "left": "issue:42222", + "right": "issue:45459", "accept": false, - "reason": "Removed tie_embeddings_and_encoder_decoder API concern is different from unexpected position-id keys in model loading." 
+ "reason": "Vitpose model breakage and protobuf-hidden tokenizer errors are different subsystems and bugs." }, { - "left": "issue:42222", - "right": "issue:45459", + "left": "issue:43097", + "right": "issue:44493", "accept": false, - "reason": "Vitpose model breakage is unrelated to tokenizer error masking when protobuf is absent." + "reason": "Config parameter removal and unexpected checkpoint keys are not the same underlying regression." } ] }, @@ -12412,8 +12479,8 @@ "issue:43931|issue:44410", "issue:43986|issue:44484", "issue:45003|issue:45341", - "issue:43931|issue:44038", "issue:43986|issue:45341", + "issue:43931|issue:44038", "issue:44117|issue:44987", "issue:43097|issue:43452", "issue:43335|issue:45071", @@ -12441,250 +12508,250 @@ "issue:43825|issue:44655", "issue:43992|issue:44484" ], - "prepared_review_unit_hash": "1d71b0352666eec2bc7df93a43f1f5f30eb5ef9d00768f5377f00f311c8cb35a", + "prepared_review_unit_hash": "b923bfd846f8c020aab9409ca5dfe325d0f97601557164f0edfeb5f392a1868c", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 36, - "serialized_chars": 38501, - "estimated_input_tokens": 9626, - "estimated_eval_tokens": 19508 + "serialized_chars": 38491, + "estimated_input_tokens": 9623, + "estimated_eval_tokens": 19502 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a loose title-similarity grouping, not a true duplicate set. 
The paired issues span unrelated import errors, model-specific loading bugs, training regressions, and warning/compatibility problems, so all soft edges should be rejected.", - "confidence": 0.41, - "canonical_issue_reason": "If a single issue must anchor the cluster, issue 43502 is the broadest core-loading regression with the most discussion, but the set is not actually one duplicate family.", + "summary": "These items form a loose semantic cluster of unrelated Transformers regressions and import errors. None of the soft-edge pairs look like the same underlying bug or change, so I would not merge any of them as duplicates.", + "confidence": 0.87, + "canonical_issue_reason": "No single issue is a safe canonical: the cluster spans unrelated import errors, model-loading regressions, training bugs, and warning/behavior changes.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 43502 is the best overall representative only because it is a high-signal, broadly relevant loading bug with substantial user impact and discussion; it is still not a strong canonical for the whole cluster.", + "best_issue_reason": "No issue is sufficiently central or representative to serve as a true cluster anchor; the topics are too heterogeneous.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43441", "right": "issue:44829", "accept": false, - "reason": "Both mention FlashAttention, but one is a model-specific FA2 failure and the other is a degenerate training bug with FA3; different code paths and symptoms." + "reason": "Both involve FlashAttention, but one is a support error for Ministral-3 and the other is degenerate training with FA3; different failures." }, { "left": "issue:44263", "right": "issue:44485", "accept": false, - "reason": "Unrelated GLM issues: one is a torch.split/indexer bug, the other is a RoPE implementation discussion." + "reason": "GLM MoE indexer split behavior and GLM-5 RoPE implementation are unrelated code paths." 
}, { "left": "issue:43012", "right": "issue:43408", "accept": false, - "reason": "Both are warnings, but about different subsystems and causes: bfloat16 precision warning vs model-type mismatch warning." + "reason": "A bfloat16 precision warning is not the same bug as the sam3_video/sam3_tracker model-type warning." }, { "left": "issue:41628", "right": "issue:45071", "accept": false, - "reason": "Different regressions: missing AutoImageProcessor import vs PretrainedConfig type-checking breakage." + "reason": "AutoImageProcessor import failure and PretrainedConfig type-checking regression are different APIs and root causes." }, { "left": "issue:45071", "right": "issue:45542", "accept": false, - "reason": "Type checking in PretrainedConfig is unrelated to the TensorBoard/TF backend detection bug." + "reason": "PretrainedConfig type checking and tensorboard-only tf backend detection are unrelated." }, { "left": "issue:43931", "right": "issue:44410", "accept": false, - "reason": "Same broad family name, but one is a Qwen3-VL load shape mismatch and the other is missing projections in qwen3next; not the same bug." + "reason": "Qwen3-VL weight-shape mismatch and qwen3next missing projections are distinct model bugs." }, { "left": "issue:43986", "right": "issue:44484", "accept": false, - "reason": "Video-model AutoProcessor dependency crash is unrelated to the max_shard_size save_pretrained question." + "reason": "AutoProcessor crash without torchvision and max_shard_size behavior in save_pretrained are unrelated." }, { "left": "issue:45003", "right": "issue:45341", "accept": false, - "reason": "Unsafe sys.modules access in modeling_utils does not match a small testing_utils bug." + "reason": "Unsafe sys.modules access and a testing_utils bug are different problems." 
}, { - "left": "issue:43931", - "right": "issue:44038", + "left": "issue:43986", + "right": "issue:45341", "accept": false, - "reason": "Both are Qwen-VL loading problems, but they affect different model variants and likely different failure modes." + "reason": "AutoProcessor loading without torchvision is unrelated to testing_utils behavior." }, { - "left": "issue:43986", - "right": "issue:45341", + "left": "issue:43931", + "right": "issue:44038", "accept": false, - "reason": "A torchvision dependency crash is unrelated to a testing utility bug." + "reason": "These are different Qwen3-VL / Qwen3-VL-Moe loading issues with different symptoms." }, { "left": "issue:44117", "right": "issue:44987", "accept": false, - "reason": "Tokenizer mapping returning None is a distinct loading-path bug from the physical-intelligence/fast model load failure." + "reason": "TOKENIZER_MAPPING_NAMES returning None is a tokenizer registry bug, not the physical-intelligence/fast loading failure." }, { "left": "issue:43097", "right": "issue:43452", "accept": false, - "reason": "Removed tie_embeddings API and gguf_file breakage are different compatibility issues in different code paths." + "reason": "A removed config option is not the same as gguf_file breaking tokenizer/model loading." }, { "left": "issue:43335", "right": "issue:45071", "accept": false, - "reason": "SwitchTransformers sparse-layer config bug is unrelated to PretrainedConfig type checking." + "reason": "SwitchTransformers sparse-layer construction and PretrainedConfig type checking are unrelated." }, { "left": "issue:43408", "right": "issue:44368", "accept": false, - "reason": "Both are warnings, but they concern different configuration fields and different models." + "reason": "Both are warning-style issues, but they target different configs and different warning conditions." 
}, { "left": "issue:43874", "right": "issue:44263", "accept": false, - "reason": "Missing image-patch helper in a fast image processor is unrelated to a torch.split return-value bug." + "reason": "Missing get_number_of_image_patches and torch.split return-values are different GLM bugs." }, { "left": "issue:43976", "right": "issue:45071", "accept": false, - "reason": "Python version compatibility for Transformers 5.1.0 is unrelated to the type-checking regression." + "reason": "Python version compatibility and PretrainedConfig type checking are unrelated." }, { "left": "issue:44829", "right": "issue:45245", "accept": false, - "reason": "FlashAttention training degeneration and a category-count limit error are unrelated." + "reason": "FlashAttention-3 training degeneration and category-cardinality limits are unrelated." }, { "left": "issue:43986", "right": "issue:45372", "accept": false, - "reason": "Different missing-dependency/import failures in different processor paths." + "reason": "AutoProcessor without torchvision and a missing ReasoningEffort import in mistral_common are different dependency issues." }, { "left": "issue:43531", "right": "issue:45341", "accept": false, - "reason": "Qwen3-MoE sliding_window behavior is unrelated to a testing utility bug." + "reason": "Qwen3-MoE sliding_window behavior and a testing_utils bug do not share the same code path." }, { "left": "issue:38617", "right": "issue:42757", "accept": false, - "reason": "Both are ImportError reports, but for different missing symbols from different packages and likely different fixes." + "reason": "These are two separate import errors for different missing symbols." }, { "left": "issue:43064", "right": "issue:44315", "accept": false, - "reason": "FSDP2/PEFT optimizer-state corruption is unrelated to Liger Kernel not being applied under model_init." + "reason": "FSDP2/PEFT optimizer-state corruption and Liger-kernel application with model_init are different training setup bugs." 
}, { "left": "issue:44261", "right": "issue:44485", "accept": false, - "reason": "A missing rms_norm_eps precision issue is not the same as a GLM RoPE implementation discussion." + "reason": "An MLA rms_norm_eps precision issue is not the same as the GLM-5 RoPE implementation bug." }, { "left": "issue:44410", "right": "issue:45375", "accept": false, - "reason": "Missing qkv/gate projections in qwen3next is unrelated to a missing deepstack_visual_indexes config field." + "reason": "Missing qwen3next projections and a missing deepstack_visual_indexes field are different config/model issues." }, { "left": "issue:44038", "right": "issue:44410", "accept": false, - "reason": "Different Qwen model families and different architecture bugs; not a single underlying defect." + "reason": "These involve different Qwen3-family models and different missing components." }, { "left": "issue:38617", "right": "issue:44351", "accept": false, - "reason": "Different missing imports from different modules/symbols; too broad to be one duplicate." + "reason": "layer_type_validation and HybridCache are different missing imports, not one underlying defect." }, { "left": "issue:44062", "right": "issue:44117", "accept": false, - "reason": "Tokenizer AddedToken argument duplication is a different bug from TOKENIZER_MAPPING_NAMES sometimes returning None." + "reason": "AddedToken special-argument duplication and TOKENIZER_MAPPING_NAMES returning None are unrelated tokenizer bugs." }, { "left": "issue:44704", "right": "issue:45103", "accept": false, - "reason": "AutoProcessor kwargs forwarding and auto_docstring annotation handling are unrelated code paths." + "reason": "Kwargs propagation in AutoProcessor and the auto_docstring annotations crash are unrelated." }, { "left": "issue:43097", "right": "issue:45588", "accept": false, - "reason": "A removed embedding-tie API and a flash_attention.py AttributeError with s_aux=None are unrelated." 
+ "reason": "Removed tie_embeddings support and a flash_attention.py s_aux=None crash are different failures." }, { "left": "issue:43986", "right": "issue:44117", "accept": false, - "reason": "Video-model loading without torchvision is unrelated to tokenizer mapping assumptions." + "reason": "AutoProcessor without torchvision and tokenizer mapping returning None are unrelated." }, { "left": "issue:43502", "right": "issue:43992", "accept": false, - "reason": "local_files_only network leakage and missing embed_tokens.weight are different loading bugs." + "reason": "local_files_only still making API requests and missing embed_tokens.weight are different loading bugs." }, { "left": "issue:43824", "right": "issue:43986", "accept": false, - "reason": "A missing Qwen2.5-VL import and a torchvision-related video processor crash are distinct issues." + "reason": "A missing Qwen2_5_VL import and a torchvision-related AutoProcessor crash are not the same issue." }, { "left": "issue:43097", "right": "issue:43873", "accept": false, - "reason": "Removed tie_embeddings behavior is unrelated to quantization offloading not working." + "reason": "A removed config API and quantization/offloading behavior are unrelated." }, { "left": "issue:43066", "right": "issue:43906", "accept": false, - "reason": "The tokenizer decoder type bug is not the same as a generic isolated reproduction of another issue." + "reason": "Wrong tokenizer decoder type and an isolated reproduction of another issue are not the same bug." }, { "left": "issue:43352", "right": "issue:44380", "accept": false, - "reason": "Nemotron Flash Attention 2 support and GPT2 attention scaling under SDPA/FA are different attention bugs." + "reason": "Nemotron FlashAttention 2 support and GPT2 attention-scaling being ignored are different backend bugs." 
}, { "left": "issue:43825", "right": "issue:44655", "accept": false, - "reason": "pipeline() error messaging and Pipeline save_pretrained support are different pipeline-related problems." + "reason": "A pipeline translation error message and saving Pipeline objects are unrelated." }, { "left": "issue:43992", "right": "issue:44484", "accept": false, - "reason": "Missing encoder weights on load is unrelated to the default max_shard_size discussion." + "reason": "Missing encoder embed weights and shard-size defaults are different save/load behaviors." } ] }, @@ -12787,264 +12854,252 @@ "issue:44220|issue:44655", "issue:43352|issue:43937", "issue:44263|issue:45440", - "issue:36683|issue:44077", - "issue:43066|issue:45381", "issue:44077|issue:44380", + "issue:43066|issue:45381", + "issue:36683|issue:44077", "issue:43976|issue:44938", - "issue:36683|issue:43525", "issue:36010|issue:42757", + "issue:36683|issue:43525", "issue:43064|issue:44492" ], - "prepared_review_unit_hash": "59dca2591857cd72ea2ef0f71608f3b3f18c88d248c43ba1e7121764ba4789e7", + "prepared_review_unit_hash": "40ba120921392bf9dc8fcea88b29e82d1f5004b85ce136a3e8c5caa90d5e1de8", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 36, - "serialized_chars": 38172, - "estimated_input_tokens": 9543, - "estimated_eval_tokens": 19342 + "serialized_chars": 38175, + "estimated_input_tokens": 9544, + "estimated_eval_tokens": 19344 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a loose similarity set of unrelated regression reports. 
The only plausible duplicate pair is the CLIP v5 output regression (missing attentions vs missing hidden_states), which looks like the same forward-output code path.", - "confidence": 0.76, - "canonical_issue_reason": "issue:43618 is the best canonical issue because it captures the shared CLIP v5 output regression at a broader level; issue:43761 appears to be a specific symptom of the same model-forward bug.", + "summary": "Mostly a heterogeneous set of Transformers issues with no broad duplicate cluster. The only clear duplicate-like pair is the CLIP vision forward/output regression around missing returned fields.", + "confidence": 0.88, + "canonical_issue_reason": "Issue:43761 is the best anchor for the only true duplicate-like subcluster: it is a concrete CLIP v5 regression with a clear repro and broader missing-output scope.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43618 is the strongest issue to keep as the representative because it is concrete, regression-oriented, and broad enough to subsume the related CLIP hidden-state failure.", + "best_issue_reason": "Issue:43761 is the most suitable representative of the accepted CLIP regression subcluster; the rest of the items are largely unrelated and should not be merged into one canonical issue.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43097", "right": "issue:43901", "accept": false, - "reason": "Different problems: API removal/regression vs stale docs text. Same theme of API changes, but not the same bug." + "reason": "Unrelated: removed model config API vs docs mismatch for pipeline scores." }, { "left": "issue:43012", "right": "issue:44368", "accept": false, - "reason": "Both mention warnings, but one is a PyTorch compile precision warning and the other is a model config warning about tied embeddings." + "reason": "Both mention warnings, but they are different warnings on different code paths." 
}, { "left": "issue:43012", "right": "issue:43550", "accept": false, - "reason": "Unrelated failure modes: bfloat16 compile warning vs Bamba torch.compile/SDPA runtime bug." + "reason": "Precision warning during compile is not the same as an SDPA compile failure." }, { "left": "issue:42886", "right": "issue:43381", "accept": false, - "reason": "Different subsystems and symptoms: offline cache loading vs gradient checkpointing in eval mode." + "reason": "Cache/offline loading bug and eval-mode gradient checkpointing are unrelated." }, { "left": "issue:42907", "right": "issue:44655", "accept": false, - "reason": "Both involve saving, but one is dequantized model save failure and the other is saving Pipeline objects; not the same code path." + "reason": "Both involve saving, but one is quantized/dequantized model saving and the other is Pipeline save_pretrained." }, { "left": "issue:44380", "right": "issue:45479", "accept": false, - "reason": "Different model families and bugs: attention scaling under SDPA/FlashAttention vs degenerate single-label classification loss." + "reason": "Different bugs: attention scaling backend regression vs sequence-classification zero-loss issue." }, { "left": "issue:43208", "right": "issue:43550", "accept": false, - "reason": "xLSTM training bugs and Bamba compile/SDPA issues are unrelated." + "reason": "xLSTM training bugs and Bamba torch.compile/SDPA failure are unrelated." }, { "left": "issue:43352", "right": "issue:43976", "accept": false, - "reason": "Nemotron Flash Attention support and Python-version compatibility are unrelated." + "reason": "FlashAttention support issue and Python-version compatibility bug are not the same defect." }, { "left": "issue:43064", "right": "issue:44380", "accept": false, - "reason": "FSDP2/PEFT optimizer-state corruption is unrelated to GPT2 attention scaling under backend changes." + "reason": "Different failures: FSDP2/PEFT optimizer state corruption vs GPT2 attention scaling being ignored." 
}, { "left": "issue:43618", "right": "issue:44380", "accept": false, - "reason": "Both are regressions, but in different model paths: CLIP output fields vs GPT2 attention scaling." + "reason": "CLIP output regression and GPT2 attention scaling are unrelated code paths." }, { "left": "issue:43208", "right": "issue:43526", "accept": false, - "reason": "Different xLSTM training bugs vs BeitImageProcessorFast label reduction bug." + "reason": "xLSTM training bugs and BEiT fast processor label reduction are unrelated." }, { "left": "issue:44483", "right": "issue:44514", "accept": false, - "reason": "Different API surfaces: chat/completions request validation vs processor chat-template batching crash." + "reason": "Chat-completions request rejection and apply_chat_template crash are different layers of the stack." }, { "left": "issue:36683", "right": "issue:43582", "accept": false, - "reason": "Gemma3Config attribute error and Apple Silicon allocator TypeError are unrelated." + "reason": "Gemma3Config missing vocab_size and Apple Silicon allocator TypeError are unrelated." }, { "left": "issue:43618", "right": "issue:43761", "accept": true, - "reason": "Both are CLIP v5 regressions in model outputs: one loses attentions, the other loses hidden_states. They plausibly come from the same forward/output assembly bug and could be fixed together." + "reason": "Both are CLIP v5 regressions in forward output assembly where requested outputs are missing from returned objects." }, { "left": "issue:43012", "right": "issue:44683", "accept": false, - "reason": "Precision warning during compile is unrelated to compiled flex_attention failure on torch >= 2.9." + "reason": "Different compile-related problems: a warning versus a torch>=2.9 flex_attention failure." }, { "left": "issue:42757", "right": "issue:43824", "accept": false, - "reason": "Both are ImportErrors, but for completely different symbols and packages." 
+ "reason": "Missing import from huggingface_hub and missing import from transformers are separate export issues." }, { "left": "issue:44038", "right": "issue:44230", "accept": false, - "reason": "Related model family names, but different issues: general Qwen3-VL-Moe breakage vs FP8 inference support." + "reason": "Qwen3-VL-Moe loading bug and fp8 support request are not the same bug." }, { "left": "issue:44483", "right": "issue:45290", "accept": false, - "reason": "OpenAI-style chat endpoint request rejection and assistant tool-call chat-template crash are different code paths." + "reason": "Server request validation failure and chat-template tool-call crash are different issues." }, { "left": "issue:43299", "right": "issue:43867", "accept": false, - "reason": "Both concern model loading/saving, but the described failures are not the same concrete bug." + "reason": "Qwen3VL MoE loading regression and sorted-state_dict load error are unrelated." }, { "left": "issue:44483", "right": "issue:45381", "accept": false, - "reason": "Chat completion API validation and Qwen2.5-VL video position-id corruption are unrelated." + "reason": "Chat-completions API rejection and Qwen2.5-VL vision_position_ids bug are unrelated." }, { "left": "issue:38617", "right": "issue:43502", "accept": false, - "reason": "Importing layer_type_validation and ignoring local_files_only are unrelated bugs." + "reason": "ImportError for layer_type_validation and unexpected network calls in local_files_only are unrelated." }, { "left": "issue:43756", "right": "issue:44485", "accept": false, - "reason": "Both mention RoPE, but they refer to different models and different implementation problems." + "reason": "Different model-specific RoPE/implementation complaints; not the same underlying bug." }, { "left": "issue:43901", "right": "issue:44655", "accept": false, - "reason": "Docs wording about return_all_scores is unrelated to pipeline serialization." 
+ "reason": "Docs mentioning return_all_scores and inability to save Pipelines are unrelated." }, { "left": "issue:36683", "right": "issue:43475", "accept": false, - "reason": "Different missing-attribute bugs on different model/config objects." + "reason": "Gemma3Config vocab_size and SAM3 vision encoder output attribute error are unrelated." }, { "left": "issue:42994", "right": "issue:44655", "accept": false, - "reason": "Quantized model saving and pipeline saving are different save paths and symptoms." + "reason": "Both are save failures, but they affect different objects and code paths." }, { "left": "issue:43867", "right": "issue:45216", "accept": false, - "reason": "Both are checkpoint/load regressions, but there is not enough evidence they share the same model or code path." + "reason": "State_dict loading error and saved-checkpoint regression are separate serialization issues." }, { "left": "issue:44220", "right": "issue:44655", "accept": false, - "reason": "Audio feature extraction bug vs pipeline save bug are unrelated." + "reason": "Feature extraction bug and Pipeline save_pretrained failure are unrelated." }, { "left": "issue:43352", "right": "issue:43937", "accept": false, - "reason": "Flash Attention support issue for Nemotron is unrelated to GLM-5 generation config validation." + "reason": "FlashAttention support and invalid GenerationConfig are unrelated." }, { "left": "issue:44263", "right": "issue:45440", "accept": false, - "reason": "Different MoE-related failures in different models and code paths." + "reason": "Different MoE model bugs in different implementations; not one shared defect." }, { - "left": "issue:36683", - "right": "issue:44077", + "left": "issue:44077", + "right": "issue:44380", "accept": false, - "reason": "Gemma3 config attribute missing and patchtsmixer post_init policy are unrelated." + "reason": "PatchTSMixer post_init validation and GPT2 attention scaling are unrelated." 
}, { "left": "issue:43066", "right": "issue:45381", "accept": false, - "reason": "Tokenizer decoder type mismatch and Qwen2.5-VL video position-id bug are unrelated." - }, - { - "left": "issue:44077", - "right": "issue:44380", - "accept": false, - "reason": "Patchtsmixer post_init validation and GPT2 attention scaling are unrelated." + "reason": "Wrong tokenizer decoder type and Qwen2.5-VL video position IDs are unrelated." }, { - "left": "issue:43976", - "right": "issue:44938", + "left": "issue:36010", + "right": "issue:42757", "accept": false, - "reason": "Python version support problem at 5.1.0 is not the same as a separate Python 3.14 load failure." + "reason": "Two separate import errors from different modules/packages." }, { "left": "issue:36683", "right": "issue:43525", "accept": false, - "reason": "Different missing config attributes for different models." - }, - { - "left": "issue:36010", - "right": "issue:42757", - "accept": false, - "reason": "Both are import failures from different modules, with different missing symbols." + "reason": "Gemma3Config missing vocab_size and Llama4Config missing pad_token_id are different config regressions." }, { "left": "issue:43064", "right": "issue:44492", "accept": false, - "reason": "FSDP optimizer-state corruption and a cache-strategy typo are unrelated." + "reason": "FSDP2 optimizer-state corruption and cache strategy typo are unrelated." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary stays grounded in the packet and is appropriately cautious. The only accepted soft pair, issue:43618 vs issue:43761, is a reasonable conservative merge candidate because both are CLIP v5 output-regression reports affecting forward output fields. The other pair verdicts also read as appropriately negative and do not overclaim shared bugs." + "feedback": "The summary is grounded in the packet and stays mostly conservative. 
The one accepted soft edge, issue:43618 \u2194 issue:43761, is reasonably supported by the titles as related CLIP v5 output-field regressions. The rest of the soft-pair rejections are cautious and do not overclaim duplicate status. No mergeability concerns apply here because these are issue pairs, not PR pairs." }, "evaluator_used": true, "retried": false, @@ -13146,230 +13201,224 @@ "issue:42890|issue:45341", "issue:43010|issue:44075" ], - "prepared_review_unit_hash": "f4e2b9a8861790d29d0332552e4b6cb6f41a6e83118b979017fad71eb566a11d", + "prepared_review_unit_hash": "9884cba6560567158fd1c2fa85fc845193aa00a956eb01f3003f2085e1d87061", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 31, - "serialized_chars": 36065, - "estimated_input_tokens": 9017, - "estimated_eval_tokens": 18290 + "serialized_chars": 36063, + "estimated_input_tokens": 9016, + "estimated_eval_tokens": 18288 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly noisy and contains many unrelated issues. The only soft pair that looks like the same underlying problem is the import-time slowdown pair; the rest are different bugs or features in the same broad subsystem but not true duplicates.", - "confidence": 0.79, - "canonical_issue_reason": "issue:44246 is the broader user-facing report of slow `import transformers`; issue:41669 reads as a specific root-cause/optimization ticket (`import *` in models) that plausibly addresses that slowdown.", + "summary": "The cluster is still mostly heterogeneous import/load-regression noise, with 45278 remaining the best broad anchor. 
I\u2019m downgrading the 41669\u219444246 soft edge: the packet supports shared import-performance concern, but not that they are the same underlying bug.", + "confidence": 0.72, + "canonical_issue_reason": "Issue 45278 is the best open cluster anchor because it is broad, current, and covers the recurring import/load regression theme without being as narrow as the other reports.", "canonical_pr_reason": null, - "best_issue_reason": "issue:44246 is the best anchor because it states the symptom clearly, is broader than the implementation-specific follow-up, and has the strongest duplicate relationship among the soft pairs.", + "best_issue_reason": "45278 is the strongest global triage point: it is open, broad, and can absorb nearby upgrade/import failures better than the narrower model-specific bugs.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44188", "right": "issue:44380", "accept": false, - "reason": "Both are attention-related, but one is about `torch.compile` branching causing kernel divergence and the other about GPT-2 attention scaling being ignored under SDPA/FlashAttention; different failure modes and fixes." + "reason": "Different attention bugs: torch.compile branching divergence vs GPT2 scaling config being ignored under SDPA/FlashAttention." }, { "left": "issue:43352", "right": "issue:45593", "accept": false, - "reason": "Flash Attention 2 support gating for Nemotron is unrelated to D-FINE auxiliary-loss behavior when denoising is off." + "reason": "Unrelated problems: FlashAttention2 support gating vs D-FINE auxiliary-loss behavior." }, { "left": "issue:43618", "right": "issue:45216", "accept": false, - "reason": "`CLIPOutput.attentions` disappearing and Qwen3.5 save_pretrained checkpoint corruption are different code paths and different bug classes." + "reason": "Different models and failures: CLIPOutput attentions assignment vs Qwen3.5 save_pretrained regression." 
}, { "left": "issue:36683", "right": "issue:44242", "accept": false, - "reason": "Missing `vocab_size` on `Gemma3Config` and router-logits load-balancing-loss gating are unrelated model/config bugs." + "reason": "Gemma3Config vocab_size missing is unrelated to MoE load-balancing loss not being added." }, { "left": "issue:43010", "right": "issue:45278", "accept": false, - "reason": "`@torch.no_grad` on cache/layer update and broad import errors after upgrading versions are not the same underlying issue." + "reason": "Cache update no_grad semantics are unrelated to broad import/load errors after upgrade." }, { "left": "issue:43352", "right": "issue:43931", "accept": false, - "reason": "Flash Attention 2 unsupported vs Qwen3-VL weight-shape mismatch are distinct loading/runtime failures." + "reason": "FlashAttention2 support vs Qwen3-VL weight-shape mismatch are different loading failures." }, { "left": "issue:43992", "right": "issue:45310", "accept": false, - "reason": "Missing `embed_tokens.weight` in UMT5Encoder and Qwen3.5 MoE `from_pretrained` errors are unrelated model-loading regressions." + "reason": "Both are from_pretrained failures, but they affect different models and different failure modes." }, { "left": "issue:41669", "right": "issue:44246", - "accept": true, - "reason": "Both point to import-time slowdown in `transformers`; 41669 provides a likely concrete root cause (`import *` in models), while 44246 is the general symptom report." + "accept": false, + "reason": "Both touch import performance, but the packet does not establish that 44246 is the same import-* regression as 41669." }, { "left": "issue:43867", "right": "issue:45375", "accept": false, - "reason": "A sorted-state_dict load failure and a missing `deepstack_visual_indexes` config field are different problems." + "reason": "State-dict loading error vs missing config field dropped by strict are different root causes." 
}, { "left": "issue:36683", "right": "issue:43064", "accept": false, - "reason": "Gemma3 config missing `vocab_size` is unrelated to FSDP2/PEFT optimizer state corruption on nonzero ranks." + "reason": "Gemma config attribute bug is unrelated to the FSDP2+PEFT optimizer state problem." }, { "left": "issue:44355", "right": "issue:44855", "accept": false, - "reason": "Generic compiled-file errors and a Python 3.13 `@torch.jit.script` parsing indentation issue are not the same bug." + "reason": "Compiled Python-file errors are not the same as the Python 3.13 torch.jit parse/IndentationError issue." }, { "left": "issue:42890", "right": "issue:45561", "accept": false, - "reason": "Missing `set_seed()` in SamHQ integration tests and an xdist race on `captured_info.txt` are different test flakiness causes." + "reason": "Seed-related flaky test and xdist file-race are different CI/test failures." }, { "left": "issue:44938", "right": "issue:45071", "accept": false, - "reason": "Python 3.14 import/load failure and `PretrainedConfig` type-checking regression are different compatibility issues." + "reason": "Python 3.14 import failure and PretrainedConfig type-checking break are distinct compatibility issues." }, { "left": "issue:43867", "right": "issue:45357", "accept": false, - "reason": "Sorted state_dict loading errors and Qwen3.5 visual-encoder save_pretrained corruption are different save/load paths." + "reason": "Model load error and save_pretrained key regression are opposite directions of the serialization path, not the same bug." }, { "left": "issue:43992", "right": "issue:45478", "accept": false, - "reason": "UMT5Encoder missing weights and Qwen3.5 MoE `from_pretrained` errors are unrelated model-loading bugs." + "reason": "Same broad from_pretrained area, but different model family and different missing-key/error behavior." 
}, { "left": "issue:43010", "right": "issue:43976", "accept": false, - "reason": "`torch.no_grad` decoration for cache updates has nothing to do with Python version support for Transformers 5.1.0." + "reason": "Cache update no_grad semantics do not match a Python-version support failure." }, { "left": "issue:44360", "right": "issue:45440", "accept": false, - "reason": "A ReLU missing from the DSA indexer and DeepSeekV3MoE parity issues are different algorithmic concerns." + "reason": "DSA indexer activation bug is unrelated to DeepSeekV3 MoE implementation divergence." }, { "left": "issue:44938", "right": "issue:45381", "accept": false, - "reason": "Python 3.14 load failures and Qwen2.5-VL video position-id bugs are unrelated." + "reason": "Python import failure in 3.14 and qwen2.5-vl position-id bug are unrelated." }, { "left": "issue:43618", "right": "issue:44077", "accept": false, - "reason": "Missing CLIP attentions and `patchtsmixer` post_init policy are separate API/architecture issues." + "reason": "CLIPOutput attentions assignment is unrelated to patchtsmixer post_init being disallowed." }, { "left": "issue:43867", "right": "issue:44484", "accept": false, - "reason": "Model loading with sorted state_dict and the 50GB shard-size question are unrelated save/load behaviors." + "reason": "Checkpoint loading error and a save_pretrained shard-size question are unrelated." }, { "left": "issue:36296", "right": "issue:43575", "accept": false, - "reason": "Tensor-parallel training bug and TP OOM when loading a large Qwen model are both TP-related but not the same concrete failure." + "reason": "Tensor-parallel training bug and tp load OOM for a specific large model are not the same underlying issue." }, { "left": "issue:43618", "right": "issue:44857", "accept": false, - "reason": "Missing CLIP attentions and LwDETR float16 AMP crashes are different bugs in different models." 
+ "reason": "Attentions field regression is unrelated to the float16 AMP crash in LwDetrImageLoss." }, { "left": "issue:43618", "right": "issue:45588", "accept": false, - "reason": "Missing CLIP attentions and a flash-attention crash on `s_aux=None` are unrelated failures." + "reason": "CLIPOutput attentions assignment and sink-less flash_attention crash are different code paths." }, { "left": "issue:28282", "right": "issue:33453", "accept": false, - "reason": "PyTorch-not-found import error and tokenizer-loading regression are unrelated setup/loading issues." + "reason": "PyTorch missing ImportError and tokenizer-loading regression are different loading problems." }, { "left": "issue:42890", "right": "issue:44964", "accept": false, - "reason": "SamHQ test flakiness and inability to load Phi-4 multimodal are unrelated." + "reason": "Flaky integration test and Phi-4 multimodal load failure are unrelated." }, { "left": "issue:42890", "right": "issue:44112", "accept": false, - "reason": "Seed-related integration-test flakiness and GraniteSpeech stale device override CI failure are different test issues." + "reason": "Seed instability and GraniteSpeech stale device override are different CI issues." }, { "left": "issue:42890", "right": "issue:45600", "accept": false, - "reason": "SamHQ seeding flakiness and `auto_mappings.py` referencing removed configs are unrelated." + "reason": "Seed-related failure is unrelated to auto_mappings referencing removed configs." }, { "left": "issue:43502", "right": "issue:44062", "accept": false, - "reason": "Unexpected network access with `local_files_only=True` and `AddedToken(..., special=...)` argument collision are unrelated tokenizer/load issues." + "reason": "Local-files-only network leak and AddedToken keyword conflict are different bugs." 
}, { "left": "issue:43723", "right": "issue:44556", "accept": false, - "reason": "Tokenizer loading regression in v5 and checkpoint reload failure after upgrading versions are different failures." - }, - { - "left": "issue:42890", - "right": "issue:45341", - "accept": false, - "reason": "Seed-related test flakiness and a bug in `testing_utils.py` are not the same underlying problem." + "reason": "Both involve loading, but one is tokenizer loading and the other is checkpoint reloading after a version upgrade; they are not the same bug." }, { "left": "issue:43010", "right": "issue:44075", "accept": false, - "reason": "Cache update no-grad decoration and ignored SGD args are unrelated training/runtime bugs." + "reason": "no_grad on cache updates is unrelated to SGD argument handling." } ] }, "evaluator_result": { - "accept": true, - "feedback": "The summary is grounded in the packet, and the soft-edge decisions are generally conservative. The only accepted pair (41669/44246) is plausibly the same import-slowdown problem, while the rejected pairs are supported by clearly different titles and bug classes." + "accept": false, + "feedback": "The cluster-level framing is mostly grounded, but one soft-edge acceptance is too strong for the evidence available: 41669 vs 44246 may both concern import performance, yet the reason asserts a specific causal link (\u201c44246 matching the import slow-down caused by the import-* issue\u201d) that is not established by the packet. To keep the verdicts conservative, this should be downgraded unless the issue bodies explicitly tie them together. The rest of the non-merge decisions look reasonably cautious." 
}, "evaluator_used": true, - "retried": false, - "accepted_nontrivial_soft_edge": true, + "retried": true, + "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, @@ -13449,8 +13498,8 @@ "issue:44315|issue:44360", "issue:43010|issue:44368", "issue:43502|issue:44222", - "issue:43502|issue:44961", "issue:43299|issue:44556", + "issue:43502|issue:44961", "issue:44355|issue:44655", "issue:42831|issue:43874", "issue:30333|issue:36296", @@ -13477,256 +13526,256 @@ "issue:44273|issue:45003", "issue:44273|issue:45542" ], - "prepared_review_unit_hash": "d2cb30de7a95e77cdd642607d3dceacfdc42957774af529ccea9462eecf2a20a", + "prepared_review_unit_hash": "8dd138fa3b7ad89bd5ff17bf8e5c3d6a65f9dda1c9b60b02f494186f95fe699f", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 47, "item_count": 47, "soft_pair_count": 41, - "serialized_chars": 38808, - "estimated_input_tokens": 9702, - "estimated_eval_tokens": 19660 + "serialized_chars": 38809, + "estimated_input_tokens": 9703, + "estimated_eval_tokens": 19662 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "Most soft pairs are only loosely similar by topic (imports, loading, v5 regressions, FP8/MoE) but describe different concrete bugs or code paths, so they should not be deduplicated.", - "confidence": 0.86, - "canonical_issue_reason": "issue:43576 is the strongest hub among the issue candidates: it is a concrete, user-visible v5 regression and has the most nearby import/CLI-related soft matches, even though the matches are still distinct bugs.", + "summary": "This cluster is heterogeneous: the soft links mostly connect issues that share broad keywords 
like loading, imports, v5 regressions, cache/generation, or quantization, but they do not look like the same underlying bug or change.", + "confidence": 0.93, + "canonical_issue_reason": "No single true duplicate hub stands out. If forced to anchor the cluster, issue:43576 is the broadest v5 regression report with some inbound references, but the items are mostly unrelated.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43576 is the best single issue representative here because it has clear repro impact and the broadest duplicate-like neighborhood in this set.", + "best_issue_reason": "issue:43576 is the most usable cluster anchor because it is a concise, version-specific regression report with some external references; however, it is not a strong canonical duplicate representative.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44222", "right": "issue:45125", "accept": false, - "reason": "Both mention FP8/MoE, but one is save_pretrained and the other tensor-parallel model loading." + "reason": "Both involve FP8/MoE, but one is about save_pretrained and the other about missing tensor-parallel plan metadata; different bugs." }, { "left": "issue:43010", "right": "issue:43065", "accept": false, - "reason": "No shared code path: cache/no_grad decoration vs a dummy Conv2d in Sam3PixelDecoder." + "reason": "Cache update no_grad behavior and a dummy Conv2d in Sam3PixelDecoder are unrelated code-paths." }, { "left": "issue:44387", "right": "issue:44556", "accept": false, - "reason": "Different failures: int4 memory growth vs checkpoint reload compatibility after version upgrade." + "reason": "Int4 memory/OOM behavior and checkpoint reload breakage after v5 upgrade are different failure modes." }, { "left": "issue:42548", "right": "issue:44351", "accept": false, - "reason": "Both are import errors, but for different missing symbols and likely different export regressions." 
+ "reason": "Both are import errors, but for different symbols and likely different missing exports." }, { "left": "issue:45125", "right": "issue:45478", "accept": false, - "reason": "Same model family, but tensor-parallel support is not the same as a from_pretrained error." + "reason": "Tensor-parallel plan metadata and a generic from_pretrained error are not the same defect." }, { "left": "issue:43010", "right": "issue:43232", "accept": false, - "reason": "Different generation internals: no_grad cache updates vs sync_gpus/_update_model_kwargs behavior." + "reason": "Decorating cache update with no_grad and post-sync_gpus generation kwargs handling are different generation bugs." }, { "left": "issue:45125", "right": "issue:45375", "accept": false, - "reason": "Different model components: tensor parallel plan vs a missing config field silently dropped by strict parsing." + "reason": "Missing _tp_plan for tensor parallelism and a missing config field in a vision config are separate issues." }, { "left": "issue:44355", "right": "issue:44749", "accept": false, - "reason": "Unrelated issues: compiled Python file errors vs a post-upgrade data filtering slowdown." + "reason": "Compiled Python file errors and a post-upgrade slowdown are unrelated." }, { "left": "issue:43502", "right": "issue:44485", "accept": false, - "reason": "Local-files-only network leakage is unrelated to GLM-5 RoPE implementation." + "reason": "local_files_only network leakage and GLM-5 RoPE implementation are unrelated." }, { "left": "issue:30990", "right": "issue:31795", "accept": false, - "reason": "Loading hang in Sentence Transformers is not the same as documentation confusion for model.forward arguments." + "reason": "Stuck loading and documentation confusion are not the same bug." }, { "left": "issue:30333", "right": "issue:33453", "accept": false, - "reason": "MLflow reporting on failed jobs is unrelated to tokenizer loading regression." 
+ "reason": "MLflow reporting on failed training jobs and tokenizer loading regression are unrelated." }, { "left": "issue:44315", "right": "issue:44360", "accept": false, - "reason": "Liger Kernel application in model_init and a DSA indexer ReLU bug are different subsystems." + "reason": "Liger Kernel application and DSA indexer activation logic are different subsystems and defects." }, { "left": "issue:43010", "right": "issue:44368", "accept": false, - "reason": "Cache update no_grad behavior is unrelated to a tie_word_embeddings warning." + "reason": "Cache no_grad decoration and a tie_word_embeddings warning are unrelated." }, { "left": "issue:43502", "right": "issue:44222", "accept": false, - "reason": "Different bugs: unwanted API requests vs FP8 save_pretrained behavior." + "reason": "Network calls despite local_files_only and FP8 save_pretrained are different bugs." }, { - "left": "issue:43502", - "right": "issue:44961", + "left": "issue:43299", + "right": "issue:44556", "accept": false, - "reason": "No meaningful overlap beyond being generic issues." + "reason": "Qwen3VL MoE loading failure and generic checkpoint reload regression after upgrade are not clearly the same code-path." }, { - "left": "issue:43299", - "right": "issue:44556", + "left": "issue:43502", + "right": "issue:44961", "accept": false, - "reason": "Qwen3VL MoE loading failure and checkpoint reload breakage are different model/version regressions." + "reason": "local_files_only behavior and an unrelated 'racoon' issue do not match." }, { "left": "issue:44355", "right": "issue:44655", "accept": false, - "reason": "Compiled-file runtime errors and Pipeline save_pretrained failure are not the same bug." + "reason": "Compiled file execution errors and pipeline save_pretrained failure are different defects." }, { "left": "issue:42831", "right": "issue:43874", "accept": false, - "reason": "FP8 accuracy drift and missing image-patch API in GLM46V are separate issues." 
+ "reason": "FineGrainedFP8 accuracy and GLM46V image processor attribute errors are unrelated." }, { "left": "issue:30333", "right": "issue:36296", "accept": false, - "reason": "MLflow reporting on failure is unrelated to tensor-parallel training bugs." + "reason": "MLflow reporting on failed jobs and tensor parallel training bug are different issues." }, { "left": "issue:44222", "right": "issue:44230", "accept": false, - "reason": "Both are FP8/MoE, but save_pretrained support and inference support are different change requests." + "reason": "Both mention FP8/MoE, but one is save_pretrained bug and the other is inference support; not mergeable as one fix." }, { "left": "issue:44355", "right": "issue:45071", "accept": false, - "reason": "Compiled-file errors are unrelated to PretrainedConfig type-checking regressions." + "reason": "Compiled file errors and PretrainedConfig type checking breakage are unrelated." }, { "left": "issue:33453", "right": "issue:34689", "accept": false, - "reason": "Tokenizer loading regression and Llama 3.2 Vision model loading breakage are different failure modes." + "reason": "Tokenizer loading regression and Llama 3.2 vision model loading failure are different loading paths." }, { "left": "issue:42757", "right": "issue:43576", "accept": false, - "reason": "Missing is_offline_mode import and broken `transformers env` may both be v5-related, but they are distinct bugs." + "reason": "Missing is_offline_mode import and broken transformers env command are different regressions." }, { "left": "issue:43827", "right": "issue:44961", "accept": false, - "reason": "Docs still mentioning pipeline() is unrelated to the unrelated racoon issue." + "reason": "Pipeline-doc references and an unrelated 'racoon' issue do not match." }, { "left": "issue:33453", "right": "issue:36296", "accept": false, - "reason": "Tokenizer loading regression and tensor-parallel training bug are not the same underlying defect." 
+ "reason": "Tokenizer loading and tensor parallel training are unrelated." }, { "left": "issue:43576", "right": "issue:45071", "accept": false, - "reason": "A broken CLI env command and config type-checking are different v5 regressions." + "reason": "Broken env command and PretrainedConfig type checking are different failures." }, { "left": "issue:43576", "right": "issue:43824", "accept": false, - "reason": "Broken env command and missing Qwen2_5_VL export are different import/export issues." + "reason": "Broken env command and Qwen2_5_VL import error are unrelated." }, { "left": "issue:43673", "right": "issue:44492", "accept": false, - "reason": "Chunked prefill cache handling and a cache-strategy typo are not the same bug." + "reason": "Generation cache missing during chunked_prefill and a cache-strategy typo are different scopes." }, { "left": "issue:38617", "right": "issue:43576", "accept": false, - "reason": "Missing layer_type_validation export and a broken env CLI command are different import regressions." + "reason": "ImportError from configuration_utils and broken env command are not the same bug." }, { "left": "issue:44961", "right": "issue:45468", "accept": false, - "reason": "No meaningful overlap; racoon is unrelated to Gemma-4 audio positional encoding." + "reason": "A placeholder 'racoon' issue and Gemma-4 audio positional encoding are unrelated." }, { "left": "issue:31795", "right": "issue:38617", "accept": false, - "reason": "Documentation confusion and an import error for layer_type_validation are unrelated." + "reason": "Docs confusion around forward args and an import error for layer_type_validation are unrelated." }, { "left": "issue:43576", "right": "issue:44351", "accept": false, - "reason": "Broken env CLI and missing HybridCache import are distinct missing-symbol bugs." + "reason": "Broken env command and missing HybridCache import are different regressions." 
}, { "left": "issue:44908", "right": "issue:44961", "accept": false, - "reason": "Scheduler kwargs handling has nothing to do with the racoon issue." + "reason": "Scheduler kwargs handling and an unrelated 'racoon' issue do not match." }, { "left": "issue:41628", "right": "issue:42548", "accept": false, - "reason": "Both are import errors, but for different classes and likely different export paths." + "reason": "AutoImageProcessor and PreTrainedModel import failures are separate missing-export issues." }, { "left": "issue:44485", "right": "issue:44961", "accept": false, - "reason": "GLM-5 RoPE implementation and racoon are unrelated." + "reason": "GLM-5 RoPE implementation and an unrelated 'racoon' issue are not the same." }, { "left": "issue:36296", "right": "issue:43901", "accept": false, - "reason": "Tensor-parallel training and TextClassificationPipeline docs are unrelated." + "reason": "Tensor parallel training and TextClassificationPipeline docs are unrelated." }, { "left": "issue:36296", "right": "issue:43012", "accept": false, - "reason": "Tensor-parallel training bugs do not match bfloat16 compile warnings." + "reason": "Tensor parallel training and bfloat16 compile warning are different bugs." }, { "left": "issue:41628", @@ -13738,19 +13787,19 @@ "left": "issue:44261", "right": "issue:44360", "accept": false, - "reason": "MLA layer norm epsilon precision issue is not the same as the DSA indexer ReLU bug." + "reason": "Missing rms_norm_eps in MLA q_a_layernorm and DSA indexer ReLU absence are different precision/logic issues." }, { "left": "issue:44273", "right": "issue:45003", "accept": false, - "reason": "Lazy loading failure and sys.modules access concerns may be adjacent, but not the same concrete bug." + "reason": "Lazy loading problems and unsafe sys.modules access are related only superficially; not the same concrete bug." 
}, { "left": "issue:44273", "right": "issue:45542", "accept": false, - "reason": "Lazy loading and a TensorFlow backend import error are unrelated." + "reason": "Lazy loading and undefined tf backend error from tensorboard-only installs are unrelated." } ] }, @@ -13874,20 +13923,20 @@ "issue:43606|issue:45468", "issue:43704|issue:45341" ], - "prepared_review_unit_hash": "0d59a3380e2ef8ba456de821000717d91f42a8ad9b61bf84d3c9571a5b697ea4", + "prepared_review_unit_hash": "47a84ab8025d0858b2c554575c02e24bef94bc3152f49262c167b730ac7a415d", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 48, "item_count": 48, "soft_pair_count": 49, - "serialized_chars": 41786, + "serialized_chars": 41785, "estimated_input_tokens": 10447, "estimated_eval_tokens": 21150 }, @@ -13896,306 +13945,306 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a loose set of unrelated Transformers issues with a weak subtheme around v5 loading/tokenizer/config regressions. The soft edges are driven by broad lexical similarity, but the underlying bugs and code paths are usually different.", - "confidence": 0.69, - "canonical_issue_reason": "Issue 44556 is the clearest concrete anchor in the loading/regression subtheme: it describes a specific checkpoint reload failure after upgrading to v5.2/v5.3, which is more representative than generic or overly narrow reports.", + "summary": "This cluster is dominated by title-level similarity around Transformers v5/loading/tokenizer/model issues, but the pairs do not look like the same underlying bug or change. 
I rejected all soft edges and used one concrete v5 loading regression as the best representative issue.", + "confidence": 0.71, + "canonical_issue_reason": "issue:44556 is the most concrete and representative loading-regression report in the set: it has a specific upgrade path (v4.57 to v5.2/v5.3) and a clear failure mode, unlike the many generic or loosely related items.", "canonical_pr_reason": null, - "best_issue_reason": "44556 is the most actionable and broadly representative issue among the candidates, since it is a concrete v5 compatibility/loading failure rather than a generic placeholder or a narrowly model-specific report.", + "best_issue_reason": "issue:44556 is the strongest anchor for duplicate triage because it is specific, reproducible, and sits in the common compatibility-breakage theme seen across several nearby titles.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44623", "right": "issue:45230", "accept": false, - "reason": "Processor save_pretrained missing-files bug vs a generic placeholder report; not the same issue." + "reason": "Different bugs: processor save_pretrained missing files vs a generic bug report with no concrete matching failure mode." }, { "left": "issue:43576", "right": "issue:44556", "accept": false, - "reason": "Both are v5 regressions, but one is the env command and the other is checkpoint reload after upgrade; different failures." + "reason": "Both mention v5 breakage, but one is an env command issue and the other is checkpoint reload compatibility; different codepaths." }, { "left": "issue:41669", "right": "issue:43673", "accept": false, - "reason": "Import-time file-generation/performance issue vs chunked_prefill cache regression; unrelated code paths." + "reason": "Import-star performance/regression is unrelated to GenerationMixin cache behavior during chunked_prefill." 
}, { "left": "issue:44038", "right": "issue:45125", "accept": false, - "reason": "Same model family, but Qwen3-VL-Moe breakage and missing _tp_plan are different bugs." + "reason": "Same model family, but one is a general Qwen3-VL-Moe bug and the other is missing _tp_plan for tensor parallelism; not the same issue." }, { "left": "issue:44393", "right": "issue:45071", "accept": false, - "reason": "2D bbox output quality bug vs PretrainedConfig type-checking regression; not the same defect." + "reason": "Qwen3-VL bounding-box output error is unrelated to PretrainedConfig type-checking breakage." }, { "left": "issue:44556", "right": "issue:45092", "accept": false, - "reason": "Both concern loading old checkpoints in v5, but the specific incompatibility and failure mode differ." + "reason": "Both are v5 model-loading incompatibilities, but one is a checkpoint reload regression and the other is remote-code/meta-init incompatibility for a specific model." }, { "left": "issue:42371", "right": "issue:44492", "accept": false, - "reason": "TF32 settings warning vs cache-strategy typo; unrelated." + "reason": "TF32 API changes and a cache-strategy typo are unrelated." }, { "left": "issue:42890", "right": "issue:44908", "accept": false, - "reason": "Flaky tests due to missing seeds vs scheduler kwargs propagation; different subsystems." + "reason": "Missing set_seed in tests is unrelated to inverse_sqrt scheduler kwargs handling." }, { "left": "issue:44704", "right": "issue:44908", "accept": false, - "reason": "AutoProcessor kwargs forwarding vs inverse_sqrt scheduler kwargs handling; unrelated." + "reason": "AutoProcessor kwargs forwarding and scheduler kwargs propagation are different bugs in different subsystems." }, { "left": "issue:44246", "right": "issue:44273", "accept": false, - "reason": "Import slowdowns and lazy-loading malfunction are related only loosely; not clearly the same bug." 
+ "reason": "Import slowness and lazy-loading failure are related only at a broad theme level; not the same concrete bug." }, { "left": "issue:43408", "right": "issue:44315", "accept": false, - "reason": "Model type warning vs Liger Kernel not applied with model_init; different behaviors." + "reason": "Model-type warning vs Liger Kernel not applied with model_init; different mechanisms and symptoms." }, { "left": "issue:43976", "right": "issue:44393", "accept": false, - "reason": "Python-version compatibility regression vs Qwen3-VL output bug; unrelated." + "reason": "Python version compatibility issue is unrelated to Qwen3-VL output correctness." }, { "left": "issue:43352", "right": "issue:44393", "accept": false, - "reason": "Flash Attention support limitation vs Qwen3-VL bounding-box issue; not the same." + "reason": "Flash Attention 2 support gap is unrelated to Qwen3-VL hallucination/error output." }, { "left": "issue:43576", "right": "issue:44908", "accept": false, - "reason": "Env command regression vs scheduler kwargs bug; no shared code-path bug." + "reason": "v5 env command breakage and scheduler kwargs bug are unrelated." }, { "left": "issue:44908", "right": "issue:45071", "accept": false, - "reason": "Scheduler kwargs bug vs PretrainedConfig typing regression; different defects." + "reason": "Scheduler kwargs handling and PretrainedConfig type-checking are separate issues." }, { "left": "issue:44297", "right": "issue:45230", "accept": false, - "reason": "Tokenizer class mismatch vs generic placeholder bug report; not enough overlap." + "reason": "Tokenizer_class mismatch in save_pretrained is not the same as a generic bug report." }, { "left": "issue:44295", "right": "issue:45230", "accept": false, - "reason": "Position_ids buffer read error vs placeholder report; unrelated." + "reason": "position_ids buffer read error is a distinct model-tensor issue, not the generic bug report." 
}, { "left": "issue:41669", "right": "issue:44351", "accept": false, - "reason": "Import * performance issue vs HybridCache import failure; different problems." + "reason": "Import-star cleanup/performance is unrelated to missing HybridCache import." }, { "left": "issue:41669", "right": "issue:45341", "accept": false, - "reason": "Import-time slowdown vs a bug in testing_utils; unrelated." + "reason": "Import-time cleanup issue is unrelated to a small testing_utils bug." }, { "left": "issue:44492", "right": "issue:44945", "accept": false, - "reason": "Cache-strategy typo vs incorrect LLM output under pipeline parallelism; different issues." + "reason": "Cache-strategy typo and incorrect pipeline-parallel output are different failures." }, { "left": "issue:41669", "right": "issue:44492", "accept": false, - "reason": "Import * performance regression vs cache-strategy typo; not the same bug." + "reason": "Import-star performance issue does not match a cache-strategy typo." }, { "left": "issue:44393", "right": "issue:45478", "accept": false, - "reason": "Different Qwen3.5/Qwen3-VL failures and different symptoms; not duplicates." + "reason": "Different Qwen3.5 Moe bug: bounding-box hallucination vs from_pretrained error." }, { "left": "issue:44315", "right": "issue:44829", "accept": false, - "reason": "Liger Kernel application bug vs flash_attention_3 training degeneration; distinct code paths." + "reason": "Liger Kernel application with model_init is unrelated to flash_attention_3 causing degenerate training." }, { "left": "issue:43827", "right": "issue:44908", "accept": false, - "reason": "Docs still referencing pipeline() vs scheduler kwargs handling; unrelated." + "reason": "Docs still referencing pipeline() is unrelated to scheduler kwargs handling." }, { "left": "issue:31795", "right": "issue:33453", "accept": false, - "reason": "Documentation confusion in model.forward vs tokenizer loading regression; different issues." 
+ "reason": "Documentation confusion and tokenizer-loading regression are not the same bug." }, { "left": "issue:44077", "right": "issue:44861", "accept": false, - "reason": "Optional post_init policy vs _get_tied_weight_keys AttributeError; not the same bug." + "reason": "Optional post_init allowance and _get_tied_weight_keys AttributeError are separate model-internal issues." }, { "left": "issue:41669", "right": "issue:44908", "accept": false, - "reason": "Import performance issue vs scheduler kwargs bug; unrelated." + "reason": "Import performance regression is unrelated to scheduler behavior." }, { "left": "issue:43723", "right": "issue:44393", "accept": false, - "reason": "Tokenizer loading failure vs Qwen3-VL bounding-box bug; different subsystems." + "reason": "Tokenizer loading in v5 and Qwen3-VL bounding-box output errors are unrelated." }, { "left": "issue:43576", "right": "issue:44273", "accept": false, - "reason": "Env command regression vs lazy-loading issue; not the same underlying failure." + "reason": "Broken env command and lazy-loading malfunction are different problems, even if both touch v5-era internals." }, { "left": "issue:44655", "right": "issue:44861", "accept": false, - "reason": "Pipeline save_pretrained problem vs tied-weights AttributeError; different code paths." + "reason": "Pipeline save_pretrained support and tied-weight key handling are unrelated." }, { "left": "issue:44360", "right": "issue:45468", "accept": false, - "reason": "DSA indexer math complaint vs Gemma-4 audio positional-encoding bug; unrelated." + "reason": "DSA indexer activation issue and Gemma-4 audio positional encoding are unrelated." }, { "left": "issue:44273", "right": "issue:44908", "accept": false, - "reason": "Lazy loading malfunction vs scheduler kwargs bug; different issues." + "reason": "Lazy loading failure does not match inverse_sqrt scheduler kwargs behavior." 
}, { "left": "issue:45230", "right": "issue:45310", "accept": false, - "reason": "Generic bug report vs specific Qwen3.5 Moe from_pretrained error; no concrete duplicate evidence." + "reason": "Generic bug report is not a concrete duplicate of the Qwen3.5 Moe from_pretrained error." }, { "left": "issue:43519", "right": "issue:44485", "accept": false, - "reason": "Qwen3VL timestamp calculation vs GLM-5 RoPE implementation; different model/math paths." + "reason": "Qwen3VL timestamp calculation and GLM-5 RoPE implementation are different model-specific math bugs." }, { "left": "issue:41669", "right": "issue:43704", "accept": false, - "reason": "Import * performance regression vs VRAM leak in dataloader threads; unrelated." + "reason": "Import-star slowdown and VRAM leak in multi-threaded dataloader use are unrelated." }, { "left": "issue:43519", "right": "issue:44908", "accept": false, - "reason": "Processor timestamp math vs scheduler kwargs bug; not the same issue." + "reason": "Timestamp calculation in a processor and scheduler kwargs handling are unrelated." }, { "left": "issue:31795", "right": "issue:34689", "accept": false, - "reason": "Documentation issue vs model loading regression; different bug classes." + "reason": "Documentation confusion about forward args is unrelated to Llama 3.2 model-loading regression." }, { "left": "issue:31795", "right": "issue:37428", "accept": false, - "reason": "Docs confusion vs flash-attention import error; unrelated." + "reason": "Forward-doc confusion is unrelated to a flash-attention import error." }, { "left": "issue:44315", "right": "issue:45092", "accept": false, - "reason": "Liger Kernel/model_init issue vs legacy checkpoint meta-init incompatibility; different failures." + "reason": "Liger Kernel/model_init and remote-code meta-initialization incompatibility are not the same bug." 
}, { "left": "issue:43704", "right": "issue:44485", "accept": false, - "reason": "VRAM leak in dataloader threads vs GLM-5 RoPE implementation; unrelated." + "reason": "VRAM leak in dataloader threads and GLM-5 RoPE implementation are unrelated." }, { "left": "issue:44485", "right": "issue:45468", "accept": false, - "reason": "GLM-5 RoPE implementation vs Gemma-4 audio positional encoding; different models and bugs." + "reason": "Different model internals: RoPE implementation vs audio relative positional encoding." }, { "left": "issue:42371", "right": "issue:43704", "accept": false, - "reason": "TF32 behavior warning vs VRAM leak; unrelated." + "reason": "TF32 API guidance is unrelated to a VRAM leak." }, { "left": "issue:43502", "right": "issue:43519", "accept": false, - "reason": "API requests despite local_files_only vs Qwen3VL timestamp bug; different subsystems." + "reason": "local_files_only network requests and Qwen3VL timestamp math are different failure modes." }, { "left": "issue:43519", "right": "issue:44623", "accept": false, - "reason": "Processor timestamp math vs processor save_pretrained missing files; not the same issue." + "reason": "Timestamp calculation bug and processor save_pretrained missing files are unrelated." }, { "left": "issue:43519", "right": "issue:43704", "accept": false, - "reason": "Qwen3VL timestamp bug vs multi-thread VRAM leak; unrelated." + "reason": "Timestamp calculation in a processor is unrelated to a multi-threaded VRAM leak." }, { "left": "issue:44263", "right": "issue:45468", "accept": false, - "reason": "torch.split return-value issue vs Gemma-4 audio positional encoding bug; different defects." + "reason": "torch.split return semantics in GlmMoeDsaIndexer are unrelated to Gemma-4 audio positional encoding." }, { "left": "issue:44261", "right": "issue:45468", "accept": false, - "reason": "MLA q_a_layernorm precision/config issue vs Gemma-4 audio positional encoding; unrelated." 
+ "reason": "Layernorm epsilon precision discussion is unrelated to audio relative positional encoding." }, { "left": "issue:43606", "right": "issue:45468", "accept": false, - "reason": "CPU offload device-mismatch CI bug vs Gemma-4 audio positional encoding; different code paths." + "reason": "CPU offload device mismatch and Gemma-4 audio positional encoding are unrelated." }, { "left": "issue:43704", "right": "issue:45341", "accept": false, - "reason": "VRAM leak in dataloader threads vs testing_utils bug; unrelated." + "reason": "VRAM leak in multi-threaded loading does not match a testing_utils bug." } ] }, @@ -14261,14 +14310,14 @@ "issue:31515|issue:33453", "issue:43298|issue:44230" ], - "prepared_review_unit_hash": "69926dc5a0c0196e2e3ca9c4aa37810c3a1b0dd39e8a144a55cfba67930f1085", + "prepared_review_unit_hash": "370d30b279fbc797fdf89d3a7c11882f1eec603a436ee61e061469792d51ad1f", "component_budget": { "node_count": 381, "item_count": 381, "soft_pair_count": 1427, - "serialized_chars": 735811, - "estimated_input_tokens": 183953, - "estimated_eval_tokens": 368162 + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 }, "budget": { "node_count": 22, @@ -14283,114 +14332,114 @@ "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "These items are mostly unrelated single-issue reports rather than duplicates. The soft pairs are mostly broad thematic matches (e.g. tokenizer/v5 regressions, loading issues) but not the same concrete bug or code path, so none should be merged.", - "confidence": 0.89, - "canonical_issue_reason": "Issue 44556 is the clearest, most actionable regression report in the set: it describes a specific checkpoint reload failure after a v4.57\u2192v5.x upgrade. It is more concrete and triage-friendly than the other broad or ambiguous reports.", + "summary": "This cluster is mostly heterogeneous. 
There is a loose tokenizer-v5 theme, but none of the soft pairs look like the same underlying bug/change strongly enough to merge. No PRs are present.", + "confidence": 0.34, + "canonical_issue_reason": "If a representative is needed, issue 42913 is the broadest tokenizer-v5 regression report; however, the cluster does not look like one true duplicate set.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 44556 is the strongest representative issue because it is specific, reproducible, and describes a likely regression with a clear upgrade path and impact.", + "best_issue_reason": "42913 is the best cluster representative because it is the most general and likely to subsume nearby tokenizer-v5 complaints, even though it is not a confirmed duplicate of the others.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:42913", "right": "issue:43066", "accept": false, - "reason": "Both are tokenizer-related v5 complaints, but one is a broad behavior difference and the other is a specific wrong decoder-type report; they do not describe the same concrete defect." + "reason": "Both concern tokenizer behavior in v5, but one is a broad v4\u2192v5 behavior regression and the other is a specific decoder-type mismatch; not clearly the same bug." }, { "left": "issue:44908", "right": "issue:44998", "accept": false, - "reason": "Inverse-sqrt scheduler kwarg handling and an unrelated unemployment/off-topic issue are clearly different." + "reason": "Scheduler kwargs handling is unrelated to an unrelated/non-technical issue." }, { "left": "issue:44998", "right": "issue:45341", "accept": false, - "reason": "Completely unrelated: one is off-topic, the other is a testing_utils bug." + "reason": "No shared code path or symptom; these are unrelated issues." }, { "left": "issue:44861", "right": "issue:45290", "accept": false, - "reason": "Different code paths: tied-weights key extraction crash versus chat template handling for tool-call assistant messages." 
+ "reason": "Different subsystems and different failure modes: tied-weights attribute handling vs chat template crash." }, { "left": "issue:44492", "right": "issue:44998", "accept": false, - "reason": "A typo in cache strategies is unrelated to the off-topic unemployment issue." + "reason": "A typo in cache strategies is unrelated to the other issue." }, { "left": "issue:43618", "right": "issue:44998", "accept": false, - "reason": "CLIPOutput attentions regression and an off-topic issue are unrelated." + "reason": "CLIP output attentions regression is unrelated." }, { "left": "issue:44556", "right": "issue:44998", "accept": false, - "reason": "Checkpoint reload regression after version upgrade is unrelated to the off-topic unemployment issue." + "reason": "Checkpoint reload compatibility after v5 upgrade is unrelated to the other issue." }, { "left": "issue:43525", "right": "issue:44861", "accept": false, - "reason": "Missing pad_token_id in Llama4Config is a different bug from the tied-weights AttributeError." + "reason": "Missing pad_token_id on Llama4Config and tied-weight key handling are different bugs." }, { "left": "issue:43572", "right": "issue:44861", "accept": false, - "reason": "Missing pad_token_idx in StableLmConfig is a separate config regression, not the tied-weights crash." + "reason": "Missing pad_token_idx on StableLmConfig is a separate config regression." }, { "left": "issue:31515", "right": "issue:38617", "accept": false, - "reason": "Slow checkpoint loading and an ImportError for layer_type_validation are unrelated problems." + "reason": "Slow checkpoint loading and an ImportError from configuration_utils are different code paths." }, { "left": "issue:44230", "right": "issue:44623", "accept": false, - "reason": "FP8 inference support for Qwen3-VL/Qwen3.5 MoE is not the same as missing files from processor.save_pretrained." + "reason": "FP8/Qwen3-VL support and missing processor save files are not the same bug." 
}, { "left": "issue:31515", "right": "issue:36296", "accept": false, - "reason": "Checkpoint loading performance and a tensor-parallel training bug are different issues." + "reason": "Checkpoint loading speed and tensor-parallel training are unrelated." }, { "left": "issue:30333", "right": "issue:31515", "accept": false, - "reason": "MLflow reporting on failed training jobs is unrelated to checkpoint loading speed." + "reason": "MLFlow job-status reporting is unrelated to checkpoint loading performance." }, { "left": "issue:31515", "right": "issue:31795", "accept": false, - "reason": "Checkpoint loading performance and confusing model.forward docs are not the same underlying bug." + "reason": "Loading performance and documentation confusion are not the same underlying issue." }, { "left": "issue:44230", "right": "issue:45310", "accept": false, - "reason": "Both mention Qwen3-family models, but one is an FP8 support request and the other is a concrete from_pretrained error; too different to merge as duplicates." + "reason": "Both mention Qwen3.5 MoE, but one is FP8 support and the other is a from_pretrained error; not clearly one concrete bug." }, { "left": "issue:31515", "right": "issue:33453", "accept": false, - "reason": "A slow from_pretrained checkpoint load and a tokenizer-loading regression are not the same defect." + "reason": "Checkpoint loading slowdown and tokenizer-loading regression are separate issues." }, { "left": "issue:43298", "right": "issue:44230", "accept": false, - "reason": "Continuous batching lacking audio-model support is a different capability gap from FP8 Qwen3-VL/Qwen3.5 MoE support." + "reason": "Continuous batching audio-model support and FP8 inference support are different feature gaps." 
} ] }, @@ -14420,7 +14469,7 @@ "soft_pairs": [ "issue:26350|issue:33260" ], - "prepared_review_unit_hash": "7296b965239bcf252d33470058bf50138ae7b73dd6f6b05d4c9e923658578584", + "prepared_review_unit_hash": "efe8ec252c55ddf8c41f0561f93d8e2b515b3f69970a2821f528f9294ebd8913", "component_budget": { "node_count": 2, "item_count": 2, @@ -14442,7 +14491,7 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These two issues are about different community-contribution feature requests: Flash Attention 2 support for more architectures vs. GGUF support for more architectures. They are not duplicates and should not be merged.", + "summary": "These are two different community feature requests in the same broad area, but they target unrelated capabilities: Flash Attention 2 support vs GGUF support. They should not be treated as duplicates.", "confidence": 0.98, "canonical_issue_reason": null, "canonical_pr_reason": null, @@ -14453,7 +14502,7 @@ "left": "issue:26350", "right": "issue:33260", "accept": false, - "reason": "Different underlying feature requests with different targets (Flash Attention 2 vs GGUF). They do not describe the same bug or change, so this is not a duplicate pair." + "reason": "Different underlying changes/features; one requests Flash Attention 2 support and the other requests GGUF support, so they are not the same bug or fix." 
} ] }, @@ -14490,70 +14539,70 @@ "issue:43834|issue:44365", "issue:44365|issue:44370" ], - "prepared_review_unit_hash": "03f7a243c5ca3148b4ff924254d232dc6f8cfaf1b0e702cb0914d602ef327482", + "prepared_review_unit_hash": "70b8053e33e3b6bed84eafd239d5700deb77468f622d38e82e7c06d6fe306e3d", "component_budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 4452, - "estimated_input_tokens": 1113, - "estimated_eval_tokens": 2482 + "serialized_chars": 4455, + "estimated_input_tokens": 1114, + "estimated_eval_tokens": 2484 }, "budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 4452, - "estimated_input_tokens": 1113, - "estimated_eval_tokens": 2482 + "serialized_chars": 4455, + "estimated_input_tokens": 1114, + "estimated_eval_tokens": 2484 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "This cluster is dominated by generic i18n translation-template issues, with one concrete Turkish docs translation issue. The concrete Turkish issue is the best canonical issue; the placeholder/template issues are too underspecified to treat as exact duplicates of each other or of the Turkish one.", - "confidence": 0.9, - "canonical_issue_reason": "Issue 27088 is the only concrete, language-specific report ('Turkish') rather than a placeholder template, so it best represents the real actionable issue in the cluster.", + "summary": "All four items are documentation-translation issues, but only #27088 is concrete (Turkish). The others are generic placeholder/template issues with no language specified, so they are not safe duplicate merges. No PRs are present.", + "confidence": 0.93, + "canonical_issue_reason": "#27088 is the only specific, actionable issue: it names Turkish docs translation explicitly. 
The other issues are generic placeholders ([i18n-] / ) and do not identify the same concrete problem.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 27088 is the strongest issue to keep as the representative: it names a specific language, is not a generic template, and is the most actionable artifact here.", + "best_issue_reason": "#27088 is the best representative because it is specific, user-facing, and clearly scoped to one translation task. The others look like template instances rather than a concrete duplicate target.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:27088", "right": "issue:44370", "accept": false, - "reason": "Different concrete language targets: one is Turkish, the other is a generic placeholder. Similar topic, but not the same underlying issue." + "reason": "Both are translation-related, but #44370 is a generic placeholder issue and does not identify Turkish docs or the same concrete work item." }, { "left": "issue:27088", "right": "issue:44365", "accept": false, - "reason": "The Turkish-specific report is not the same as a generic placeholder translation issue; no evidence they refer to the same language or same task." + "reason": "#27088 is specifically Turkish; #44365 is only a template stub, so there is not enough evidence that they are the same underlying issue." }, { "left": "issue:27088", "right": "issue:43834", "accept": false, - "reason": "One issue is explicitly Turkish while the other is only a template stub. Too little evidence that they describe the same change or bug." + "reason": "#43834 is a generic language placeholder issue, not a concrete Turkish translation request, so this is not a safe duplicate." }, { "left": "issue:43834", "right": "issue:44370", "accept": false, - "reason": "Both are template-like translation issues, but there is no concrete language or scope match to prove they are the same underlying request." 
+ "reason": "These are near-identical templates, but without actual language details they cannot be confirmed as the same underlying issue." }, { "left": "issue:43834", "right": "issue:44365", "accept": false, - "reason": "Near-identical template text alone is not enough; these could be separate language translation requests, so do not merge." + "reason": "Same generic i18n template wording only; no concrete language or task details to justify duplicate merging." }, { "left": "issue:44365", "right": "issue:44370", "accept": false, - "reason": "Same generic template wording, but no concrete evidence that they target the same language or identical work item." + "reason": "Both are placeholder issues with identical template titles, but the available data is too generic to prove they refer to one specific bug/change." } ] }, @@ -14622,12 +14671,12 @@ "issue:43837|issue:45027", "issue:43837|issue:45177" ], - "prepared_review_unit_hash": "a63d57906e1cae48af1797b2aed0699a2adf9e7e258d981d62645c813b5d635b", + "prepared_review_unit_hash": "bc5136440272b7bab1e0c331202c49a792e339b34f33eec72b4b7ba62d35bab0", "component_budget": { "node_count": 14, "item_count": 14, "soft_pair_count": 28, - "serialized_chars": 17648, + "serialized_chars": 17647, "estimated_input_tokens": 4412, "estimated_eval_tokens": 9080 }, @@ -14635,7 +14684,7 @@ "node_count": 14, "item_count": 14, "soft_pair_count": 28, - "serialized_chars": 17648, + "serialized_chars": 17647, "estimated_input_tokens": 4412, "estimated_eval_tokens": 9080 }, @@ -14644,8 +14693,8 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are all separate model-support requests across different model families and modalities; despite broad topical similarity, none look like true duplicates or the same underlying change.", - "confidence": 0.96, + "summary": "The cluster is mostly broad model-request issues that look superficially similar but refer to different models, modalities, or feature 
additions. They do not appear to be duplicates of the same underlying change.", + "confidence": 0.97, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, @@ -14655,169 +14704,169 @@ "left": "issue:36817", "right": "issue:41211", "accept": false, - "reason": "EuroBert config support and DEIMv2 model support are unrelated requests for different architectures." + "reason": "Different model requests: EuroBert config support vs DEIMv2 architecture support. Same broad \u2018add model\u2019 theme, but not the same change." }, { "left": "issue:36817", "right": "issue:43671", "accept": false, - "reason": "EuroBert config support is unrelated to Qwen3-TTS audio support." + "reason": "EuroBert config vs Qwen3-TTS support are unrelated models and tasks." }, { "left": "issue:37015", "right": "issue:43837", "accept": false, - "reason": "NeoBERT support and Qwen3-ASR support target different models and different inference paths." + "reason": "NeoBERT addition and Qwen3-ASR support target different models and modalities." }, { "left": "issue:37015", "right": "issue:44121", "accept": false, - "reason": "NeoBERT model addition is unrelated to the OpenAI weight-sparse transformer request." + "reason": "NeoBERT vs OpenAI weight-sparse transformer are different model families with different implementation needs." }, { "left": "issue:37015", "right": "issue:43909", "accept": false, - "reason": "NeoBERT support and LFM2.5 Audio support are different model families with different requirements." + "reason": "NeoBERT and LFM2.5 Audio are unrelated model additions." }, { "left": "issue:37015", "right": "issue:42503", "accept": false, - "reason": "NeoBERT and ModernVBERT are separate model requests, not the same concrete bug or change." + "reason": "NeoBERT and ModernVBERT are different models; no evidence they are the same underlying request." 
}, { "left": "issue:37015", "right": "issue:45027", "accept": false, - "reason": "NeoBERT and Voxtral TTS support are unrelated model additions." + "reason": "NeoBERT vs Voxtral TTS support are distinct model integrations." }, { "left": "issue:41115", "right": "issue:43837", "accept": false, - "reason": "MiniCPM3 architecture support and Qwen3-ASR support are distinct feature requests." + "reason": "MiniCPM3 architecture support and Qwen3-ASR support are different model-specific requests." }, { "left": "issue:41115", "right": "issue:44121", "accept": false, - "reason": "MiniCPM3 support is unrelated to the weight-sparse transformer request." + "reason": "MiniCPM3 architecture work is not the same as adding the OpenAI sparse transformer model." }, { "left": "issue:41115", "right": "issue:42503", "accept": false, - "reason": "MiniCPM3 and ModernVBERT are different model families and not duplicates." + "reason": "MiniCPM3 and ModernVBERT are separate model additions." }, { "left": "issue:41115", "right": "issue:45027", "accept": false, - "reason": "MiniCPM3 support is unrelated to Voxtral-4B-TTS support." + "reason": "MiniCPM3 architecture request and Voxtral TTS support are unrelated." }, { "left": "issue:41211", "right": "issue:43671", "accept": false, - "reason": "DEIMv2 model support and Qwen3-TTS support are separate requests." + "reason": "DEIMv2 addition and Qwen3-TTS support are different model requests." }, { "left": "issue:42503", "right": "issue:43837", "accept": false, - "reason": "ModernVBERT support and Qwen3-ASR support are different models and different tasks." + "reason": "ModernVBERT models and Qwen3-ASR support are unrelated additions." }, { "left": "issue:42503", "right": "issue:43671", "accept": false, - "reason": "ModernVBERT and Qwen3-TTS are unrelated model-support requests." + "reason": "ModernVBERT and Qwen3-TTS are different models and tasks." 
}, { "left": "issue:42503", "right": "issue:42738", "accept": false, - "reason": "ModernVBERT models and general BERT-like RoPE support overlap conceptually, but they are not the same concrete request or code change." + "reason": "ModernVBERT models vs BERT-like models with RoPE is only a broad architectural similarity, not the same concrete request." }, { "left": "issue:42738", "right": "issue:45027", "accept": false, - "reason": "General BERT-like RoPE support is unrelated to Voxtral TTS support." + "reason": "BERT-like RoPE support and Voxtral TTS support are unrelated feature requests." }, { "left": "issue:43671", "right": "issue:43837", "accept": false, - "reason": "Both are Qwen3 audio requests, but TTS and ASR are different model targets and would not be merged into one fix." + "reason": "Both are Qwen3 requests, but TTS and ASR are different modalities and separate integrations." }, { "left": "issue:43671", "right": "issue:45027", "accept": false, - "reason": "Qwen3-TTS and Voxtral-4B-TTS are different TTS model additions." + "reason": "Qwen3-TTS and Voxtral TTS are different models; same modality but not the same underlying change." }, { "left": "issue:43671", "right": "issue:45177", "accept": false, - "reason": "Qwen3-TTS support is unrelated to DeepSeek-OCR2 support." + "reason": "Qwen3-TTS support and DeepSeek-OCR2 are different model requests." }, { "left": "issue:43671", "right": "issue:44121", "accept": false, - "reason": "Qwen3-TTS support and weight-sparse transformer support are unrelated." + "reason": "Qwen3-TTS support is unrelated to the OpenAI sparse transformer request." }, { "left": "issue:43671", "right": "issue:43909", "accept": false, - "reason": "Qwen3-TTS and LFM2.5 Audio are separate audio-model support requests." + "reason": "Qwen3-TTS and LFM2.5 Audio are distinct audio model requests." 
}, { "left": "issue:43837", "right": "issue:43909", "accept": false, - "reason": "Qwen3-ASR and LFM2.5 Audio are different models and different integration paths." + "reason": "Qwen3-ASR and LFM2.5 Audio are different audio models with separate support paths." }, { "left": "issue:43837", "right": "issue:45322", "accept": false, - "reason": "Qwen3-ASR support is unrelated to EUPE model support." + "reason": "Qwen3-ASR support and EUPE encoder support are unrelated model additions." }, { "left": "issue:43837", "right": "issue:44121", "accept": false, - "reason": "Qwen3-ASR support and the weight-sparse transformer request are unrelated." + "reason": "Qwen3-ASR and the OpenAI weight-sparse transformer are different model integrations." }, { "left": "issue:43909", "right": "issue:44205", "accept": false, - "reason": "LFM2.5 Audio support and SAM3-LiteText are different model requests." + "reason": "LFM2.5 Audio and SAM3-LiteText are different models and different use cases." }, { "left": "issue:44121", "right": "issue:45322", "accept": false, - "reason": "The weight-sparse transformer request and EUPE are different proposed models, not the same change." + "reason": "OpenAI sparse transformer support and EUPE encoder support are unrelated requests." }, { "left": "issue:43837", "right": "issue:45027", "accept": false, - "reason": "Qwen3-ASR and Voxtral-4B-TTS are different speech-model support requests." + "reason": "Qwen3-ASR and Voxtral-4B-TTS are different speech models and not the same change." }, { "left": "issue:43837", "right": "issue:45177", "accept": false, - "reason": "Qwen3-ASR support and DeepSeek-OCR2 support are unrelated." + "reason": "Qwen3-ASR support and DeepSeek-OCR2 addition are different model requests." 
} ] }, @@ -14858,12 +14907,12 @@ "issue:43668|issue:44031", "issue:43668|issue:43813" ], - "prepared_review_unit_hash": "be9be9e3985fe90301d4d44c167d875dd8408d5cc3c6b96540eda1fdfb961d15", + "prepared_review_unit_hash": "442133c41799bc70f07079ff922d28c5bb1714fa3695e3bd6605d8cdea8356a2", "component_budget": { "node_count": 6, "item_count": 6, "soft_pair_count": 8, - "serialized_chars": 6290, + "serialized_chars": 6292, "estimated_input_tokens": 1573, "estimated_eval_tokens": 3402 }, @@ -14871,7 +14920,7 @@ "node_count": 6, "item_count": 6, "soft_pair_count": 8, - "serialized_chars": 6290, + "serialized_chars": 6292, "estimated_input_tokens": 1573, "estimated_eval_tokens": 3402 }, @@ -14880,60 +14929,60 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These issues are not duplicates of one another; they span unrelated model, config, API, typo, and tokenizer-regression topics. The cluster looks like a loose soft-similarity grouping rather than a true duplicate set.", - "confidence": 0.91, - "canonical_issue_reason": "issue:43240 is the most broadly phrased and externally referenced item in the set, so it is the best representative of the cluster even though the cluster itself does not contain a true duplicate.", + "summary": "This cluster is heterogeneous: the issues span unrelated areas (model normalization, Mask2Former loss behavior, config typing, a typo, and tokenizer warning behavior). 
None of the soft edges look like the same underlying bug.", + "confidence": 0.98, + "canonical_issue_reason": "No canonical issue fits well here; the items are about different code paths and bug classes, so the cluster should not be merged around a single issue.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43240 has the strongest centrality signals (inbound references/discussion) and the most general bug framing, making it the best issue candidate for the cluster.", + "best_issue_reason": "No single issue is a good global representative because there is no common underlying defect across the cluster.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:37934", "right": "issue:43668", "accept": false, - "reason": "Different model components and problem types: Llama4TextL2Norm vs ModernBERTConfig type hint. One is about normalization semantics, the other about an annotation typo." + "reason": "Both mention normalization-related terms, but one is about Llama4TextL2Norm semantics and the other is a ModernBERT config type hint; these are unrelated bugs." }, { "left": "issue:42936", "right": "issue:43240", "accept": false, - "reason": "Mask2Former ignore_value handling and loss kwargs forwarding are separate code paths and bugs; they are not the same underlying defect." + "reason": "Mask2Former ignore_value being unused and kwargs not reaching a loss function are different implementation problems in different paths." }, { "left": "issue:42936", "right": "issue:44031", "accept": false, - "reason": "Unrelated subsystems: Mask2Former loss/ignore_value versus tokenizer regex warning regression." + "reason": "Mask2Former loss handling is unrelated to tokenizer regex warning behavior." }, { "left": "issue:42936", "right": "issue:43813", "accept": false, - "reason": "Model loss-handling bug vs a literal typo in a quantization-related name; no shared code-path." 
+ "reason": "A loss-argument issue and a typo in a quantization attribute name are distinct defects." }, { "left": "issue:42936", "right": "issue:43668", "accept": false, - "reason": "Mask2Former runtime behavior bug and ModernBERT config typing issue are unrelated." + "reason": "Different models and different bug types: unused ignore_value vs incorrect config type hint." }, { "left": "issue:43240", "right": "issue:43668", "accept": false, - "reason": "Loss kwargs not being forwarded is an execution bug; incorrect norm_eps type hint is a configuration annotation issue." + "reason": "Passing kwargs to loss calculation and a ModernBERT config annotation issue do not share the same code-path bug." }, { "left": "issue:43668", "right": "issue:44031", "accept": false, - "reason": "ModernBERTConfig type hint mismatch and tokenizer regex warning regression are different components and symptoms." + "reason": "Incorrect type hint in a config class is not the same as a tokenizer warning regression." }, { "left": "issue:43668", "right": "issue:43813", "accept": false, - "reason": "Type-hint typo in ModernBERTConfig and a quantization-operation naming typo are distinct, unrelated fixes." + "reason": "These are both small correctness issues, but one is a type-hint typo and the other is a specific attribute-name typo in a different subsystem." 
} ] }, @@ -14963,12 +15012,12 @@ "soft_pairs": [ "issue:41084|issue:43037" ], - "prepared_review_unit_hash": "f4d78dc97b5407eab1e1590a0f09e0d1085dc140440f218726e372fe02bd8564", + "prepared_review_unit_hash": "30394be29ea882871b54591c1ada2d1c55688a6749e24e7428d4a6df5c5a9f8f", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 1480, + "serialized_chars": 1479, "estimated_input_tokens": 370, "estimated_eval_tokens": 996 }, @@ -14976,7 +15025,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 1480, + "serialized_chars": 1479, "estimated_input_tokens": 370, "estimated_eval_tokens": 996 }, @@ -14985,18 +15034,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These two issues do not look like duplicates: one is about block decoding, the other is about adding DeepSeek v3.2 support. They target different features and likely different code paths.", + "summary": "The two issues are not duplicates: one is about block decoding behavior, the other is a feature request for DeepSeek v3.2 support. They do not describe the same underlying problem or change.", "confidence": 0.96, "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "No single canonical issue stands out because the reports are unrelated; if forced, neither is a good representative of the other.", + "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:41084", "right": "issue:43037", "accept": false, - "reason": "Different requests with no clear shared underlying bug or change: block decoding vs model/version support." + "reason": "Titles and likely scope are unrelated; one concerns decoding logic, the other model/version support. No shared code path or concrete change is evident." 
} ] }, @@ -15026,7 +15075,7 @@ "soft_pairs": [ "issue:43598|issue:43600" ], - "prepared_review_unit_hash": "29be77c41d2f9e905ca615515af09c801401d926b6cc4b2a58c5678e89ac985f", + "prepared_review_unit_hash": "084d21f372647b656322b45061660af13d1b7074da130a8bf7908d060292f9f4", "component_budget": { "node_count": 2, "item_count": 2, @@ -15048,18 +15097,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are related by theme (custom trainers in `Trainer`), but they describe different problems: one is about `remove_unused_column` customizability, the other about FLOPS estimation for custom trainers. I would not merge them as duplicates.", - "confidence": 0.91, - "canonical_issue_reason": "No clear canonical duplicate; if one must be chosen as the cluster representative, #43598 is a slightly broader Trainer customizability issue.", + "summary": "These issues are only loosely related by custom-trainer support, but they target different Trainer behaviors: unused-column removal vs FLOPS estimation. They do not look like duplicates.", + "confidence": 0.93, + "canonical_issue_reason": "No single canonical issue for a duplicate cluster here; the two issues describe different code paths and fixes.", "canonical_pr_reason": null, - "best_issue_reason": "#43598 is the better representative because it frames a general Trainer behavior/customizability concern rather than a narrower FLOPS-estimation bug.", + "best_issue_reason": "If one issue must be kept as the most representative of the shared theme, #43598 is slightly broader around Trainer customizability, but it is not a duplicate of #43600.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43598", "right": "issue:43600", "accept": false, - "reason": "Same general area, but different underlying bugs: one concerns `remove_unused_column` behavior, the other FLOPS counting for custom trainers. They would not be fixed by one change." 
+ "reason": "Both mention custom trainers, but one is about remove_unused_column behavior and the other is about FLOPS estimation. Different symptoms, different logic, not the same underlying bug." } ] }, @@ -15089,7 +15138,7 @@ "soft_pairs": [ "issue:43684|issue:44405" ], - "prepared_review_unit_hash": "5ee5a8ba56314a13f59cd9f056d9c9cfe948b5ae25ea05e4767a9da7a4e5b7c2", + "prepared_review_unit_hash": "4669445ee80485228bb5b3538ac449de7ab686347b5276ce0d47503d06dc5516", "component_budget": { "node_count": 2, "item_count": 2, @@ -15111,18 +15160,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "The two issues are about different Qwen model families and different auto-model capabilities: one asks for Qwen3-Omni registration in AutoModel/AutoModelForConditionalGeneration, while the other asks for AutoModelForSequenceClassification support for Qwen3.5. They are related only at a broad subsystem level, not the same underlying bug/change.", - "confidence": 0.95, - "canonical_issue_reason": "If one issue must represent the cluster, 43684 is the broader model-registration request and is more general than the Qwen3.5 sequence-classification support request.", + "summary": "These issues are related only at a very broad AutoModel level, but they request different capabilities for different Qwen variants and different auto classes, so they should not be treated as duplicates.", + "confidence": 0.97, + "canonical_issue_reason": "Issue 43684 is the broader model-registration request, but it is not the same underlying problem as 44405, which asks for sequence-classification support for a different Qwen config.", "canonical_pr_reason": null, - "best_issue_reason": "43684 is the better representative because it targets core auto-model registration, which is broader and more central than a single-task head enablement.", + "best_issue_reason": "43684 is the more general representative of the cluster because it concerns core AutoModel registration 
rather than a narrower task-specific head.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43684", "right": "issue:44405", "accept": false, - "reason": "Different model families (Qwen3-Omni vs Qwen3.5) and different requested capabilities (registration/conditional generation vs sequence classification); not the same bug or change." + "reason": "Different model families/configs and different auto-class targets: Qwen3-Omni registration for AutoModel/AutoModelForConditionalGeneration vs Qwen3.5 sequence-classification support for AutoModelForSequenceClassification." } ] }, @@ -15159,79 +15208,76 @@ "issue:43845|issue:43859", "issue:43846|issue:43859" ], - "prepared_review_unit_hash": "1730361b56ed37366fec8c84f511d6f3bab2e07c14a2eac01b0d4d377c8c82ab", + "prepared_review_unit_hash": "8245c5995f6aa4febfd425f0c4349118b263030b57df21d912164379bc318198", "component_budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 4264, - "estimated_input_tokens": 1066, - "estimated_eval_tokens": 2388 + "serialized_chars": 4267, + "estimated_input_tokens": 1067, + "estimated_eval_tokens": 2390 }, "budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, - "serialized_chars": 4264, - "estimated_input_tokens": 1066, - "estimated_eval_tokens": 2388 + "serialized_chars": 4267, + "estimated_input_tokens": 1067, + "estimated_eval_tokens": 2390 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "The three \"huggingface\" issues are plausibly related and 43845 is the best representative, but the packet only gives soft_similarity for every pair and no deterministic_accept, so the duplicate links should stay tentative. 
\"Fett\" appears separate.", - "confidence": 0.63, - "canonical_issue_reason": "issue:43845 is the best representative of the huggingface subgroup because it is the earliest of the near-identical placeholder-title reports and aligns most closely with issue:43846, but the evidence is still only soft similarity.", + "summary": "These are low-information closed issues with generic titles. The three \"huggingface\" issues are superficially similar, but there\u2019s no body evidence tying them to the same concrete bug, so none of the soft edges should be merged.", + "confidence": 0.61, + "canonical_issue_reason": "issue:43845 is the best cluster representative because it is one of the repeated \"huggingface\" titles and is the earliest of that set, but the signal is weak.", "canonical_pr_reason": null, - "best_issue_reason": "issue:43845 is the strongest cluster representative: it is earliest among the huggingface reports, while issue:43859 is shorter and the only one with discussion. Even so, the available evidence does not justify treating the links as confirmed duplicates.", + "best_issue_reason": "issue:43845 is the most plausible cluster anchor due to title repetition and earliest timestamp among the similar issues, though the underlying problem is not described.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43835", "right": "issue:43845", "accept": false, - "reason": "Different titles and no shared concrete details, filenames, or targets; this does not look like the same underlying issue." + "reason": "Different titles and no description evidence beyond generic similarity." }, { "left": "issue:43835", "right": "issue:43846", "accept": false, - "reason": "Different titles and no shared concrete details, filenames, or targets; this does not look like the same underlying issue." + "reason": "Different titles and no concrete shared bug details." 
}, { "left": "issue:43835", "right": "issue:43859", "accept": false, - "reason": "Different titles and no shared concrete details, filenames, or targets; this does not look like the same underlying issue." + "reason": "No evidence these refer to the same underlying problem." }, { "left": "issue:43845", "right": "issue:43846", "accept": false, - "reason": "Same placeholder title, but the packet provides only soft_similarity and no deterministic_accept or concrete shared targets/files, so this is not strong enough to accept as the same issue." + "reason": "Same title, but bodies are too empty to confirm a shared bug or change." }, { "left": "issue:43845", "right": "issue:43859", "accept": false, - "reason": "The match is only superficial: same placeholder title, but no concrete issue target, filename, or diff evidence, and deterministic_accept is false." + "reason": "Title match is not enough without concrete issue details." }, { "left": "issue:43846", "right": "issue:43859", "accept": false, - "reason": "The match is only superficial: same placeholder title, but no concrete issue target, filename, or diff evidence, and deterministic_accept is false." + "reason": "Title match alone does not justify duplicate merging." } ] }, - "evaluator_result": { - "accept": false, - "feedback": "The cluster summary is mostly grounded, but the soft-edge verdicts are too confident. In the packet, all pairwise links are only soft_similarity, and deterministic_accept is false for every pair. Accepting issue:43845 with issue:43859 and issue:43846 with issue:43859 overstates the evidence; those should be treated more conservatively. The representative choice for the huggingface duplicates is otherwise reasonable, and Fett does look separate." 
- }, - "evaluator_used": true, - "retried": true, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null @@ -15266,7 +15312,7 @@ "issue:44572|issue:44937", "issue:44573|issue:44937" ], - "prepared_review_unit_hash": "55c9e3606b1d35cc759370c42b473c916265c7d1cc6f092823bcac48bcf1a107", + "prepared_review_unit_hash": "ac94fdb4e05974ccac3e512c12417c2fee565f1fd710b09c02d50c1f76a9e5a0", "component_budget": { "node_count": 5, "item_count": 5, @@ -15288,66 +15334,66 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "This cluster is dominated by low-content spam/generic issue stubs, with no actionable bug or change details to justify deduping on substance.", - "confidence": 0.28, - "canonical_issue_reason": "Issue 44418 is the least spam-like representative: it is the only non- title and is still just a generic placeholder, so it\u2019s the best available cluster anchor.", + "summary": "No convincing duplicate cluster here: four items are spam-like placeholders and one is a generic documentation suggestion, so none look like the same underlying issue.", + "confidence": 0.97, + "canonical_issue_reason": null, "canonical_pr_reason": null, - "best_issue_reason": "No issue here contains enough concrete problem description to be a true canonical duplicate target; 44418 is only the least bad representative, not a substantive one.", + "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44371", "right": "issue:44572", "accept": false, - "reason": "Same '' title, but there is no actual bug/change context to show they describe the same underlying issue." + "reason": "Both are spam-like placeholders with no substantive shared bug or change to merge." 
}, { "left": "issue:44371", "right": "issue:44573", "accept": false, - "reason": "Both are spam placeholders with no concrete problem statement; identical titles alone are not enough to merge them as duplicates." + "reason": "Same spam-only pattern; no concrete issue evidence that they describe the same problem." }, { "left": "issue:44371", "right": "issue:44418", "accept": false, - "reason": "Titles are unrelated generic placeholders, and neither issue has enough substance to establish a shared underlying bug." + "reason": "\"\" is unrelated to a documentation enhancement suggestion." }, { "left": "issue:44371", "right": "issue:44937", "accept": false, - "reason": "'' and 'Check out \"Google LLC\"' do not indicate the same bug or change; this looks like unrelated spam." + "reason": "Spam placeholder and a separate company-name themed title; no shared underlying issue." }, { "left": "issue:44418", "right": "issue:44572", "accept": false, - "reason": "Generic documentation-sounding text versus a spam placeholder; no concrete common issue is described." + "reason": "Generic documentation request does not match a spam placeholder." }, { "left": "issue:44418", "right": "issue:44573", "accept": false, - "reason": "No shared actionable content; these are just low-information placeholder issues." + "reason": "Generic documentation request does not match a spam placeholder." }, { "left": "issue:44572", "right": "issue:44573", "accept": false, - "reason": "Identical spam titles, but there is no evidence of a real shared bug/change to deduplicate." + "reason": "Identical spam-like titles are not enough to treat them as the same underlying bug or change." }, { "left": "issue:44572", "right": "issue:44937", "accept": false, - "reason": "A spam placeholder and a promotional-sounding title are not the same underlying issue." + "reason": "No concrete overlap beyond both being low-information titles." 
}, { "left": "issue:44573", "right": "issue:44937", "accept": false, - "reason": "No substantive overlap beyond both looking like spam; not a valid duplicate match." + "reason": "No concrete overlap beyond both being low-information titles." } ] }, @@ -15377,7 +15423,7 @@ "soft_pairs": [ "issue:45145|issue:45151" ], - "prepared_review_unit_hash": "d2bb998fa78245e61231f6e0459f0c4a1fb58531e8791f9f966fbecb99a5ab5e", + "prepared_review_unit_hash": "a87d75b407b4aada8de82e2efd0ea7f9ccf8e033875f6890435a6e205fc1f762", "component_budget": { "node_count": 2, "item_count": 2, @@ -15399,28 +15445,28 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "The pair looks strongly related, but the available evidence is not enough to call them definite duplicates with high confidence. They share the exact title and similar body size, yet we do not see body-content overlap, explicit links, or scope details to prove they are the same underlying issue.", - "confidence": 0.76, - "canonical_issue_reason": "Issue 45145 is the earlier report and has an inbound reference, so it is the best tentative anchor if these are later found to be duplicates.", + "summary": "These two closed issues appear to be duplicates of the same report: they have the same title, very similar body size, and were opened on the same day with no distinguishing links or files.", + "confidence": 0.99, + "canonical_issue_reason": "Issue #45145 is the better canonical because it was created first and already has an inbound reference, so it is the more established duplicate target.", "canonical_pr_reason": null, - "best_issue_reason": "Issue 45145 is the best representative of the pair because it is earlier and already has more visible downstream attention, though the duplicate call is not definitive.", + "best_issue_reason": "Issue #45145 is the best representative of this cluster for the same reason: earliest created and slightly more anchored in the discussion graph.", "best_pr_reason": 
null, "soft_edge_verdicts": [ { "left": "issue:45145", "right": "issue:45151", - "accept": false, - "reason": "The evidence is too thin to confirm the same underlying issue: identical titles and similar body lengths alone do not establish duplicate scope, and there is no explicit cross-linking or content-level proof." + "accept": true, + "reason": "Same title and essentially the same report; no evidence of different underlying bugs or distinct scopes." } ] }, "evaluator_result": { - "accept": false, - "feedback": "The evidence supports strong similarity, but not a high-confidence duplicate conclusion. The packet only shows matching titles, close body lengths, and a soft-similarity signal; it does not establish identical scope or that 45145 is definitively the canonical anchor. The soft-edge verdict is a bit too assertive given the available evidence, and the confidence of 0.98 is overstated." + "accept": true, + "feedback": "The summary is grounded in the packet: the issues share the exact title, have very similar body lengths, were opened the same day, and there are no distinguishing links or files. The canonical-choice rationale is also supported by the earlier creation time and inbound reference on issue 45145. The soft-edge verdict is conservative enough given the available evidence." 
}, "evaluator_used": true, - "retried": true, - "accepted_nontrivial_soft_edge": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, @@ -15497,12 +15543,12 @@ "soft_pairs": [ "issue:45672|issue:45674" ], - "prepared_review_unit_hash": "32fdae562cd9cbc5b133f37e1d8671c7f7ef673a5d70a63e234a12a6cdc8d70f", + "prepared_review_unit_hash": "29dda7dfd42d507eef3d67685c35319bfb1f1f2bf2d3e446fa50455ff63c40ee", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 1668, + "serialized_chars": 1667, "estimated_input_tokens": 417, "estimated_eval_tokens": 1090 }, @@ -15510,7 +15556,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 1668, + "serialized_chars": 1667, "estimated_input_tokens": 417, "estimated_eval_tokens": 1090 }, @@ -15519,18 +15565,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are related to 4-bit inference/bitsandbytes behavior, but they describe different failure modes in different code paths, so they should not be clustered as duplicates.", - "confidence": 0.94, - "canonical_issue_reason": null, + "summary": "Two open issues in the same 4-bit inference area, but they describe different failures: one is a Gemma4/audio-module torch.finfo() TypeError on uint8 weights, the other is a BitsAndBytesConfig llm_int8_skip_modules regression that removes the default lm_head exclusion and triggers an AssertionError. 
They do not look like the same underlying bug.", + "confidence": 0.93, + "canonical_issue_reason": "issue:45674 is the better representative because it is broader, has more discussion/activity, and describes a clearer configuration regression in 4-bit inference.", "canonical_pr_reason": null, - "best_issue_reason": null, + "best_issue_reason": "issue:45674 is the strongest cluster member by activity and scope; issue:45672 is a separate model/module-specific crash rather than the same defect.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:45672", "right": "issue:45674", "accept": false, - "reason": "Different underlying bugs: one is a Gemma4 audio-module uint8/torch.finfo TypeError during NF4 inference, the other is a BitsAndBytesConfig skip-modules regression that removes the default lm_head exclusion and triggers an AssertionError. Same broad area, but not the same fix." + "reason": "Reject: the symptoms and root causes differ. One is a torch.finfo() TypeError on uint8 weights in Gemma4 audio modules; the other is a BitsAndBytesConfig exclusion bug causing an AssertionError when llm_int8_skip_modules is set." 
} ] }, @@ -15587,12 +15633,12 @@ "pull_request:43823|pull_request:45213", "pull_request:43251|pull_request:43254" ], - "prepared_review_unit_hash": "0e21cce5ec3a1d2e2c4d1312b93711d4fa9328fab7211a2910a281bacdf88f06", + "prepared_review_unit_hash": "f9441d5883b454267bdbf39d5351877f4e96c1a6754ca6b075426ff33747f68d", "component_budget": { "node_count": 12, "item_count": 12, "soft_pair_count": 18, - "serialized_chars": 32874, + "serialized_chars": 32875, "estimated_input_tokens": 8219, "estimated_eval_tokens": 16694 }, @@ -15600,7 +15646,7 @@ "node_count": 12, "item_count": 12, "soft_pair_count": 18, - "serialized_chars": 32874, + "serialized_chars": 32875, "estimated_input_tokens": 8219, "estimated_eval_tokens": 16694 }, @@ -15609,126 +15655,144 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "This cluster is mostly a grab bag of unrelated PRs; the only true duplicate is the two fixed_cross_entropy kwargs patches (43251 and 43254).", - "confidence": 0.95, + "summary": "Only one soft duplicate pair is strong enough to merge: the two `loss_utils.py` PRs that both extend `fixed_cross_entropy` with extra `cross_entropy` kwargs. 
The rest are broad thematic similarities (new models, processor changes, tests, or unrelated refactors) and should stay separate.", + "confidence": 0.9, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 43251 is the best canonical representative for the only real duplicate pair: it has the explicit issue link, targets the same loss_utils bug, and cleanly extends fixed_cross_entropy with weight/label_smoothing handling.", + "canonical_pr_reason": "PR 43251 is the best canonical representative: it explicitly targets issue 43240 and implements the scoped fix in `fixed_cross_entropy` by forwarding `weight` and `label_smoothing` to `nn.functional.cross_entropy`.", "best_issue_reason": null, - "best_pr_reason": "43251 is the strongest standalone PR in the set because it fixes a concrete bug in a single code path and is clearly tied to the tracked issue; 43254 is a near-duplicate alternative.", + "best_pr_reason": "PR 43251 is the strongest standalone candidate in this cluster because it is tightly focused, issue-linked, and clearly describes the intended behavior change.", "soft_edge_verdicts": [ { "left": "pull_request:39895", "right": "pull_request:43424", "accept": false, - "reason": "VideoPrism model addition vs an executorch dynamic-shapes test; unrelated changes." + "reason": "Both are unrelated feature/test PRs; they do not touch the same code path or implement the same change." }, { "left": "pull_request:41855", "right": "pull_request:43823", "accept": false, - "reason": "Mistral tokenizer helper methods vs adding MobileLLM; different subsystems and goals." + "reason": "Tokenizer bugfix vs. adding a new model; different scopes and no shared underlying change." }, { "left": "pull_request:41855", "right": "pull_request:43424", "accept": false, - "reason": "Tokenizer method additions are unrelated to executorch export testing." + "reason": "Missing tokenizer methods is unrelated to executorch dynamic-shape export tests." 
}, { "left": "pull_request:42668", "right": "pull_request:43823", "accept": false, - "reason": "Processor robustness changes vs adding a new model; not the same underlying change." + "reason": "Processor robustness work and adding MobileLLM are different PRs in different areas." }, { "left": "pull_request:43251", "right": "pull_request:43823", "accept": false, - "reason": "Loss-function kwargs fix vs MobileLLM model implementation; unrelated." + "reason": "Loss-function kwargs fix and new model addition are unrelated." }, { "left": "pull_request:43251", "right": "pull_request:44827", "accept": false, - "reason": "Both touch code quality in different contexts, but one fixes cross-entropy kwargs while the other addresses Mistral4 test/runtime issues; not the same bug." + "reason": "The Mistral4 work is model/test/integration related; it is not the same change as forwarding kwargs in `fixed_cross_entropy`." }, { "left": "pull_request:43424", "right": "pull_request:44827", "accept": false, - "reason": "Executorch dynamic-shape export test vs Mistral4 test fixes; different concerns." + "reason": "Executorch export tests and Mistral4 fixes are unrelated code paths." }, { "left": "pull_request:43424", "right": "pull_request:43823", "accept": false, - "reason": "Executorch exportability test vs MobileLLM model addition; unrelated." + "reason": "New model support and dynamic export testing are not the same underlying change." }, { "left": "pull_request:43823", "right": "pull_request:45655", "accept": false, - "reason": "New MobileLLM model support vs config_class resolution ordering fix; different code paths." + "reason": "A new model PR and a `config` resolution ordering fix are unrelated." }, { "left": "pull_request:43823", "right": "pull_request:43995", "accept": false, - "reason": "Model addition vs Falcon refactor; not the same underlying change." + "reason": "New MobileLLM model work does not match Falcon output-interface refactoring." 
}, { - "left": "pull_request:43995", + "left": "pull_request:43823", + "right": "pull_request:44827", + "accept": false, + "reason": "These are different model additions/fixes with no shared concrete bug." + }, + { + "left": "pull_request:43823", "right": "pull_request:45213", "accept": false, - "reason": "Falcon output-interface refactor vs a model-creation skill/documentation PR; unrelated." + "reason": "A model addition and a model-creation skill draft are not the same mergeable code change." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45655", + "accept": false, + "reason": "Model addition vs. class annotation/config resolution fix are unrelated." }, { "left": "pull_request:43995", "right": "pull_request:44827", "accept": false, - "reason": "Falcon refactor vs Mistral4 tests; different models and different problems." + "reason": "Falcon output-collection refactoring and Mistral4 test fixes are different issues." + }, + { + "left": "pull_request:43995", + "right": "pull_request:45213", + "accept": false, + "reason": "Model refactoring and the model-creation skill draft are unrelated." }, { "left": "pull_request:44827", "right": "pull_request:45170", "accept": false, - "reason": "Mistral4 test fixes vs a CLIP-like layrnorm typo correction; unrelated." + "reason": "Mistral4 test/integration fixes and `layrnorm` renaming are separate model-family changes." }, { "left": "pull_request:44827", "right": "pull_request:45655", "accept": false, - "reason": "Mistral4 tests vs config resolution fix; no shared bug or change." + "reason": "Mistral4 fixes and global `config` resolution ordering are different problems." }, { "left": "pull_request:43823", "right": "pull_request:44827", "accept": false, - "reason": "MobileLLM model addition vs Mistral4 tests; unrelated." + "reason": "They touch different models and do not represent one concrete bug or patch." 
}, { "left": "pull_request:44827", "right": "pull_request:45213", "accept": false, - "reason": "Mistral4 tests vs model-creation skill/docs; unrelated." + "reason": "Mistral4 fixes and the model-creation skill draft are unrelated." }, { "left": "pull_request:43823", "right": "pull_request:45213", "accept": false, - "reason": "MobileLLM model implementation vs a model-creation skill draft; different artifact types and purposes." + "reason": "A model implementation PR and a workflow/skill draft are not duplicates." }, { "left": "pull_request:43251", "right": "pull_request:43254", "accept": true, - "reason": "Both patches modify the same fixed_cross_entropy function in the same file to pass supported cross-entropy kwargs (weight/label_smoothing); they are the same underlying fix and could be merged into one PR." + "reason": "Both PRs fix the same `fixed_cross_entropy` code path by adding supported kwargs to `nn.functional.cross_entropy`; they are plausibly mergeable into one PR." } ] }, "evaluator_result": { "accept": true, - "feedback": "The summary is grounded in the packet, and the only accepted pair is well supported by the diffs: 43251 and 43254 are near-identical fixes to the same function in the same file. The other soft-edge rejections are conservative and do not overstate evidence. Minor note: phrases like \"only true duplicate\" are a bit strong, but not enough to invalidate the review." + "feedback": "The analysis is grounded in the packet and stays conservative. The only accepted soft pair, PR 43251 and PR 43254, is clearly the same `loss_utils.py` fix for `fixed_cross_entropy` and is plausibly mergeable into one PR. The remaining pairs are appropriately treated as unrelated thematic similarities rather than duplicates." 
}, "evaluator_used": true, "retried": false, @@ -15755,7 +15819,7 @@ "soft_pairs": [ "pull_request:40546|pull_request:42781" ], - "prepared_review_unit_hash": "caa0e7bef8faea9dac1b85be81a6d1783b3bc60403bd297258c98d620359889c", + "prepared_review_unit_hash": "311a7069f5fcab50d3073c27aaa643342b9c0c1bfc1b6e1dbcb944e8ed40b542", "component_budget": { "node_count": 2, "item_count": 2, @@ -15777,18 +15841,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are related VibeVoice implementation PRs, but they are not duplicates: the first adds the base VibeVoice model, while the second adds VibeVoice Realtime and an acoustic tokenizer with extra generation/model changes. They overlap in shared package/auto-registration files, but they address different deliverables and code paths.", - "confidence": 0.9, + "summary": "Both PRs work on VibeVoice, but they are not the same change: #40546 is the core VibeVoice implementation, while #42781 adds VibeVoice Realtime plus acoustic-tokenizer and generation updates. 
The file overlap is mostly shared model scaffolding, not a single concrete bugfix or mergeable duplicate PR.", + "confidence": 0.91, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 40546 is the cleaner canonical PR for the core VibeVoice model implementation; it is the earlier, broader base integration and has substantial review activity.", + "canonical_pr_reason": "#40546 is the stronger canonical PR because it is the main VibeVoice implementation, has substantial review activity, and is not a draft.", "best_issue_reason": null, - "best_pr_reason": "PR 40546 is the best representative of the underlying VibeVoice model work because it introduces the foundational model/docs/auto mappings, whereas PR 42781 is a later extension focused on realtime/tokenizer support.", + "best_pr_reason": "#40546 is the best overall PR for this cluster because it is the foundational, more mature VibeVoice submission; #42781 is a draft extension focused on realtime support rather than the same patch.", "soft_edge_verdicts": [ { "left": "pull_request:40546", "right": "pull_request:42781", "accept": false, - "reason": "Reject: despite heavy filename overlap, 42781 is a separate realtime/tokenizer extension with additional model and generation changes, not the same concrete bug fix or mergeable duplicate of 40546." + "reason": "Reject: they share the same model family and some common files, but #42781 adds realtime-specific functionality and acoustic-tokenizer work, so it is not the same underlying change as #40546." 
} ] }, @@ -15818,7 +15882,7 @@ "soft_pairs": [ "pull_request:40755|pull_request:41224" ], - "prepared_review_unit_hash": "38f21745407f14b5136192ef8c8a8cd5bc260286e9bdc61f93701f765d589fd9", + "prepared_review_unit_hash": "68cabd18897882362474ecaa9f87a52d53472d0fdbed8f9215a7efe7d3eecc30", "component_budget": { "node_count": 2, "item_count": 2, @@ -15840,18 +15904,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "The two PRs are unrelated: one adds TimesFM covariate forecasting support, the other adds DINOv3 image classification support. They do not fix the same code-path problem and should not be deduplicated.", - "confidence": 0.98, + "summary": "The cluster contains two unrelated pull requests: one adds TimesFM covariate forecasting, the other adds DINOv3 image classification support. They target different models and different code paths, so this is not a duplicate cluster.", + "confidence": 0.99, "canonical_issue_reason": null, - "canonical_pr_reason": null, + "canonical_pr_reason": "No canonical PR: the PRs address different models and implement unrelated features in separate code paths.", "best_issue_reason": null, - "best_pr_reason": null, + "best_pr_reason": "Neither PR is a good global representative for duplicate triage because they do not share an underlying bug or change.", "soft_edge_verdicts": [ { "left": "pull_request:40755", "right": "pull_request:41224", "accept": false, - "reason": "Different models and changes: TimesFM covariate forecasting vs DINOv3 image classification. No shared bug or mergeable single fix." + "reason": "Reject: these PRs are about different model families and unrelated features, not the same concrete fix or change." 
} ] }, @@ -15944,7 +16008,7 @@ "soft_pairs": [ "pull_request:41797|pull_request:45186" ], - "prepared_review_unit_hash": "6bc8242cf798dd2a779ef0d830fa98ea2a72e60f3d2b6ab3edfd21b9334fe638", + "prepared_review_unit_hash": "7d2d6f5f03e6dfee5016ae50ab821407ce59d5d70df41600e9d93598f6058ee4", "component_budget": { "node_count": 2, "item_count": 2, @@ -15966,18 +16030,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Both items are model-addition PRs that touch shared registration/docs infrastructure, but they introduce different models (DeepSeekOCR vs Isaac) with different implementations and tests. They are not the same underlying change, so the soft duplicate edge should be rejected.", + "summary": "Both PRs are unrelated model-addition changes that happen to share the Transformers model-registration scaffolding. They add different models (DeepSeekOCR vs Isaac) with different codepaths, tests, and docs, so they should not be treated as duplicates or merged together.", "confidence": 0.98, "canonical_issue_reason": null, - "canonical_pr_reason": null, + "canonical_pr_reason": "PR 41797 is the better representative only in the sense that it has much more review activity and appears more mature, but it is not a duplicate of PR 45186.", "best_issue_reason": null, - "best_pr_reason": null, + "best_pr_reason": "PR 41797 is the strongest standalone candidate because it has broader discussion/review activity and a larger, more established patch, but it still addresses a different model than PR 45186.", "soft_edge_verdicts": [ { "left": "pull_request:41797", "right": "pull_request:45186", "accept": false, - "reason": "Different model integrations with distinct model directories, configs, and test suites; shared auto/docs plumbing is not enough to treat them as the same PR or merge into one change." + "reason": "Reject: these are separate model integration PRs for different architectures (DeepSeekOCR vs Isaac). 
The overlapping filenames are expected boilerplate for adding new models, not evidence of the same underlying bug or change, and they would not plausibly be merged into one PR." } ] }, @@ -16007,7 +16071,7 @@ "soft_pairs": [ "pull_request:42975|pull_request:42976" ], - "prepared_review_unit_hash": "1b9bf525443a07db81b3c6525437715628283fbb00744db00aee32e1d0cf61e5", + "prepared_review_unit_hash": "b0576f1a30906b4cd8b3c91bf1e474fb7061594dd793fc2268d1963c6bf11f35", "component_budget": { "node_count": 2, "item_count": 2, @@ -16029,18 +16093,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Two separate maintenance PRs touching overlapping GitHub Actions workflows, but they update different actions and target different upgrade goals. They should not be treated as duplicates.", - "confidence": 0.88, + "summary": "Two workflow-maintenance PRs overlap in GitHub Actions files, but they address different upgrade tracks: one for Node 24 compatibility across many actions, the other for latest-version bumps centered on docker/build-push-action. 
They are related, not duplicates.", + "confidence": 0.91, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 42975 is the better canonical representative: it is broader, explicitly about Node 24 compatibility, and updates several core workflow actions across the repo.", + "canonical_pr_reason": "PR 42975 is the broader, more representative workflow-upgrade change and its title matches the main compatibility migration theme across many Actions updates.", "best_issue_reason": null, - "best_pr_reason": "PR 42975 is the most representative item in the cluster because it covers the larger workflow-action upgrade effort; PR 42976 is a narrower docker-action version bump.", + "best_pr_reason": "PR 42975 is the best representative of the cluster because it covers the wider set of workflow action compatibility upgrades rather than a narrower version-bump subset.", "soft_edge_verdicts": [ { "left": "pull_request:42975", "right": "pull_request:42976", "accept": false, - "reason": "Overlap in workflow filenames is not enough: 42975 upgrades checkout/cache/upload-artifact for Node 24 compatibility, while 42976 only bumps docker/build-push-action. Different actions, different concrete change, so not the same underlying PR." + "reason": "Shared workflow files are not enough here: 42975 is a Node 24 compatibility sweep, while 42976 mainly bumps docker/build-push-action and a few related actions to latest versions. Different concrete change sets, so they should not be merged as one PR." 
} ] }, @@ -16084,94 +16148,94 @@ "pull_request:45490|pull_request:45630", "pull_request:45626|pull_request:45630" ], - "prepared_review_unit_hash": "014709e08748e947ca9b15ee3418d60cec68e8754e980c3c69045833c036ad3a", + "prepared_review_unit_hash": "d88b994a84a0c92e362c0389ab91a50c99476b591432faa3818b15d4fe94310c", "component_budget": { "node_count": 7, "item_count": 7, "soft_pair_count": 10, - "serialized_chars": 23910, - "estimated_input_tokens": 5978, - "estimated_eval_tokens": 12212 + "serialized_chars": 23983, + "estimated_input_tokens": 5996, + "estimated_eval_tokens": 12248 }, "budget": { "node_count": 7, "item_count": 7, "soft_pair_count": 10, - "serialized_chars": 23910, - "estimated_input_tokens": 5978, - "estimated_eval_tokens": 12212 + "serialized_chars": 23983, + "estimated_input_tokens": 5996, + "estimated_eval_tokens": 12248 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "This cluster is a loose collection of unrelated pull requests: several separate model additions (ViT NEPA, Molmo, Molmo2, CTSM, PP-FormulaNet, Kimi2-6) plus one distributed loading feature PR. They share repo areas like docs and auto mappings, but not the same underlying change.", - "confidence": 0.98, + "summary": "All candidate soft edges are false positives. The PRs target different underlying changes: separate model additions (ViT NEPA, Molmo, Molmo2, CTSM, PP-FormulaNet, Kimi2-6) plus one distributed pipeline-parallel infrastructure PR. 
Shared docs/auto-registration files are expected for model integration and do not indicate duplication.", + "confidence": 0.97, "canonical_issue_reason": null, - "canonical_pr_reason": null, + "canonical_pr_reason": "No single canonical PR: these are distinct model-addition or infrastructure changes, not duplicate implementations of the same bug or feature.", "best_issue_reason": null, - "best_pr_reason": null, + "best_pr_reason": "There is no true duplicate cluster to choose from. If forced to name a representative, PR 45626 is the most complete standalone model-support change, but it is still unrelated to the others.", "soft_edge_verdicts": [ { "left": "pull_request:42978", "right": "pull_request:43448", "accept": false, - "reason": "Different model additions: ViT NEPA vs Molmo. Shared docs/auto-registration changes are generic scaffolding, not the same bug or feature." + "reason": "Different model adds: ViT NEPA vs Molmo. Shared docs/auto files are routine integration overlap, not the same change." }, { "left": "pull_request:42978", "right": "pull_request:43451", "accept": false, - "reason": "Different model additions: ViT NEPA vs Molmo2. They are separate model integrations, not one mergeable change." + "reason": "Different model adds: ViT NEPA vs Molmo2. No shared bug or concrete code path." }, { "left": "pull_request:42978", "right": "pull_request:45490", "accept": false, - "reason": "Different model additions: ViT NEPA vs CTSM. No shared concrete code-path problem." + "reason": "Different models: ViT NEPA vs CTSM. Similar registration/docs work is not duplicate evidence." }, { "left": "pull_request:43448", "right": "pull_request:43451", "accept": false, - "reason": "Molmo and Molmo2 are related by name, but they are distinct model implementations with different files, APIs, and tests; not the same underlying PR." + "reason": "Molmo and Molmo2 are separate models with distinct implementations; not mergeable as one duplicate PR." 
}, { "left": "pull_request:43448", "right": "pull_request:45490", "accept": false, - "reason": "Molmo vs CTSM are unrelated model integrations with different architectures and code paths." + "reason": "Molmo vs CTSM are unrelated model additions despite both touching auto/docs plumbing." }, { "left": "pull_request:43451", "right": "pull_request:45490", "accept": false, - "reason": "Molmo2 vs CTSM are separate model additions; shared registry/docs edits are not enough to merge them." + "reason": "Molmo2 and CTSM are different model integrations, not the same underlying feature." }, { "left": "pull_request:44601", "right": "pull_request:45626", "accept": false, - "reason": "One is native pipeline-parallel loading support, the other is PP-FormulaNet model support. Different feature areas and code paths." + "reason": "One is core pipeline-parallel loading infrastructure; the other adds PP-FormulaNet model support. Different code paths and goals." }, { "left": "pull_request:45490", "right": "pull_request:45626", "accept": false, - "reason": "CTSM and PP-FormulaNet are different model additions; same broad subsystem, but not the same change." + "reason": "CTSM model support and PP-FormulaNet model support are distinct additions, not one concrete bug fix." }, { "left": "pull_request:45490", "right": "pull_request:45630", "accept": false, - "reason": "CTSM and Kimi2-6 are separate model integrations with distinct model files and tests." + "reason": "CTSM vs Kimi2-6 are separate model PRs; overlap is only in generic model registration/docs files." }, { "left": "pull_request:45626", "right": "pull_request:45630", "accept": false, - "reason": "PP-FormulaNet and Kimi2-6 are different model additions; similar documentation/auto-registration edits do not indicate duplicate content." + "reason": "PP-FormulaNet and Kimi2-6 are different model support PRs with no shared underlying change." 
} ] }, @@ -16204,52 +16268,52 @@ "pull_request:43291|pull_request:44815", "pull_request:43665|pull_request:44815" ], - "prepared_review_unit_hash": "de3d173608954f2699c6d6a6040b302db186163d732f2f12b4c7f2a7d24aebfb", + "prepared_review_unit_hash": "f3112d35e81b028bd9c17fc08841dda22d22618b79d8553240d81e6c0700b526", "component_budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 3, - "serialized_chars": 7092, - "estimated_input_tokens": 1773, - "estimated_eval_tokens": 3802 + "serialized_chars": 7093, + "estimated_input_tokens": 1774, + "estimated_eval_tokens": 3804 }, "budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 3, - "serialized_chars": 7092, - "estimated_input_tokens": 1773, - "estimated_eval_tokens": 3802 + "serialized_chars": 7093, + "estimated_input_tokens": 1774, + "estimated_eval_tokens": 3804 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These three PRs are not duplicates of the same change. PR 44815 is a substantive FP8/dequantization fix, PR 43291 is a Whisper tokenizer/test fix, and PR 43665 is a test-only placeholder change across CLIP/ViT. They share only broad similarity as small repo maintenance PRs.", - "confidence": 0.96, + "summary": "The three PRs are not duplicates of one another: two are test-only/whitespace-style changes around unrelated models, while one is a substantive FP8 dequant/loading fix. 
No soft pair should be merged.", + "confidence": 0.92, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 44815 is the clearest canonical PR because it contains a concrete, coherent code-path fix in model loading and FP8 integration, with meaningful implementation changes and related tests.", + "canonical_pr_reason": "PR #44815 is the strongest canonical candidate because it contains the only clearly substantive code-path fix, spanning model loading and FP8 dequantization logic, rather than isolated test edits.", "best_issue_reason": null, - "best_pr_reason": "PR 44815 is the best representative of the cluster because it is the most substantive and self-contained bug fix; the others are either unrelated test changes or a separate Whisper-specific tokenizer/test issue.", + "best_pr_reason": "PR #44815 is the best standalone PR in the cluster: it addresses a concrete functional bug in dequant/loading behavior and has broader, coherent code changes compared with the other test-focused PRs.", "soft_edge_verdicts": [ { "left": "pull_request:43291", "right": "pull_request:43665", "accept": false, - "reason": "Different targets and different problems: Whisper tokenizer/test behavior versus CLIP/ViT test assertions. No shared code path or mergeable unified fix." + "reason": "Both are test-oriented and touch different model areas (Whisper vs CLIP/ViT); they do not appear to fix the same underlying bug or change." }, { "left": "pull_request:43291", "right": "pull_request:44815", "accept": false, - "reason": "Whisper decoding/tests are unrelated to FP8 dequant/model-loading logic. They do not appear to fix the same underlying bug or change." + "reason": "Whisper tokenizer/test changes are unrelated to the FP8 dequant/model-loading fix; no shared concrete bug path." }, { "left": "pull_request:43665", "right": "pull_request:44815", "accept": false, - "reason": "One is test-only changes for CLIP/ViT; the other is a concrete FP8 dequantization fix. 
Same broad repository area, but not the same bug or change." + "reason": "The PRs target unrelated subsystems and concerns; one is test-only, the other is a functional dequantization fix, so they should not be merged as duplicates." } ] }, @@ -16282,7 +16346,7 @@ "pull_request:43488|pull_request:45476", "pull_request:43532|pull_request:45476" ], - "prepared_review_unit_hash": "4f2afa3ba9085205d6993f828b6b32c56156c501a0409469f2c60b65a339e59a", + "prepared_review_unit_hash": "9d912f16c57b415bd858a46bfde2461fa451986bb3978c8e1cc2e9b4309aa0b4", "component_budget": { "node_count": 3, "item_count": 3, @@ -16304,30 +16368,30 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are three unrelated synthetic '[don't merge]' PRs; the apparent overlap is incidental, mostly a shared vit test filename, not the same underlying bug or change.", - "confidence": 0.92, + "summary": "These are three unrelated open PRs with synthetic/test CI changes and incidental overlap in a Vit test file; they do not look like duplicate fixes or mergeable into one change.", + "confidence": 0.94, "canonical_issue_reason": null, - "canonical_pr_reason": "No true canonical duplicate exists here. 
PR #43532 is the most central by shared filenames and discussion activity, but it still changes unrelated workflow/test scaffolding rather than the same concrete fix as the others.", + "canonical_pr_reason": "PR #43532 is the best representative because it has the clearest, most substantive CI/workflow change set and is less clearly artificial than the tiny setup/version tweak in #43488 or the mixed workflow/code edits in #45476.", "best_issue_reason": null, - "best_pr_reason": "Not a good duplicate representative overall; if one must be picked, #43532 is the least-bad cluster center, but it is not actually a duplicate of the others.", + "best_pr_reason": "#43532 is the strongest canonical PR candidate for the cluster since it reflects a coherent workflow-oriented change, whereas the other PRs are narrower or mix unrelated edits.", "soft_edge_verdicts": [ { "left": "pull_request:43488", "right": "pull_request:43532", "accept": false, - "reason": "Different changes: setup.py/dependency/version tweaks versus workflow/testing utility edits. Shared vit test file is incidental, not the same bug or fix." + "reason": "One is a setup/version/dependency tweak; the other rewrites CI comment/scheduled workflow behavior. They do not fix the same underlying bug or change the same code path." }, { "left": "pull_request:43488", "right": "pull_request:45476", "accept": false, - "reason": "Unrelated code paths: packaging/version/deps changes versus CI workflow plus CLIP/tokenization and Chinese-CLIP test edits. Not mergeable as one fix." + "reason": "#43488 is a packaging/version test PR, while #45476 adds a PR CI workflow and touches tokenizer/test files. The overlap is superficial, not the same change." }, { "left": "pull_request:43532", "right": "pull_request:45476", "accept": false, - "reason": "Both touch tests/workflows, but they address different areas and no single concrete problem is shared. The overlap is too broad and incidental to treat as one PR." 
+ "reason": "Both touch CI/test-related files, but the concrete changes differ: #43532 changes comment/scheduled workflows and notification/testing utilities, while #45476 adds a PR CI caller and unrelated tokenizer/test edits. Not a single duplicate fix." } ] }, @@ -16357,7 +16421,7 @@ "soft_pairs": [ "pull_request:43612|pull_request:43613" ], - "prepared_review_unit_hash": "4dfe0f50a9782a3941dd045116d4b6435a5268d773c763f37e811d89deb67dcb", + "prepared_review_unit_hash": "128b1f59a03cda1aafe9a5590fbcd5156207406405d6f5ec53023c26093ae84a", "component_budget": { "node_count": 2, "item_count": 2, @@ -16379,8 +16443,8 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are two different PRs for two different segmentation pipelines: one for Promptable Concept Segmentation and one for Promptable Visual Segmentation. They share scaffolding and docs/pipeline infrastructure, but they target different models and code paths, so they should not be deduplicated.", - "confidence": 0.98, + "summary": "These are two separate PRs for different segmentation pipelines: one adds Promptable Concept Segmentation, the other adds Promptable Visual Segmentation. They share scaffolding/docs patterns, but the concrete pipeline/model/test changes are distinct, so they should not be deduplicated.", + "confidence": 0.96, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, @@ -16390,7 +16454,7 @@ "left": "pull_request:43612", "right": "pull_request:43613", "accept": false, - "reason": "Different underlying features: PR 43612 adds Promptable Concept Segmentation around SAM3, while PR 43613 adds Promptable Visual Segmentation across EdgeTAM/SAM/SAM2/SAM3-tracker. Similar framework changes do not make them the same fix/change." + "reason": "Different features and code paths: concept segmentation vs visual segmentation, with different pipeline modules, model docs, and tests. 
Shared docs/registry updates are generic scaffolding, not evidence of the same underlying change." } ] }, @@ -16420,12 +16484,12 @@ "soft_pairs": [ "pull_request:43651|pull_request:43663" ], - "prepared_review_unit_hash": "8d57bb2670f7c55b9ffc5336198ed8d163eb53b1071f9559bf4ca70fc329eb7a", + "prepared_review_unit_hash": "9960af740ca7aeae75a7dc025a3d6f3cada739dd5395791417ba764d0c5dc59f", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 4292, + "serialized_chars": 4291, "estimated_input_tokens": 1073, "estimated_eval_tokens": 2402 }, @@ -16433,7 +16497,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 4292, + "serialized_chars": 4291, "estimated_input_tokens": 1073, "estimated_eval_tokens": 2402 }, @@ -16442,18 +16506,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Two open PRs in `trainer.py` touch custom-trainer extensibility, but they change different code paths: one adds a loss-scaling hook for gradient accumulation, the other extracts signature-column filtering into an overrideable helper. They are related, not duplicates.", + "summary": "Two independent Trainer PRs touch the same file but address different code paths: gradient-accumulation loss scaling vs. signature/column filtering. 
They are not duplicates and should not be merged together.", "confidence": 0.96, "canonical_issue_reason": null, - "canonical_pr_reason": "PR #43651 is the stronger canonical representative because it introduces a concrete behavioral hook in the training loss path and changes the actual scaling logic, making it the more substantive and user-facing change.", + "canonical_pr_reason": "PR #43651 is the more concrete bugfix, directly changing training_step loss normalization and adding the helper that gates GA scaling behavior.", "best_issue_reason": null, - "best_pr_reason": "PR #43651 is the best single representative of the cluster because it targets a specific training bug/behavior with a clear functional outcome, whereas PR #43663 is mainly an internal refactor to expose override points for column filtering.", + "best_pr_reason": "PR #43651 is the best representative of the cluster because it has the clearest standalone behavior change and a tight, testable fix scope.", "soft_edge_verdicts": [ { "left": "pull_request:43651", "right": "pull_request:43663", "accept": false, - "reason": "Both PRs edit `trainer.py` and improve custom trainer extensibility, but they solve different problems: #43651 changes gradient-accumulation loss scaling, while #43663 changes signature-column filtering. They do not appear to be the same underlying bug or mergeable into one PR." + "reason": "Different underlying changes: #43651 alters gradient accumulation loss scaling in training_step, while #43663 refactors signature-column filtering for model inputs. Shared file alone is not enough for duplicate status." 
} ] }, @@ -16485,7 +16549,7 @@ "pull_request:43656|pull_request:43842", "pull_request:43836|pull_request:43842" ], - "prepared_review_unit_hash": "7fcee83d8ac3d649e08cd8f329e5a97b91decb58f81b8d25e699463944f13649", + "prepared_review_unit_hash": "3227628139088fb21b84d39e92233a5c46c25618ba27d2b45103a70f11274779", "component_budget": { "node_count": 3, "item_count": 3, @@ -16507,33 +16571,33 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Three open PRs all touch `src/transformers/cli/serve.py` and two of them target the same tracked issue. The strongest duplicate pair is the two issue-43824 fixes; the 43656 PR looks related by topic but is not clearly the same concrete change.", - "confidence": 0.77, + "summary": "PR #43842 remains the best representative: it is the most focused fix for the `TypeAdapter`/annotation runtime failure in `src/transformers/cli/serve.py`. PR #43836 is a closely related duplicate on the same issue target. PR #43656 touches the same file but addresses a different typing/future-annotations path and is not clearly the same bugfix.", + "confidence": 0.91, "canonical_issue_reason": null, - "canonical_pr_reason": "PR #43842 is the best canonical representative: it directly addresses the TypeAdapter import/runtime NameError with a narrow, explicit fix and a clear issue link. 
It is more focused than #43836 and easier to treat as the primary duplicate target.", + "canonical_pr_reason": "PR #43842 is the most narrowly targeted fix for the concrete CLI `TypeAdapter` NameError/annotation-evaluation problem, with the clearest issue framing and smallest change set.", "best_issue_reason": null, - "best_pr_reason": "PR #43842 best matches the underlying bug report and contains the most precise code-path fix (guarding the `TypeAdapter` import with `TYPE_CHECKING` and stringizing the annotation).", + "best_pr_reason": "PR #43842 is the best representative because it directly fixes the specific `TypeAdapter` runtime annotation failure and is the most precise, low-collateral change.", "soft_edge_verdicts": [ { "left": "pull_request:43656", "right": "pull_request:43842", "accept": false, - "reason": "Both mention TypeAdapter NameError in the CLI, but they target different issues and the diffs suggest different fixes; not enough evidence that they are the same concrete change." + "reason": "They share `src/transformers/cli/serve.py`, but the diffs point to different annotation issues: #43656 is about `TimedModel` typing/future-annotations behavior, while #43842 is specifically about `TypeAdapter` under `TYPE_CHECKING`. Shared file and similarity alone are not enough to treat them as the same fix." }, { "left": "pull_request:43836", "right": "pull_request:43842", "accept": true, - "reason": "Same file, same issue target, and both are alternative fixes for the same import-time `TypeAdapter` NameError in `serve.py`; they are plausibly mergeable into one PR." + "reason": "Both PRs target the same explicit issue (`43824`) and the same `TypeAdapter` annotation/name-resolution problem in `src/transformers/cli/serve.py`, so they look like the same underlying bugfix." 
} ] }, "evaluator_result": { - "accept": true, - "feedback": "The summary is grounded in the packet: all three PRs touch `src/transformers/cli/serve.py`, 43836 and 43842 both target issue 43824, and 43656 targets a different issue (43576). The conservative non-match verdict for 43656 vs 43842 is justified. The 43836 vs 43842 pairing is also reasonable as a mergeable same-bugfix candidate, since both address the same TypeAdapter NameError in the same file and the diffs look compatible rather than contradictory." + "accept": false, + "feedback": "The summary is mostly grounded, but the soft-edge verdict for `pull_request:43656` vs `pull_request:43842` is too strong. The packet only shows shared filename and a similarity signal; it does not show a shared issue target or a clearly identical bugfix. The two diffs also touch different annotation paths: #43656 changes `TimedModel` typing/future-annotations behavior, while #43842 narrows in on `TypeAdapter` under `TYPE_CHECKING`. That is not enough evidence to confidently say they are the same underlying fix or obviously mergeable into one PR. The `43836` vs `43842` acceptance is better supported because both target the same issue and the same `TypeAdapter` annotation problem." 
}, "evaluator_used": true, - "retried": false, + "retried": true, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null @@ -16625,7 +16689,6 @@ "pull_request:44017", "pull_request:44018", "pull_request:44019", - "pull_request:44024", "pull_request:44044", "pull_request:44056", "pull_request:44059", @@ -16691,7 +16754,7 @@ "pull_request:44056|pull_request:44101", "pull_request:44073|pull_request:44161", "pull_request:44101|pull_request:44161", - "pull_request:43997|pull_request:44017", + "pull_request:44017|pull_request:44018", "pull_request:43997|pull_request:44056", "pull_request:44071|pull_request:44076", "pull_request:44074|pull_request:44101", @@ -16699,6 +16762,7 @@ "pull_request:44056|pull_request:44161", "pull_request:43997|pull_request:44161", "pull_request:44074|pull_request:44161", + "pull_request:44018|pull_request:44161", "pull_request:43997|pull_request:44071", "pull_request:44010|pull_request:44071", "pull_request:44010|pull_request:44073", @@ -16717,379 +16781,456 @@ "pull_request:43996|pull_request:44007", "pull_request:43996|pull_request:44129", "pull_request:44044|pull_request:44129", - "pull_request:44044|pull_request:44154", - "pull_request:44024|pull_request:44076" + "pull_request:44044|pull_request:44154" ], - "prepared_review_unit_hash": "f9393d5a028c4cadc735050563d400d5f0d3c36796367e5e53d87dad9e2a2da2", + "prepared_review_unit_hash": "5d93359c60f1b14a49fcf682546f83e8987869c8ffed7a902c1ff5e0cbdeee6b", "component_budget": { "node_count": 28, "item_count": 28, - "soft_pair_count": 77, - "serialized_chars": 84501, - "estimated_input_tokens": 21126, - "estimated_eval_tokens": 42508 + "soft_pair_count": 78, + "serialized_chars": 84894, + "estimated_input_tokens": 21224, + "estimated_eval_tokens": 42704 }, "budget": { - "node_count": 28, - "item_count": 28, + "node_count": 27, + "item_count": 27, "soft_pair_count": 72, - "serialized_chars": 76933, - "estimated_input_tokens": 19234, - "estimated_eval_tokens": 38724 + 
"serialized_chars": 75091, + "estimated_input_tokens": 18773, + "estimated_eval_tokens": 37802 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "This cluster is mostly a set of model-specific standardized output-tracing refactors, not one single duplicate set. The only clear duplicate subgroup is the GPT-J/CodeGen work, with an earlier GPT-Neo pair also looking like a rebased duplicate; the rest are different models and should stay separate.", - "confidence": 0.7, + "summary": "This cluster is a batch of output-tracing refactors, but most PRs are model-specific and not true duplicates across models. The real duplicates are the same-model GPT-J/CodeGen and GPT-Neo changes; most cross-model soft edges should be rejected.", + "confidence": 0.86, "canonical_issue_reason": null, - "canonical_pr_reason": "44722 is the best representative for the duplicated GPT-J/CodeGen subgroup: it is the latest and most complete refactor on the same concrete output-tracing code paths, with the same standardized-decorator direction as the earlier GPT-J variants.", + "canonical_pr_reason": "PR 44722 is the broadest and clearest instance of the GPT-J/CodeGen standardized output-tracing refactor, with the same issue target and the most complete diff in that code path.", "best_issue_reason": null, - "best_pr_reason": "44722 is the strongest overall PR in the cluster because it cleanly targets the same output-tracing refactor, covers the shared GPT-J/CodeGen implementation, and appears to subsume earlier GPT-J-only variants.", + "best_pr_reason": "PR 44722 is the best representative PR for this cluster because it most directly captures the canonical GPT-J/CodeGen output-tracing change and subsumes the narrower GPT-J-only variants.", "soft_edge_verdicts": [ { "left": "pull_request:43996", "right": "pull_request:44085", "accept": false, - "reason": "Different model implementations (CVT/FNet vs GPT-J); 
same broad theme, but not the same code path or change." + "reason": "Different model families and code paths; matching the general refactor theme is not enough to merge them." }, { "left": "pull_request:43996", "right": "pull_request:44044", "accept": false, - "reason": "Both are output-tracing refactors, but for unrelated models (CVT/FNet vs DeBERTa-v2)." + "reason": "Different model families and code paths; same tracking-style refactor is too broad to count as a duplicate." }, { "left": "pull_request:44066", "right": "pull_request:44085", "accept": true, - "reason": "Both change GPT-J output tracing in the same file; 44085 reads like a narrower/rebased variant of the same fix." + "reason": "Same GPT-J output-tracing code path; 44085 looks like a narrower slice of the same refactor family." }, { "left": "pull_request:44007", "right": "pull_request:44072", "accept": false, - "reason": "ResNet/RT-DETR ResNet vs EfficientNet: same refactor pattern, but not the same model or bug." + "reason": "Different model families and code paths; they only share the standardized output-tracing theme." }, { "left": "pull_request:44072", "right": "pull_request:44722", "accept": false, - "reason": "EfficientNet is unrelated to GPT-J/CodeGen; same umbrella refactor idea only." + "reason": "Different model families and code paths; both are output-tracing refactors, but not the same concrete change." }, { "left": "pull_request:44066", "right": "pull_request:44072", "accept": false, - "reason": "GPT-J/CodeGen vs EfficientNet are different code paths and different model families." + "reason": "Different model families and code paths; no shared concrete bug/change." }, { "left": "pull_request:44013", "right": "pull_request:44044", "accept": false, - "reason": "MobileNetV2 and DeBERTa-v2 are unrelated implementations despite similar output-tracing cleanup." + "reason": "Different model families and code paths; same broad refactor pattern only." 
}, { "left": "pull_request:44066", "right": "pull_request:44086", "accept": false, - "reason": "GPT-J/CodeGen vs MGP-STR are unrelated model changes." + "reason": "Different model families and code paths; these are unrelated standardized-output refactors." }, { "left": "pull_request:44018", "right": "pull_request:44068", "accept": true, - "reason": "Same GPT-Neo output-tracing refactor on the same model file; 44068 looks like a later/reworked version of 44018." + "reason": "Same GPT-Neo output-tracing refactor in the same model file; these look like duplicate/overlapping revisions." }, { "left": "pull_request:44066", "right": "pull_request:44071", "accept": false, - "reason": "GPT-J/CodeGen vs MPT are different model code paths." + "reason": "Different model families and code paths; not the same concrete change." }, { "left": "pull_request:44066", "right": "pull_request:44068", "accept": false, - "reason": "GPT-J/CodeGen and GPT-Neo are different model implementations; not the same fix." + "reason": "GPT-J/CodeGen versus GPT-Neo; same refactor pattern but not the same code path." }, { "left": "pull_request:44018", "right": "pull_request:44066", "accept": false, - "reason": "GPT-Neo vs GPT-J/CodeGen: same standardized-output theme, but different code paths." + "reason": "GPT-Neo versus GPT-J/CodeGen; different concrete changes despite similar output-tracing goals." }, { "left": "pull_request:44019", "right": "pull_request:44722", "accept": false, - "reason": "ResNet is unrelated to GPT-J/CodeGen; different model family and code path." + "reason": "ResNet versus GPT-J/CodeGen; same meta-refactor idea, but not the same underlying change." }, { "left": "pull_request:44086", "right": "pull_request:44722", "accept": false, - "reason": "MGP-STR and GPT-J/CodeGen are unrelated implementations." + "reason": "Different model families and code paths; not a duplicate change." 
}, { "left": "pull_request:44019", "right": "pull_request:44085", "accept": false, - "reason": "ResNet vs GPT-J: different model and different underlying change." + "reason": "ResNet versus GPT-J; unrelated concrete code paths." }, { "left": "pull_request:44018", "right": "pull_request:44019", "accept": false, - "reason": "GPT-Neo and ResNet are unrelated model-specific refactors." + "reason": "GPT-Neo and ResNet are different model code paths; same refactor style is insufficient." }, { "left": "pull_request:44019", "right": "pull_request:44071", "accept": false, - "reason": "ResNet vs MPT are different code paths; not a duplicate." + "reason": "Different model families and code paths." }, { "left": "pull_request:44068", "right": "pull_request:44116", "accept": false, - "reason": "GPT-Neo vs Flaubert are different models and not the same bug/change." + "reason": "GPT-Neo versus Flaubert; not the same underlying change." }, { "left": "pull_request:44068", "right": "pull_request:44722", "accept": false, - "reason": "GPT-Neo and GPT-J/CodeGen are separate model implementations." + "reason": "GPT-Neo versus GPT-J/CodeGen; different concrete code paths." }, { "left": "pull_request:44018", "right": "pull_request:44116", "accept": false, - "reason": "GPT-Neo and Flaubert are unrelated model changes." + "reason": "Different model families and code paths." }, { "left": "pull_request:44071", - "right": "pull_request:44722", + "right": "pull_request:44085", "accept": false, - "reason": "MPT and GPT-J/CodeGen are different model families and code paths." + "reason": "MPT versus GPT-J; not the same change." }, { - "left": "pull_request:44018", + "left": "pull_request:44085", "right": "pull_request:44086", "accept": false, - "reason": "GPT-Neo vs MGP-STR: same pattern, different implementation." + "reason": "Different model families and code paths; only the standardized output-tracing theme matches." 
}, { - "left": "pull_request:44018", - "right": "pull_request:44085", + "left": "pull_request:44059", + "right": "pull_request:44086", "accept": false, - "reason": "GPT-Neo and GPT-J are different models; not mergeable as one fix." + "reason": "GPT-2 and MGP-STR are unrelated concrete code paths." }, { - "left": "pull_request:44068", - "right": "pull_request:44086", + "left": "pull_request:44059", + "right": "pull_request:44722", "accept": false, - "reason": "GPT-Neo vs MGP-STR are unrelated model-specific refactors." + "reason": "GPT-2 versus GPT-J/CodeGen; same broad refactor family, not the same change." }, { - "left": "pull_request:44071", - "right": "pull_request:44085", + "left": "pull_request:44098", + "right": "pull_request:44101", "accept": false, - "reason": "MPT vs GPT-J are different model code paths." + "reason": "ViLT versus Flaubert/XLM; different model code paths." }, { - "left": "pull_request:44059", - "right": "pull_request:44086", + "left": "pull_request:44073", + "right": "pull_request:44161", "accept": false, - "reason": "GPT2 and MGP-STR are unrelated changes." + "reason": "VisualBert versus LongT5; different model families and code paths." }, { - "left": "pull_request:44085", - "right": "pull_request:44086", + "left": "pull_request:44101", + "right": "pull_request:44161", "accept": false, - "reason": "GPT-J and MGP-STR are different model implementations." + "reason": "Flaubert/XLM versus LongT5; same refactor pattern only." }, { - "left": "pull_request:44059", - "right": "pull_request:44722", + "left": "pull_request:44017", + "right": "pull_request:44018", "accept": false, - "reason": "GPT2 and GPT-J/CodeGen are different model families; same refactor style only." + "reason": "Segformer versus GPT-Neo; different concrete code paths." }, { - "left": "pull_request:44098", + "left": "pull_request:43997", + "right": "pull_request:44056", + "accept": false, + "reason": "RegNet versus MPNet; not the same underlying bug/change." 
+ }, + { + "left": "pull_request:44056", + "right": "pull_request:44161", + "accept": false, + "reason": "MPNet versus LongT5; different model code paths." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44161", + "accept": false, + "reason": "RegNet versus LongT5; unrelated concrete changes." + }, + { + "left": "pull_request:44074", "right": "pull_request:44101", "accept": false, - "reason": "ViLT and Flaubert/XLM are different model architectures and code paths." + "reason": "TextNet versus Flaubert/XLM; different model code paths." }, { - "left": "pull_request:44018", + "left": "pull_request:44059", "right": "pull_request:44071", "accept": false, - "reason": "GPT-Neo and MPT are unrelated model implementations." + "reason": "GPT-2 versus MPT; not the same concrete change." }, { - "left": "pull_request:44017", - "right": "pull_request:44722", + "left": "pull_request:44018", + "right": "pull_request:44059", + "accept": false, + "reason": "GPT-Neo versus GPT-2; same broad theme, different code paths." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44101", + "accept": false, + "reason": "MPNet versus Flaubert/XLM; different model families." + }, + { + "left": "pull_request:44073", + "right": "pull_request:44161", "accept": false, - "reason": "Segformer and GPT-J/CodeGen are different models; not the same change." + "reason": "VisualBert versus LongT5; unrelated code paths." }, { "left": "pull_request:44017", "right": "pull_request:44161", "accept": false, - "reason": "Segformer and LongT5 are unrelated model-specific refactors." + "reason": "Segformer versus LongT5; not the same change." }, { - "left": "pull_request:43997", - "right": "pull_request:44017", + "left": "pull_request:44017", + "right": "pull_request:44073", "accept": false, - "reason": "RegNet and Segformer are different code paths." + "reason": "Segformer versus VisualBert; different model code paths." 
}, { - "left": "pull_request:43997", - "right": "pull_request:44056", + "left": "pull_request:44068", + "right": "pull_request:44071", + "accept": false, + "reason": "GPT-Neo versus MPT; different underlying changes." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44086", "accept": false, - "reason": "RegNet and MPNet are unrelated model implementations." + "reason": "GPT-Neo versus MGP-STR; not a duplicate." }, { "left": "pull_request:44071", - "right": "pull_request:44076", + "right": "pull_request:44722", "accept": false, - "reason": "MPT and ImageGPT are different models; same refactor pattern only." + "reason": "MPT versus GPT-J/CodeGen; same refactor family only." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44722", + "accept": false, + "reason": "GPT-Neo versus GPT-J/CodeGen; different concrete code paths." + }, + { + "left": "pull_request:44059", + "right": "pull_request:44068", + "accept": false, + "reason": "GPT-2 versus GPT-Neo; different model code paths." + }, + { + "left": "pull_request:44059", + "right": "pull_request:44071", + "accept": false, + "reason": "GPT-2 versus MPT; unrelated changes." }, { "left": "pull_request:44074", - "right": "pull_request:44101", + "right": "pull_request:44161", "accept": false, - "reason": "TextNet and Flaubert/XLM are unrelated model changes." + "reason": "TextNet versus LongT5; different concrete code paths." }, { - "left": "pull_request:43997", - "right": "pull_request:44018", + "left": "pull_request:44073", + "right": "pull_request:44161", "accept": false, - "reason": "RegNet and GPT-Neo are different model families." + "reason": "VisualBert versus LongT5; not the same underlying change." }, { - "left": "pull_request:44056", + "left": "pull_request:44101", "right": "pull_request:44161", "accept": false, - "reason": "MPNet and LongT5 are unrelated code paths." + "reason": "Flaubert/XLM versus LongT5; same broad refactor theme only." 
+ }, + { + "left": "pull_request:44017", + "right": "pull_request:44018", + "accept": false, + "reason": "Segformer versus GPT-Neo; different model code paths." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44056", + "accept": false, + "reason": "RegNet versus MPNet; unrelated concrete changes." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44076", + "accept": false, + "reason": "MPT versus ImageGPT; different model families and code paths." + }, + { + "left": "pull_request:44074", + "right": "pull_request:44101", + "accept": false, + "reason": "TextNet versus Flaubert/XLM; not a duplicate change." }, { "left": "pull_request:43997", "right": "pull_request:44161", "accept": false, - "reason": "RegNet and LongT5 are different model implementations." + "reason": "RegNet versus LongT5; different underlying changes." }, { "left": "pull_request:44074", "right": "pull_request:44161", "accept": false, - "reason": "TextNet and LongT5 are unrelated changes." + "reason": "TextNet versus LongT5; different code paths." }, { "left": "pull_request:43997", "right": "pull_request:44071", "accept": false, - "reason": "RegNet and MPT are different model families." + "reason": "RegNet versus MPT; unrelated concrete changes." }, { "left": "pull_request:44010", "right": "pull_request:44071", "accept": false, - "reason": "SqueezeBert and MPT are unrelated model implementations." + "reason": "SqueezeBert versus MPT; different model code paths." }, { "left": "pull_request:44010", "right": "pull_request:44073", "accept": false, - "reason": "SqueezeBert and VisualBert are different code paths." + "reason": "SqueezeBert versus VisualBert; different concrete changes." }, { "left": "pull_request:44018", "right": "pull_request:44129", "accept": false, - "reason": "GPT-Neo and SpeechT5 are unrelated model-specific refactors." + "reason": "GPT-Neo versus SpeechT5; same refactor family only." 
}, { - "left": "pull_request:43996", + "left": "pull_request:44056", "right": "pull_request:44161", "accept": false, - "reason": "CVT/FNet and LongT5 are different model families." + "reason": "MPNet versus LongT5; not the same underlying change." }, { "left": "pull_request:43996", "right": "pull_request:44017", "accept": false, - "reason": "CVT/FNet and Segformer are separate model changes." + "reason": "CVT/FNet versus Segformer; different model families and code paths." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44129", + "accept": false, + "reason": "CVT/FNet versus SpeechT5; unrelated concrete changes." }, { "left": "pull_request:44010", "right": "pull_request:44044", "accept": false, - "reason": "SqueezeBert and DeBERTa-v2 are unrelated implementations." + "reason": "SqueezeBert versus DeBERTa v2; different model code paths." }, { "left": "pull_request:44044", "right": "pull_request:44074", "accept": false, - "reason": "DeBERTa-v2 and TextNet are different code paths." + "reason": "DeBERTa v2 versus TextNet; different underlying changes." }, { "left": "pull_request:44066", "right": "pull_request:44722", "accept": true, - "reason": "Same GPT-J/CodeGen standardized output-tracing refactor on the same files; 44722 is the later, fuller version." + "reason": "Same GPT-J/CodeGen output-tracing refactor, same files and issue target; 44722 is the broader, cleaner revision." }, { "left": "pull_request:44085", "right": "pull_request:44722", "accept": true, - "reason": "Both cover the same GPT-J output-tracing change on the same code path; 44722 is broader because it also includes CodeGen." + "reason": "Both are GPT-J output-tracing refactors on the same code path; 44722 subsumes the narrower GPT-J-only variant." }, { "left": "pull_request:44085", "right": "pull_request:44129", "accept": false, - "reason": "GPT-J and SpeechT5 are different model families." + "reason": "GPT-J versus SpeechT5; same meta-pattern, but different concrete code paths." 
}, { "left": "pull_request:43996", "right": "pull_request:44007", "accept": false, - "reason": "CVT/FNet and ResNet/RT-DETR ResNet are unrelated model changes." + "reason": "CVT/FNet versus RegNet/ResNet/RT-DETR; different model families and code paths." }, { "left": "pull_request:43996", "right": "pull_request:44129", "accept": false, - "reason": "CVT/FNet and SpeechT5 are different model implementations." + "reason": "CVT/FNet versus SpeechT5; not the same underlying change." }, { "left": "pull_request:44044", "right": "pull_request:44129", "accept": false, - "reason": "DeBERTa-v2 and SpeechT5 are unrelated code paths." + "reason": "DeBERTa v2 versus SpeechT5; different model code paths." }, { "left": "pull_request:44044", "right": "pull_request:44154", "accept": false, - "reason": "DeBERTa-v2 and ViTS are different model families." - }, - { - "left": "pull_request:44024", - "right": "pull_request:44076", - "accept": false, - "reason": "FocalNet and ImageGPT are different models; same output-handling theme only." + "reason": "DeBERTa v2 versus Vits; unrelated concrete changes." } ] }, "evaluator_result": { "accept": true, - "feedback": "The cluster summary is mostly grounded in the packet: it correctly separates the shared-model duplicates from the broader model-specific refactors, and the accepted soft pairs are limited to cases that look mergeable within the same code path (GPT-J/CodeGen and GPT-Neo). The wording stays mostly cautious, with only mild inference about rebasing/later variants." + "feedback": "The summary is grounded in the packet, and the accepted soft-edge pairs are supported by shared model/file paths and/or clear subsumption within the same bugfix. The cross-model pairs are rejected conservatively, which fits the evidence." 
}, "evaluator_used": true, "retried": false, @@ -17112,76 +17253,85 @@ "nodes": [ "pull_request:43997", "pull_request:44010", + "pull_request:44024", "pull_request:44056", "pull_request:44073", "pull_request:44074", + "pull_request:44076", "pull_request:44129" ], "soft_pairs": [ + "pull_request:44024|pull_request:44076", "pull_request:44073|pull_request:44074", "pull_request:44056|pull_request:44073", "pull_request:44056|pull_request:44074", "pull_request:43997|pull_request:44010", "pull_request:44010|pull_request:44129" ], - "prepared_review_unit_hash": "1804fb911aff06d9dd743cca001df0c77a2f50fae53a4a53f2cdce2f99ce49fc", + "prepared_review_unit_hash": "84e4b13f017dbba84561f4bc07ea4ac80b0f152a249af3fffd9a02080a83fac4", "component_budget": { "node_count": 28, "item_count": 28, - "soft_pair_count": 77, - "serialized_chars": 84501, - "estimated_input_tokens": 21126, - "estimated_eval_tokens": 42508 + "soft_pair_count": 78, + "serialized_chars": 84894, + "estimated_input_tokens": 21224, + "estimated_eval_tokens": 42704 }, "budget": { - "node_count": 6, - "item_count": 6, - "soft_pair_count": 5, - "serialized_chars": 13068, - "estimated_input_tokens": 3267, - "estimated_eval_tokens": 6790 + "node_count": 8, + "item_count": 8, + "soft_pair_count": 6, + "serialized_chars": 17124, + "estimated_input_tokens": 4281, + "estimated_eval_tokens": 8818 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { - "summary": "These PRs are all in the same broad migration theme\u2014standardized output tracing / capture_outputs adoption across different model implementations\u2014but they do not look like duplicate fixes or mergeable alternative implementations of the same concrete change. 
Each PR targets a different model code path, so the soft edges should be rejected.", - "confidence": 0.91, + "summary": "This cluster is a collection of separate per-model PRs that all refactor output tracing/collection to the new standardized capture_outputs/can_return_tuple pattern. They share a theme, but they are not the same concrete change or code path.", + "confidence": 0.93, "canonical_issue_reason": null, - "canonical_pr_reason": "No issue is present. As a representative PR for this cluster, PR #44010 is the clearest and most self-contained example of the standardized output-collection refactor, with explicit capture_outputs/can_return_tuple integration in one model file.", + "canonical_pr_reason": "PR 44073 is the clearest representative of the shared refactor theme: it explicitly describes standardized output tracing and shows the core capture_outputs/can_return_tuple migration pattern without being tied to an unusual or typoed title.", "best_issue_reason": null, - "best_pr_reason": "PR #44010 is a strong cluster representative because its title and diff directly reflect the shared refactor pattern, and the change is compact and focused on a single model implementation.", + "best_pr_reason": "PR 44073 is the best overall representative of the cluster because it most directly matches the common change pattern across the set and is written as a generic output-tracing refactor rather than a model-specific one-off.", "soft_edge_verdicts": [ + { + "left": "pull_request:44024", + "right": "pull_request:44076", + "accept": false, + "reason": "Both are output-tracing refactors, but they touch different model implementations (FocalNet vs ImageGPT) and different code paths, so they are not the same underlying change." + }, { "left": "pull_request:44073", "right": "pull_request:44074", "accept": false, - "reason": "Both are output-tracing refactors, but for different models (VisualBert vs TextNet). 
They do not fix the same underlying bug or change the same code path." + "reason": "These are separate model-specific migrations for VisualBert and TextNet; same refactor style, but not one shared bug or change." }, { "left": "pull_request:44056", "right": "pull_request:44073", "accept": false, - "reason": "Same broad refactor style, but unrelated model implementations (MPNet vs VisualBert) and different internal tracing hooks. Not the same concrete change." + "reason": "MPNet and VisualBert are different model code paths; this is a repeated pattern, not a single mergeable PR." }, { "left": "pull_request:44056", "right": "pull_request:44074", "accept": false, - "reason": "Both migrate to capture_outputs, but they touch different architectures and model-specific tracing logic. Too broad to treat as duplicates." + "reason": "Different models and different forward/output-tracing implementations; too broad to treat as the same change." }, { "left": "pull_request:43997", "right": "pull_request:44010", "accept": false, - "reason": "RegNet and SqueezeBert are different code paths and the diffs are model-specific refactors, not alternative fixes for one shared defect." + "reason": "RegNet and SqueezeBert are unrelated model files, so despite similar standardized-output wording they are not duplicates." }, { "left": "pull_request:44010", "right": "pull_request:44129", "accept": false, - "reason": "Although both involve output tracing, SqueezeBert and SpeechT5 have distinct implementations and semantics; these are separate model-specific migrations, not one mergeable PR." + "reason": "SqueezeBert and SpeechT5 are distinct model-specific refactors; the shared theme is too broad to merge as one PR." 
} ] }, @@ -17264,292 +17414,286 @@ "pull_request:44028|pull_request:44030", "pull_request:44029|pull_request:44030" ], - "prepared_review_unit_hash": "4e0b0001e227255a5c7243202731f77ec05a5fd1105866914319761a00da76a4", + "prepared_review_unit_hash": "28f7a8c43129343984a2a0a4e72541caf876d52f1591591a8f7dac334cb90396", "component_budget": { "node_count": 13, "item_count": 13, "soft_pair_count": 43, - "serialized_chars": 41324, - "estimated_input_tokens": 10331, - "estimated_eval_tokens": 20918 + "serialized_chars": 41331, + "estimated_input_tokens": 10333, + "estimated_eval_tokens": 20922 }, "budget": { "node_count": 13, "item_count": 13, "soft_pair_count": 43, - "serialized_chars": 41324, - "estimated_input_tokens": 10331, - "estimated_eval_tokens": 20918 + "serialized_chars": 41331, + "estimated_input_tokens": 10333, + "estimated_eval_tokens": 20922 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "This is a cluster of model-specific output-tracing refactors that share a common pattern but are not duplicate fixes or mergeable changes to the same code path.", - "confidence": 0.98, + "summary": "This cluster is a batch of independent model-specific output-tracing refactors, not true duplicates. The PRs share a theme and terminology, but they touch different model code paths and should not be merged together as one change.", + "confidence": 0.94, "canonical_issue_reason": null, - "canonical_pr_reason": "No issue is present; if one PR must represent the cluster, 44025 is a good canonical example because it is a clean, self-contained tracing refactor across two closely related Depth Anything files.", + "canonical_pr_reason": "No issue artifacts are present. 
For a PR representative, #44003 is the strongest canonical anchor because it has the broadest refactor surface (mamba plus falcon_mamba) and introduces the reusable output-capturing plumbing.", "best_issue_reason": null, - "best_pr_reason": "44025 is the strongest representative PR: focused, easy to understand, and directly shows the output-tracing conversion pattern without depending on unrelated model code.", + "best_pr_reason": "#44003 is the most central and substantial output-tracing refactor in the set, so it best represents the cluster overall, even though the other PRs are not duplicates of it.", "soft_edge_verdicts": [ { "left": "pull_request:43998", "right": "pull_request:44002", "accept": false, - "reason": "Different model implementations and code paths; same refactor theme only." + "reason": "Different model implementations (timm_backbone vs upernet); same refactor theme, not the same underlying change." }, { "left": "pull_request:43998", "right": "pull_request:44000", "accept": false, - "reason": "Separate model families; similar output-tracing cleanup, not the same change." + "reason": "Different code paths and architectures; both are output-tracing cleanups, but not one concrete bug." }, { "left": "pull_request:43998", "right": "pull_request:44001", "accept": false, - "reason": "Different model code paths; only the refactor style overlaps." + "reason": "Separate model families with different forward paths; not a mergeable duplicate." }, { "left": "pull_request:43998", "right": "pull_request:44003", "accept": false, - "reason": "Unrelated model families; not the same underlying bug or patch." + "reason": "One is timm_backbone; the other is mamba/falcon_mamba plumbing. Similar motif, different change sets." }, { "left": "pull_request:43998", "right": "pull_request:44029", "accept": false, - "reason": "Different architectures and files; shared tracing refactor pattern only." 
+ "reason": "Different model internals and forward signatures; not the same fix." }, { "left": "pull_request:43999", "right": "pull_request:44001", "accept": false, - "reason": "Different models; both alter output handling but not the same concrete fix." + "reason": "mobilenet_v1 and univnet are unrelated model paths; shared refactor language is not enough." }, { "left": "pull_request:43999", "right": "pull_request:44002", "accept": false, - "reason": "Different model implementations; too broad to be duplicates." + "reason": "Independent model refactors in different subsystems; not the same bug or change." }, { "left": "pull_request:43999", "right": "pull_request:44025", "accept": false, - "reason": "Different model families and files; not one mergeable fix." + "reason": "MobileNetV1 and Depth Anything are separate implementations with different output handling." }, { "left": "pull_request:43999", "right": "pull_request:44026", "accept": false, - "reason": "Separate encoder/decoder code path versus MobileNet; same refactor theme only." + "reason": "Different architectures and wrappers; similar cleanup, not duplicate work." }, { "left": "pull_request:43999", "right": "pull_request:44027", "accept": false, - "reason": "Different model-specific refactors, not the same underlying issue." + "reason": "mobilenet_v1 vs speech_encoder_decoder are distinct code paths and model behaviors." }, { "left": "pull_request:44000", "right": "pull_request:44001", "accept": false, - "reason": "Distinct model code paths; similar tracing refactor intent only." + "reason": "vision_text_dual_encoder and univnet are unrelated models; same refactor pattern only." }, { "left": "pull_request:44000", "right": "pull_request:44002", "accept": false, - "reason": "Different model families and implementations." + "reason": "Different model families; no evidence of a single shared defect." 
}, { "left": "pull_request:44000", "right": "pull_request:44025", "accept": false, - "reason": "Unrelated models; no shared concrete bug or change." + "reason": "Different files, different forward paths, and different model tasks; not one underlying change." }, { "left": "pull_request:44000", "right": "pull_request:44026", "accept": false, - "reason": "Different model implementations; same broad refactor pattern only." + "reason": "Vision-text dual encoder vs vision encoder-decoder: analogous refactors, but not the same concrete bug." }, { "left": "pull_request:44001", "right": "pull_request:44002", "accept": false, - "reason": "Different model-specific changes; not mergeable as one PR." + "reason": "UnivNet and UperNet are unrelated implementations; too broad to be a duplicate." }, { "left": "pull_request:44001", "right": "pull_request:44025", "accept": false, - "reason": "Separate models and code paths; not duplicates." + "reason": "Different model code and output semantics; only the high-level refactor theme overlaps." }, { "left": "pull_request:44001", "right": "pull_request:44026", "accept": false, - "reason": "Different model families; same style of refactor, not same fix." + "reason": "Separate encoder/decoder and audio model paths; not mergeable as one PR." }, { "left": "pull_request:44002", "right": "pull_request:44029", "accept": false, - "reason": "Unrelated models and code paths; only the output-tracing motif overlaps." + "reason": "UperNet and RWKV are different subsystems with unrelated forward logic." }, { "left": "pull_request:44002", "right": "pull_request:44025", "accept": false, - "reason": "Different model implementations; not the same concrete change." + "reason": "Different model families; same output-tracing cleanup pattern, but not a duplicate." }, { "left": "pull_request:44001", "right": "pull_request:44003", "accept": false, - "reason": "Different model families and code paths." 
+ "reason": "UnivNet vs mamba/falcon_mamba touches different code paths and model internals." }, { "left": "pull_request:44002", "right": "pull_request:44003", "accept": false, - "reason": "Separate model-specific refactors; not duplicate fixes." + "reason": "UperNet and mamba/falcon_mamba are unrelated model implementations." }, { "left": "pull_request:44000", "right": "pull_request:44003", "accept": false, - "reason": "Different architectures and files; too broad to merge as one PR." + "reason": "Different architecture and files; the similarity is only that both refactor output handling." }, { "left": "pull_request:44003", "right": "pull_request:44028", "accept": false, - "reason": "Different model code paths; only broad tracing refactor similarity." + "reason": "mamba/falcon_mamba vs superpoint are distinct model paths; not the same underlying bug." }, { "left": "pull_request:44003", "right": "pull_request:44004", "accept": false, - "reason": "Different model families and concrete implementation details." + "reason": "CodeGen and mamba/falcon_mamba have different forward contracts and output plumbing." }, { "left": "pull_request:43998", "right": "pull_request:44004", "accept": false, - "reason": "Different models and forward paths; not the same bug or change." + "reason": "timm_backbone and CodeGen are unrelated code paths; do not collapse into one duplicate." }, { "left": "pull_request:44001", "right": "pull_request:44004", "accept": false, - "reason": "Separate model code paths; overlap is only in tracing cleanup." + "reason": "UnivNet and CodeGen are different models with different output semantics." }, { "left": "pull_request:44002", "right": "pull_request:44004", "accept": false, - "reason": "Different model implementations; not mergeable into one PR." - }, - { - "left": "pull_request:44000", - "right": "pull_request:44004", - "accept": false, - "reason": "Different models and changes; same general refactor theme only." 
+ "reason": "UperNet and CodeGen are not the same change, only the same refactor style." }, { "left": "pull_request:44025", "right": "pull_request:44026", "accept": false, - "reason": "Different model families; no shared concrete bug." + "reason": "Both are encoder-decoder wrapper refactors, but they target different model families and code paths." }, { "left": "pull_request:44025", "right": "pull_request:44027", "accept": false, - "reason": "Different encoder/decoder stacks; not the same fix." + "reason": "Depth Anything and Speech Encoder-Decoder are separate implementations; not one concrete fix." }, { "left": "pull_request:44025", "right": "pull_request:44028", "accept": false, - "reason": "Distinct model implementations and output paths." + "reason": "Different models and output behavior; same tracing refactor theme only." }, { "left": "pull_request:44025", "right": "pull_request:44029", "accept": false, - "reason": "Different model architectures; same refactor motif only." + "reason": "Depth Anything and RWKV are unrelated subsystems." }, { "left": "pull_request:44025", "right": "pull_request:44030", "accept": false, - "reason": "Separate model-specific tracing changes; not duplicates." + "reason": "Depth Anything and DPR are different model stacks; not a duplicate change." }, { "left": "pull_request:44026", - "right": "pull_request:44029", + "right": "pull_request:44027", "accept": false, - "reason": "Different model code paths and files; not the same underlying issue." + "reason": "Vision encoder-decoder vs speech encoder-decoder are analogous wrappers, but they are distinct code paths." }, { "left": "pull_request:44026", - "right": "pull_request:44027", + "right": "pull_request:44028", "accept": false, - "reason": "Different encoder/decoder models; too broad to merge." + "reason": "Different model implementations; no shared concrete bug." 
}, { "left": "pull_request:44026", - "right": "pull_request:44028", + "right": "pull_request:44029", "accept": false, - "reason": "Distinct model implementations; only similar refactor style." + "reason": "Vision encoder-decoder and RWKV are unrelated." }, { "left": "pull_request:44026", "right": "pull_request:44030", "accept": false, - "reason": "Different model families and output handling code." + "reason": "Different model families and forward logic." }, { "left": "pull_request:44027", "right": "pull_request:44028", "accept": false, - "reason": "Different model-specific changes; not the same patch." + "reason": "Speech encoder-decoder and SuperPoint are separate model paths; similar refactor language is insufficient." }, { "left": "pull_request:44027", "right": "pull_request:44029", "accept": false, - "reason": "Separate model families and code paths." + "reason": "Different architectures and output contracts; not mergeable as one PR." }, { "left": "pull_request:44027", "right": "pull_request:44030", "accept": false, - "reason": "Different models; overlap is only in tracing refactor intent." + "reason": "Speech encoder-decoder vs DPR are unrelated changes." }, { "left": "pull_request:44028", "right": "pull_request:44029", "accept": false, - "reason": "Different architectures and files; not the same fix." + "reason": "SuperPoint and RWKV are distinct code paths with unrelated fixes." }, { "left": "pull_request:44028", "right": "pull_request:44030", "accept": false, - "reason": "Distinct model implementations; not mergeable as one PR." + "reason": "SuperPoint and DPR are separate model implementations; not the same underlying issue." }, { "left": "pull_request:44029", "right": "pull_request:44030", "accept": false, - "reason": "Different model families and code paths; same theme only." + "reason": "RWKV and DPR are different model families and cannot plausibly be one merged fix." 
} ] }, @@ -17582,7 +17726,7 @@ "pull_request:44662|pull_request:45075", "pull_request:45075|pull_request:45401" ], - "prepared_review_unit_hash": "2d0f0fc5315955fd6aa62002914147a970807a370e18e8c6a1ffb515d2a9293d", + "prepared_review_unit_hash": "1b8dbc7f0edd04501298069baadfa2b5f0d6289255aee980e140aa758a1d609e", "component_budget": { "node_count": 3, "item_count": 3, @@ -17604,30 +17748,30 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are three separate model-addition PRs that share Transformers integration boilerplate but implement different model families and code paths, so they should not be treated as duplicates.", + "summary": "All three pull requests are separate model-introduction PRs (PenguinVL, DeepSeek-OCR-2, Voxtral TTS). They share Transformers boilerplate and auto-registration changes, but they do not fix the same bug or implement the same model, so none should be clustered as duplicates.", "confidence": 0.98, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 45075 is the most complete standalone submission in the cluster: it includes model, processing, image-processing, conversion, auto-mapping, docs, and tests. Even so, it is specific to DeepSeek-OCR-2 and not a duplicate of the other model PRs.", + "canonical_pr_reason": "No true duplicate PR exists in this set. 
If a single representative is needed, PR #45075 is the strongest standalone candidate because it is the most mature non-draft model-addition PR with the most review activity and a complete implementation scope.", "best_issue_reason": null, - "best_pr_reason": "PR 45075 is the strongest representative PR here because it has the broadest end-to-end implementation and validation coverage, but it still addresses a different model than the others.", + "best_pr_reason": "PR #45075 is the best standalone representative: it is non-draft, has substantial review activity, and appears to contain a full end-to-end model addition with docs, tests, conversion, and auto-mapping updates.", "soft_edge_verdicts": [ { "left": "pull_request:44662", "right": "pull_request:45401", "accept": false, - "reason": "PenguinVL and Voxtral TTS are unrelated model additions for different tasks (vision-language vs TTS); they only share standard integration files, not the same underlying change." + "reason": "Different model integrations for different code paths (PenguinVL vs Voxtral TTS). The overlap is generic model-registration/docs boilerplate, not the same underlying change." }, { "left": "pull_request:44662", "right": "pull_request:45075", "accept": false, - "reason": "PenguinVL and DeepSeek-OCR-2 are different model implementations with distinct architectures and targets; shared boilerplate files do not make them the same change." + "reason": "These add different models (PenguinVL vs DeepSeek-OCR-2). Shared files are broad Transformers plumbing, but the concrete implementation and target functionality are unrelated." }, { "left": "pull_request:45075", "right": "pull_request:45401", "accept": false, - "reason": "DeepSeek-OCR-2 and Voxtral TTS modify different concrete model code paths and would not plausibly be merged into one PR; they are separate additions, not duplicates." 
+ "reason": "DeepSeek-OCR-2 and Voxtral TTS are distinct model additions with different configs, processors, and tests. Similar auto-mapping/docs edits are insufficient to treat them as the same PR." } ] }, @@ -17657,12 +17801,12 @@ "soft_pairs": [ "pull_request:44979|pull_request:45363" ], - "prepared_review_unit_hash": "10791feb36235b332c347e0259d1d07798c1ed67a48d1abde9a2c6601f689eaa", + "prepared_review_unit_hash": "2bae581294da7eba217a756060c607967c100b2d4705bf2d364ef9e5d0f40f18", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 4408, + "serialized_chars": 4407, "estimated_input_tokens": 1102, "estimated_eval_tokens": 2460 }, @@ -17670,7 +17814,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 4408, + "serialized_chars": 4407, "estimated_input_tokens": 1102, "estimated_eval_tokens": 2460 }, @@ -17679,18 +17823,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are related by the general theme of fusion, but they are not duplicates: PR 44979 adds a generic Module Fusion API, while PR 45363 implements a specific n-to-1 KernelConfig-based kernel fusion path with integration changes and an example.", - "confidence": 0.92, + "summary": "These are related by the broad theme of fusion, but they are not the same change: one introduces a generic Module Fusion API, while the other wires n-to-1 kernel fusion through KernelConfig and related integration points. 
I would not dedupe them.", + "confidence": 0.82, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 44979 is the more foundational/general artifact: it introduces the standalone module_fusion API surface and tests, whereas PR 45363 is a separate, concrete application of fusion built around KernelConfig and runtime integrations.", + "canonical_pr_reason": "PR #44979 is the more foundational change: it adds the new module-fusion abstraction itself, whereas #45363 is a separate kernel-config-driven integration on top of a different code path.", "best_issue_reason": null, - "best_pr_reason": "PR 45363 is the more complete end-user-facing implementation, but it is still distinct from 44979; if one needs a representative PR for the broader fusion work, 44979 is the cleaner canonical choice.", + "best_pr_reason": "PR #44979 is the best representative of this cluster because it is the core API change and is simpler to use as the anchor for related fusion work.", "soft_edge_verdicts": [ { "left": "pull_request:44979", "right": "pull_request:45363", "accept": false, - "reason": "Different underlying changes. 44979 adds a generic module fusion API in a new module; 45363 wires up n-to-1 kernel fusion through KernelConfig and integration/modeling changes. They are adjacent, but not the same code-path fix and would not be merged as one PR." + "reason": "They share a fusion theme, but they do not look like one underlying bug/change. #44979 adds a standalone Module Fusion API, while #45363 implements n-to-1 kernel fusion plumbing via KernelConfig and integrations; these would not plausibly be merged into one PR." 
} ] }, @@ -17720,12 +17864,12 @@ "soft_pairs": [ "pull_request:45086|pull_request:45317" ], - "prepared_review_unit_hash": "6d955ea1fdf1cd9524b976f7b1bd68da43b1c1e5a7521e1552eeae35f878d4a6", + "prepared_review_unit_hash": "9c62f3e8a65a521050fa64cfd12363de8ff6a474c256926aeef82c4b742402f5", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 4413, + "serialized_chars": 4414, "estimated_input_tokens": 1104, "estimated_eval_tokens": 2464 }, @@ -17733,7 +17877,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 4413, + "serialized_chars": 4414, "estimated_input_tokens": 1104, "estimated_eval_tokens": 2464 }, @@ -17742,18 +17886,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Both pull requests address the same AttributeError in `_patch_mistral_regex` by changing the same pre-tokenizer access in `tokenization_utils_tokenizers.py`; PR 45317 is the more complete version because it also adds tests.", + "summary": "Both PRs target the same Mistral regex patch bug in the same file and apply the same code change; PR #45317 is the more complete version because it adds tests and has more discussion.", "confidence": 0.98, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 45317 is the better canonical PR because it implements the same fix as 45086 and adds test coverage, making it the more complete representation of the change.", + "canonical_pr_reason": "PR #45317 is the stronger canonical PR: same fix as #45086, but with added test coverage and more review discussion, making it the more complete representative of this change.", "best_issue_reason": null, - "best_pr_reason": "PR 45317 is the best overall PR because it fixes the same concrete bug as 45086 and includes an added test file, indicating a more finalized patch.", + "best_pr_reason": "PR #45317 is best overall because it directly fixes the AttributeError, includes tests, and is the more fully 
developed implementation of the shared fix.", "soft_edge_verdicts": [ { "left": "pull_request:45086", "right": "pull_request:45317", "accept": true, - "reason": "Same underlying bug fix in the same code path, same target issue, and the second PR is essentially the first fix plus tests; they could plausibly be merged into one PR." + "reason": "Same underlying bug and same code-path fix in the same file; #45317 is effectively the tested, expanded version of #45086." } ] }, @@ -17783,12 +17927,12 @@ "soft_pairs": [ "pull_request:45115|pull_request:45569" ], - "prepared_review_unit_hash": "cfb8b7461ef565b40654a7cdf0a087eee24689338dc543f480a26bca8841f7ab", + "prepared_review_unit_hash": "b906698b3fb430661c3e6cf78115b214fbfb01fed57d472e9bedaba9bd8d20a1", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 6065, + "serialized_chars": 6066, "estimated_input_tokens": 1517, "estimated_eval_tokens": 3290 }, @@ -17796,7 +17940,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 6065, + "serialized_chars": 6066, "estimated_input_tokens": 1517, "estimated_eval_tokens": 3290 }, @@ -17805,18 +17949,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are related NemotronH refactors, but they are not the same change: #45115 is a narrow inheritance/mapping cleanup, while #45569 is a broader model split introducing NemotronHDense and NemotronHSparse plus docs/tests. 
They overlap on shared core files, but they do not look mergeable into one PR without collapsing distinct goals.", - "confidence": 0.92, + "summary": "Two open PRs in the NemotronH area overlap on model plumbing, but they pursue different changes: one refactors NemotronH to inherit GraniteMoeHybrid-style structure, while the other splits NemotronH into dedicated dense/sparse model families with docs and tests.", + "confidence": 0.79, "canonical_issue_reason": null, - "canonical_pr_reason": "#45569 is the broader, more authoritative architectural change: it introduces the new NemotronHDense/NemotronHSparse structure, updates auto mappings, docs, and tests, and reads like the main follow-up model refactor rather than just a conversion-mapping cleanup.", + "canonical_pr_reason": "PR #45569 is the better canonical PR because it is the broader, more complete NemotronH redesign and includes the downstream docs/test/model registration changes that make it the likely final representative of the cluster.", "best_issue_reason": null, - "best_pr_reason": "#45569 is the best representative of the cluster because it covers the larger, user-facing model refactor and likely subsumes the narrower NemotronH inheritance cleanup in scope, even though the two PRs are not duplicates.", + "best_pr_reason": "PR #45569 best represents the cluster since it subsumes the shared NemotronH modeling work and adds the user-facing/model split changes; #45115 is a narrower refactor step and less representative of the overall change set.", "soft_edge_verdicts": [ { "left": "pull_request:45115", "right": "pull_request:45569", "accept": false, - "reason": "Both touch NemotronH internals, but #45115 is focused on conversion mapping and inheritance cleanup, while #45569 adds separate dense/sparse model packages, docs, and tests. They address related architecture work, not the same concrete code-path fix/change." 
+ "reason": "They touch overlapping NemotronH files, but they are not the same concrete fix/change: #45115 is a structural refactor toward GraniteMoeHybrid compatibility, while #45569 introduces separate NemotronHDense/NemotronHSparse models plus docs/tests and broader API updates. Shared subsystem alone is not enough to merge them." } ] }, @@ -17846,12 +17990,12 @@ "soft_pairs": [ "pull_request:45332|pull_request:45333" ], - "prepared_review_unit_hash": "5986cbc6945d4170982623bf046289a50c3fc4820d898aeac2f04160437713b9", + "prepared_review_unit_hash": "8159cc6512800f1c6ef20ceef8d250fcfa8591c2704cd8d3cc5059f7805486a5", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 5397, + "serialized_chars": 5398, "estimated_input_tokens": 1350, "estimated_eval_tokens": 2956 }, @@ -17859,7 +18003,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 5397, + "serialized_chars": 5398, "estimated_input_tokens": 1350, "estimated_eval_tokens": 2956 }, @@ -17868,18 +18012,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are related but not duplicates: one PR adds heterogeneous per-layer configuration, the other adds heterogeneous modeling/cache support that builds on that config machinery.", - "confidence": 0.88, + "summary": "These are two related but distinct pull requests in the heterogeneous-modeling feature area: one adds per-layer config support, the other adds the broader per-layer modeling/cache integration. 
They share plumbing files, but they are not the same concrete change and should not be merged as duplicates.", + "confidence": 0.89, "canonical_issue_reason": null, - "canonical_pr_reason": "45332 is the broader umbrella PR: it covers heterogeneous modeling behavior and includes the new per-layer config plumbing it depends on, whereas 45333 is the narrower config-only slice.", + "canonical_pr_reason": "PR 45332 is the better canonical representative because it is the broader umbrella change: it adds heterogeneous model support across config, cache, masking, and modeling paths. PR 45333 is a narrower config-only piece.", "best_issue_reason": null, - "best_pr_reason": "45332 is the best representative of the cluster because it spans the larger end-to-end change and is the more central change path for heterogeneous model support.", + "best_pr_reason": "PR 45332 is the best overall representative for the cluster because it covers the larger end-to-end heterogeneous support work, while 45333 is only one subsystem slice of that effort.", "soft_edge_verdicts": [ { "left": "pull_request:45332", "right": "pull_request:45333", "accept": false, - "reason": "Related scope, but not the same concrete code change: 45333 adds config serialization/per-layer config support, while 45332 adds modeling/cache behavior for heterogeneous layers. They are complementary, not duplicative, and would not plausibly be merged as one PR without collapsing two distinct changes." + "reason": "Related feature work, but not the same underlying change: 45333 only adds heterogeneous config handling, while 45332 extends modeling/cache behavior and depends on that config groundwork. They would not plausibly be one PR." 
} ] }, @@ -17909,7 +18053,7 @@ "soft_pairs": [ "pull_request:45415|pull_request:45425" ], - "prepared_review_unit_hash": "298519b048c5901fc6a3fb69bd24e622f1d22a0776d739189a1a947a53945ed4", + "prepared_review_unit_hash": "7bcd7d04285a4253c91d4ef53638d4eddc49ef8d721a3fc934290aaa0ea21a9b", "component_budget": { "node_count": 2, "item_count": 2, @@ -17931,18 +18075,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Both items are typing-related pull requests, but they address different scopes: one is a broad repo-wide type-checking sweep, the other adds/adjusts specific typing protocols and related model utilities. They are not the same underlying change.", - "confidence": 0.93, + "summary": "Both items are broad typing-focused PRs in Transformers, but they address different changes: one enables/expands type checking across many src/transformers files, while the other adds shared typing helper protocols and related model utility adjustments. They are related by theme, not the same underlying change.", + "confidence": 0.9, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 45425 is the better canonical representative because it is the more focused, concrete typing change centered on `_typing.py` and related model utility compatibility, whereas 45415 is a broad type-checking cleanup across many files.", + "canonical_pr_reason": "PR 45425 is the better canonical PR because it is the more focused change centered on shared typing abstractions and targeted utility updates, whereas PR 45415 is a much broader type-checking sweep across many files.", "best_issue_reason": null, - "best_pr_reason": "PR 45425 is the most suitable overall because it targets a specific code-path/typing compatibility problem and is easier to reason about as a standalone change; 45415 is a wider type-checking sweep rather than the same fix.", + "best_pr_reason": "PR 45425 is the best representative of this cluster since it has a narrower, more specific 
code-change scope; PR 45415 is a separate large-scale typing rollout rather than the same concrete fix.", "soft_edge_verdicts": [ { "left": "pull_request:45415", "right": "pull_request:45425", "accept": false, - "reason": "Reject: 45415 is a broad typing sweep with many unrelated file edits and pyproject typing-ignore changes, while 45425 is a targeted typing-protocol/update PR. They share a theme but not the same concrete bug or mergeable change." + "reason": "Both are typing-related, but they do not look like the same underlying bug or change. PR 45415 is a wide type-checking/config sweep across many modules, while PR 45425 introduces shared typing protocols and related code-path adjustments. Shared filenames alone are not enough to treat them as duplicates." } ] }, @@ -17972,12 +18116,12 @@ "soft_pairs": [ "pull_request:45487|pull_request:45615" ], - "prepared_review_unit_hash": "a671d878d8083c012e599652ea63b278cdaf38554fbd255017e00bc2d723da64", + "prepared_review_unit_hash": "848e42b2cea82e09d945d113713b3b9afd3438d236f6bf1e52f32d6be69221cb", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 5160, + "serialized_chars": 5159, "estimated_input_tokens": 1290, "estimated_eval_tokens": 2836 }, @@ -17985,7 +18129,7 @@ "node_count": 2, "item_count": 2, "soft_pair_count": 1, - "serialized_chars": 5160, + "serialized_chars": 5159, "estimated_input_tokens": 1290, "estimated_eval_tokens": 2836 }, @@ -17994,18 +18138,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are unrelated pull requests: one fixes a real model-parallel device placement bug in AltCLIP/ChineseCLIP code, while the other only adds XPU test expectations for qianfan_ocr.", - "confidence": 0.97, + "summary": "The two pull requests are unrelated: one fixes model-parallel/device-placement code in AltCLIP/ChineseCLIP and other models, while the other only adds XPU test expectations for Qianfan OCR.", + "confidence": 
0.96, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 45487 is the stronger canonical PR because it contains the substantive code fix: moving buffered token type ids to the correct device in model forward paths, with corresponding model coverage across several affected architectures.", + "canonical_pr_reason": "PR 45487 is the stronger representative of a real code change: it fixes device-placement/model-parallel bugs in model implementations rather than only adjusting tests.", "best_issue_reason": null, - "best_pr_reason": "PR 45487 is the best overall representative of the cluster because it addresses an actual runtime bug in model code, whereas PR 45615 is limited to test data updates and is not the same kind of change.", + "best_pr_reason": "PR 45487 is the best PR to represent the cluster because it contains the substantive cross-model fix; PR 45615 is just an XPU expectation update in tests.", "soft_edge_verdicts": [ { "left": "pull_request:45487", "right": "pull_request:45615", "accept": false, - "reason": "Reject: they do not fix the same underlying bug or change. 45487 patches device placement in model forward code for parallel execution; 45615 only adds XPU-specific expected outputs in tests for a different model." + "reason": "Reject: they do not fix the same concrete code-path problem. 45487 changes model code for AltCLIP/ChineseCLIP device handling, while 45615 only updates Qianfan OCR test outputs for XPU." 
} ] }, @@ -18035,7 +18179,7 @@ "soft_pairs": [ "pull_request:45548|pull_request:45662" ], - "prepared_review_unit_hash": "63e01767042f4ae9ab394af5a1beea79583758b7b6f16ac2fe35b8b1340898ac", + "prepared_review_unit_hash": "5fd759d95527c3d82c5ecf392742443d5f9005d030ff2fca2b2902589260839a", "component_budget": { "node_count": 2, "item_count": 2, @@ -18057,18 +18201,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "These are related EP/distributed-loading PRs, but they fix different failure modes and touch different code paths: one is about accelerate-launch/ZeRO-3 config handling, the other about FSDP2 expert tensors being overwritten during load. They should not be deduplicated.", - "confidence": 0.96, + "summary": "The two PRs both touch expert-parallel loading, but they fix different bugs on different code paths: one is about ZeRO-3/accelerate launch config handling, the other about FSDP2/rank-0 broadcast overwriting experts. Not duplicates.", + "confidence": 0.95, "canonical_issue_reason": null, - "canonical_pr_reason": "No canonical PR for duplication: the PRs target different EP bugs and are not merge-equivalent.", + "canonical_pr_reason": "PR #45662 is the more concrete and self-contained fix: it targets a specific expert-overwrite path during distributed loading and has a focused code change around shard wrapping.", "best_issue_reason": null, - "best_pr_reason": "45662 is the stronger standalone PR because it addresses a specific, concrete expert-overwrite bug with a direct code-path fix.", + "best_pr_reason": "PR #45662 is the stronger representative of the cluster because it addresses a clearly defined failure mode in the model loading path, whereas #45548 is a separate configuration/loading compatibility fix.", "soft_edge_verdicts": [ { "left": "pull_request:45548", "right": "pull_request:45662", "accept": false, - "reason": "Different underlying bugs. 
45548 strips distributed_config and adds has_ep for accelerate-launch/ZeRO-3 loading, while 45662 post-wraps sharded params to prevent expert tensors being overwritten under FSDP2. Shared EP plumbing is not enough to treat them as the same change." + "reason": "Different underlying defects. #45548 fixes EP + DeepSpeed ZeRO-3 loading/config serialization for accelerate launch, while #45662 fixes EP + FSDP2 experts being overwritten by rank-0 broadcast during loading. Shared subsystem only, not the same bug." } ] }, @@ -18098,7 +18242,7 @@ "soft_pairs": [ "pull_request:45639|pull_request:45645" ], - "prepared_review_unit_hash": "13152e7f6d461b0ba41b83b184fbf1299fcd29dbd99ff0ed17d45f315a85b29b", + "prepared_review_unit_hash": "58bedb4f37eb30626691bdf6c14fc12b35c9b50598d456c17ffbbfe7a09dec37", "component_budget": { "node_count": 2, "item_count": 2, @@ -18120,18 +18264,18 @@ "aggressively_trimmed": false, "split": false, "analyst_result": { - "summary": "Both PRs target the same xdist/captured_info logging area, but PR 45645 is broader and not a strict duplicate of 45639; it adds CI log aggregation and other artifact-handling changes beyond the core testing_utils fix.", - "confidence": 0.84, + "summary": "Both PRs address the same xdist collision problem around captured_info logging, with 45645 appearing to subsume 45639 by adding the core fix plus CI log handling.", + "confidence": 0.92, "canonical_issue_reason": null, - "canonical_pr_reason": "PR 45645 is the more representative change in this cluster because it contains the core xdist-safe captured_info fix plus the CI workflow changes needed to consume multiple per-worker files.", + "canonical_pr_reason": "PR 45645 is the better canonical representative because it contains the same core testing_utils xdist-safe fix as 45639 and also includes the extra workflow/log-preservation changes tied to the same bug.", "best_issue_reason": null, - "best_pr_reason": "PR 45645 is the best overall representative because it 
addresses the underlying collision problem and also updates the downstream workflow to preserve debug output end-to-end.", + "best_pr_reason": "PR 45645 is the strongest overall fit: it covers the concrete captured_info xdist collision fix and the related CI/reporting adjustments, making it the most complete version of the change.", "soft_edge_verdicts": [ { "left": "pull_request:45639", "right": "pull_request:45645", - "accept": false, - "reason": "Related but not the same change set: 45639 only makes the testing_utils log file xdist-safe, while 45645 also changes the CI workflow (and related artifact handling) to read multiple captured_info files. They share a subsystem and issue target, but they are not strict duplicates and would not be merged as one PR without losing scope." + "accept": true, + "reason": "Same underlying captured_info xdist collision bug; 45645 includes the same testing_utils fix and extends it with CI log aggregation, so the PRs are plausibly mergeable into one change." 
} ] }, diff --git a/analysis/current/manifest.json b/analysis/current/manifest.json index 0f38dcf545f37518eb2edb4513dbfafd045e07e8..2cfcdfe7eca62158933aa445cd663f9b2b700934 100644 --- a/analysis/current/manifest.json +++ b/analysis/current/manifest.json @@ -1,8 +1,8 @@ { - "analysis_id": "hybrid-model-20260430t000042z", + "analysis_id": "hybrid-model-20260430t060020z", "archived_artifacts": { - "hybrid": "snapshots/20260430T000042Z/analysis-runs/hybrid-model-20260430t000042z/analysis-report-hybrid.json", - "hybrid_reviews": "snapshots/20260430T000042Z/analysis-runs/hybrid-model-20260430t000042z/analysis-report-hybrid.llm-reviews.json" + "hybrid": "snapshots/20260430T060020Z/analysis-runs/hybrid-model-20260430t060020z/analysis-report-hybrid.json", + "hybrid_reviews": "snapshots/20260430T060020Z/analysis-runs/hybrid-model-20260430t060020z/analysis-report-hybrid.llm-reviews.json" }, "artifacts": { "hybrid": "analysis/current/analysis-report-hybrid.json", @@ -10,9 +10,9 @@ }, "channel": "canonical", "model": null, - "published_at": "2026-04-30T00:10:18Z", + "published_at": "2026-04-30T06:08:50Z", "repo": "huggingface/transformers", "schema_version": 1, - "snapshot_id": "20260430T000042Z", + "snapshot_id": "20260430T060020Z", "variant": "hybrid" } diff --git a/snapshots/20260430T060020Z/analysis-runs/hybrid-model-20260430t060020z/analysis-report-hybrid.json b/snapshots/20260430T060020Z/analysis-runs/hybrid-model-20260430t060020z/analysis-report-hybrid.json new file mode 100644 index 0000000000000000000000000000000000000000..60f63961e37de93f72d81efd205a8cdaff8e765d --- /dev/null +++ b/snapshots/20260430T060020Z/analysis-runs/hybrid-model-20260430t060020z/analysis-report-hybrid.json @@ -0,0 +1,2017 @@ +{ + "schema_version": "1.0", + "repo": "huggingface/transformers", + "snapshot_id": "20260430T060020Z", + "generated_at": "2026-04-30T06:08:49Z", + "evidence_quality": "full", + "llm_enrichment": true, + "meta_bugs": [ + { + "cluster_id": "cluster-43979-11", + "summary": 
"Cluster of 1 issues and 10 PRs centered on issue #43979.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 43979, + "canonical_pr_number": 44007, + "issue_numbers": [ + 43979 + ], + "pr_numbers": [ + 43996, + 44007, + 44013, + 44044, + 44066, + 44072, + 44085, + 44129, + 44154, + 44722 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 43996, + "right_pr_number": 44007, + "code_similarity": 0.179, + "size_similarity": 0.576, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.429, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44013, + "code_similarity": 0.122, + "size_similarity": 0.318, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.392, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44044, + "code_similarity": 0.245, + "size_similarity": 0.864, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.479, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44066, + "code_similarity": 0.225, + "size_similarity": 0.818, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.408, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44072, + "code_similarity": 0.14, + "size_similarity": 0.303, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.528, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44085, + "code_similarity": 0.216, + "size_similarity": 0.783, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.398, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44129, + "code_similarity": 0.163, 
+ "size_similarity": 0.643, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.229, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44154, + "code_similarity": 0.153, + "size_similarity": 0.535, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.31, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44722, + "code_similarity": 0.225, + "size_similarity": 0.848, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.368, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44013, + "code_similarity": 0.19, + "size_similarity": 0.553, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.531, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44044, + "code_similarity": 0.186, + "size_similarity": 0.667, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.354, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44066, + "code_similarity": 0.188, + "size_similarity": 0.704, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.315, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44072, + "code_similarity": 0.212, + "size_similarity": 0.526, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.708, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44085, + "code_similarity": 0.195, + "size_similarity": 0.735, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.318, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44129, + "code_similarity": 0.103, + "size_similarity": 
0.37, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.191, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44154, + "code_similarity": 0.238, + "size_similarity": 0.93, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.344, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44722, + "code_similarity": 0.178, + "size_similarity": 0.679, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.28, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44044, + "code_similarity": 0.126, + "size_similarity": 0.368, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.351, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44066, + "code_similarity": 0.127, + "size_similarity": 0.389, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.325, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44072, + "code_similarity": 0.29, + "size_similarity": 0.952, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.667, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44085, + "code_similarity": 0.131, + "size_similarity": 0.406, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.329, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44129, + "code_similarity": 0.07, + "size_similarity": 0.205, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.192, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44154, + "code_similarity": 0.177, + "size_similarity": 0.594, + "file_overlap": 
0.0, + "area_overlap": 0.0, + "patch_similarity": 0.389, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44722, + "code_similarity": 0.118, + "size_similarity": 0.375, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.287, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44066, + "code_similarity": 0.25, + "size_similarity": 0.947, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.404, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44072, + "code_similarity": 0.136, + "size_similarity": 0.351, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.442, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44085, + "code_similarity": 0.24, + "size_similarity": 0.906, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.394, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44129, + "code_similarity": 0.147, + "size_similarity": 0.555, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.243, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44154, + "code_similarity": 0.17, + "size_similarity": 0.62, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.306, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44722, + "code_similarity": 0.257, + "size_similarity": 0.982, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.402, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44072, + "code_similarity": 0.133, + "size_similarity": 0.37, + "file_overlap": 0.0, + "area_overlap": 0.0, 
+ "patch_similarity": 0.393, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44085, + "code_similarity": 0.763, + "size_similarity": 0.957, + "file_overlap": 0.5, + "area_overlap": 0.825, + "patch_similarity": 0.887, + "shared_filenames": [ + "src/transformers/models/gptj/modeling_gptj.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/gptj/modeling_gptj.py", + "left_ranges": [ + [ + 33, + 41 + ], + [ + 174, + 181 + ], + [ + 250, + 257 + ], + [ + 398, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 466 + ], + [ + 471, + 488 + ], + [ + 518, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 606 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ], + "right_ranges": [ + [ + 33, + 40 + ], + [ + 173, + 180 + ], + [ + 249, + 256 + ], + [ + 397, + 405 + ], + [ + 408, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 465 + ], + [ + 470, + 489 + ], + [ + 517, + 539 + ], + [ + 553, + 559 + ], + [ + 565, + 574 + ], + [ + 579, + 597 + ], + [ + 600, + 611 + ], + [ + 633, + 639 + ], + [ + 645, + 652 + ], + [ + 657, + 673 + ], + [ + 716, + 728 + ], + [ + 737, + 743 + ], + [ + 748, + 771 + ], + [ + 789, + 794 + ] + ] + } + ] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44129, + "code_similarity": 0.145, + "size_similarity": 0.526, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.263, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44154, + "code_similarity": 0.174, + "size_similarity": 0.654, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.286, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44722, + "code_similarity": 0.907, + "size_similarity": 0.964, + "file_overlap": 1.0, + 
"area_overlap": 0.808, + "patch_similarity": 0.874, + "shared_filenames": [ + "src/transformers/models/codegen/modeling_codegen.py", + "src/transformers/models/gptj/modeling_gptj.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/codegen/modeling_codegen.py", + "left_ranges": [ + [ + 245, + 266 + ] + ], + "right_ranges": [ + [ + 228, + 234 + ] + ] + }, + { + "filename": "src/transformers/models/gptj/modeling_gptj.py", + "left_ranges": [ + [ + 33, + 41 + ], + [ + 174, + 181 + ], + [ + 250, + 257 + ], + [ + 398, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 466 + ], + [ + 471, + 488 + ], + [ + 518, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 606 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ], + "right_ranges": [ + [ + 33, + 42 + ], + [ + 175, + 182 + ], + [ + 251, + 258 + ], + [ + 399, + 420 + ], + [ + 426, + 435 + ], + [ + 460, + 467 + ], + [ + 472, + 489 + ], + [ + 519, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 612 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 663 + ], + [ + 666, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ] + } + ] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44085, + "code_similarity": 0.137, + "size_similarity": 0.387, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.398, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44129, + "code_similarity": 0.074, + "size_similarity": 0.195, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.231, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44154, + "code_similarity": 0.175, + "size_similarity": 0.566, + "file_overlap": 0.0, + 
"area_overlap": 0.0, + "patch_similarity": 0.414, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44722, + "code_similarity": 0.124, + "size_similarity": 0.357, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.347, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44085, + "right_pr_number": 44129, + "code_similarity": 0.141, + "size_similarity": 0.503, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.272, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44085, + "right_pr_number": 44154, + "code_similarity": 0.18, + "size_similarity": 0.684, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.289, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44085, + "right_pr_number": 44722, + "code_similarity": 0.728, + "size_similarity": 0.923, + "file_overlap": 0.5, + "area_overlap": 0.791, + "patch_similarity": 0.78, + "shared_filenames": [ + "src/transformers/models/gptj/modeling_gptj.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/gptj/modeling_gptj.py", + "left_ranges": [ + [ + 33, + 40 + ], + [ + 173, + 180 + ], + [ + 249, + 256 + ], + [ + 397, + 405 + ], + [ + 408, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 465 + ], + [ + 470, + 489 + ], + [ + 517, + 539 + ], + [ + 553, + 559 + ], + [ + 565, + 574 + ], + [ + 579, + 597 + ], + [ + 600, + 611 + ], + [ + 633, + 639 + ], + [ + 645, + 652 + ], + [ + 657, + 673 + ], + [ + 716, + 728 + ], + [ + 737, + 743 + ], + [ + 748, + 771 + ], + [ + 789, + 794 + ] + ], + "right_ranges": [ + [ + 33, + 42 + ], + [ + 175, + 182 + ], + [ + 251, + 258 + ], + [ + 399, + 420 + ], + [ + 426, + 435 + ], + [ + 460, + 467 + ], + [ + 472, + 489 + ], + [ + 519, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 612 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ 
+ 658, + 663 + ], + [ + 666, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ] + } + ] + }, + { + "left_pr_number": 44129, + "right_pr_number": 44154, + "code_similarity": 0.099, + "size_similarity": 0.344, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.199, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44129, + "right_pr_number": 44722, + "code_similarity": 0.146, + "size_similarity": 0.545, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.247, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44154, + "right_pr_number": 44722, + "code_similarity": 0.164, + "size_similarity": 0.631, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.255, + "shared_filenames": [], + "shared_file_areas": [] + } + ] + }, + { + "cluster_id": "cluster-41211-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #41211.", + "status": "open", + "confidence": 0.55, + "canonical_issue_number": 41211, + "canonical_pr_number": 44339, + "issue_numbers": [ + 41211 + ], + "pr_numbers": [ + 41356, + 44339 + ], + "evidence_types": [ + "closing_reference" + ], + "pr_comparisons": [ + { + "left_pr_number": 41356, + "right_pr_number": 44339, + "code_similarity": 0.155, + "size_similarity": 0.094, + "file_overlap": 0.273, + "area_overlap": 0.078, + "patch_similarity": 0.183, + "shared_filenames": [ + "docs/source/en/model_doc/deimv2.md", + "src/transformers/models/deimv2/__init__.py", + "src/transformers/models/deimv2/configuration_deimv2.py", + "src/transformers/models/deimv2/modeling_deimv2.py", + "tests/models/deimv2/__init__.py", + "tests/models/deimv2/test_modeling_deimv2.py" + ], + "shared_file_areas": [ + { + "filename": "docs/source/en/model_doc/deimv2.md", + "left_ranges": [ + [ + 1, + 132 + ] + ], + "right_ranges": [ + [ + 1, + 65 + ] + ] + }, + { + "filename": 
"src/transformers/models/deimv2/__init__.py", + "left_ranges": [ + [ + 1, + 15 + ] + ], + "right_ranges": [ + [ + 1, + 29 + ] + ] + }, + { + "filename": "src/transformers/models/deimv2/configuration_deimv2.py", + "left_ranges": [ + [ + 1, + 74 + ] + ], + "right_ranges": [ + [ + 1, + 266 + ] + ] + }, + { + "filename": "tests/models/deimv2/test_modeling_deimv2.py", + "left_ranges": [ + [ + 1, + 15 + ] + ], + "right_ranges": [ + [ + 1, + 1734 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-43240-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #43240.", + "status": "open", + "confidence": 0.75, + "canonical_issue_number": 43240, + "canonical_pr_number": 43251, + "issue_numbers": [ + 43240 + ], + "pr_numbers": [ + 43251, + 43254 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target" + ], + "pr_comparisons": [ + { + "left_pr_number": 43251, + "right_pr_number": 43254, + "code_similarity": 0.794, + "size_similarity": 0.64, + "file_overlap": 1.0, + "area_overlap": 0.667, + "patch_similarity": 0.882, + "shared_filenames": [ + "src/transformers/loss/loss_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/loss/loss_utils.py", + "left_ranges": [ + [ + 30, + 50 + ] + ], + "right_ranges": [ + [ + 30, + 43 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-41115-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #41115.", + "status": "open", + "confidence": 0.55, + "canonical_issue_number": 41115, + "canonical_pr_number": 45613, + "issue_numbers": [ + 41115 + ], + "pr_numbers": [ + 41116, + 45613 + ], + "evidence_types": [ + "closing_reference" + ], + "pr_comparisons": [ + { + "left_pr_number": 41116, + "right_pr_number": 45613, + "code_similarity": 0.805, + "size_similarity": 0.885, + "file_overlap": 0.75, + "area_overlap": 0.856, + "patch_similarity": 0.693, + "shared_filenames": [ + "docs/source/en/_toctree.yml", + "docs/source/en/model_doc/minicpm3.md", + 
"src/transformers/models/auto/modeling_auto.py", + "src/transformers/models/minicpm3/__init__.py", + "src/transformers/models/minicpm3/configuration_minicpm3.py", + "src/transformers/models/minicpm3/modeling_minicpm3.py", + "src/transformers/models/minicpm3/modular_minicpm3.py", + "tests/models/minicpm3/__init__.py", + "tests/models/minicpm3/test_modeling_minicpm3.py" + ], + "shared_file_areas": [ + { + "filename": "docs/source/en/_toctree.yml", + "left_ranges": [ + [ + 713, + 720 + ] + ], + "right_ranges": [ + [ + 713, + 720 + ] + ] + }, + { + "filename": "docs/source/en/model_doc/minicpm3.md", + "left_ranges": [ + [ + 1, + 67 + ] + ], + "right_ranges": [ + [ + 1, + 45 + ] + ] + }, + { + "filename": "src/transformers/models/auto/modeling_auto.py", + "left_ranges": [ + [ + 285, + 291 + ], + [ + 700, + 706 + ], + [ + 1298, + 1304 + ] + ], + "right_ranges": [ + [ + 699, + 705 + ], + [ + 1300, + 1306 + ] + ] + }, + { + "filename": "src/transformers/models/minicpm3/__init__.py", + "left_ranges": [ + [ + 1, + 29 + ] + ], + "right_ranges": [ + [ + 1, + 29 + ] + ] + }, + { + "filename": "src/transformers/models/minicpm3/configuration_minicpm3.py", + "left_ranges": [ + [ + 1, + 141 + ] + ], + "right_ranges": [ + [ + 1, + 126 + ] + ] + }, + { + "filename": "src/transformers/models/minicpm3/modeling_minicpm3.py", + "left_ranges": [ + [ + 1, + 544 + ] + ], + "right_ranges": [ + [ + 1, + 522 + ] + ] + }, + { + "filename": "src/transformers/models/minicpm3/modular_minicpm3.py", + "left_ranges": [ + [ + 1, + 444 + ] + ], + "right_ranges": [ + [ + 1, + 342 + ] + ] + }, + { + "filename": "tests/models/minicpm3/test_modeling_minicpm3.py", + "left_ranges": [ + [ + 1, + 109 + ] + ], + "right_ranges": [ + [ + 1, + 136 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-43824-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #43824.", + "status": "open", + "confidence": 0.75, + "canonical_issue_number": 43824, + "canonical_pr_number": 43836, + "issue_numbers": [ + 
43824 + ], + "pr_numbers": [ + 43836, + 43842 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target" + ], + "pr_comparisons": [ + { + "left_pr_number": 43836, + "right_pr_number": 43842, + "code_similarity": 0.332, + "size_similarity": 0.088, + "file_overlap": 1.0, + "area_overlap": 0.033, + "patch_similarity": 0.017, + "shared_filenames": [ + "src/transformers/cli/serve.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/cli/serve.py", + "left_ranges": [ + [ + 11, + 18 + ], + [ + 359, + 429 + ], + [ + 584, + 590 + ], + [ + 1892, + 1910 + ], + [ + 1917, + 1923 + ] + ], + "right_ranges": [ + [ + 54, + 61 + ], + [ + 587, + 593 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-45081-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #45081.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 45081, + "canonical_pr_number": 45317, + "issue_numbers": [ + 45081 + ], + "pr_numbers": [ + 45086, + 45317 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 45086, + "right_pr_number": 45317, + "code_similarity": 0.589, + "size_similarity": 0.34, + "file_overlap": 0.5, + "area_overlap": 1.0, + "patch_similarity": 0.136, + "shared_filenames": [ + "src/transformers/tokenization_utils_tokenizers.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/tokenization_utils_tokenizers.py", + "left_ranges": [ + [ + 1360, + 1370 + ], + [ + 1374, + 1380 + ] + ], + "right_ranges": [ + [ + 1360, + 1370 + ], + [ + 1374, + 1380 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-43698-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #43698.", + "status": "open", + "confidence": 0.75, + "canonical_issue_number": 43698, + "canonical_pr_number": 43779, + "issue_numbers": [ + 43698 + ], + "pr_numbers": [ + 43779, + 43816 + ], + "evidence_types": [ + "closing_reference", + 
"shared_issue_target" + ], + "pr_comparisons": [ + { + "left_pr_number": 43779, + "right_pr_number": 43816, + "code_similarity": 0.418, + "size_similarity": 0.538, + "file_overlap": 1.0, + "area_overlap": 0.02, + "patch_similarity": 0.02, + "shared_filenames": [ + "src/transformers/integrations/integration_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/integrations/integration_utils.py", + "left_ranges": [ + [ + 2227, + 2241 + ], + [ + 2303, + 2309 + ] + ], + "right_ranges": [ + [ + 2278, + 2291 + ], + [ + 2309, + 2322 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-45561-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #45561.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 45561, + "canonical_pr_number": 45645, + "issue_numbers": [ + 45561 + ], + "pr_numbers": [ + 45639, + 45645 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 45639, + "right_pr_number": 45645, + "code_similarity": 0.532, + "size_similarity": 0.619, + "file_overlap": 0.5, + "area_overlap": 0.543, + "patch_similarity": 0.451, + "shared_filenames": [ + "src/transformers/testing_utils.py", + "tests/utils/test_testing_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/testing_utils.py", + "left_ranges": [ + [ + 3529, + 3536 + ], + [ + 3753, + 3787 + ] + ], + "right_ranges": [ + [ + 3525, + 3558 + ], + [ + 3782, + 3788 + ] + ] + }, + { + "filename": "tests/utils/test_testing_utils.py", + "left_ranges": [ + [ + 1, + 86 + ] + ], + "right_ranges": [ + [ + 1, + 114 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-44018-2", + "summary": "Cluster of 2 related pull requests linked by soft_similarity.", + "status": "open", + "confidence": 0.5, + "canonical_issue_number": null, + "canonical_pr_number": 44068, + "issue_numbers": [], + "pr_numbers": [ + 44018, + 44068 + ], + "evidence_types": [ + 
"soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 44018, + "right_pr_number": 44068, + "code_similarity": 0.766, + "size_similarity": 0.939, + "file_overlap": 1.0, + "area_overlap": 0.425, + "patch_similarity": 0.866, + "shared_filenames": [ + "src/transformers/models/gpt_neo/modeling_gpt_neo.py", + "tests/models/gpt_neo/test_modeling_gpt_neo.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/gpt_neo/modeling_gpt_neo.py", + "left_ranges": [ + [ + 26, + 46 + ], + [ + 138, + 143 + ], + [ + 182, + 187 + ], + [ + 283, + 295 + ], + [ + 327, + 341 + ], + [ + 348, + 354 + ], + [ + 360, + 369 + ], + [ + 399, + 406 + ], + [ + 411, + 419 + ], + [ + 428, + 433 + ], + [ + 472, + 492 + ], + [ + 507, + 513 + ], + [ + 519, + 528 + ], + [ + 541, + 559 + ], + [ + 562, + 567 + ], + [ + 595, + 601 + ], + [ + 607, + 614 + ], + [ + 627, + 643 + ], + [ + 685, + 690 + ], + [ + 708, + 714 + ], + [ + 720, + 727 + ], + [ + 740, + 757 + ], + [ + 761, + 766 + ], + [ + 780, + 786 + ], + [ + 791, + 798 + ], + [ + 807, + 822 + ], + [ + 840, + 845 + ] + ], + "right_ranges": [ + [ + 26, + 31 + ], + [ + 34, + 44 + ], + [ + 136, + 141 + ], + [ + 180, + 185 + ], + [ + 281, + 293 + ], + [ + 325, + 339 + ], + [ + 346, + 352 + ], + [ + 358, + 367 + ], + [ + 397, + 404 + ], + [ + 409, + 417 + ], + [ + 426, + 434 + ], + [ + 463, + 483 + ], + [ + 498, + 504 + ], + [ + 510, + 519 + ], + [ + 532, + 550 + ], + [ + 553, + 564 + ], + [ + 586, + 592 + ], + [ + 598, + 605 + ], + [ + 618, + 634 + ], + [ + 676, + 688 + ], + [ + 699, + 705 + ], + [ + 711, + 718 + ], + [ + 731, + 748 + ], + [ + 752, + 762 + ], + [ + 771, + 777 + ], + [ + 782, + 789 + ], + [ + 798, + 813 + ], + [ + 831, + 836 + ] + ] + }, + { + "filename": "tests/models/gpt_neo/test_modeling_gpt_neo.py", + "left_ranges": [ + [ + 458, + 464 + ] + ], + "right_ranges": [ + [ + 458, + 464 + ] + ] + } + ] + } + ] + } + ], + "duplicate_issues": [], + "duplicate_prs": [ + { + "cluster_id": "cluster-41115-3", + 
"canonical_pr_number": 45613, + "duplicate_pr_numbers": [ + 41116 + ], + "target_issue_number": 41115, + "reason": "PRs in cluster-41115-3 are treated as duplicates because they converge on issue #41115 with closing_reference evidence." + }, + { + "cluster_id": "cluster-41211-3", + "canonical_pr_number": 44339, + "duplicate_pr_numbers": [ + 41356 + ], + "target_issue_number": 41211, + "reason": "PRs in cluster-41211-3 are treated as duplicates because they converge on issue #41211 with closing_reference evidence." + }, + { + "cluster_id": "cluster-43240-3", + "canonical_pr_number": 43251, + "duplicate_pr_numbers": [ + 43254 + ], + "target_issue_number": 43240, + "reason": "PRs in cluster-43240-3 are treated as duplicates because they converge on issue #43240 with closing_reference, shared_issue_target evidence." + }, + { + "cluster_id": "cluster-43698-3", + "canonical_pr_number": 43779, + "duplicate_pr_numbers": [ + 43816 + ], + "target_issue_number": 43698, + "reason": "PRs in cluster-43698-3 are treated as duplicates because they converge on issue #43698 with closing_reference, shared_issue_target evidence." + }, + { + "cluster_id": "cluster-43824-3", + "canonical_pr_number": 43836, + "duplicate_pr_numbers": [ + 43842 + ], + "target_issue_number": 43824, + "reason": "PRs in cluster-43824-3 are treated as duplicates because they converge on issue #43824 with closing_reference, shared_issue_target evidence." + }, + { + "cluster_id": "cluster-43979-11", + "canonical_pr_number": 44007, + "duplicate_pr_numbers": [ + 43996, + 44013, + 44044, + 44066, + 44072, + 44085, + 44129, + 44154, + 44722 + ], + "target_issue_number": 43979, + "reason": "PRs in cluster-43979-11 are treated as duplicates because they converge on issue #43979 with closing_reference, shared_issue_target, soft_similarity evidence." 
+ }, + { + "cluster_id": "cluster-44018-2", + "canonical_pr_number": 44068, + "duplicate_pr_numbers": [ + 44018 + ], + "target_issue_number": null, + "reason": "PRs in cluster-44018-2 are treated as duplicates because they share soft_similarity evidence." + }, + { + "cluster_id": "cluster-45081-3", + "canonical_pr_number": 45317, + "duplicate_pr_numbers": [ + 45086 + ], + "target_issue_number": 45081, + "reason": "PRs in cluster-45081-3 are treated as duplicates because they converge on issue #45081 with closing_reference, shared_issue_target, soft_similarity evidence." + }, + { + "cluster_id": "cluster-45561-3", + "canonical_pr_number": 45645, + "duplicate_pr_numbers": [ + 45639 + ], + "target_issue_number": 45561, + "reason": "PRs in cluster-45561-3 are treated as duplicates because they converge on issue #45561 with closing_reference, shared_issue_target, soft_similarity evidence." + } + ], + "best_issue": { + "cluster_id": "cluster-41211-3", + "issue_number": 41211, + "reason": "Issue #41211 is the strongest global issue candidate because it is open, belongs to a cluster with 3 artifacts, and carries 6 discussion comments plus 6 inbound references.", + "score": 148.0 + }, + "best_pr": { + "cluster_id": "cluster-43979-11", + "pr_number": 44007, + "reason": "PR #44007 is the strongest global PR candidate because it is open, belongs to a cluster with 11 artifacts, links to 1 issues, and carries 2 review/discussion events.", + "score": 238.886 + } +} diff --git a/snapshots/20260430T060020Z/analysis-runs/hybrid-model-20260430t060020z/analysis-report-hybrid.llm-reviews.json b/snapshots/20260430T060020Z/analysis-runs/hybrid-model-20260430t060020z/analysis-report-hybrid.llm-reviews.json new file mode 100644 index 0000000000000000000000000000000000000000..320e1728d2387f1f94cf27bc794344970e112fb7 --- /dev/null +++ b/snapshots/20260430T060020Z/analysis-runs/hybrid-model-20260430t060020z/analysis-report-hybrid.llm-reviews.json @@ -0,0 +1,18290 @@ +{ + "schema_version": 
"1.0", + "repo": "huggingface/transformers", + "snapshot_id": "20260430T060020Z", + "generated_at": "2026-04-30T06:08:49Z", + "model": "gpt-5.4-mini?service_tier=flex", + "reviews": [ + { + "label": "issue", + "component_index": 1, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-13244-3", + "nodes": [ + "issue:13244", + "issue:15980", + "issue:16618" + ], + "soft_pairs": [ + "issue:13244|issue:16618", + "issue:15980|issue:16618" + ], + "prepared_review_unit_hash": "abf237f969daad485a12d4b0d204c1711bd7299e7fb440ef3dfe7c90d77acb11", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 2477, + "estimated_input_tokens": 620, + "estimated_eval_tokens": 1496 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 2477, + "estimated_input_tokens": 620, + "estimated_eval_tokens": 1496 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These three issues are not duplicates of one another: one is about Tapas tokenization mismatching TensorFlow, one is about a misleading auth/error message when downloading private models, and one is about failing to load bert-base-uncased. 
The only loosely related item is the generic model-loading report, but it still describes a different failure mode.", + "confidence": 0.93, + "canonical_issue_reason": "Issue #16618 is the broadest model-loading report, so it is the closest thing to a representative issue in this set, but it does not share the same underlying bug as the other two.", + "canonical_pr_reason": null, + "best_issue_reason": "#16618 is the best representative issue because it is the most general and user-facing model loading failure among the three, whereas #13244 and #15980 are clearly about different code paths.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:13244", + "right": "issue:16618", + "accept": false, + "reason": "Tapas tokenization mismatch is a different problem from failing to load bert-base-uncased; different model/component and different underlying bug." + }, + { + "left": "issue:15980", + "right": "issue:16618", + "accept": false, + "reason": "A bad error message for unauthorized private-model downloads is an auth/download UX issue, not the same bug as a model load failure for bert-base-uncased." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:24643", + "issue:30277", + "issue:34634", + "issue:34928", + "issue:35545", + "issue:36010", + "issue:36331", + "issue:39290", + "issue:41093", + "issue:41720", + "issue:41762", + "issue:42915", + "issue:43039", + "issue:43404", + "issue:43425", + "issue:43531", + "issue:43541", + "issue:43596", + "issue:43638", + "issue:43716", + "issue:43792", + "issue:43827", + "issue:43828", + "issue:43854", + "issue:43866", + "issue:43901", + "issue:43975", + "issue:44292", + "issue:44322", + "issue:44457", + "issue:44509", + "issue:44512", + "issue:44560", + "issue:44661", + "issue:44805", + "issue:44841", + "issue:44863", + "issue:44918", + "issue:45070", + "issue:45081", + "issue:45084", + "issue:45161", + "issue:45237", + "issue:45362", + "issue:45464", + "issue:45507", + "issue:45685" + ], + "soft_pairs": [ + "issue:44322|issue:45464", + "issue:45237|issue:45685", + "issue:43716|issue:45237", + "issue:43828|issue:45237", + "issue:45237|issue:45507", + "issue:44292|issue:45237", + "issue:30277|issue:35545", + "issue:35545|issue:42915", + "issue:35545|issue:41720", + "issue:44509|issue:44512", + "issue:35545|issue:36010", + "issue:43638|issue:44805", + "issue:43596|issue:44805", + "issue:44805|issue:45161", + "issue:44805|issue:44918", + "issue:44661|issue:44805", + "issue:34634|issue:35545", + "issue:30277|issue:43638", + "issue:43596|issue:43638", + "issue:43975|issue:44457", + "issue:43039|issue:45161", + "issue:44292|issue:44457", + "issue:34928|issue:44805", + 
"issue:24643|issue:30277", + "issue:43638|issue:45161", + "issue:34928|issue:36331", + "issue:43596|issue:45161", + "issue:43716|issue:45507", + "issue:39290|issue:45081", + "issue:43828|issue:45161", + "issue:43425|issue:44292", + "issue:44918|issue:45161", + "issue:34928|issue:41720", + "issue:43716|issue:45685", + "issue:43541|issue:43716", + "issue:45507|issue:45685", + "issue:43828|issue:44292", + "issue:43716|issue:44292", + "issue:44292|issue:44918", + "issue:43039|issue:44918", + "issue:41762|issue:43638", + "issue:43827|issue:44512", + "issue:43716|issue:44560", + "issue:43828|issue:45685", + "issue:44918|issue:45685", + "issue:43039|issue:43828", + "issue:30277|issue:36331", + "issue:43039|issue:44805", + "issue:34928|issue:36010", + "issue:41762|issue:43596", + "issue:43901|issue:44512", + "issue:39290|issue:41720", + "issue:43866|issue:44863", + "issue:43039|issue:44560", + "issue:43854|issue:43866", + "issue:41093|issue:43638", + "issue:39290|issue:43531", + "issue:43425|issue:45070", + "issue:45362|issue:45464", + "issue:43404|issue:43866", + "issue:43792|issue:45070", + "issue:45464|issue:45685", + "issue:43541|issue:45685", + "issue:43541|issue:43828", + "issue:36010|issue:39290", + "issue:44841|issue:45084", + "issue:41093|issue:41762" + ], + "prepared_review_unit_hash": "33747c85a35b0f0d69b854d1c03460296fbb9dd31fbe61633ce71156d5e2309b", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 67, + "serialized_chars": 47117, + "estimated_input_tokens": 11780, + "estimated_eval_tokens": 23816 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is heterogeneous, with one clearly duplicate issue pair and many soft-similarity 
matches that are only broad theme overlaps. The best representative issue is a generic index/shape-error report, but this is not a clean single-topic duplicate cluster.", + "confidence": 0.64, + "canonical_issue_reason": "issue:44805 is the broadest representative of the repeated index/shape-error family in this cluster, with the most nearby duplicate-like reports; the rest are mostly model-specific variants or unrelated bugs.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44805 is the best single issue to keep as the cluster anchor because it captures the most reusable error pattern while still being specific enough to point at the likely code path.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44322", + "right": "issue:45464", + "accept": false, + "reason": "Different failures: missing config attribute vs streaming chat/completions runtime issue; not the same bug." + }, + { + "left": "issue:45237", + "right": "issue:45685", + "accept": false, + "reason": "Different backends and error types (AMD GPU runtime vs MPS histogram op); same broad hardware theme only." + }, + { + "left": "issue:43716", + "right": "issue:45237", + "accept": false, + "reason": "Different model paths and symptoms: dtype mismatch in image preprocessing vs AMD GPU execution failure." + }, + { + "left": "issue:43828", + "right": "issue:45237", + "accept": false, + "reason": "Both are model/backend errors, but one is autocast dtype mismatch and the other is AMD GPU incompatibility; not one bug." + }, + { + "left": "issue:45237", + "right": "issue:45507", + "accept": false, + "reason": "Different model families and failures; no shared concrete code-path fix." + }, + { + "left": "issue:44292", + "right": "issue:45237", + "accept": false, + "reason": "Distinct model/runtime problems; too broad to treat as duplicates." 
+ }, + { + "left": "issue:30277", + "right": "issue:35545", + "accept": false, + "reason": "DeepSpeed collective mismatch vs ONNX export error are unrelated." + }, + { + "left": "issue:35545", + "right": "issue:42915", + "accept": false, + "reason": "Both are model-specific errors, but onnx export and FP8 runtime are different code paths." + }, + { + "left": "issue:35545", + "right": "issue:41720", + "accept": false, + "reason": "Different failures: ONNX export vs auto device-mapping cuda assert." + }, + { + "left": "issue:44509", + "right": "issue:44512", + "accept": false, + "reason": "Both are v5 docs cleanups, but they target different removed references and would not be one concrete fix." + }, + { + "left": "issue:35545", + "right": "issue:36010", + "accept": false, + "reason": "ONNX export error and GenerationMixin import error are unrelated." + }, + { + "left": "issue:43638", + "right": "issue:44805", + "accept": false, + "reason": "Same general zero3/index-error family, but these reports are about different triggers and model setups; not enough to merge confidently." + }, + { + "left": "issue:43596", + "right": "issue:44805", + "accept": false, + "reason": "Same broad error shape, but BertModel ZeRO-3 loading and mask-shape mismatch are not the same concrete bug." + }, + { + "left": "issue:44805", + "right": "issue:45161", + "accept": false, + "reason": "Both mention tensor-shape style issues, but one is a mask mismatch and the other is GPT-OSS tensor-parallel behavior." + }, + { + "left": "issue:44805", + "right": "issue:44918", + "accept": false, + "reason": "Different failures: mask shape mismatch vs Qwen3.5 embedding unpacking with TRL." + }, + { + "left": "issue:44661", + "right": "issue:44805", + "accept": false, + "reason": "Tokenizer mapping add-model bug and mask/index shape error are unrelated." 
+ }, + { + "left": "issue:34634", + "right": "issue:35545", + "accept": false, + "reason": "BarkProcessor voice_preset issue is unrelated to ModernBERT ONNX export." + }, + { + "left": "issue:30277", + "right": "issue:43638", + "accept": false, + "reason": "Different DeepSpeed-related bugs; one is collective mismatch, the other is ZeRO-3 index-out-of-bounds on Bert." + }, + { + "left": "issue:43596", + "right": "issue:43638", + "accept": true, + "reason": "These describe the same ZeRO-3 index-out-of-bounds loading bug, with only the Bert variant differing." + }, + { + "left": "issue:43975", + "right": "issue:44457", + "accept": false, + "reason": "Different problems: detokenization bug vs LoRA merge/save/load result mismatch." + }, + { + "left": "issue:43039", + "right": "issue:45161", + "accept": false, + "reason": "Liger Kernel cross-entropy call and GPT-OSS tensor-parallel issue are different code paths." + }, + { + "left": "issue:44292", + "right": "issue:44457", + "accept": false, + "reason": "NVFP4 runtime failure and LoRA merge/save mismatch are unrelated." + }, + { + "left": "issue:34928", + "right": "issue:44805", + "accept": false, + "reason": "Activation checkpointing/FSDP recomputation mismatch is a different bug from the mask/index shape errors." + }, + { + "left": "issue:24643", + "right": "issue:30277", + "accept": false, + "reason": "Both involve DeepSpeed, but the concrete failures differ: 2-D weight runtime error vs collective mismatch." + }, + { + "left": "issue:43638", + "right": "issue:45161", + "accept": false, + "reason": "ZeRO-3 Bert loading and GPT-OSS TP behavior are different issues." + }, + { + "left": "issue:34928", + "right": "issue:36331", + "accept": false, + "reason": "FSDP activation-checkpointing mismatch and CustomTrainer argument change are unrelated." + }, + { + "left": "issue:43596", + "right": "issue:45161", + "accept": false, + "reason": "Different models and error classes; no common bug." 
+ }, + { + "left": "issue:43716", + "right": "issue:45507", + "accept": false, + "reason": "Different model-specific failures: dtype mismatch vs invalid method call in GraniteMoEHybrid." + }, + { + "left": "issue:39290", + "right": "issue:45081", + "accept": false, + "reason": "Different model/API failures: Gemma3 config attribute vs tokenizer regex patch crash." + }, + { + "left": "issue:43828", + "right": "issue:45161", + "accept": false, + "reason": "Autocast dtype mismatch and GPT-OSS MoE tensor-parallel failure are not the same bug." + }, + { + "left": "issue:44918", + "right": "issue:45161", + "accept": false, + "reason": "TRL embedding unpacking and GPT-OSS tensor-parallel failure are unrelated." + }, + { + "left": "issue:34928", + "right": "issue:41720", + "accept": false, + "reason": "Activation checkpointing/FSDP mismatch and CUDA assert on device mapping are different problems." + }, + { + "left": "issue:43716", + "right": "issue:45685", + "accept": false, + "reason": "Different backends and symptoms: image-preprocessor dtype mismatch vs MPS histogram not implemented." + }, + { + "left": "issue:43541", + "right": "issue:43716", + "accept": false, + "reason": "Torch dynamo grouped_mm tracing error and image-preprocessor dtype mismatch are unrelated." + }, + { + "left": "issue:45507", + "right": "issue:45685", + "accept": false, + "reason": "Invalid-method bug in GraniteMoEHybrid and MPS Int histogram failure are different code paths." + }, + { + "left": "issue:43828", + "right": "issue:44292", + "accept": false, + "reason": "Autocast dtype mismatch and NVFP4 model runtime failure do not share a concrete fix." + }, + { + "left": "issue:43716", + "right": "issue:44292", + "accept": false, + "reason": "Different model/backend failures; only a broad runtime-error similarity." + }, + { + "left": "issue:44292", + "right": "issue:44918", + "accept": false, + "reason": "Different model-specific runtime errors with no shared bug." 
+ }, + { + "left": "issue:43039", + "right": "issue:44918", + "accept": false, + "reason": "Liger Kernel cross-entropy call and TRL embedding unpacking are different issues." + }, + { + "left": "issue:41762", + "right": "issue:43638", + "accept": false, + "reason": "Both are ZeRO-3 loading errors, but different models and not enough evidence for the same root bug." + }, + { + "left": "issue:43827", + "right": "issue:44512", + "accept": false, + "reason": "Both are docs-related, but one is pipeline-task removal and the other is the removed run command; not one change." + }, + { + "left": "issue:43716", + "right": "issue:44560", + "accept": false, + "reason": "Different model-specific bugs: Mistral-3 dtype mismatch vs Qwen3-vl video StopIteration." + }, + { + "left": "issue:43828", + "right": "issue:45685", + "accept": false, + "reason": "Different runtime backends and symptoms; not the same bug." + }, + { + "left": "issue:44918", + "right": "issue:45685", + "accept": false, + "reason": "TRL unpacking bug and MPS Int histogram limitation are unrelated." + }, + { + "left": "issue:43039", + "right": "issue:43828", + "accept": false, + "reason": "Both touch training/runtime behavior, but cross-entropy kernel routing and autocast mismatch are different defects." + }, + { + "left": "issue:30277", + "right": "issue:36331", + "accept": false, + "reason": "DeepSpeed collective mismatch and trainer signature mismatch are unrelated." + }, + { + "left": "issue:43039", + "right": "issue:44805", + "accept": false, + "reason": "Liger Kernel cross-entropy issue does not match the index/mask shape error family." + }, + { + "left": "issue:34928", + "right": "issue:36010", + "accept": false, + "reason": "Activation checkpointing mismatch and GenerationMixin import failure are unrelated." + }, + { + "left": "issue:41762", + "right": "issue:43596", + "accept": false, + "reason": "Same broad ZeRO-3 theme, but different models and likely different underlying causes." 
+ }, + { + "left": "issue:43901", + "right": "issue:44512", + "accept": false, + "reason": "Docs mentioning return_all_scores and docs mentioning run command removal are separate documentation issues." + }, + { + "left": "issue:39290", + "right": "issue:41720", + "accept": false, + "reason": "Gemma3 sliding-window attribute error and Qwen3 auto device mapping CUDA assert are unrelated." + }, + { + "left": "issue:43866", + "right": "issue:44863", + "accept": false, + "reason": "Corrupted checkpoint vs NemotronH load implementation bug are different problems." + }, + { + "left": "issue:43039", + "right": "issue:44560", + "accept": false, + "reason": "Liger Kernel cross-entropy routing and Qwen3-vl video StopIteration are unrelated." + }, + { + "left": "issue:43854", + "right": "issue:43866", + "accept": false, + "reason": "Model-loading test failure and corrupted checkpoint are different issues." + }, + { + "left": "issue:41093", + "right": "issue:43638", + "accept": false, + "reason": "Both are index-shape style errors, but the masked-tensor mismatch and ZeRO-3 Bert loading failure are not the same bug." + }, + { + "left": "issue:39290", + "right": "issue:43531", + "accept": false, + "reason": "Different Qwen/Gemma sliding-window and configuration issues." + }, + { + "left": "issue:43425", + "right": "issue:45070", + "accept": false, + "reason": "Torch version incompatibility and pydantic PretrainedConfig breakage are unrelated." + }, + { + "left": "issue:45362", + "right": "issue:45464", + "accept": false, + "reason": "Qwen3.5 chat crash and streaming chat/completions failure are not the same concrete bug." + }, + { + "left": "issue:43404", + "right": "issue:43866", + "accept": false, + "reason": "Mistral3 weight-tying bug and corrupted checkpoint are unrelated." + }, + { + "left": "issue:43792", + "right": "issue:45070", + "accept": false, + "reason": "Whisper model load failure and pydantic config breakage are unrelated." 
+ }, + { + "left": "issue:45464", + "right": "issue:45685", + "accept": false, + "reason": "Streaming inference failure and MPS Int histogram backend limitation are different issues." + }, + { + "left": "issue:43541", + "right": "issue:45685", + "accept": false, + "reason": "Torch dynamo tracing failure and MPS histogram backend limitation are unrelated." + }, + { + "left": "issue:43541", + "right": "issue:43828", + "accept": false, + "reason": "Grouped_mm tracing error and autocast dtype mismatch are different bugs." + }, + { + "left": "issue:36010", + "right": "issue:39290", + "accept": false, + "reason": "GenerationMixin import failure and Gemma3 sliding-window attribute error are unrelated." + }, + { + "left": "issue:44841", + "right": "issue:45084", + "accept": false, + "reason": "Processor failure for Voxtral and template-node compile error are different problems." + }, + { + "left": "issue:41093", + "right": "issue:41762", + "accept": false, + "reason": "Both are tensor shape/index errors, but they involve different model-loading paths and are not clearly the same bug." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The evaluation is grounded in the packet and stays conservative. The cluster is indeed heterogeneous, the single accepted near-duplicate pair (issue:43596 / issue:43638) is well-supported by the shared ZeRO-3 index-out-of-bounds title pattern, and the other soft-edge rejections do not overstate similarity. The choice of issue:44805 as a broad index/shape-error anchor is plausible and not exaggerated." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 2, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:39186", + "issue:39290", + "issue:39692", + "issue:40990", + "issue:41093", + "issue:41553", + "issue:42915", + "issue:43054", + "issue:43296", + "issue:43366", + "issue:43404", + "issue:43531", + "issue:43541", + "issue:43572", + "issue:43596", + "issue:43644", + "issue:43645", + "issue:43716", + "issue:43742", + "issue:43828", + "issue:43866", + "issue:43873", + "issue:43883", + "issue:43950", + "issue:43975", + "issue:43994", + "issue:44291", + "issue:44292", + "issue:44360", + "issue:44387", + "issue:44403", + "issue:44492", + "issue:44512", + "issue:44530", + "issue:44534", + "issue:44589", + "issue:44863", + "issue:44918", + "issue:44928", + "issue:44960", + "issue:45005", + "issue:45070", + "issue:45161", + "issue:45399", + "issue:45464", + "issue:45507", + "issue:45632", + "issue:45698" + ], + "soft_pairs": [ + "issue:43296|issue:43366", + "issue:43572|issue:45070", + "issue:44387|issue:45005", + "issue:43541|issue:45161", + "issue:43828|issue:45464", + "issue:44291|issue:45070", + "issue:43366|issue:45464", + "issue:43531|issue:45070", + "issue:39290|issue:43742", + "issue:43054|issue:43994", + "issue:41093|issue:43596", + "issue:43541|issue:44589", + "issue:45632|issue:45698", + "issue:40990|issue:44960", + "issue:43716|issue:44918", + "issue:43404|issue:45005", + "issue:43645|issue:45399", + "issue:44530|issue:45005", + "issue:44863|issue:45005", + "issue:44360|issue:44512", + "issue:42915|issue:45399", + "issue:43873|issue:45005", + "issue:39186|issue:44918", + "issue:44492|issue:44512", + 
"issue:44918|issue:45507", + "issue:43975|issue:44292", + "issue:43950|issue:44534", + "issue:43644|issue:43950", + "issue:44292|issue:45507", + "issue:39692|issue:43054", + "issue:41553|issue:43883", + "issue:43828|issue:44928", + "issue:43866|issue:44403" + ], + "prepared_review_unit_hash": "dbac255fda6533e7aa51ea4b9b8f9a2c901579a27a6620d9d550667541270fcc", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 33, + "serialized_chars": 37180, + "estimated_input_tokens": 9295, + "estimated_eval_tokens": 18846 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous and does not form one duplicate set. The only clear duplicate mini-cluster is the Transformers v5 non-persistent-buffer corruption regression.", + "confidence": 0.92, + "canonical_issue_reason": "issue:44534 is the broadest and clearest statement of the non-persistent-buffer regression, and it matches the other buffer-corruption reports.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44534 is the best anchor for triage because it most directly names the underlying v5 buffer-corruption bug and generalizes the symptom.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43296", + "right": "issue:43366", + "accept": false, + "reason": "Different problems: PaddleOCR-VL load failure vs gpt-oss GGUF support request." + }, + { + "left": "issue:43572", + "right": "issue:45070", + "accept": false, + "reason": "Unrelated regressions: missing StableLmConfig field vs pydantic PretrainedConfig breakage." 
+ }, + { + "left": "issue:44387", + "right": "issue:45005", + "accept": false, + "reason": "OOM/memory-growth bug vs tied-weights regression; not the same code path." + }, + { + "left": "issue:43541", + "right": "issue:45161", + "accept": false, + "reason": "Grouped_mm tracing crash vs GPT-OSS tensor-parallel failure; distinct causes." + }, + { + "left": "issue:43828", + "right": "issue:45464", + "accept": false, + "reason": "Autocast dtype mismatch in Phi MoE vs streaming chat/completions failure in Qwen3.5." + }, + { + "left": "issue:44291", + "right": "issue:45070", + "accept": false, + "reason": "`init_empty_weights` argument error vs pydantic model-field regression; unrelated." + }, + { + "left": "issue:43366", + "right": "issue:45464", + "accept": false, + "reason": "General support request vs a specific streaming inference bug." + }, + { + "left": "issue:43531", + "right": "issue:45070", + "accept": false, + "reason": "Qwen3-MoE sliding-window bug vs pydantic field breakage; no shared underlying defect." + }, + { + "left": "issue:39290", + "right": "issue:43742", + "accept": false, + "reason": "Gemma3 sliding_window_pattern attribute issue vs MobileLLM load key error; different models and failures." + }, + { + "left": "issue:43054", + "right": "issue:43994", + "accept": false, + "reason": "Both mention SigLIP2, but one is embedding-quality degradation and the other is bad pipeline/AutoModel outputs; too vague to merge." + }, + { + "left": "issue:41093", + "right": "issue:43596", + "accept": false, + "reason": "Mask length mismatch vs zero3/BertModel index error; different runtime failures." + }, + { + "left": "issue:43541", + "right": "issue:44589", + "accept": false, + "reason": "Torch-dynamo grouped_mm crash vs missing Float8 storage object; unrelated." 
+ }, + { + "left": "issue:45632", + "right": "issue:45698", + "accept": false, + "reason": "Cache-path collision for local trust_remote_code models vs loading the wrong custom module after save_pretrained; similar area but not the same bug." + }, + { + "left": "issue:40990", + "right": "issue:44960", + "accept": false, + "reason": "High perplexity on gpt-oss-20b vs GLM5 model title; no concrete overlap." + }, + { + "left": "issue:43716", + "right": "issue:44918", + "accept": false, + "reason": "Image preprocessor dtype mismatch vs Qwen3.5 embedding unpacking in TRL SFT; different failure modes." + }, + { + "left": "issue:43404", + "right": "issue:45005", + "accept": false, + "reason": "Specific Mistral3 tie-weights bug vs broader translation-model tied-weights regression; too broad to treat as one issue." + }, + { + "left": "issue:43645", + "right": "issue:45399", + "accept": false, + "reason": "Notebook custom-model initialization breakage vs flash-attn fallback selection problem; unrelated." + }, + { + "left": "issue:44530", + "right": "issue:45005", + "accept": false, + "reason": "PagedAttentionCache linear_attention crash vs tied-weights regression; different subsystems." + }, + { + "left": "issue:44863", + "right": "issue:45005", + "accept": false, + "reason": "NemotronH checkpoint loading failure vs tied-weights issue; not the same bug." + }, + { + "left": "issue:44360", + "right": "issue:44512", + "accept": false, + "reason": "DSA indexer ReLU discussion vs docs command removal typo; unrelated." + }, + { + "left": "issue:42915", + "right": "issue:45399", + "accept": false, + "reason": "Qwen3Moe FineGrainedFP8Config failure vs flash-attn fallback blocking; distinct problems." + }, + { + "left": "issue:43873", + "right": "issue:45005", + "accept": false, + "reason": "Quantization/offloading behavior vs tied-weights regression; no shared concrete code path." 
+ }, + { + "left": "issue:39186", + "right": "issue:44918", + "accept": false, + "reason": "FSDP 2-D weight runtime error vs Qwen3.5 SFT embedding unpacking; unrelated." + }, + { + "left": "issue:44492", + "right": "issue:44512", + "accept": false, + "reason": "Cache-strategy typo vs stale docs command mention; not duplicates." + }, + { + "left": "issue:44918", + "right": "issue:45507", + "accept": false, + "reason": "TRL SFT embedding issue vs GraniteMoE invalid-method call; unrelated." + }, + { + "left": "issue:43975", + "right": "issue:44292", + "accept": false, + "reason": "Detokenization bug vs Qwen NVFP4 runtime error; different symptoms and likely causes." + }, + { + "left": "issue:43950", + "right": "issue:44534", + "accept": true, + "reason": "Same regression: non-persistent buffers are corrupted during `from_pretrained`; 44534 is the broader formulation." + }, + { + "left": "issue:43644", + "right": "issue:43950", + "accept": true, + "reason": "Both report the same v5 non-persistent-buffer corruption/junk-filled buffer bug." + }, + { + "left": "issue:44292", + "right": "issue:45507", + "accept": false, + "reason": "Qwen NVFP4 execution failure vs GraniteMoE invalid method; unrelated." + }, + { + "left": "issue:39692", + "right": "issue:43054", + "accept": false, + "reason": "SigLIP2 docs example/quantization problems vs poor SigLIP2 text embeddings; not the same concrete bug." + }, + { + "left": "issue:41553", + "right": "issue:43883", + "accept": false, + "reason": "Bad AutoTokenizer error for Voxtral vs missing `all_tied_weights_keys` on Molmo; unrelated." + }, + { + "left": "issue:43828", + "right": "issue:44928", + "accept": false, + "reason": "Autocast dtype mismatch in Phi MoE vs Qwen3.5 RLHF NaN from 3D position_ids; different root causes." + }, + { + "left": "issue:43866", + "right": "issue:44403", + "accept": false, + "reason": "Corrupted checkpoint report vs loading-noise complaint; not the same bug." 
+ } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The assessment is grounded in the packet and stays conservative on the soft edges. The only clear duplicate mini-cluster is indeed the v5 non-persistent-buffer corruption set around 43644/43950/44534, and the rejected pairings are mostly well-justified as distinct bugs or support requests. The cluster summary does not overclaim broader duplication." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 3, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:36331", + "issue:38175", + "issue:41553", + "issue:41762", + "issue:42491", + "issue:42915", + "issue:42947", + "issue:43054", + "issue:43257", + "issue:43404", + "issue:43493", + "issue:43643", + "issue:43742", + "issue:43782", + "issue:43792", + "issue:43824", + "issue:43827", + "issue:43854", + "issue:43856", + "issue:43866", + "issue:43872", + "issue:43881", + "issue:43883", + "issue:43940", + "issue:43950", + "issue:43975", + "issue:44220", + "issue:44291", + "issue:44368", + "issue:44387", + "issue:44403", + "issue:44451", + "issue:44488", + "issue:44509", + "issue:44589", + "issue:44661", + "issue:44863", + "issue:44960", + "issue:44991", + "issue:45020", + "issue:45081", + "issue:45237", + "issue:45313", + "issue:45356", + "issue:45399", + "issue:45520", + "issue:45701" + ], + "soft_pairs": [ + "issue:36331|issue:41762", + "issue:41553|issue:43881", + "issue:43872|issue:44589", + "issue:43054|issue:45701", + "issue:38175|issue:43054", + "issue:42947|issue:44387", + "issue:43054|issue:43493", + "issue:43881|issue:43883", + "issue:44403|issue:44451", + "issue:43975|issue:45520", + 
"issue:43643|issue:45020", + "issue:43975|issue:45237", + "issue:44403|issue:44488", + "issue:42947|issue:43856", + "issue:43782|issue:44960", + "issue:42491|issue:44368", + "issue:41762|issue:42915", + "issue:43975|issue:44451", + "issue:45020|issue:45356", + "issue:44661|issue:45020", + "issue:43824|issue:43872", + "issue:44991|issue:45399", + "issue:43827|issue:44509", + "issue:45081|issue:45399", + "issue:43792|issue:44220", + "issue:43404|issue:43950", + "issue:43257|issue:43866", + "issue:41553|issue:44991", + "issue:43742|issue:45399", + "issue:42915|issue:45020", + "issue:43742|issue:45020", + "issue:43940|issue:45313", + "issue:43872|issue:44291", + "issue:41553|issue:44488", + "issue:43854|issue:44863", + "issue:42915|issue:43872" + ], + "prepared_review_unit_hash": "a29ee64e71a379dbd30c8d96af11219ce02cd1a36dd1e86edb4c585a80ad79ed", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 36, + "serialized_chars": 37865, + "estimated_input_tokens": 9467, + "estimated_eval_tokens": 19190 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a grab-bag of distinct Transformers regressions, not one tight duplicate set. 
Only a few pairs look like true duplicates: the docs/pipeline pair, the SigLIP2 discrepancy pair, and one Qwen3 ZeRO-3/loading pair.", + "confidence": 0.74, + "canonical_issue_reason": "Issue 45020 is the best umbrella-style issue here: it is open, broad, and captures the general class of recent-version model-loading regressions better than the more narrowly scoped reports.", + "canonical_pr_reason": null, + "best_issue_reason": "45020 is the most useful triage hub among these because it is broad enough to subsume several remote-code/loading breakages, while the other issues are mostly model-specific edge cases.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:36331", + "right": "issue:41762", + "accept": false, + "reason": "Different failure surfaces: trainer custom loss signature vs Gemma3 ZeRO-3 loading IndexError." + }, + { + "left": "issue:41553", + "right": "issue:43881", + "accept": false, + "reason": "Both are loading failures, but for different models and different bug classes." + }, + { + "left": "issue:43872", + "right": "issue:44589", + "accept": false, + "reason": "Both are compatibility/type errors, but the underlying causes differ (_is_hf_initialized vs Float8 storage lookup)." + }, + { + "left": "issue:43054", + "right": "issue:45701", + "accept": false, + "reason": "SigLIP2 embedding quality regression vs tokenization version-change report; not the same bug." + }, + { + "left": "issue:38175", + "right": "issue:43054", + "accept": false, + "reason": "Same model family, but one is zero probabilities and the other is worse text embeddings; too little to treat as the same defect." + }, + { + "left": "issue:42947", + "right": "issue:44387", + "accept": false, + "reason": "Both mention memory behavior, but checkpointing/LoRA and int4 OOM are different code paths." 
+ }, + { + "left": "issue:43054", + "right": "issue:43493", + "accept": true, + "reason": "Both point to a SigLIP2 implementation mismatch/discrepancy affecting outputs; plausible same underlying model bug." + }, + { + "left": "issue:43881", + "right": "issue:43883", + "accept": false, + "reason": "Different models and different missing-attribute/loading failures." + }, + { + "left": "issue:44403", + "right": "issue:44451", + "accept": false, + "reason": "Generic loading noise vs a specific model load failure; not the same issue." + }, + { + "left": "issue:43975", + "right": "issue:45520", + "accept": false, + "reason": "Tokenizer detokenization bug vs flash-attn import/KeyError on Python 3.13." + }, + { + "left": "issue:43643", + "right": "issue:45020", + "accept": false, + "reason": "Remote-code handling is related, but this is a specific missing-fields AutoConfig bug, not clearly the same defect as the broad umbrella report." + }, + { + "left": "issue:43975", + "right": "issue:45237", + "accept": false, + "reason": "Unrelated: tokenizer detokenization vs AMD GPU execution failure." + }, + { + "left": "issue:44403", + "right": "issue:44488", + "accept": false, + "reason": "One is logging noise, the other is a model load failure for a specific checkpoint." + }, + { + "left": "issue:42947", + "right": "issue:43856", + "accept": false, + "reason": "Different training-memory regressions with different mechanisms and models." + }, + { + "left": "issue:43782", + "right": "issue:44960", + "accept": false, + "reason": "Different model families and different failure modes." + }, + { + "left": "issue:42491", + "right": "issue:44368", + "accept": false, + "reason": "Both touch Qwen3/LoRA, but one is a hf4.x/hf5.x checkpoint incompatibility and the other is a tie_word_embeddings warning." + }, + { + "left": "issue:41762", + "right": "issue:42915", + "accept": false, + "reason": "Both involve ZeRO-3/loading, but the models and reported failures are not the same concrete bug." 
+ }, + { + "left": "issue:43975", + "right": "issue:44451", + "accept": false, + "reason": "Different tokenizer/model-specific issues." + }, + { + "left": "issue:45020", + "right": "issue:45356", + "accept": false, + "reason": "Broad remote_code breakage vs a Kimi tokenizer regression with codec handling and warning text; not clearly identical." + }, + { + "left": "issue:44661", + "right": "issue:45020", + "accept": false, + "reason": "Model registration edge case vs broad remote_code regressions; too different to merge as duplicates." + }, + { + "left": "issue:43824", + "right": "issue:43872", + "accept": false, + "reason": "Import/export issue for a missing class vs bitsandbytes initializer incompatibility." + }, + { + "left": "issue:44991", + "right": "issue:45399", + "accept": false, + "reason": "Tokenizer loading regression vs flash-attn fallback gating; unrelated." + }, + { + "left": "issue:43827", + "right": "issue:44509", + "accept": true, + "reason": "Both are the same docs regression: pipeline task references that remained after v5 removals." + }, + { + "left": "issue:45081", + "right": "issue:45399", + "accept": false, + "reason": "Tokenizer regex crash vs flash-attn fallback gating; unrelated subsystems." + }, + { + "left": "issue:43792", + "right": "issue:44220", + "accept": false, + "reason": "Possibly adjacent Whisper/audio code, but not enough evidence that they are the same bug." + }, + { + "left": "issue:43404", + "right": "issue:43950", + "accept": false, + "reason": "Weight tying in Mistral3 vs non-persistent buffer corruption; different defects." + }, + { + "left": "issue:43257", + "right": "issue:43866", + "accept": false, + "reason": "Different Qwen3 MoE and Ovis2 issues with no shared concrete failure." + }, + { + "left": "issue:41553", + "right": "issue:44991", + "accept": false, + "reason": "Different model-loading failures with different root causes." 
+ }, + { + "left": "issue:43742", + "right": "issue:45399", + "accept": false, + "reason": "Different subsystems: MobileLLM config/key error vs flash-attn fallback gating." + }, + { + "left": "issue:42915", + "right": "issue:45020", + "accept": false, + "reason": "FineGrainedFP8Config failure is not the same as the broad remote_code regression report." + }, + { + "left": "issue:43742", + "right": "issue:45020", + "accept": false, + "reason": "Specific MobileLLM load failure vs generic remote_code breakage." + }, + { + "left": "issue:43940", + "right": "issue:45313", + "accept": true, + "reason": "Both report Qwen3-family ZeRO-3 weight loading failures with the same missing-weights symptom and loader path." + }, + { + "left": "issue:43872", + "right": "issue:44291", + "accept": true, + "reason": "These are the same unexpected `_is_hf_initialized` argument bug during model loading/init_empty_weights." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays conservative overall. The accepted duplicate calls are reasonably supported by matching symptoms or the same specific regression class (docs pipeline removal, `_is_hf_initialized`, ZeRO-3 weight loading, SigLIP2 output discrepancy). The rejected pairs are also framed cautiously and do not overclaim sameness from superficial similarity alone." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 4, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:24643", + "issue:30064", + "issue:36010", + "issue:36331", + "issue:38175", + "issue:39692", + "issue:41093", + "issue:42491", + "issue:42617", + "issue:42915", + "issue:43257", + "issue:43317", + "issue:43366", + "issue:43531", + "issue:43550", + "issue:43596", + "issue:43638", + "issue:43644", + "issue:43645", + "issue:43646", + "issue:43720", + "issue:43824", + "issue:43828", + "issue:43872", + "issue:43950", + "issue:43957", + "issue:43994", + "issue:44155", + "issue:44479", + "issue:44530", + "issue:44534", + "issue:44560", + "issue:44589", + "issue:44661", + "issue:44756", + "issue:44805", + "issue:44863", + "issue:44877", + "issue:44910", + "issue:44918", + "issue:44928", + "issue:45030", + "issue:45305", + "issue:45325", + "issue:45538", + "issue:45584", + "issue:45588", + "issue:45685" + ], + "soft_pairs": [ + "issue:44928|issue:45588", + "issue:43257|issue:45305", + "issue:36331|issue:41093", + "issue:41093|issue:44805", + "issue:43872|issue:45538", + "issue:44479|issue:44560", + "issue:44530|issue:44863", + "issue:24643|issue:30064", + "issue:43366|issue:43828", + "issue:44155|issue:45584", + "issue:43366|issue:43531", + "issue:43550|issue:44910", + "issue:44560|issue:45685", + "issue:43645|issue:43646", + "issue:43950|issue:45325", + "issue:43824|issue:43957", + "issue:42915|issue:43957", + "issue:43366|issue:44589", + "issue:44560|issue:44918", + "issue:42617|issue:43366", + "issue:38175|issue:43994", + "issue:43646|issue:43950", + "issue:36010|issue:42915", + "issue:44560|issue:44805", + 
"issue:44877|issue:45030", + "issue:43638|issue:44661", + "issue:43596|issue:44661", + "issue:43317|issue:44756", + "issue:43644|issue:44534", + "issue:38175|issue:39692", + "issue:42491|issue:43720" + ], + "prepared_review_unit_hash": "b17b5455cc32a76c0855dc6b3026e9587863bab5c095e1a0d623f4e18c9ab0ef", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 31, + "serialized_chars": 37221, + "estimated_input_tokens": 9306, + "estimated_eval_tokens": 18868 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a noisy mix of unrelated Transformers issues; only a few soft pairs look like true duplicates of the same underlying bug/regression. The clearest repeated theme is the non-persistent-buffer corruption regression in v5.", + "confidence": 0.58, + "canonical_issue_reason": "Issue 44534 is the clearest representative of the non-persistent-buffer regression: concise, specific, and matching the same bug described in 43644.", + "canonical_pr_reason": null, + "best_issue_reason": "44534 is the best single issue to anchor the shared buffer-corruption regression subgroup because it states the core problem directly and broadly.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44928", + "right": "issue:45588", + "accept": false, + "reason": "Different failures: 3D position_ids/BF16 SDPA fallback vs an s_aux=None AttributeError in flash_attention.py." + }, + { + "left": "issue:43257", + "right": "issue:45305", + "accept": false, + "reason": "Both involve Qwen3/MoE and DeepSpeed, but they describe different bugs: loading/conversion vs gradient averaging." 
+ }, + { + "left": "issue:36331", + "right": "issue:41093", + "accept": false, + "reason": "Different code paths and symptoms: custom trainer API mismatch vs a tensor/mask shape mismatch." + }, + { + "left": "issue:41093", + "right": "issue:44805", + "accept": false, + "reason": "Same generic IndexError shape template, but no evidence they stem from the same model or code path." + }, + { + "left": "issue:43872", + "right": "issue:45538", + "accept": false, + "reason": "Unrelated: bitsandbytes init incompatibility vs a CLIPTokenizer model_max_length issue." + }, + { + "left": "issue:44479", + "right": "issue:44560", + "accept": true, + "reason": "Both report the same 5.3.0 Qwen video-input regression/StopIteration class of failure across related model variants." + }, + { + "left": "issue:44530", + "right": "issue:44863", + "accept": false, + "reason": "Different bugs: Qwen3.5 PagedAttentionCache group-type crash vs NemotronH checkpoint loading." + }, + { + "left": "issue:24643", + "right": "issue:30064", + "accept": false, + "reason": "Completely different subsystems and errors: DeepSpeed training weight shape vs segmentation-map processing." + }, + { + "left": "issue:43366", + "right": "issue:43828", + "accept": false, + "reason": "Different issues: GGUF architecture support vs a dtype mismatch under autocast." + }, + { + "left": "issue:44155", + "right": "issue:45584", + "accept": false, + "reason": "Different domains and failures: batched audio track leakage vs empty-transcription generation after align_special_tokens." + }, + { + "left": "issue:43366", + "right": "issue:43531", + "accept": false, + "reason": "Both touch Qwen3/MoE-adjacent areas, but one is GGUF architecture support and the other is a sliding_window bug." + }, + { + "left": "issue:43550", + "right": "issue:44910", + "accept": false, + "reason": "Different concrete failures: torch.compile/SDPA on Bamba vs Qwen3.5 flash-attention crashes from 3D position_ids handling." 
+ }, + { + "left": "issue:44560", + "right": "issue:45685", + "accept": false, + "reason": "Unrelated: Qwen video StopIteration regression vs an MPS histogram op not implemented for Int." + }, + { + "left": "issue:43645", + "right": "issue:43646", + "accept": true, + "reason": "These describe the same custom-model initialization regression; 43645 is just the Jupyter notebook repro variant." + }, + { + "left": "issue:43950", + "right": "issue:45325", + "accept": false, + "reason": "Different regressions: non-persistent buffer corruption vs Qwen2.5-VL temporal position_ids scaling." + }, + { + "left": "issue:43824", + "right": "issue:43957", + "accept": false, + "reason": "Different loading failures: missing Qwen2.5-VL class import vs meta-device loading breakage." + }, + { + "left": "issue:42915", + "right": "issue:43957", + "accept": false, + "reason": "Different bugs: Qwen3Moe FineGrainedFP8Config failure vs meta-device model-loading breakage." + }, + { + "left": "issue:43366", + "right": "issue:44589", + "accept": false, + "reason": "No clear overlap beyond general loading/runtime issues; gpt-oss GGUF support is unrelated to Float8 storage lookup." + }, + { + "left": "issue:44560", + "right": "issue:44918", + "accept": false, + "reason": "Related model family, but the failures are different: video StopIteration vs embedding unpacking with TRL SFT trainer." + }, + { + "left": "issue:42617", + "right": "issue:43366", + "accept": false, + "reason": "Different problems: 3d_parallel.py launch failure vs GGUF gpt-oss architecture support." + }, + { + "left": "issue:38175", + "right": "issue:43994", + "accept": false, + "reason": "Same model family, but the reports point to different symptoms and likely different root causes." + }, + { + "left": "issue:43646", + "right": "issue:43950", + "accept": false, + "reason": "Custom model initialization break vs non-persistent buffer corruption are separate regressions." 
+ }, + { + "left": "issue:36010", + "right": "issue:42915", + "accept": false, + "reason": "Different bugs: missing GenerationMixin import vs Qwen3Moe FP8 failure." + }, + { + "left": "issue:44560", + "right": "issue:44805", + "accept": false, + "reason": "Different errors: video StopIteration vs a mask/tensor shape mismatch." + }, + { + "left": "issue:44877", + "right": "issue:45030", + "accept": false, + "reason": "Both are config-related, but the model families and failure modes are too different to treat as one bug." + }, + { + "left": "issue:43638", + "right": "issue:44661", + "accept": false, + "reason": "Different failures: DeepSpeed ZeRO3 Bert index error vs add-new-model-like/tokenizer-mapping behavior." + }, + { + "left": "issue:43596", + "right": "issue:44661", + "accept": false, + "reason": "Different bugs: ZeRO3 Bert zero-size tensor index error vs tokenizer mapping interaction." + }, + { + "left": "issue:43317", + "right": "issue:44756", + "accept": false, + "reason": "Different resource/loading issues: gpu+cpu offload dequantization vs mmap OOM on Strix Halo." + }, + { + "left": "issue:43644", + "right": "issue:44534", + "accept": true, + "reason": "Same underlying v5 regression corrupting non-persistent buffers; titles are effectively two phrasings of the same bug." + }, + { + "left": "issue:38175", + "right": "issue:39692", + "accept": false, + "reason": "Shared SigLIP2 context, but one is zero probabilities while the other is a documentation example with multiple errors." + }, + { + "left": "issue:42491", + "right": "issue:43720", + "accept": false, + "reason": "Different incompatibilities: Qwen3 MoE LoRA/hf4.x vs bitsandbytes Int8Params kwargs." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays conservative overall. 
The three accepted pairs are plausibly same-bug groupings from the titles alone, and the rejected pairs are mostly justified by different model families, subsystems, or error modes. No clear overstatement stands out." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 5, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:36010", + "issue:39404", + "issue:39692", + "issue:41720", + "issue:42491", + "issue:42915", + "issue:42947", + "issue:43257", + "issue:43335", + "issue:43404", + "issue:43425", + "issue:43454", + "issue:43644", + "issue:43645", + "issue:43708", + "issue:43854", + "issue:43856", + "issue:43873", + "issue:43931", + "issue:43957", + "issue:43994", + "issue:44075", + "issue:44155", + "issue:44164", + "issue:44291", + "issue:44387", + "issue:44402", + "issue:44451", + "issue:44488", + "issue:44661", + "issue:44683", + "issue:44756", + "issue:44910", + "issue:44960", + "issue:44977", + "issue:44991", + "issue:45216", + "issue:45276", + "issue:45292", + "issue:45313", + "issue:45335", + "issue:45357", + "issue:45362", + "issue:45406", + "issue:45588", + "issue:45663", + "issue:45704" + ], + "soft_pairs": [ + "issue:45313|issue:45357", + "issue:44960|issue:44977", + "issue:44451|issue:44488", + "issue:43854|issue:43931", + "issue:43708|issue:44075", + "issue:42491|issue:45276", + "issue:41720|issue:42915", + "issue:43856|issue:43873", + "issue:39692|issue:43994", + "issue:43257|issue:43854", + "issue:44488|issue:44991", + "issue:42915|issue:44756", + "issue:42491|issue:43931", + "issue:45362|issue:45406", + "issue:44661|issue:44683", + "issue:43644|issue:43645", + "issue:43856|issue:44387", + 
"issue:44910|issue:45588", + "issue:42491|issue:45292", + "issue:43404|issue:43454", + "issue:45276|issue:45335", + "issue:36010|issue:39404", + "issue:44164|issue:44291", + "issue:44402|issue:44488", + "issue:41720|issue:44155", + "issue:39692|issue:44960", + "issue:43335|issue:45663", + "issue:43425|issue:43957", + "issue:42947|issue:45704", + "issue:43873|issue:44387", + "issue:44387|issue:44756", + "issue:43854|issue:45216", + "issue:44291|issue:44756" + ], + "prepared_review_unit_hash": "202d00b8dbabf16f6f4dcf935d784cfba66be1b94cfe284fe0e84e55e7423e26", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 33, + "serialized_chars": 37000, + "estimated_input_tokens": 9250, + "estimated_eval_tokens": 18756 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous Transformers bug reports with no single cluster-wide duplicate. 
I found two strong near-duplicate issue pairs around model weight tying and embedding resizing, but the rest are separate regressions across different models/features.", + "confidence": 0.63, + "canonical_issue_reason": "Issue 45276 is the best canonical anchor: it states the broader `resize_token_embeddings`/output-embedding regression, while 45335 is a narrower model-specific manifestation.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45276 is the strongest general representative of the duplicate-like subset because its symptom is model-agnostic and likely shared across variants.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45313", + "right": "issue:45357", + "accept": false, + "reason": "Same model family, but different code paths: one is ZeRO-3 loading of `language_model` weights, the other is incorrect visual-encoder keys during `save_pretrained`." + }, + { + "left": "issue:44960", + "right": "issue:44977", + "accept": false, + "reason": "Both involve flash-attention and generation, but they affect different models and describe different failures; not the same concrete bug." + }, + { + "left": "issue:44451", + "right": "issue:44488", + "accept": false, + "reason": "Both are model-loading failures for different checkpoints, but the titles do not show a shared root cause or code path." + }, + { + "left": "issue:43854", + "right": "issue:43931", + "accept": false, + "reason": "Different failure modes: unit-test loading for GLM-4.7 versus a shape-mismatch load error for Qwen3-VL." + }, + { + "left": "issue:43708", + "right": "issue:44075", + "accept": false, + "reason": "Unrelated bugs: Trainer checkpoint step calculation versus SGD optimizer arguments being ignored." + }, + { + "left": "issue:42491", + "right": "issue:45276", + "accept": false, + "reason": "Qwen3 MoE LoRA compatibility across hf4.x/hf5.x is unrelated to the resize-token-embeddings regression." 
+ }, + { + "left": "issue:41720", + "right": "issue:42915", + "accept": false, + "reason": "Separate issues: CUDA assert with auto device mapping versus FineGrainedFP8Config failure." + }, + { + "left": "issue:43856", + "right": "issue:43873", + "accept": false, + "reason": "Both mention memory/offloading, but one is training memory usage and the other is quantization/offload behavior; not the same bug." + }, + { + "left": "issue:39692", + "right": "issue:43994", + "accept": false, + "reason": "Same model family, but one is a docs/example + quantization problem and the other is a runtime AutoModel/pipeline correctness issue." + }, + { + "left": "issue:43257", + "right": "issue:43854", + "accept": false, + "reason": "Different problems: Qwen3 MoE weight conversion with accelerate/deepspeed versus GLM-4.7 load failure." + }, + { + "left": "issue:44488", + "right": "issue:44991", + "accept": false, + "reason": "Both fail to load models, but they involve different checkpoints and no shared concrete loading bug is shown." + }, + { + "left": "issue:42915", + "right": "issue:44756", + "accept": false, + "reason": "Qwen3 MoE FP8 training failure and Strix Halo mmap OOM are unrelated." + }, + { + "left": "issue:42491", + "right": "issue:43931", + "accept": false, + "reason": "Different model-specific load/training bugs with no evidence of a common root cause." + }, + { + "left": "issue:45362", + "right": "issue:45406", + "accept": false, + "reason": "A chat crash in Qwen3.5 is not the same as Gemma4Processor missing `_tokenizer` in `transformers serve`." + }, + { + "left": "issue:44661", + "right": "issue:44683", + "accept": false, + "reason": "Tokenizer-mapping registration bug versus compiled flex_attention incompatibility; unrelated code paths." 
+ }, + { + "left": "issue:43644", + "right": "issue:43645", + "accept": false, + "reason": "Both are Transformers 5.0 regressions, but one is about non-persistent buffers and the other about custom model initialization in notebooks." + }, + { + "left": "issue:43856", + "right": "issue:44387", + "accept": false, + "reason": "Both relate to memory, but one is Qwen3 MoE training efficiency and the other is int4 quantization reserved-memory growth leading to OOM." + }, + { + "left": "issue:44910", + "right": "issue:45588", + "accept": false, + "reason": "Different flash-attention failures: misinterpreted 3D position_ids versus `s_aux=None` on sink-less models." + }, + { + "left": "issue:42491", + "right": "issue:45292", + "accept": false, + "reason": "Qwen3 MoE LoRA compatibility is unrelated to the generic `resize_token_embeddings`/output_embeddings bug." + }, + { + "left": "issue:43404", + "right": "issue:43454", + "accept": true, + "reason": "Both report the same underlying weight-tying regression: `lm_head` is not tied correctly, causing broken generation across model variants." + }, + { + "left": "issue:45276", + "right": "issue:45335", + "accept": true, + "reason": "Same concrete bug: `resize_token_embeddings` does not propagate to the output/decoder embeddings, with 45335 being a model-specific instance." + }, + { + "left": "issue:36010", + "right": "issue:39404", + "accept": false, + "reason": "Importing `GenerationMixin` and Whisper `return_language` pipeline behavior are unrelated regressions." + }, + { + "left": "issue:44164", + "right": "issue:44291", + "accept": false, + "reason": "Different API breakages: `extra_state` handling in save/load versus an unexpected `_is_hf_initialized` argument in `init_empty_weights`." + }, + { + "left": "issue:44402", + "right": "issue:44488", + "accept": false, + "reason": "Tokenizer vocab-size mismatch and a separate model-loading failure are not the same bug." 
+ }, + { + "left": "issue:41720", + "right": "issue:44155", + "accept": false, + "reason": "CUDA assert in Qwen3 auto device mapping is unrelated to AudioFlamingo3 track-to-track embedding/token leakage." + }, + { + "left": "issue:39692", + "right": "issue:44960", + "accept": false, + "reason": "Different models and different failure modes; one is a SigLIP2 docs/runtime issue, the other is a flash-attention generation bug." + }, + { + "left": "issue:43335", + "right": "issue:45663", + "accept": false, + "reason": "Sparse-layer construction in SwitchTransformers and Gemma-4 FSDP2 shared_kv_states rebuilding are separate issues." + }, + { + "left": "issue:43425", + "right": "issue:43957", + "accept": false, + "reason": "Torch 2.10 incompatibility and meta-device loading breakage are distinct regressions." + }, + { + "left": "issue:42947", + "right": "issue:45704", + "accept": false, + "reason": "Gradient checkpointing with PEFT LoRA and an apex RMSNorm memory leak are unrelated." + }, + { + "left": "issue:43873", + "right": "issue:44387", + "accept": false, + "reason": "Both mention quantization/memory, but one is offloading behavior and the other is increased reserved CUDA memory under int4 quantization." + }, + { + "left": "issue:44387", + "right": "issue:44756", + "accept": false, + "reason": "OOM from int4 reserved-memory growth is different from disabling mmap on Strix Halo to avoid OOM." + }, + { + "left": "issue:43854", + "right": "issue:45216", + "accept": false, + "reason": "GLM-4.7 unit-test loading failure is unrelated to Qwen3.5 save_pretrained checkpoint corruption." + }, + { + "left": "issue:44291", + "right": "issue:44756", + "accept": false, + "reason": "Unexpected `_is_hf_initialized` in meta initialization is unrelated to Strix Halo mmap OOM." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "Grounded overall. 
The canonical issue choice and the two accepted soft pairs are conservative and supported by the titles: 45276/45335 are clearly the same resize_token_embeddings/output_embeddings regression, and 43404/43454 both describe lm_head weight-tying failures with similar symptoms. The remaining pairwise rejections are appropriately cautious, and the cluster summary correctly frames the rest as heterogeneous regressions rather than one duplicate family." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 6, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:39404", + "issue:42947", + "issue:43257", + "issue:43299", + "issue:43381", + "issue:43425", + "issue:43611", + "issue:43645", + "issue:43646", + "issue:43708", + "issue:43716", + "issue:43828", + "issue:43856", + "issue:43901", + "issue:43906", + "issue:43931", + "issue:43957", + "issue:43994", + "issue:44079", + "issue:44164", + "issue:44265", + "issue:44292", + "issue:44402", + "issue:44451", + "issue:44479", + "issue:44509", + "issue:44530", + "issue:44589", + "issue:44671", + "issue:44743", + "issue:44756", + "issue:44805", + "issue:44863", + "issue:44877", + "issue:44898", + "issue:44912", + "issue:44928", + "issue:44960", + "issue:44991", + "issue:45084", + "issue:45276", + "issue:45292", + "issue:45335", + "issue:45588", + "issue:45663", + "issue:45684", + "issue:45698", + "issue:45701" + ], + "soft_pairs": [ + "issue:44292|issue:44912", + "issue:45684|issue:45698", + "issue:44743|issue:44960", + "issue:45276|issue:45292", + "issue:43957|issue:44756", + "issue:42947|issue:43381", + "issue:43381|issue:44928", + "issue:44164|issue:44756", + "issue:43646|issue:43708", + 
"issue:45588|issue:45663", + "issue:44479|issue:44877", + "issue:44402|issue:44451", + "issue:45292|issue:45335", + "issue:43645|issue:44164", + "issue:43257|issue:44863", + "issue:43828|issue:43957", + "issue:43425|issue:43828", + "issue:44589|issue:44756", + "issue:39404|issue:45701", + "issue:44479|issue:45663", + "issue:43906|issue:44530", + "issue:43931|issue:44863", + "issue:43611|issue:43645", + "issue:43901|issue:44509", + "issue:43425|issue:43716", + "issue:44530|issue:45084", + "issue:39404|issue:44912", + "issue:43299|issue:43931", + "issue:44863|issue:45663", + "issue:44479|issue:44805", + "issue:43257|issue:45663", + "issue:44265|issue:44898", + "issue:43994|issue:44079", + "issue:44671|issue:44991", + "issue:43856|issue:44756", + "issue:39404|issue:44671" + ], + "prepared_review_unit_hash": "a689f282053f7d16620b28985709eb38ab5b58ccf675ea4e9caa7f9c379ee8c9", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 36, + "serialized_chars": 38394, + "estimated_input_tokens": 9599, + "estimated_eval_tokens": 19454 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a heterogeneous set of Transformers v5 regressions around loading, custom-model plumbing, tokenizer/config behavior, and model-specific runtime bugs. The only clear duplicate chain is the resize_token_embeddings trio; most other soft pairs are too broad or target different code paths.", + "confidence": 0.67, + "canonical_issue_reason": "Issue 43611 is the broadest representative of the cluster\u2019s main theme: a Transformers v5 model-loading regression affecting custom-model behavior via `base_model_prefix`. 
It is more central than the narrower, model-specific reports.", + "canonical_pr_reason": null, + "best_issue_reason": "43611 is the best single triage anchor in this set because it is a general v5 loading regression with broad applicability, whereas most other issues are narrower one-off model or runtime failures.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44292", + "right": "issue:44912", + "accept": false, + "reason": "Both are quantization/loading failures, but they involve different quantization formats and different failure modes; not the same bug." + }, + { + "left": "issue:45684", + "right": "issue:45698", + "accept": false, + "reason": "Both involve custom-model save/load flows, but one is a permissions issue and the other loads the wrong module; distinct underlying problems." + }, + { + "left": "issue:44743", + "right": "issue:44960", + "accept": false, + "reason": "Different model/runtime bugs: recurrent cache state reset vs a GLM5-specific issue." + }, + { + "left": "issue:45276", + "right": "issue:45292", + "accept": true, + "reason": "Same `resize_token_embeddings` regression; 45276 is a Gemma4-specific manifestation of the broader output_embeddings failure in 45292." + }, + { + "left": "issue:43957", + "right": "issue:44756", + "accept": false, + "reason": "Unrelated loading/meta-device bug versus a Strix Halo mmap/OOM issue." + }, + { + "left": "issue:42947", + "right": "issue:43381", + "accept": false, + "reason": "Both mention gradient checkpointing, but one is about LoRA ineffective checkpointing and the other about eval-mode restrictions." + }, + { + "left": "issue:43381", + "right": "issue:44928", + "accept": false, + "reason": "Different failures: eval-mode checkpointing restriction vs Qwen3.5 RLHF NaNs from 3D position_ids/SDPA fallback." 
+ }, + { + "left": "issue:44164", + "right": "issue:44756", + "accept": false, + "reason": "save/from_pretrained extra_state handling is unrelated to the Strix Halo mmap/OOM problem." + }, + { + "left": "issue:43646", + "right": "issue:43708", + "accept": false, + "reason": "Custom model initialization breakage and Trainer resume/max_steps calculation are different code paths." + }, + { + "left": "issue:45588", + "right": "issue:45663", + "accept": false, + "reason": "Flash-attention sink-less crash and Gemma-4 FSDP2 KeyError are separate model/integration bugs." + }, + { + "left": "issue:44479", + "right": "issue:44877", + "accept": false, + "reason": "A video-input regression for Qwen variants is not the same as granite_speech config strictness." + }, + { + "left": "issue:44402", + "right": "issue:44451", + "accept": false, + "reason": "Tokenizer vocab-size mismatch and a specific model-loading failure are not the same underlying issue." + }, + { + "left": "issue:45292", + "right": "issue:45335", + "accept": true, + "reason": "Same `resize_token_embeddings` family bug: both report the resized embeddings not propagating to the model\u2019s target embedding/output layers." + }, + { + "left": "issue:43645", + "right": "issue:44164", + "accept": false, + "reason": "Notebook custom-model initialization and extra_state save/load failures are different regressions." + }, + { + "left": "issue:43257", + "right": "issue:44863", + "accept": false, + "reason": "Different model families and different loading failures; not the same bug." + }, + { + "left": "issue:43828", + "right": "issue:43957", + "accept": false, + "reason": "Autocast dtype mismatch and meta-device loading failure are distinct runtime/loading problems." + }, + { + "left": "issue:43425", + "right": "issue:43828", + "accept": false, + "reason": "Torch version incompatibility is too broad and does not match the specific autocast dtype mismatch bug." 
+ }, + { + "left": "issue:44589", + "right": "issue:44756", + "accept": false, + "reason": "Float8 storage lookup failure is unrelated to mmap/OOM behavior." + }, + { + "left": "issue:39404", + "right": "issue:45701", + "accept": false, + "reason": "Whisper pipeline return_language regression is unrelated to a general tokenization-change report." + }, + { + "left": "issue:44479", + "right": "issue:45663", + "accept": false, + "reason": "Different areas: video input regression versus Gemma-4 training/FSDP2 state handling." + }, + { + "left": "issue:43906", + "right": "issue:44530", + "accept": false, + "reason": "Isolated reproduction of another issue and a PagedAttentionCache crash are not the same bug." + }, + { + "left": "issue:43931", + "right": "issue:44863", + "accept": false, + "reason": "Qwen3-VL shape mismatch and NemotronH checkpoint loading are different model-loading failures." + }, + { + "left": "issue:43611", + "right": "issue:43645", + "accept": false, + "reason": "Both are v5/custom-model regressions, but one is about `base_model_prefix` loading and the other is about notebook-defined custom model initialization." + }, + { + "left": "issue:43901", + "right": "issue:44509", + "accept": false, + "reason": "Both are docs-related, but they refer to different pipeline/task deprecations and not the same fix." + }, + { + "left": "issue:43425", + "right": "issue:43716", + "accept": false, + "reason": "Torch incompatibility is not the same as a Mistral-3 image preprocessor dtype mismatch." + }, + { + "left": "issue:44530", + "right": "issue:45084", + "accept": false, + "reason": "Different failures: PagedAttentionCache invalid group type vs compile-time non-template-node error." + }, + { + "left": "issue:39404", + "right": "issue:44912", + "accept": false, + "reason": "Whisper pipeline language-return issue is unrelated to MXFP4 quantization fallback." 
+ }, + { + "left": "issue:43299", + "right": "issue:43931", + "accept": false, + "reason": "Qwen3VL-MoE loading regression and Qwen3-VL weight-shape mismatch are related only by model family, not the same bug." + }, + { + "left": "issue:44863", + "right": "issue:45663", + "accept": false, + "reason": "NemotronH checkpoint loading and Gemma-4 FSDP2 KeyError are different code-path failures." + }, + { + "left": "issue:44479", + "right": "issue:44805", + "accept": false, + "reason": "Video-input regression and a mask shape IndexError are unrelated." + }, + { + "left": "issue:43257", + "right": "issue:45663", + "accept": false, + "reason": "Qwen3 MoE conversion/loading issue is unrelated to Gemma-4 FSDP2 shared_kv_states behavior." + }, + { + "left": "issue:44265", + "right": "issue:44898", + "accept": false, + "reason": "torch.export/torch_compilable_check failure and Perceiver resolution/interpolation failure are different bugs." + }, + { + "left": "issue:43994", + "right": "issue:44079", + "accept": false, + "reason": "Nonsensical Siglip2 outputs and ModelOutput key assignment are distinct issues." + }, + { + "left": "issue:44671", + "right": "issue:44991", + "accept": false, + "reason": "CamemBERT prediction regressions and tokenizer loading for est-roberta are different problems." + }, + { + "left": "issue:43856", + "right": "issue:44756", + "accept": false, + "reason": "Qwen3 MoE memory efficiency and Strix Halo mmap/OOM are not the same underlying bug." + }, + { + "left": "issue:39404", + "right": "issue:44671", + "accept": false, + "reason": "Whisper pipeline language return and CamemBERT masked-LM prediction regressions are unrelated." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays suitably conservative. The only accepted soft-edge chain is the resize_token_embeddings family, which is well supported by the titles and similarity scores. 
The other pairwise rejections are cautious and plausible, and the chosen anchor/canonical issue is defensible as the broadest v5 loading regression in the set." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 7, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:32090", + "issue:34567", + "issue:36032", + "issue:39404", + "issue:40444", + "issue:40990", + "issue:42915", + "issue:42947", + "issue:43262", + "issue:43317", + "issue:43377", + "issue:43404", + "issue:43421", + "issue:43425", + "issue:43452", + "issue:43482", + "issue:43541", + "issue:43582", + "issue:43596", + "issue:43611", + "issue:43632", + "issue:43643", + "issue:43644", + "issue:43645", + "issue:43646", + "issue:43716", + "issue:43883", + "issue:43957", + "issue:44164", + "issue:44206", + "issue:44220", + "issue:44451", + "issue:44466", + "issue:44479", + "issue:44488", + "issue:44530", + "issue:44589", + "issue:44683", + "issue:44811", + "issue:44849", + "issue:44918", + "issue:44987", + "issue:44991", + "issue:45081", + "issue:45084", + "issue:45325", + "issue:45584", + "issue:45685" + ], + "soft_pairs": [ + "issue:43425|issue:44918", + "issue:43596|issue:44683", + "issue:44589|issue:45084", + "issue:40990|issue:42947", + "issue:43425|issue:45685", + "issue:36032|issue:43452", + "issue:44991|issue:45081", + "issue:32090|issue:39404", + "issue:43883|issue:44488", + "issue:43957|issue:44164", + "issue:43582|issue:43632", + "issue:44164|issue:44479", + "issue:44488|issue:45081", + "issue:40990|issue:43421", + "issue:43611|issue:43644", + "issue:44488|issue:44987", + "issue:44206|issue:44220", + "issue:43883|issue:44991", + "issue:44811|issue:45584", + 
"issue:43716|issue:45084", + "issue:34567|issue:40444", + "issue:43643|issue:44530", + "issue:43644|issue:43646", + "issue:43452|issue:43482", + "issue:44451|issue:44987", + "issue:36032|issue:42915", + "issue:43317|issue:43482", + "issue:43541|issue:45084", + "issue:42915|issue:44530", + "issue:44849|issue:45325", + "issue:43404|issue:44466", + "issue:36032|issue:43645", + "issue:44987|issue:44991", + "issue:43262|issue:43377" + ], + "prepared_review_unit_hash": "9ce1b3fb6c876777168b003ada4f81f6cda4475110afb221468782be82383e60", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 34, + "serialized_chars": 37986, + "estimated_input_tokens": 9497, + "estimated_eval_tokens": 19250 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a loose bag of unrelated Transformers issues that only share broad surface keywords like loading, tokenizers, Whisper, or v5 regressions. 
I would not merge any of the soft pairs; the title-level overlap is not enough to indicate the same underlying bug or change.", + "confidence": 0.86, + "canonical_issue_reason": "issue:43611 is the broadest, most central regression-style report in the set: a generic Transformers v5 loading break tied to `base_model_prefix`, which makes it the best representative of the cluster\u2019s overall theme despite the cluster\u2019s mixed topics.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43611 is the most generally applicable issue here and best captures the recurring v5 model-loading regression pattern seen in several nearby titles.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43425", + "right": "issue:44918", + "accept": false, + "reason": "Different failures: Torch version incompatibility vs Qwen3.5 embedding unpacking with TRL SFT; same broad area, not the same bug." + }, + { + "left": "issue:43596", + "right": "issue:44683", + "accept": false, + "reason": "One is a DeepSpeed ZeRO3/BertModel indexing crash; the other is a torch>=2.9 flex_attention compilation failure. Different code paths." + }, + { + "left": "issue:44589", + "right": "issue:45084", + "accept": false, + "reason": "Float8 storage lookup error vs template-node compilation error; no evidence of a shared underlying defect." + }, + { + "left": "issue:40990", + "right": "issue:42947", + "accept": false, + "reason": "Perplexity quality issue on GPT-OSS is unrelated to gradient checkpointing/LoRA behavior." + }, + { + "left": "issue:43425", + "right": "issue:45685", + "accept": false, + "reason": "Torch 2.10 incompatibility is unrelated to an MPS histogram kernel missing for Int." + }, + { + "left": "issue:36032", + "right": "issue:43452", + "accept": false, + "reason": "Tokenizer method-name conflict in T5 is unrelated to `gguf_file` loading failures for AutoTokenizer/AutoModel." 
+ }, + { + "left": "issue:44991", + "right": "issue:45081", + "accept": false, + "reason": "Both affect tokenizer loading, but one is an EMBEDDIA RoBERTa tokenizer regression and the other is a Mistral regex-patching crash; different bugs." + }, + { + "left": "issue:32090", + "right": "issue:39404", + "accept": false, + "reason": "Trainer GPU broadcast NoneType error and Whisper `return_language` pipeline regression are unrelated." + }, + { + "left": "issue:43883", + "right": "issue:44488", + "accept": false, + "reason": "Both are model-loading failures, but the symptoms and model families differ; not the same underlying issue." + }, + { + "left": "issue:43957", + "right": "issue:44164", + "accept": false, + "reason": "Meta-device loading breakage and extra_state save/load handling are different serialization/loading problems." + }, + { + "left": "issue:43582", + "right": "issue:43632", + "accept": false, + "reason": "Apple Silicon allocator warmup TypeError is unrelated to the `_is_hf_initialized` flag regression." + }, + { + "left": "issue:44164", + "right": "issue:44479", + "accept": false, + "reason": "save/from_pretrained extra_state handling is not the same as a Qwen video-input regression." + }, + { + "left": "issue:44488", + "right": "issue:45081", + "accept": false, + "reason": "Both are loading-related, but the affected models and failure modes are different; no shared concrete code-path bug." + }, + { + "left": "issue:40990", + "right": "issue:43421", + "accept": false, + "reason": "Perplexity regression and runtime special-token/post-processor update behavior are unrelated." + }, + { + "left": "issue:43611", + "right": "issue:43644", + "accept": false, + "reason": "Both mention v5 model-loading behavior, but one is `base_model_prefix` loading and the other is non-persistent buffer initialization; different root causes." 
+ }, + { + "left": "issue:44488", + "right": "issue:44987", + "accept": false, + "reason": "Different model loading regressions: `cjvt/sleng-bert` vs `physical-intelligence/fast`." + }, + { + "left": "issue:44206", + "right": "issue:44220", + "accept": false, + "reason": "`LasrFeatureExtractor` unsupported argument crash and `_torch_extract_fbank_features()` are related audio areas but not the same concrete failure." + }, + { + "left": "issue:43883", + "right": "issue:44991", + "accept": false, + "reason": "Missing `all_tied_weights_keys` during loading is not the same as tokenizer loading for EMBEDDIA/est-roberta." + }, + { + "left": "issue:44811", + "right": "issue:45584", + "accept": false, + "reason": "Both are Whisper-related, but one is batch decoding skip-special-tokens behavior and the other is empty transcription after align_special_tokens; different bugs." + }, + { + "left": "issue:43716", + "right": "issue:45084", + "accept": false, + "reason": "Mistral image-preprocessor dtype mismatch and template compilation failure are unrelated." + }, + { + "left": "issue:34567", + "right": "issue:40444", + "accept": false, + "reason": "TrainerState token counting and Qwen2.5-VL iterable dataset multi-image finetuning are unrelated." + }, + { + "left": "issue:43643", + "right": "issue:44530", + "accept": false, + "reason": "`AutoConfig` missing fields with `trust_remote_code=True` is unrelated to Qwen3.5 PagedAttentionCache group-type crashes." + }, + { + "left": "issue:43644", + "right": "issue:43646", + "accept": false, + "reason": "Both are v5 custom-model regressions, but one is non-persistent buffer junk and the other is custom model initialization; too broad and not the same bug." + }, + { + "left": "issue:43452", + "right": "issue:43482", + "accept": false, + "reason": "`gguf_file` loading breakage and Qwen2.5-GGUF v5 loading failure are related only at a high level; not enough to treat as the same defect." 
+ }, + { + "left": "issue:44451", + "right": "issue:44987", + "accept": false, + "reason": "Model loading failure for `vesteinn/ScandiBERT` is unrelated to `physical-intelligence/fast` loading failure." + }, + { + "left": "issue:36032", + "right": "issue:42915", + "accept": false, + "reason": "T5 tokenizer special-token conflict and Qwen3Moe FineGrainedFP8Config failure are unrelated." + }, + { + "left": "issue:43317", + "right": "issue:43482", + "accept": false, + "reason": "device_map/offload dequantized-model loading and Qwen2.5-GGUF loading failure are different loading paths and different bugs." + }, + { + "left": "issue:43541", + "right": "issue:45084", + "accept": false, + "reason": "Mixtral grouped_mm torch dynamo tracing error is unrelated to template-node compilation errors." + }, + { + "left": "issue:42915", + "right": "issue:44530", + "accept": false, + "reason": "Qwen3Moe FP8 config failure and PagedAttentionCache linear_attention crash are different Qwen issues with different failure modes." + }, + { + "left": "issue:44849", + "right": "issue:45325", + "accept": false, + "reason": "Both are Qwen3.5 regressions, but one is output_hidden_states handling and the other is video rope-position scaling; not the same bug." + }, + { + "left": "issue:43404", + "right": "issue:44466", + "accept": false, + "reason": "Both concern tied weights/lm_head, but one is a specific model not tying weights and the other is device-dependent serialization of lm_head.weight; related area, not the same concrete bug." + }, + { + "left": "issue:36032", + "right": "issue:43645", + "accept": false, + "reason": "Tokenizer special-token method conflict is unrelated to custom model initialization in Jupyter notebooks." + }, + { + "left": "issue:44987", + "right": "issue:44991", + "accept": false, + "reason": "Both are loading failures, but they involve different models and different parts of the stack (model vs tokenizer)." 
+ }, + { + "left": "issue:43262", + "right": "issue:43377", + "accept": false, + "reason": "Audio processor chat-template sampling-rate default and MIMI encoder padding-mask batching are unrelated." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 8, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:36032", + "issue:40444", + "issue:40990", + "issue:41553", + "issue:42175", + "issue:42915", + "issue:43296", + "issue:43317", + "issue:43322", + "issue:43377", + "issue:43404", + "issue:43421", + "issue:43531", + "issue:43540", + "issue:43582", + "issue:43611", + "issue:43632", + "issue:43645", + "issue:43646", + "issue:43742", + "issue:43819", + "issue:43856", + "issue:43994", + "issue:44062", + "issue:44079", + "issue:44220", + "issue:44265", + "issue:44291", + "issue:44403", + "issue:44451", + "issue:44479", + "issue:44534", + "issue:44568", + "issue:44610", + "issue:44683", + "issue:44756", + "issue:44811", + "issue:44877", + "issue:44987", + "issue:45081", + "issue:45084", + "issue:45127", + "issue:45216", + "issue:45325", + "issue:45357", + "issue:45507", + "issue:45542", + "issue:45663" + ], + "soft_pairs": [ + "issue:43646|issue:44534", + "issue:45084|issue:45507", + "issue:43994|issue:44610", + "issue:43645|issue:44534", + "issue:41553|issue:43582", + "issue:43322|issue:43540", + "issue:44403|issue:45081", + "issue:45216|issue:45357", + "issue:43377|issue:43819", + "issue:44451|issue:45081", + "issue:44220|issue:44683", + "issue:44265|issue:44610", + "issue:43404|issue:45127", + "issue:44987|issue:45081", + "issue:42915|issue:45084", + "issue:43632|issue:44756", + 
"issue:44479|issue:45325", + "issue:44062|issue:44987", + "issue:43296|issue:43531", + "issue:42915|issue:44291", + "issue:43611|issue:44534", + "issue:36032|issue:43742", + "issue:40444|issue:45325", + "issue:43317|issue:43856", + "issue:40990|issue:44811", + "issue:44079|issue:45663", + "issue:42175|issue:45542", + "issue:43421|issue:44568", + "issue:43531|issue:44877" + ], + "prepared_review_unit_hash": "fc9d6ba6f1a358bd2278e135540c8147bb4e6e117f4bef7de2279c8c25ea8d3f", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 29, + "serialized_chars": 36393, + "estimated_input_tokens": 9099, + "estimated_eval_tokens": 18454 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous. I only see two likely duplicate pairs: the Qwen3.5 `save_pretrained` regression pair and the TensorFlow backend installation/package pair. 
The rest are separate bugs affecting different models or code paths.", + "confidence": 0.78, + "canonical_issue_reason": "No single canonical issue cleanly covers the whole cluster because it splits into unrelated bug themes; if forced to pick one representative, issue 45216 is the clearest regression-style report in the strongest duplicate pair.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45216 is the best standalone representative among the duplicate-like reports: it is broad, regression-focused, and its narrower follow-up (45357) maps to the same `save_pretrained` problem.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43646", + "right": "issue:44534", + "accept": false, + "reason": "Different failures: custom model init vs non-persistent buffer initialization." + }, + { + "left": "issue:45084", + "right": "issue:45507", + "accept": false, + "reason": "Unrelated code paths: template compilation error vs GraniteMoEHybrid method dispatch." + }, + { + "left": "issue:43994", + "right": "issue:44610", + "accept": false, + "reason": "Different models and symptoms; one is bad outputs, the other is a processor/image-size mismatch." + }, + { + "left": "issue:43645", + "right": "issue:44534", + "accept": false, + "reason": "Jupyter custom-model init issue is distinct from the buffer junk regression." + }, + { + "left": "issue:41553", + "right": "issue:43582", + "accept": false, + "reason": "Bad AutoTokenizer error for Voxtral is unrelated to an Apple Silicon warmup TypeError." + }, + { + "left": "issue:43322", + "right": "issue:43540", + "accept": false, + "reason": "Both are multimodal/video-related, but they target different models and concrete failures." + }, + { + "left": "issue:44403", + "right": "issue:45081", + "accept": false, + "reason": "Generic loading noise is not the same bug as the Mistral regex backend crash." 
+ }, + { + "left": "issue:45216", + "right": "issue:45357", + "accept": true, + "reason": "Same Qwen3.5 `save_pretrained` regression; 45357 is a narrower symptom of the same broken save path." + }, + { + "left": "issue:43377", + "right": "issue:43819", + "accept": false, + "reason": "Different model families and different numerical mismatch causes." + }, + { + "left": "issue:44451", + "right": "issue:45081", + "accept": false, + "reason": "Loading a specific model is unrelated to the Mistral tokenizer backend crash." + }, + { + "left": "issue:44220", + "right": "issue:44683", + "accept": false, + "reason": "Audio feature extraction and compiled flex_attention are separate subsystems." + }, + { + "left": "issue:44265", + "right": "issue:44610", + "accept": false, + "reason": "Export failure due to torch_compilable_check is unrelated to the OmDet processor size bug." + }, + { + "left": "issue:43404", + "right": "issue:45127", + "accept": false, + "reason": "Both mention weight tying, but they describe different models and different failure modes." + }, + { + "left": "issue:44987", + "right": "issue:45081", + "accept": false, + "reason": "Different model-loading regressions; no shared code-path evidence." + }, + { + "left": "issue:42915", + "right": "issue:45084", + "accept": false, + "reason": "Qwen3Moe FP8 config failure is unrelated to the template-node compilation error." + }, + { + "left": "issue:43632", + "right": "issue:44756", + "accept": false, + "reason": "Different bugs: `_is_hf_initialized` breakage vs mmap/OOM handling." + }, + { + "left": "issue:44479", + "right": "issue:45325", + "accept": false, + "reason": "Related model family, but one is a broad video regression and the other is a still-image rope-index scaling bug." + }, + { + "left": "issue:44062", + "right": "issue:44987", + "accept": false, + "reason": "AddedToken keyword conflict is unrelated to loading `physical-intelligence/fast`." 
+ }, + { + "left": "issue:43296", + "right": "issue:43531", + "accept": false, + "reason": "Different model families and failure mechanisms." + }, + { + "left": "issue:42915", + "right": "issue:44291", + "accept": false, + "reason": "FineGrainedFP8Config failure is not the same as the `_is_hf_initialized` init_empty_weights TypeError." + }, + { + "left": "issue:43611", + "right": "issue:44534", + "accept": false, + "reason": "Base-model-prefix loading regression is distinct from non-persistent buffer corruption." + }, + { + "left": "issue:36032", + "right": "issue:43742", + "accept": false, + "reason": "Tokenizer method name conflict is unrelated to MobileLLM key errors." + }, + { + "left": "issue:40444", + "right": "issue:45325", + "accept": false, + "reason": "IterableDataset multi-image failure is not the same as the Qwen2.5-VL rope-index regression." + }, + { + "left": "issue:43317", + "right": "issue:43856", + "accept": false, + "reason": "Dequantized offload/device-map bug is unrelated to Qwen3 MoE memory inefficiency." + }, + { + "left": "issue:40990", + "right": "issue:44811", + "accept": false, + "reason": "Perplexity anomaly and Whisper decode behavior are unrelated." + }, + { + "left": "issue:44079", + "right": "issue:45663", + "accept": false, + "reason": "ModelOutput key assignment and Gemma4 attention state rebuilding are different bugs." + }, + { + "left": "issue:42175", + "right": "issue:45542", + "accept": true, + "reason": "Both describe the same TensorFlow packaging/backend omission after install; 45542 is a narrower manifestation of the same missing-TF problem." + }, + { + "left": "issue:43421", + "right": "issue:44568", + "accept": false, + "reason": "Related tokenizer-special-token area, but not enough evidence they are the same concrete bug." + }, + { + "left": "issue:43531", + "right": "issue:44877", + "accept": false, + "reason": "Qwen3-MoE sliding-window behavior is unrelated to the granite_speech config load failure." 
+ } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays conservative. The two accepted pairs are plausible duplicates from their titles alone: 45216/45357 both concern Qwen3.5 `save_pretrained`, and 42175/45542 both concern missing/undefined TensorFlow backend installation behavior. The remaining rejects are appropriately cautious given the evidence." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 9, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:33666", + "issue:35707", + "issue:36032", + "issue:40444", + "issue:41720", + "issue:42175", + "issue:42915", + "issue:43296", + "issue:43377", + "issue:43425", + "issue:43454", + "issue:43482", + "issue:43525", + "issue:43526", + "issue:43531", + "issue:43611", + "issue:43632", + "issue:43644", + "issue:43646", + "issue:43749", + "issue:43792", + "issue:43818", + "issue:43828", + "issue:43881", + "issue:43883", + "issue:43994", + "issue:44079", + "issue:44265", + "issue:44291", + "issue:44403", + "issue:44466", + "issue:44488", + "issue:44534", + "issue:44589", + "issue:44683", + "issue:44829", + "issue:44898", + "issue:44991", + "issue:45020", + "issue:45072", + "issue:45081", + "issue:45325", + "issue:45357", + "issue:45381", + "issue:45479", + "issue:45491", + "issue:45538" + ], + "soft_pairs": [ + "issue:42915|issue:44589", + "issue:43818|issue:45357", + "issue:43632|issue:44534", + "issue:43425|issue:44403", + "issue:36032|issue:41720", + "issue:44683|issue:45020", + "issue:44829|issue:45479", + "issue:43482|issue:45538", + "issue:33666|issue:40444", + "issue:43792|issue:44589", + 
"issue:44403|issue:44991", + "issue:44079|issue:44534", + "issue:43377|issue:45491", + "issue:35707|issue:40444", + "issue:43482|issue:43525", + "issue:43749|issue:44079", + "issue:44291|issue:44589", + "issue:43881|issue:44488", + "issue:43644|issue:44079", + "issue:40444|issue:45381", + "issue:45325|issue:45381", + "issue:44265|issue:45072", + "issue:41720|issue:45081", + "issue:43632|issue:45072", + "issue:43531|issue:44291", + "issue:43611|issue:43646", + "issue:42175|issue:43296", + "issue:43994|issue:44898", + "issue:43454|issue:44466", + "issue:42915|issue:43482", + "issue:43526|issue:43883", + "issue:43828|issue:44683" + ], + "prepared_review_unit_hash": "e8ea2e32678e773748ab36fc95676192af28330d8609650f76e5133572d8eead", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 32, + "serialized_chars": 36813, + "estimated_input_tokens": 9204, + "estimated_eval_tokens": 18664 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly distinct model- or release-specific regression reports. 
A few pairs are thematically close (especially v5 and Qwen2.5-VL issues), but none are strong enough to merge as the same underlying bug/change.", + "confidence": 0.84, + "canonical_issue_reason": "issue:45020 is the broadest open umbrella for recent-transformers regressions around remote_code/model loading, so it best fits as the cluster anchor among these mostly specific breakage reports.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45020 is the most useful representative issue because it is broad, current, and open, making it the best landing point for related regression reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42915", + "right": "issue:44589", + "accept": false, + "reason": "Both mention FP8/float8 problems, but one is a FineGrainedFP8Config failure and the other is a Float8 storage lookup error; these look like different bugs." + }, + { + "left": "issue:43818", + "right": "issue:45357", + "accept": false, + "reason": "Different models and different code paths: Video-LLaVA tower/weights vs Qwen3.5 save_pretrained visual-key handling." + }, + { + "left": "issue:43632", + "right": "issue:44534", + "accept": false, + "reason": "Both are transformers v5 regressions, but one concerns `_is_hf_initialized` and the other non-persistent buffer serialization; not the same bug." + }, + { + "left": "issue:43425", + "right": "issue:44403", + "accept": false, + "reason": "Torch compatibility/versioning issue vs a generic loading-noise report; no concrete shared failure mode." + }, + { + "left": "issue:36032", + "right": "issue:41720", + "accept": false, + "reason": "Tokenizer method-name conflict and a CUDA assert during auto device mapping are unrelated failures." + }, + { + "left": "issue:44683", + "right": "issue:45020", + "accept": false, + "reason": "One is a torch>=2.9 compiled flex_attention failure; the other is a broader remote_code loading regression. Different issues." 
+ }, + { + "left": "issue:44829", + "right": "issue:45479", + "accept": false, + "reason": "Both affect classification training, but one is tied to flash_attention_3 and the other to `num_labels=1` loss behavior; different root causes." + }, + { + "left": "issue:43482", + "right": "issue:45538", + "accept": false, + "reason": "GGUF loading under transformers v5 and CLIPTokenizer's absurd `model_max_length` are unrelated." + }, + { + "left": "issue:33666", + "right": "issue:40444", + "accept": false, + "reason": "Different Qwen2-VL finetuning problems: multi-GPU training vs iterable-dataset multi-image prompting." + }, + { + "left": "issue:43792", + "right": "issue:44589", + "accept": false, + "reason": "Whisper runtime failure and a Float8 storage error do not point to the same underlying bug." + }, + { + "left": "issue:44403", + "right": "issue:44991", + "accept": false, + "reason": "Generic loading noise vs a specific tokenizer-loading regression for EMBEDDIA/est-roberta; not the same defect." + }, + { + "left": "issue:44079", + "right": "issue:44534", + "accept": false, + "reason": "ModelOutput key assignment and non-persistent buffer serialization are different internal behaviors." + }, + { + "left": "issue:43377", + "right": "issue:45491", + "accept": false, + "reason": "Both involve batching/padding effects, but one is MIMI encoder padding-mask mismatch and the other is Gemma3 sliding-window NaNs; different code paths." + }, + { + "left": "issue:35707", + "right": "issue:40444", + "accept": false, + "reason": "Progressive generation with `inputs_embeds`/`past_key_values` is unrelated to Qwen2.5-VL multi-image IterableDataset failures." + }, + { + "left": "issue:43482", + "right": "issue:43525", + "accept": false, + "reason": "GGUF loading failure and missing `pad_token_id` on `Llama4Config` are distinct model/load-path issues." 
+ }, + { + "left": "issue:43749", + "right": "issue:44079", + "accept": false, + "reason": "FSDP CPU RAM efficient loading is unrelated to ModelOutput key bookkeeping." + }, + { + "left": "issue:44291", + "right": "issue:44589", + "accept": false, + "reason": "`init_empty_weights` with unexpected `_is_hf_initialized` argument and a Float8 storage lookup error are separate problems." + }, + { + "left": "issue:43881", + "right": "issue:44488", + "accept": false, + "reason": "Model load failure for glm-4v-9b vs tokenizer loading for cjvt/sleng-bert; no shared bug." + }, + { + "left": "issue:43644", + "right": "issue:44079", + "accept": false, + "reason": "Non-persistent buffer junk and ModelOutput key assignment are unrelated regressions." + }, + { + "left": "issue:40444", + "right": "issue:45381", + "accept": false, + "reason": "Multi-image prompt finetuning and Qwen2.5-VL video position-id errors are different modalities and symptoms." + }, + { + "left": "issue:45325", + "right": "issue:45381", + "accept": false, + "reason": "Both are Qwen2.5-VL position-id issues, but one is still-image temporal scaling and the other is video vision-position IDs; close, but not clearly the same bug." + }, + { + "left": "issue:44265", + "right": "issue:45072", + "accept": false, + "reason": "torch.export / torch_compilable_check failures and bfloat16 dtype mismatches are not the same code-path issue." + }, + { + "left": "issue:41720", + "right": "issue:45081", + "accept": false, + "reason": "A800 auto device mapping CUDA asserts and Mistral tokenizer regex patch crashes are unrelated." + }, + { + "left": "issue:43632", + "right": "issue:45072", + "accept": false, + "reason": "`_is_hf_initialized` regression and bfloat16 dtype mismatch are distinct v5-related bugs." + }, + { + "left": "issue:43531", + "right": "issue:44291", + "accept": false, + "reason": "Qwen3-MoE sliding-window behavior and `init_empty_weights` argument handling do not share the same root cause." 
+ }, + { + "left": "issue:43611", + "right": "issue:43646", + "accept": false, + "reason": "Both are v5 loading/init regressions, but one is about `base_model_prefix` and the other custom model initialization; too different to treat as one bug." + }, + { + "left": "issue:42175", + "right": "issue:43296", + "accept": false, + "reason": "Packaging/back-end selection under pip install and PaddleOCR-VL loading in vLLM are unrelated." + }, + { + "left": "issue:43994", + "right": "issue:44898", + "accept": false, + "reason": "Different vision-model failures: nonsensical SigLIP2 outputs vs Perceiver resolution/interpolation failure." + }, + { + "left": "issue:43454", + "right": "issue:44466", + "accept": false, + "reason": "One is missing weight tying in AyaVisionConfig; the other is device-dependent serialization of tied weights. Related theme, but not the same bug." + }, + { + "left": "issue:42915", + "right": "issue:43482", + "accept": false, + "reason": "FineGrainedFP8Config failure and Qwen2.5-GGUF loading failure are unrelated." + }, + { + "left": "issue:43526", + "right": "issue:43883", + "accept": false, + "reason": "BeitImageProcessorFast label reduction and Molmo missing `all_tied_weights_keys` are different implementation bugs." + }, + { + "left": "issue:43828", + "right": "issue:44683", + "accept": false, + "reason": "Autocast dtype mismatch in Phi-tiny-MoE and compiled flex_attention failure on torch>=2.9 are separate issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 10, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:34634", + "issue:35707", + "issue:39692", + "issue:40444", + "issue:40990", + "issue:41720", + "issue:43262", + "issue:43296", + "issue:43317", + "issue:43322", + "issue:43377", + "issue:43404", + "issue:43454", + "issue:43525", + "issue:43526", + "issue:43550", + "issue:43606", + "issue:43632", + "issue:43701", + "issue:43720", + "issue:43746", + "issue:43749", + "issue:43792", + "issue:43819", + "issue:43873", + "issue:43881", + "issue:43957", + "issue:44186", + "issue:44265", + "issue:44291", + "issue:44387", + "issue:44466", + "issue:44534", + "issue:44589", + "issue:44610", + "issue:44617", + "issue:44683", + "issue:44877", + "issue:44898", + "issue:44991", + "issue:45072", + "issue:45325", + "issue:45357", + "issue:45563", + "issue:45584", + "issue:45632", + "issue:45684", + "issue:45701" + ], + "soft_pairs": [ + "issue:44387|issue:44617", + "issue:43525|issue:44589", + "issue:43792|issue:43957", + "issue:43881|issue:44991", + "issue:43701|issue:45325", + "issue:43262|issue:43746", + "issue:43317|issue:43873", + "issue:43632|issue:45563", + "issue:40990|issue:45584", + "issue:44291|issue:44877", + "issue:34634|issue:35707", + "issue:43606|issue:44265", + "issue:43873|issue:45701", + "issue:44186|issue:44265", + "issue:44610|issue:45325", + "issue:35707|issue:41720", + "issue:43454|issue:43550", + "issue:43606|issue:43819", + "issue:43377|issue:44610", + "issue:43377|issue:44898", + "issue:43454|issue:45072", + "issue:39692|issue:40444", + "issue:43749|issue:44534", + 
"issue:44466|issue:45357", + "issue:43792|issue:44683", + "issue:43454|issue:43720", + "issue:43296|issue:44991", + "issue:43296|issue:44877", + "issue:41720|issue:43322", + "issue:45632|issue:45684", + "issue:43526|issue:43881", + "issue:43404|issue:45357" + ], + "prepared_review_unit_hash": "a3f019850be47be4d50b99b16ba25fc72bb988de21f306a90e943eba929fce66", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 32, + "serialized_chars": 37325, + "estimated_input_tokens": 9332, + "estimated_eval_tokens": 18920 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a loose set of Transformer/model-loading regressions with only superficial title similarity. None of the soft pairs look like the same underlying bug or change, so I would not merge any of them as duplicates.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:43632 is the broadest representative of the cluster because it describes a general Transformers v5 initialization/loading regression that could sit behind several downstream failures, but it is still too general to serve as a true duplicate anchor for the rest.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44387", + "right": "issue:44617", + "accept": false, + "reason": "Both mention OOM, but one is int4 quantization CUDA reserved memory and the other is a Sam3Video load-time CUDA OOM; different model and failure mechanism." 
+ }, + { + "left": "issue:43525", + "right": "issue:44589", + "accept": false, + "reason": "Llama4Config missing pad_token_id is a config-attribute bug; Float8 storage lookup is a serialization/storage-type error. No shared code path." + }, + { + "left": "issue:43792", + "right": "issue:43957", + "accept": false, + "reason": "Whisper-large-v2 runtime failure and meta-device loading regressions are both loading-related, but they are distinct model/path problems." + }, + { + "left": "issue:43881", + "right": "issue:44991", + "accept": false, + "reason": "glm-4v-9b loading failure and tokenizer loading for EMBEDDIA/est-roberta are unrelated models with different failure surfaces." + }, + { + "left": "issue:43701", + "right": "issue:45325", + "accept": false, + "reason": "Checkpoint resume key mismatch is a training-state issue; Qwen2.5-VL rope index scaling is an inference/indexing bug." + }, + { + "left": "issue:43262", + "right": "issue:43746", + "accept": false, + "reason": "Audio chat-template sampling-rate default and GraniteSpeech PEFT checkpoint loading are separate audio/model-loading issues." + }, + { + "left": "issue:43317", + "right": "issue:43873", + "accept": false, + "reason": "Both involve offloading/quantization, but one is a specific dequantized-model load failure and the other is a broader offloading behavior report." + }, + { + "left": "issue:43632", + "right": "issue:45563", + "accept": false, + "reason": "_is_hf_initialized breaks model init/loading; stale num_return_sequences warning is a generate warning bug." + }, + { + "left": "issue:40990", + "right": "issue:45584", + "accept": false, + "reason": "High perplexity on gpt-oss-20b and Whisper empty-transcription failure are unrelated model behaviors." + }, + { + "left": "issue:44291", + "right": "issue:44877", + "accept": false, + "reason": "Unexpected _is_hf_initialized argument during init_empty_weights and strict GraniteSpeech config rejection are different root causes." 
+ }, + { + "left": "issue:34634", + "right": "issue:35707", + "accept": false, + "reason": "BarkProcessor voice_preset and progressive generation with inputs_embeds/past_key_values are unrelated API/model issues." + }, + { + "left": "issue:43606", + "right": "issue:44265", + "accept": false, + "reason": "Bark-small CPU offload device mismatch and torch.export failure with torch_compilable_check are different subsystems." + }, + { + "left": "issue:43873", + "right": "issue:45701", + "accept": false, + "reason": "Quantization/offloading behavior and tokenizer changes across versions are not the same bug." + }, + { + "left": "issue:44186", + "right": "issue:44265", + "accept": false, + "reason": "LayoutLMv2Tokenizer padding/truncation crashes are a tokenizer bug; torch.export with torch_compilable_check is an export-time failure." + }, + { + "left": "issue:44610", + "right": "issue:45325", + "accept": false, + "reason": "OmDet-Turbo processor size mismatch and Qwen2.5-VL rope-index scaling affect different model components." + }, + { + "left": "issue:35707", + "right": "issue:41720", + "accept": false, + "reason": "inputs_embeds/past_key_values generation issue and Qwen3 auto-device-map cudaErrorAssert are unrelated failures." + }, + { + "left": "issue:43454", + "right": "issue:43550", + "accept": false, + "reason": "AyaVision lm_head weight tying and Bamba-9B-v2 torch.compile+SDPA are different models and code paths." + }, + { + "left": "issue:43606", + "right": "issue:43819", + "accept": false, + "reason": "Bark-small offload device mismatch and DAC.from_latents mismatch are separate audio-model bugs." + }, + { + "left": "issue:43377", + "right": "issue:44610", + "accept": false, + "reason": "MIMI batched-vs-single padding-mask bug and OmDet-Turbo processor/model-size mismatch are unrelated." 
+ }, + { + "left": "issue:43377", + "right": "issue:44898", + "accept": false, + "reason": "Missing padding-mask support in MIMI and Perceiver non-default-resolution failure are different model-specific issues." + }, + { + "left": "issue:43454", + "right": "issue:45072", + "accept": false, + "reason": "lm_head tying in AyaVision and dtype mismatches in SwitchTransformers/TimmWrapperModel are unrelated regressions." + }, + { + "left": "issue:39692", + "right": "issue:40444", + "accept": false, + "reason": "SigLIP2 docs example errors and Qwen2.5-VL IterableDataset training failure are different problems; one is documentation, the other training/input handling." + }, + { + "left": "issue:43749", + "right": "issue:44534", + "accept": false, + "reason": "FSDP CPU RAM efficient loading breakage and non-persistent buffers filled with junk are both v5-related, but the underlying bugs differ." + }, + { + "left": "issue:44466", + "right": "issue:45357", + "accept": false, + "reason": "Both concern serialization/save_pretrained regressions, but one is lm_head.weight tying and the other is incorrect visual-encoder keys." + }, + { + "left": "issue:43792", + "right": "issue:44683", + "accept": false, + "reason": "Whisper runtime failure and compiled flex_attention failure on torch>=2.9 are unrelated." + }, + { + "left": "issue:43454", + "right": "issue:43720", + "accept": false, + "reason": "AyaVision weight tying and BitNet packed-weight unpacking during accelerate loading are different load paths." + }, + { + "left": "issue:43296", + "right": "issue:44991", + "accept": false, + "reason": "PaddleOCR-VL load failure in vLLM and tokenizer loading failure for EMBEDDIA/est-roberta are not the same issue." + }, + { + "left": "issue:43296", + "right": "issue:44877", + "accept": false, + "reason": "PaddleOCR-VL model loading and strict GraniteSpeech config loading are distinct failures." 
+ }, + { + "left": "issue:41720", + "right": "issue:43322", + "accept": false, + "reason": "Qwen3 auto device mapping cudaErrorAssert and Llava Next segmentation fault are different model-loading failures with no shared code path." + }, + { + "left": "issue:45632", + "right": "issue:45684", + "accept": false, + "reason": "Cache-path collision for trust_remote_code and read-only permissions propagation in save_pretrained are separate filesystem issues." + }, + { + "left": "issue:43526", + "right": "issue:43881", + "accept": false, + "reason": "BeitImageProcessorFast reduce_labels bug and glm-4v-9b loading failure are unrelated." + }, + { + "left": "issue:43404", + "right": "issue:45357", + "accept": false, + "reason": "Mistral3 lm_head tying and Qwen3.5 visual-encoder key serialization are different model/config regressions." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 11, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:36010", + "issue:36331", + "issue:40990", + "issue:41720", + "issue:42617", + "issue:42915", + "issue:43295", + "issue:43316", + "issue:43317", + "issue:43322", + "issue:43388", + "issue:43421", + "issue:43450", + "issue:43531", + "issue:43540", + "issue:43550", + "issue:43572", + "issue:43577", + "issue:43606", + "issue:43708", + "issue:43720", + "issue:43782", + "issue:43873", + "issue:44186", + "issue:44292", + "issue:44361", + "issue:44387", + "issue:44423", + "issue:44479", + "issue:44492", + "issue:44509", + "issue:44610", + "issue:44671", + "issue:44743", + "issue:44756", + "issue:44877", + "issue:44898", + "issue:44913", + "issue:44918", + 
"issue:44977", + "issue:44991", + "issue:45059", + "issue:45072", + "issue:45161", + "issue:45406", + "issue:45464", + "issue:45701", + "issue:45704" + ], + "soft_pairs": [ + "issue:41720|issue:44977", + "issue:44423|issue:45406", + "issue:40990|issue:45701", + "issue:44186|issue:45072", + "issue:43873|issue:44756", + "issue:43577|issue:43708", + "issue:44387|issue:45704", + "issue:42617|issue:42915", + "issue:44877|issue:44991", + "issue:43450|issue:45701", + "issue:44479|issue:44918", + "issue:44610|issue:44898", + "issue:43550|issue:43720", + "issue:36331|issue:42915", + "issue:36331|issue:42617", + "issue:44186|issue:44610", + "issue:43316|issue:44913", + "issue:44671|issue:44743", + "issue:42915|issue:43317", + "issue:43295|issue:45701", + "issue:43550|issue:43606", + "issue:41720|issue:42617", + "issue:43531|issue:43572", + "issue:41720|issue:45161", + "issue:44292|issue:44479", + "issue:43388|issue:45059", + "issue:44186|issue:44361", + "issue:43540|issue:43782", + "issue:42915|issue:43322", + "issue:41720|issue:43873", + "issue:36010|issue:42617", + "issue:45406|issue:45464", + "issue:36331|issue:41720", + "issue:43421|issue:45701", + "issue:44492|issue:44509" + ], + "prepared_review_unit_hash": "afcb82aacfa15a5d3d0b91f4f4f792ec8127c090451cff972af8788896d79520", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 35, + "serialized_chars": 38063, + "estimated_input_tokens": 9516, + "estimated_eval_tokens": 19288 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the soft pairs are mostly superficial keyword matches across unrelated model families, subsystems, and failure modes. 
None look like the same underlying bug/change, so there is no coherent duplicate set here.", +     "confidence": 0.97, +     "canonical_issue_reason": "No canonical issue: the items do not converge on one underlying defect; they span unrelated bugs in tokenization, model loading, quantization, serving, video processing, and training.", +     "canonical_pr_reason": null, +     "best_issue_reason": "No global best issue is suitable, because none of these issues is representative of a real duplicate cluster and several are clearly unrelated false positives.", +     "best_pr_reason": null, +     "soft_edge_verdicts": [ +       { +         "left": "issue:41720", +         "right": "issue:44977", +         "accept": false, +         "reason": "Both are Qwen-related, but one is an auto-device-map CUDA assert and the other is a flash-attention generation problem; different bugs." +       }, +       { +         "left": "issue:44423", +         "right": "issue:45406", +         "accept": false, +         "reason": "Both involve serving multimodal models, but the failures are different AttributeErrors on different objects/code paths." +       }, +       { +         "left": "issue:40990", +         "right": "issue:45701", +         "accept": false, +         "reason": "One is a perplexity complaint on a specific model/dataset; the other is a version-dependent tokenization change. Too broad and not the same bug." +       }, +       { +         "left": "issue:44186", +         "right": "issue:45072", +         "accept": false, +         "reason": "Completely different subsystems: tokenizer NER/padding crash vs dtype mismatch in inference." +       }, +       { +         "left": "issue:43873", +         "right": "issue:44756", +         "accept": false, +         "reason": "Both mention memory/offload, but one is quantization offloading behavior and the other is mmap OOM on Strix Halo; not the same defect." +       }, +       { +         "left": "issue:43577", +         "right": "issue:43708", +         "accept": false, +         "reason": "One is a model dtype loading bug; the other is Trainer checkpoint resume step calculation. Unrelated." 
+ }, + { + "left": "issue:44387", + "right": "issue:45704", + "accept": false, + "reason": "Both are memory-related, but one concerns int4 quantization reserved memory and the other a T5/apex memory leak; different causes and fixes." + }, + { + "left": "issue:42617", + "right": "issue:42915", + "accept": false, + "reason": "Different models and failures: a 3d_parallel.py runtime issue vs Qwen3Moe FP8 loading failure." + }, + { + "left": "issue:44877", + "right": "issue:44991", + "accept": false, + "reason": "Both are loading problems, but config strictness and tokenizer loading are distinct bugs." + }, + { + "left": "issue:43450", + "right": "issue:45701", + "accept": false, + "reason": "Video processor batching shape bug vs tokenization/version change; no shared code path." + }, + { + "left": "issue:44479", + "right": "issue:44918", + "accept": false, + "reason": "Both mention Qwen and multimodal/training, but one is a video-input regression and the other is an SFT embedding-unpacking failure." + }, + { + "left": "issue:44610", + "right": "issue:44898", + "accept": false, + "reason": "Both are image-resolution mismatches, but they affect different models and different preprocessing/model expectations; not the same bug." + }, + { + "left": "issue:43550", + "right": "issue:43720", + "accept": false, + "reason": "One is torch.compile with SDPA on Bamba; the other is accelerate loading/unpacking packed BitNet weights. Different code paths." + }, + { + "left": "issue:36331", + "right": "issue:42915", + "accept": false, + "reason": "Trainer compute_loss API break vs Qwen3Moe FP8 load failure; unrelated." + }, + { + "left": "issue:36331", + "right": "issue:42617", + "accept": false, + "reason": "CustomTrainer keyword-arg regression vs 3d_parallel.py failure; unrelated." + }, + { + "left": "issue:44186", + "right": "issue:44610", + "accept": false, + "reason": "Tokenizer crash on NER/padding vs OmDet-Turbo processor image-size mismatch; unrelated." 
+ }, + { + "left": "issue:43316", + "right": "issue:44913", + "accept": false, + "reason": "Both are config/API consistency issues, but for different config classes and different state-reload behavior." + }, + { + "left": "issue:44671", + "right": "issue:44743", + "accept": false, + "reason": "CamemBERT masked-LM prediction issue and Qwen3.5 recurrent-state reset are unrelated model-specific bugs." + }, + { + "left": "issue:42915", + "right": "issue:43317", + "accept": false, + "reason": "Both involve loading/offload themes, but one is Qwen3Moe FP8 failure and the other dequantized model offload loading; not the same bug." + }, + { + "left": "issue:43295", + "right": "issue:45701", + "accept": false, + "reason": "Regression around processor.tokenizer/images vs a general tokenization-version complaint; different issues." + }, + { + "left": "issue:43550", + "right": "issue:43606", + "accept": false, + "reason": "Different models and failures: SDPA/torch.compile on Bamba vs CPU-offload device mismatch on Bark." + }, + { + "left": "issue:41720", + "right": "issue:42617", + "accept": false, + "reason": "Qwen auto device mapping CUDA assert and 3d_parallel.py failure do not share a concrete bug." + }, + { + "left": "issue:43531", + "right": "issue:43572", + "accept": false, + "reason": "Sliding-window behavior in Qwen3-MoE vs missing pad_token_idx in StableLmConfig; unrelated." + }, + { + "left": "issue:41720", + "right": "issue:45161", + "accept": false, + "reason": "Both touch MoE/model-parallel topics, but one is a device-map CUDA assert and the other tensor-parallel support for GPT-OSS; different code paths." + }, + { + "left": "issue:44292", + "right": "issue:44479", + "accept": false, + "reason": "Different Qwen variants and different failures: NVFP4 runtime error vs video-input regression." 
+ }, + { + "left": "issue:43388", + "right": "issue:45059", + "accept": false, + "reason": "Metric-gathering label truncation vs SAM3 text/bounding-box odd behavior; unrelated." + }, + { + "left": "issue:44186", + "right": "issue:44361", + "accept": false, + "reason": "Different tokenizer/model bugs with no shared code path." + }, + { + "left": "issue:43540", + "right": "issue:43782", + "accept": false, + "reason": "Video-processing ValueError in Qwen3OmniMoe vs Qwen3VL weight_only loading error; similar family, different bug." + }, + { + "left": "issue:42915", + "right": "issue:43322", + "accept": false, + "reason": "Qwen3Moe FP8 failure and Llava Next segmentation fault are unrelated model-loading problems." + }, + { + "left": "issue:41720", + "right": "issue:43873", + "accept": false, + "reason": "Auto device mapping CUDA assert vs quantization offloading behavior; too broad to be the same defect." + }, + { + "left": "issue:36010", + "right": "issue:42617", + "accept": false, + "reason": "ImportError for GenerationMixin vs 3d_parallel.py runtime issue; unrelated." + }, + { + "left": "issue:45406", + "right": "issue:45464", + "accept": false, + "reason": "Both are serving/runtime issues, but one is Gemma4Processor missing _tokenizer and the other is Qwen3.5 streaming chat/completions failure; different bugs." + }, + { + "left": "issue:36331", + "right": "issue:41720", + "accept": false, + "reason": "Trainer compute_loss signature regression vs Qwen auto device-map CUDA assert; unrelated." + }, + { + "left": "issue:43421", + "right": "issue:45701", + "accept": false, + "reason": "Runtime special-token post-processor update issue vs general tokenization-version change; not the same bug." + }, + { + "left": "issue:44492", + "right": "issue:44509", + "accept": false, + "reason": "A typo in cache strategy docs/code naming is not the same underlying issue as stale docs mentioning removed pipeline tasks." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 12, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:30064", + "issue:33357", + "issue:34567", + "issue:34634", + "issue:35707", + "issue:39692", + "issue:41720", + "issue:42886", + "issue:43381", + "issue:43421", + "issue:43525", + "issue:43531", + "issue:43550", + "issue:43606", + "issue:43653", + "issue:43720", + "issue:43746", + "issue:43782", + "issue:43844", + "issue:43873", + "issue:43927", + "issue:43994", + "issue:44062", + "issue:44077", + "issue:44186", + "issue:44368", + "issue:44451", + "issue:44514", + "issue:44545", + "issue:44589", + "issue:44610", + "issue:44625", + "issue:44671", + "issue:44843", + "issue:44898", + "issue:44918", + "issue:44936", + "issue:44987", + "issue:44991", + "issue:45042", + "issue:45072", + "issue:45081", + "issue:45356", + "issue:45357", + "issue:45381", + "issue:45479", + "issue:45704" + ], + "soft_pairs": [ + "issue:33357|issue:35707", + "issue:44514|issue:44545", + "issue:30064|issue:34634", + "issue:41720|issue:43746", + "issue:43606|issue:43720", + "issue:43873|issue:45704", + "issue:43550|issue:43653", + "issue:44625|issue:45479", + "issue:44368|issue:45357", + "issue:43421|issue:43927", + "issue:43606|issue:43653", + "issue:34567|issue:35707", + "issue:44062|issue:44451", + "issue:42886|issue:44843", + "issue:34634|issue:41720", + "issue:44451|issue:44991", + "issue:43782|issue:45381", + "issue:43421|issue:43653", + "issue:44898|issue:45072", + "issue:44077|issue:44625", + "issue:44987|issue:45042", + "issue:44918|issue:44936", + "issue:43531|issue:44589", + 
"issue:43421|issue:44186", + "issue:43994|issue:44671", + "issue:43381|issue:43844", + "issue:39692|issue:44610", + "issue:45081|issue:45356", + "issue:43525|issue:43531" + ], + "prepared_review_unit_hash": "c8a901a66f9db0d6b3f7cc0f6c24f66aac4c3dce2e50696a50a672280724d4fc", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 29, + "serialized_chars": 36083, + "estimated_input_tokens": 9021, + "estimated_eval_tokens": 18298 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most pairs are clearly unrelated. I accepted two soft edges where the issues describe the same underlying failure, and rejected the rest as different bugs/features. No PRs were present in the input.", + "confidence": 0.76, + "canonical_issue_reason": "issue:44545 is the cleaner duplicate target for the Qwen2_5_VLProcessor batched padding=False crash: it has the exact symptom/title and more inbound references than 44514.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44843 is the strongest standalone issue in the set: it names the concrete offline failure path (`_patch_mistral_regex` / `model_info()` in `HF_HUB_OFFLINE` mode) and is directly actionable.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:33357", + "right": "issue:35707", + "accept": false, + "reason": "Unrelated bugs: MacOS bus error with a CLIP model vs progressive generation with `inputs_embeds`/`past_key_values`." + }, + { + "left": "issue:44514", + "right": "issue:44545", + "accept": true, + "reason": "Same bug: both report `Qwen2_5_VLProcessor.apply_chat_template` crashing on batched input when `padding=False`." 
+ }, + { + "left": "issue:30064", + "right": "issue:34634", + "accept": false, + "reason": "Different subsystems and failures: segmentation-map processing vs Bark `voice_preset` handling." + }, + { + "left": "issue:41720", + "right": "issue:43746", + "accept": false, + "reason": "Different code paths: auto device mapping CUDA assert vs PEFT/local-checkpoint loading for GraniteSpeech." + }, + { + "left": "issue:43606", + "right": "issue:43720", + "accept": false, + "reason": "Both involve loading/offload, but the concrete failures differ: device mismatch vs packed-weight unpacking." + }, + { + "left": "issue:43873", + "right": "issue:45704", + "accept": false, + "reason": "Quantization/offloading behavior is unrelated to the T5 Apex RMSNorm memory leak." + }, + { + "left": "issue:43550", + "right": "issue:43653", + "accept": false, + "reason": "Different problems: `torch.compile`/SDPA on Bamba vs tokenizer special-token decode output." + }, + { + "left": "issue:44625", + "right": "issue:45479", + "accept": false, + "reason": "Both mention `num_labels`, but one is config propagation and the other is zero-loss classification behavior." + }, + { + "left": "issue:44368", + "right": "issue:45357", + "accept": false, + "reason": "Unrelated: a tied-embeddings warning during LoRA fine-tuning vs incorrect visual-encoder keys on save/load." + }, + { + "left": "issue:43421", + "right": "issue:43927", + "accept": false, + "reason": "Different tokenization bugs: runtime post-processor updates vs saving/loading custom token IDs." + }, + { + "left": "issue:44062", + "right": "issue:44451", + "accept": false, + "reason": "An `AddedToken` argument error is not the same as failing to load a specific model checkpoint." 
+ }, + { + "left": "issue:42886", + "right": "issue:44843", + "accept": true, + "reason": "Same underlying offline tokenizer failure: both center on `HF_HUB_OFFLINE=1` breaking cache-based loading, and 44843 identifies the concrete `model_info()` call in the patch path." + }, + { + "left": "issue:43531", + "right": "issue:44589", + "accept": false, + "reason": "Unrelated failures: Qwen3-MoE `sliding_window` vs missing `Float8_e4m3fnStorage`." + }, + { + "left": "issue:43421", + "right": "issue:44186", + "accept": false, + "reason": "Both are tokenizer-related, but one is about special-token post-processing and the other is a crash on NER/batched padding." + }, + { + "left": "issue:43994", + "right": "issue:44671", + "accept": false, + "reason": "Different models and symptoms: SigLIP2 pipeline output issues vs CamemBERT masked-LM predictions." + }, + { + "left": "issue:44898", + "right": "issue:45072", + "accept": false, + "reason": "Different failure modes: resolution/interpolation mismatch vs dtype mismatch in bfloat16 inference." + }, + { + "left": "issue:44077", + "right": "issue:44625", + "accept": false, + "reason": "No shared bug: `post_init` policy for PatchTSMixer vs Qwen3.5 `num_labels` propagation." + }, + { + "left": "issue:44987", + "right": "issue:45042", + "accept": false, + "reason": "Different issues: loading `physical-intelligence/fast` vs PIL image processors wrongly requiring torchvision." + }, + { + "left": "issue:44918", + "right": "issue:44936", + "accept": false, + "reason": "Both involve trainers, but one is about unpacking Qwen3.5 input embeddings with TRL and the other about `evaluate()` after `train()`." + }, + { + "left": "issue:43531", + "right": "issue:44589", + "accept": false, + "reason": "Different bugs with no concrete overlap: model sliding-window config vs a float8 storage lookup error." 
+ }, + { + "left": "issue:43381", + "right": "issue:43844", + "accept": false, + "reason": "Unrelated training/runtime behaviors: gradient checkpointing in eval mode vs gradient explosion with ZeRO-3." + }, + { + "left": "issue:39692", + "right": "issue:44610", + "accept": false, + "reason": "A documentation/example mismatch is not the same as a processor/model input-size mismatch." + }, + { + "left": "issue:45081", + "right": "issue:45356", + "accept": false, + "reason": "Related area (`fix_mistral_regex`), but the concrete failures differ: a crash on missing `backend_tokenizer` vs codec/regression/warning behavior in Kimi-K2.5." + }, + { + "left": "issue:43525", + "right": "issue:43531", + "accept": false, + "reason": "Different root causes: missing `pad_token_id` on `Llama4Config` vs Qwen3-MoE `sliding_window` behavior." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The analysis is grounded in the packet and the soft-edge decisions are conservative. The two accepted pairs are effectively duplicate symptom reports, while the rejected pairs are mostly clearly different bugs/features. The issue-only summary is also accurate, since no PRs appear in the input." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 13, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:30064", + "issue:36010", + "issue:39692", + "issue:40444", + "issue:41720", + "issue:43322", + "issue:43335", + "issue:43381", + "issue:43421", + "issue:43531", + "issue:43540", + "issue:43550", + "issue:43577", + "issue:43596", + "issue:43618", + "issue:43643", + "issue:43644", + "issue:43653", + "issue:43697", + "issue:43716", + "issue:43720", + "issue:43746", + "issue:43749", + "issue:43756", + "issue:43818", + "issue:43819", + "issue:43873", + "issue:43927", + "issue:44008", + "issue:44062", + "issue:44186", + "issue:44220", + "issue:44448", + "issue:44488", + "issue:44661", + "issue:44671", + "issue:44811", + "issue:44843", + "issue:44877", + "issue:44898", + "issue:45072", + "issue:45081", + "issue:45183", + "issue:45381", + "issue:45479", + "issue:45685", + "issue:45704" + ], + "soft_pairs": [ + "issue:43596|issue:44220", + "issue:43618|issue:43697", + "issue:41720|issue:45704", + "issue:43746|issue:45381", + "issue:43381|issue:45704", + "issue:45183|issue:45479", + "issue:39692|issue:43873", + "issue:43746|issue:43873", + "issue:44220|issue:44661", + "issue:40444|issue:43746", + "issue:43540|issue:44008", + "issue:43716|issue:44008", + "issue:43335|issue:43756", + "issue:43720|issue:43819", + "issue:43653|issue:43720", + "issue:30064|issue:36010", + "issue:44062|issue:44488", + "issue:44843|issue:45081", + "issue:43421|issue:44811", + "issue:43322|issue:45685", + "issue:43577|issue:43818", + "issue:44186|issue:44898", + "issue:43550|issue:45072", + "issue:43531|issue:43643", + "issue:43643|issue:44877", + 
"issue:43550|issue:43927", + "issue:43540|issue:45381", + "issue:44448|issue:44671", + "issue:43644|issue:43749" + ], + "prepared_review_unit_hash": "90251e6fc359fbfe3b6237cdcd4c9dc41209d1dac58a78f3107b5a4b9788210d", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 29, + "serialized_chars": 36155, + "estimated_input_tokens": 9039, + "estimated_eval_tokens": 18334 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a heterogeneous grab-bag of unrelated tokenizer, vision, quantization, loading, and regression reports. None of the soft pairs look like the same underlying bug/change, so I would not merge any of them as duplicates.", + "confidence": 0.97, + "canonical_issue_reason": "Issue 44843 is the best standalone representative because it is a concrete core-path AutoTokenizer regression with broad user impact and a clear reproduction; the rest are mostly model-specific or orthogonal bugs, so there is no strong cluster-wide duplicate canonical.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44843 is the most generally impactful and self-contained bug in the set, and it is better suited as a representative issue than the many narrow model-specific reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43596", + "right": "issue:44220", + "accept": false, + "reason": "Different failure areas: DeepSpeed/BertModel zero-sized indexing vs audio fbank feature extraction." + }, + { + "left": "issue:43618", + "right": "issue:43697", + "accept": false, + "reason": "Both are Transformers v5 regressions, but they affect different models and different code paths." 
+ }, + { + "left": "issue:41720", + "right": "issue:45704", + "accept": false, + "reason": "Unrelated problems: Qwen3 auto device mapping CUDA assert vs T5 RMSNorm memory leak." + }, + { + "left": "issue:43746", + "right": "issue:45381", + "accept": false, + "reason": "Local PEFT checkpoint loading for GraniteSpeech is unrelated to Qwen2.5-VL video position-id errors." + }, + { + "left": "issue:43381", + "right": "issue:45704", + "accept": false, + "reason": "Gradient checkpointing in eval mode and an apex RMSNorm leak are separate bugs." + }, + { + "left": "issue:45183", + "right": "issue:45479", + "accept": false, + "reason": "Input validation/XOR messaging bug is not the same as the zero-loss classification bug." + }, + { + "left": "issue:39692", + "right": "issue:43873", + "accept": false, + "reason": "Docs example mismatches/quantization errors are unrelated to offloading behavior with quantization." + }, + { + "left": "issue:43746", + "right": "issue:43873", + "accept": false, + "reason": "GraniteSpeech local checkpoint loading and quantized offloading failures are different issues." + }, + { + "left": "issue:44220", + "right": "issue:44661", + "accept": false, + "reason": "Audio feature extraction bug is unrelated to add-new-model-like failing inside TOKENIZER_MAPPING_NAMES." + }, + { + "left": "issue:40444", + "right": "issue:43746", + "accept": false, + "reason": "Qwen2.5-VL multi-image iterable dataset failure is unrelated to GraniteSpeech checkpoint loading." + }, + { + "left": "issue:43540", + "right": "issue:44008", + "accept": false, + "reason": "Video-input processing for Qwen3OmniMoe and Gemma3n variable-name collision are different code-path failures." + }, + { + "left": "issue:43716", + "right": "issue:44008", + "accept": false, + "reason": "Image-preprocessor dtype mismatch and audio_mel_mask attribute collision are unrelated model bugs." 
+ }, + { + "left": "issue:43335", + "right": "issue:43756", + "accept": false, + "reason": "SwitchTransformers sparse-layer creation and Smollm3 RoPE-layer dropping are different configuration/model issues." + }, + { + "left": "issue:43720", + "right": "issue:43819", + "accept": false, + "reason": "Packed-weight unpacking during accelerate loading is unrelated to DAC.from_latents/STE mismatch." + }, + { + "left": "issue:43653", + "right": "issue:43720", + "accept": false, + "reason": "BigBirdTokenizer special-token registration and BitNet packed-weight loading are different bugs." + }, + { + "left": "issue:30064", + "right": "issue:36010", + "accept": false, + "reason": "Void segmentation-map processing and a GenerationMixin import error are unrelated." + }, + { + "left": "issue:44062", + "right": "issue:44488", + "accept": false, + "reason": "AddedToken special-argument conflict is not the same as failing to load cjvt/sleng-bert." + }, + { + "left": "issue:44843", + "right": "issue:45081", + "accept": false, + "reason": "Both involve _patch_mistral_regex, but one is an offline model_info call and the other is a backend_tokenizer attribute crash; different root causes." + }, + { + "left": "issue:43421", + "right": "issue:44811", + "accept": false, + "reason": "Runtime special-token/post-processor syncing is unrelated to Whisper batch_decode ignoring skip_special_tokens." + }, + { + "left": "issue:43322", + "right": "issue:45685", + "accept": false, + "reason": "Llava Next segfault and an MPS histogram op limitation are unrelated." + }, + { + "left": "issue:43577", + "right": "issue:43818", + "accept": false, + "reason": "Blip2 dtype propagation and Video-LLaVA temporal-attention/weight-sharing problems are different issues." + }, + { + "left": "issue:44186", + "right": "issue:44898", + "accept": false, + "reason": "LayoutLMv2 tokenizer padding/NER crash is unrelated to Perceiver interpolation failure." 
+ }, + { + "left": "issue:43550", + "right": "issue:45072", + "accept": false, + "reason": "torch.compile with SDPA on Bamba-9B-v2 is unrelated to bf16 dtype mismatches in SwitchTransformers/TimmWrapperModel." + }, + { + "left": "issue:43531", + "right": "issue:43643", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior and missing config fields from trust_remote_code are separate bugs." + }, + { + "left": "issue:43643", + "right": "issue:44877", + "accept": false, + "reason": "Missing fields from trust_remote_code and strict granite_speech config rejection are different config-loading failures." + }, + { + "left": "issue:43550", + "right": "issue:43927", + "accept": false, + "reason": "Bamba SDPA compile failure and DiaConfig token-ID persistence loss are unrelated." + }, + { + "left": "issue:43540", + "right": "issue:45381", + "accept": false, + "reason": "These are different Qwen multimodal bugs with different models and failure modes." + }, + { + "left": "issue:44448", + "right": "issue:44671", + "accept": false, + "reason": "Different output regressions in Pegasus vs CamemBERT are not the same bug." + }, + { + "left": "issue:43644", + "right": "issue:43749", + "accept": false, + "reason": "Non-persistent buffer initialization junk and FSDP CPU RAM efficient loading are unrelated loading problems." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 14, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:30064", + "issue:34634", + "issue:36010", + "issue:36331", + "issue:41720", + "issue:42175", + "issue:43262", + "issue:43526", + "issue:43550", + "issue:43577", + "issue:43582", + "issue:43606", + "issue:43650", + "issue:43653", + "issue:43697", + "issue:43720", + "issue:43749", + "issue:43782", + "issue:43819", + "issue:43825", + "issue:43827", + "issue:43906", + "issue:43994", + "issue:44060", + "issue:44186", + "issue:44291", + "issue:44368", + "issue:44442", + "issue:44485", + "issue:44509", + "issue:44554", + "issue:44561", + "issue:44610", + "issue:44671", + "issue:44805", + "issue:44821", + "issue:44857", + "issue:44936", + "issue:44977", + "issue:44991", + "issue:45020", + "issue:45072", + "issue:45081", + "issue:45084", + "issue:45325", + "issue:45406", + "issue:45701" + ], + "soft_pairs": [ + "issue:43782|issue:44977", + "issue:44561|issue:45020", + "issue:43653|issue:43819", + "issue:43825|issue:43994", + "issue:43577|issue:43582", + "issue:43550|issue:43819", + "issue:43825|issue:43827", + "issue:44821|issue:45084", + "issue:41720|issue:44991", + "issue:44186|issue:44442", + "issue:44485|issue:44509", + "issue:44671|issue:45701", + "issue:30064|issue:36331", + "issue:44805|issue:44936", + "issue:43606|issue:45072", + "issue:42175|issue:45081", + "issue:43650|issue:43906", + "issue:43782|issue:45325", + "issue:45081|issue:45406", + "issue:44060|issue:44368", + "issue:34634|issue:36010", + "issue:44610|issue:45072", + "issue:43749|issue:44291", + 
"issue:43720|issue:45072", + "issue:43526|issue:43697", + "issue:43582|issue:44857", + "issue:43526|issue:44554", + "issue:43262|issue:43819" + ], + "prepared_review_unit_hash": "52a60483563db2d382ffe5e1c330a824a7f9888d1a2916a36013965a252936ea", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 28, + "serialized_chars": 35719, + "estimated_input_tokens": 8930, + "estimated_eval_tokens": 18116 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly a heterogeneous set of Transformers regressions across unrelated models, tokenizers, processors, and trainer paths. The titles are thematically similar in places (especially v5/remote_code regressions), but none of the soft pairs looks like the same concrete bug/change.", + "confidence": 0.74, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:45020 is the broadest umbrella for the repeated recent-version/remote_code regression reports, and it is open with the widest apparent user impact among the set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43782", + "right": "issue:44977", + "accept": false, + "reason": "Different model families and failure modes: Qwen3VL weight-only loading vs Qwen3.5 flash-attention generation." + }, + { + "left": "issue:44561", + "right": "issue:45020", + "accept": false, + "reason": "Both concern recent-version/remote_code breakage, but 44561 is a specific removed-helper failure; 45020 is a broader umbrella report, not the same bug." 
+ }, + { + "left": "issue:43653", + "right": "issue:43819", + "accept": false, + "reason": "Tokenizer special-token registration vs DAC latent/STE mismatch are unrelated code paths." + }, + { + "left": "issue:43825", + "right": "issue:43994", + "accept": false, + "reason": "One is an error-message/docs issue around removed translation tasks; the other is a model output correctness regression." + }, + { + "left": "issue:43577", + "right": "issue:43582", + "accept": false, + "reason": "BLIP2 dtype loading bug and Apple Silicon allocator TypeError are separate subsystems and symptoms." + }, + { + "left": "issue:43550", + "right": "issue:43819", + "accept": false, + "reason": "torch.compile/SDPA failure in Bamba is unrelated to DAC.from_latents parity with the forward pass." + }, + { + "left": "issue:43825", + "right": "issue:43827", + "accept": false, + "reason": "Runtime error-message wording and docs drift after v5 removals are related thematically but not the same underlying fix." + }, + { + "left": "issue:44821", + "right": "issue:45084", + "accept": false, + "reason": "AutoImageProcessor URL loading and template-node compilation are unrelated." + }, + { + "left": "issue:41720", + "right": "issue:44991", + "accept": false, + "reason": "Qwen3 device-mapping CUDA assert and tokenizer loading for EMBEDDIA/est-roberta are different failures." + }, + { + "left": "issue:44186", + "right": "issue:44442", + "accept": false, + "reason": "Both are tokenizer-related, but LayoutLMv2 NER/padding crashes and FastSpeech2Conformer loading are distinct code paths." + }, + { + "left": "issue:44485", + "right": "issue:44509", + "accept": false, + "reason": "GLM-5 RoPE implementation discussion is unrelated to docs still mentioning removed pipeline tasks." + }, + { + "left": "issue:44671", + "right": "issue:45701", + "accept": false, + "reason": "CamemBERT masked-LM regression and general tokenization changes are not the same bug." 
+ }, + { + "left": "issue:30064", + "right": "issue:36331", + "accept": false, + "reason": "Image processor segmentation-map handling and CustomTrainer.compute_loss signature mismatch are unrelated." + }, + { + "left": "issue:44805", + "right": "issue:44936", + "accept": false, + "reason": "Mask-shape IndexError in one model path and trainer.evaluate() failure after training are different issues." + }, + { + "left": "issue:43606", + "right": "issue:45072", + "accept": false, + "reason": "CPU offload device mismatch in Bark-small is unrelated to bfloat16 dtype mismatches in SwitchTransformers/TimmWrapperModel." + }, + { + "left": "issue:42175", + "right": "issue:45081", + "accept": false, + "reason": "Package install backend selection and Mistral regex patching are different concerns." + }, + { + "left": "issue:43650", + "right": "issue:43906", + "accept": false, + "reason": "A low-information title does not establish the same underlying reproducer as an isolated reproduction of issue 38071." + }, + { + "left": "issue:43782", + "right": "issue:45325", + "accept": false, + "reason": "Different Qwen variants and different bugs: weight_only loading error vs RoPE position-id scaling." + }, + { + "left": "issue:45081", + "right": "issue:45406", + "accept": false, + "reason": "Mistral tokenizer patching crash and Gemma4Processor missing _tokenizer are separate loading failures." + }, + { + "left": "issue:44060", + "right": "issue:44368", + "accept": false, + "reason": "Both mention tied-weights warnings, but one is a false tie warning in Qwen3-Next and the other is a repeated LoRA config warning in Qwen3.5." + }, + { + "left": "issue:34634", + "right": "issue:36010", + "accept": false, + "reason": "BarkProcessor voice_preset and GenerationMixin import failure are unrelated." 
+ }, + { + "left": "issue:44610", + "right": "issue:45072", + "accept": false, + "reason": "Processor output size mismatch for OmDet-Turbo and dtype mismatches in inference wrappers are different bugs." + }, + { + "left": "issue:43749", + "right": "issue:44291", + "accept": false, + "reason": "FSDP CPU RAM-efficient loading and init_empty_weights argument handling are separate loading mechanisms." + }, + { + "left": "issue:43720", + "right": "issue:45072", + "accept": false, + "reason": "Packed-weight unpacking in accelerate loading is unrelated to bfloat16 dtype mismatches." + }, + { + "left": "issue:43526", + "right": "issue:43697", + "accept": false, + "reason": "BeitImageProcessorFast label reduction and RTDetrV2 output differences are different model/processor regressions." + }, + { + "left": "issue:43582", + "right": "issue:44857", + "accept": false, + "reason": "AppleSilicon caching allocator TypeError and LwDetrImageLoss AMP/CUDA crash are unrelated." + }, + { + "left": "issue:43526", + "right": "issue:44554", + "accept": false, + "reason": "Label reduction in an image processor and an MPS attention correctness issue are not the same bug." + }, + { + "left": "issue:43262", + "right": "issue:43819", + "accept": false, + "reason": "Audio chat-template sampling-rate defaults and DAC latent reconstruction mismatch are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 15, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:34567", + "issue:34634", + "issue:36010", + "issue:36331", + "issue:38175", + "issue:39692", + "issue:40990", + "issue:43257", + "issue:43262", + "issue:43329", + "issue:43450", + "issue:43452", + "issue:43454", + "issue:43531", + "issue:43550", + "issue:43577", + "issue:43638", + "issue:43645", + "issue:43653", + "issue:43696", + "issue:43697", + "issue:43720", + "issue:43746", + "issue:43749", + "issue:43756", + "issue:43819", + "issue:43854", + "issue:43927", + "issue:43931", + "issue:44479", + "issue:44568", + "issue:44589", + "issue:44610", + "issue:44756", + "issue:44811", + "issue:44849", + "issue:44912", + "issue:44936", + "issue:44977", + "issue:44991", + "issue:45042", + "issue:45084", + "issue:45356", + "issue:45357", + "issue:45488", + "issue:45538", + "issue:45600", + "issue:45701" + ], + "soft_pairs": [ + "issue:43329|issue:45600", + "issue:45084|issue:45538", + "issue:34634|issue:36331", + "issue:43653|issue:44912", + "issue:44568|issue:45488", + "issue:43452|issue:44991", + "issue:43638|issue:44936", + "issue:43257|issue:43329", + "issue:38175|issue:44610", + "issue:43454|issue:43577", + "issue:43854|issue:44977", + "issue:43746|issue:44811", + "issue:43257|issue:43696", + "issue:44589|issue:45042", + "issue:39692|issue:44912", + "issue:36010|issue:43645", + "issue:44568|issue:45356", + "issue:43819|issue:43927", + "issue:43720|issue:44610", + "issue:43749|issue:44756", + "issue:43550|issue:44912", + "issue:43329|issue:44479", + "issue:43262|issue:44811", + 
"issue:43531|issue:45042", + "issue:36010|issue:36331", + "issue:43257|issue:43931", + "issue:34567|issue:40990", + "issue:44849|issue:45701", + "issue:43257|issue:43756", + "issue:43697|issue:45357", + "issue:43329|issue:43450" + ], + "prepared_review_unit_hash": "82ccb925142449a579afe4b3ae8514fd2f7c7e44328b847830011cc4b3ecd921", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 31, + "serialized_chars": 37133, + "estimated_input_tokens": 9284, + "estimated_eval_tokens": 18824 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The soft pairs mostly span unrelated Transformer bugs across tokenization, multimodal/video, training, loading, and quantization. None are strong enough to treat as the same underlying fix. Issue 43329 is the best representative because it pinpoints a concrete failing code path.", + "confidence": 0.73, + "canonical_issue_reason": "issue:43329 is the most concrete, code-path-specific report here: it names `_get_num_multimodal_tokens`, the video branch, and the undefined symbols causing the failure.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43329 is the strongest standalone representative of an actual bug in this set; the others are broader symptoms or different subsystems.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43329", + "right": "issue:45600", + "accept": false, + "reason": "Different bugs: multimodal video token counting vs stale auto_mappings CI references." + }, + { + "left": "issue:45084", + "right": "issue:45538", + "accept": false, + "reason": "Unrelated: compile-time template-node error vs CLIP tokenizer max-length behavior." 
+ }, + { + "left": "issue:34634", + "right": "issue:36331", + "accept": false, + "reason": "Different subsystems: BarkProcessor preset loading vs trainer loss signature mismatch." + }, + { + "left": "issue:43653", + "right": "issue:44912", + "accept": false, + "reason": "Special-token registration bug is unrelated to MXFP4 quantization fallback." + }, + { + "left": "issue:44568", + "right": "issue:45488", + "accept": false, + "reason": "Both are tokenizer regressions, but they affect different code paths and models." + }, + { + "left": "issue:43452", + "right": "issue:44991", + "accept": false, + "reason": "Different loading failures for different APIs/models; not the same defect." + }, + { + "left": "issue:43638", + "right": "issue:44936", + "accept": false, + "reason": "Separate trainer/deepspeed failure from post-train evaluate regression." + }, + { + "left": "issue:43257", + "right": "issue:43329", + "accept": false, + "reason": "Qwen3 MoE weight conversion and video token counting are distinct bugs." + }, + { + "left": "issue:38175", + "right": "issue:44610", + "accept": false, + "reason": "Different vision-model issues: zero probabilities vs processor output shape mismatch." + }, + { + "left": "issue:43454", + "right": "issue:43577", + "accept": false, + "reason": "lm_head weight tying bug is unrelated to BLIP2 dtype propagation." + }, + { + "left": "issue:43854", + "right": "issue:44977", + "accept": false, + "reason": "Different model families and different failures; not one code path." + }, + { + "left": "issue:43746", + "right": "issue:44811", + "accept": false, + "reason": "PEFT checkpoint loading and Whisper batch_decode special-token handling are unrelated." + }, + { + "left": "issue:43257", + "right": "issue:43696", + "accept": false, + "reason": "Weight conversion bug is unrelated to an out-of-memory error." 
+ }, + { + "left": "issue:44589", + "right": "issue:45042", + "accept": false, + "reason": "Float8 storage lookup failure is unrelated to torchvision being required." + }, + { + "left": "issue:39692", + "right": "issue:44912", + "accept": false, + "reason": "Documentation/example errors do not match the gpt-oss quantization fallback bug." + }, + { + "left": "issue:36010", + "right": "issue:43645", + "accept": false, + "reason": "Importing GenerationMixin and Jupyter custom-model init breakage are different problems." + }, + { + "left": "issue:44568", + "right": "issue:45356", + "accept": false, + "reason": "Both concern tokenizers, but the failures are different and model-specific." + }, + { + "left": "issue:43819", + "right": "issue:43927", + "accept": false, + "reason": "DAC STE mismatch and DiaConfig token-ID persistence are unrelated." + }, + { + "left": "issue:43720", + "right": "issue:44610", + "accept": false, + "reason": "Packed-weight unpacking in accelerate is unrelated to OmDet processor sizing." + }, + { + "left": "issue:43749", + "right": "issue:44756", + "accept": false, + "reason": "Both involve loading/memory, but the root causes are different." + }, + { + "left": "issue:43550", + "right": "issue:44912", + "accept": false, + "reason": "torch.compile SDPA failure is unrelated to MXFP4 quantization loading." + }, + { + "left": "issue:43329", + "right": "issue:44479", + "accept": false, + "reason": "Same broad video area, but one is undefined helper variables and the other is a broader regression." + }, + { + "left": "issue:43262", + "right": "issue:44811", + "accept": false, + "reason": "Audio chat-template sampling rate bug differs from Whisper batch_decode behavior." + }, + { + "left": "issue:43531", + "right": "issue:45042", + "accept": false, + "reason": "Qwen3-MoE sliding-window bug is unrelated to PIL backend torchvision requirements." 
+ }, + { + "left": "issue:36010", + "right": "issue:36331", + "accept": false, + "reason": "ImportError for GenerationMixin and custom compute_loss kwarg are unrelated." + }, + { + "left": "issue:43257", + "right": "issue:43931", + "accept": false, + "reason": "Same model family, but different failure modes and loading paths." + }, + { + "left": "issue:34567", + "right": "issue:40990", + "accept": false, + "reason": "TrainerState token counting and GPT-OSS perplexity are unrelated." + }, + { + "left": "issue:44849", + "right": "issue:45701", + "accept": false, + "reason": "Qwen3.5 hidden-states bug and tokenization-version change are not the same issue." + }, + { + "left": "issue:43257", + "right": "issue:43756", + "accept": false, + "reason": "MoE weight conversion bug is unrelated to Smollm3 RoPE-layer dropping." + }, + { + "left": "issue:43697", + "right": "issue:45357", + "accept": false, + "reason": "RTDetr output drift and Qwen3.5 visual-key saving are different bugs." + }, + { + "left": "issue:43329", + "right": "issue:43450", + "accept": false, + "reason": "Both touch video processors, but one is token counting and the other is batched shape output." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 16, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:28282", + "issue:29127", + "issue:33357", + "issue:34567", + "issue:34689", + "issue:36010", + "issue:38175", + "issue:41720", + "issue:42175", + "issue:42915", + "issue:43317", + "issue:43452", + "issue:43493", + "issue:43526", + "issue:43540", + "issue:43577", + "issue:43582", + "issue:43643", + "issue:43742", + "issue:43749", + "issue:43756", + "issue:43824", + "issue:43906", + "issue:43927", + "issue:43994", + "issue:44062", + "issue:44164", + "issue:44291", + "issue:44479", + "issue:44488", + "issue:44560", + "issue:44737", + "issue:44821", + "issue:44898", + "issue:44912", + "issue:44938", + "issue:44987", + "issue:44991", + "issue:45042", + "issue:45072", + "issue:45137", + "issue:45325", + "issue:45375", + "issue:45381", + "issue:45405", + "issue:45479", + "issue:45538", + "issue:45588" + ], + "soft_pairs": [ + "issue:44987|issue:45538", + "issue:44479|issue:45381", + "issue:41720|issue:43540", + "issue:43577|issue:45072", + "issue:43452|issue:43582", + "issue:33357|issue:34567", + "issue:43540|issue:44560", + "issue:43317|issue:45538", + "issue:42175|issue:43824", + "issue:44291|issue:45042", + "issue:42915|issue:43742", + "issue:42175|issue:44938", + "issue:43927|issue:44912", + "issue:42915|issue:45538", + "issue:42915|issue:44821", + "issue:44062|issue:44991", + "issue:43540|issue:44479", + "issue:43526|issue:45479", + "issue:42175|issue:44488", + "issue:43906|issue:45381", + "issue:38175|issue:44898", + "issue:45137|issue:45405", + "issue:44737|issue:45588", + 
"issue:43749|issue:44164", + "issue:43756|issue:45325", + "issue:28282|issue:34689", + "issue:36010|issue:42175", + "issue:28282|issue:29127", + "issue:43643|issue:45375", + "issue:43493|issue:43994", + "issue:43742|issue:44291" + ], + "prepared_review_unit_hash": "3eed4d5c97e623344dc153291b2a5f6d0707370049d77732bbf0c78082e56c73", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 31, + "serialized_chars": 37114, + "estimated_input_tokens": 9279, + "estimated_eval_tokens": 18814 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly a heterogeneous cluster of Transformers bug reports spanning unrelated areas (model loading/imports, tokenizer/config issues, vision/video regressions, quantization, and training/runtime bugs). Only a couple of soft edges look like true duplicates; the rest are just broad topical similarity.", + "confidence": 0.68, + "canonical_issue_reason": "No single canonical issue fits the whole cluster cleanly; the items split into several unrelated bug families. If forced, the Qwen video-regression reports are the closest to a reusable anchor, but the cluster is not a true duplicate set.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44479 is the most representative concrete regression report in the set: it is specific, user-facing, and broad enough to capture the strongest Qwen video-input subcluster. 
It is still not a good umbrella for the entire heterogeneous cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44987", + "right": "issue:45538", + "accept": false, + "reason": "Different failures: one is a model-loading regression for a specific repo, the other is a tokenizer max-length constant issue." + }, + { + "left": "issue:44479", + "right": "issue:45381", + "accept": true, + "reason": "Both describe the same Qwen video-input regression around incorrect position IDs / video handling in transformers 5.3.0." + }, + { + "left": "issue:41720", + "right": "issue:43540", + "accept": false, + "reason": "A device-mapping CUDA assert on A800 is not the same bug as a Qwen3OmniMoe video-input ValueError." + }, + { + "left": "issue:43577", + "right": "issue:45072", + "accept": false, + "reason": "Both mention dtype mismatches, but they affect different models and different code paths; not the same concrete bug." + }, + { + "left": "issue:43452", + "right": "issue:43582", + "accept": false, + "reason": "GGUF loading / from_pretrained behavior and an Apple Silicon allocator warmup TypeError are unrelated." + }, + { + "left": "issue:33357", + "right": "issue:34567", + "accept": false, + "reason": "CLIP/Mac bus error and a TrainerState token counter not updating are unrelated runtime behaviors." + }, + { + "left": "issue:43540", + "right": "issue:44560", + "accept": false, + "reason": "Both are video-related, but the model families and reported failure modes differ too much to treat as the same bug." + }, + { + "left": "issue:43317", + "right": "issue:45538", + "accept": false, + "reason": "Dequantized model offload failure is unrelated to CLIPTokenizer's model_max_length behavior." + }, + { + "left": "issue:42175", + "right": "issue:43824", + "accept": false, + "reason": "Packaging/backend omission for TensorFlow is unrelated to an ImportError for a specific Qwen class." 
+ }, + { + "left": "issue:44291", + "right": "issue:45042", + "accept": false, + "reason": "init_empty_weights/_is_hf_initialized and PIL backend torchvision requirements are different code paths." + }, + { + "left": "issue:42915", + "right": "issue:43742", + "accept": false, + "reason": "Qwen3Moe FP8 config failure and MobileLLM config-key loading error are unrelated." + }, + { + "left": "issue:42175", + "right": "issue:44938", + "accept": false, + "reason": "Missing TensorFlow backend from installation and Python 3.14 loading failure are not the same bug." + }, + { + "left": "issue:43927", + "right": "issue:44912", + "accept": false, + "reason": "A save/load config-token-ID bug is unrelated to MXFP4 quantization falling back to bf16." + }, + { + "left": "issue:42915", + "right": "issue:45538", + "accept": false, + "reason": "Qwen3Moe FP8 loading and CLIPTokenizer max length have no concrete overlap." + }, + { + "left": "issue:42915", + "right": "issue:44821", + "accept": false, + "reason": "An FP8 model-loading bug is unrelated to loading AutoImageProcessor from a URL." + }, + { + "left": "issue:44062", + "right": "issue:44991", + "accept": false, + "reason": "AddedToken special-argument handling and tokenizer loading for EMBEDDIA/est-roberta are different failures." + }, + { + "left": "issue:43540", + "right": "issue:44479", + "accept": false, + "reason": "Both involve Qwen video inputs, but the reported models and symptoms differ; not enough to call them the same bug." + }, + { + "left": "issue:43526", + "right": "issue:45479", + "accept": false, + "reason": "BEiT Fast label reduction and sequence-classification zero-loss behavior are unrelated." + }, + { + "left": "issue:42175", + "right": "issue:44488", + "accept": false, + "reason": "A backend-install issue is not the same as a model-specific loading failure." 
+ }, + { + "left": "issue:43906", + "right": "issue:45381", + "accept": false, + "reason": "An isolated reproduction of another issue is too vague to match the specific Qwen2.5-VL video-position bug." + }, + { + "left": "issue:38175", + "right": "issue:44898", + "accept": false, + "reason": "SigLIP2 zero probabilities and a Perceiver resolution bug are different models and different defects." + }, + { + "left": "issue:45137", + "right": "issue:45405", + "accept": false, + "reason": "DeepSpeed ZeRO3 deque errors and a PEFT version bump are unrelated." + }, + { + "left": "issue:44737", + "right": "issue:45588", + "accept": false, + "reason": "XLNet CPU tensor placement and flash_attention s_aux handling are different bugs." + }, + { + "left": "issue:43749", + "right": "issue:44164", + "accept": false, + "reason": "FSDP CPU RAM efficient loading and extra_state save/load handling are not the same issue." + }, + { + "left": "issue:43756", + "right": "issue:45325", + "accept": false, + "reason": "Both mention RoPE/position logic, but they concern different models and different incorrect behaviors." + }, + { + "left": "issue:28282", + "right": "issue:34689", + "accept": false, + "reason": "A missing PyTorch dependency error is unrelated to a specific Llama 3.2 Vision model-loading regression." + }, + { + "left": "issue:36010", + "right": "issue:42175", + "accept": false, + "reason": "GenerationMixin import failure and TensorFlow backend packaging are unrelated." + }, + { + "left": "issue:43643", + "right": "issue:45375", + "accept": false, + "reason": "Generic AutoConfig field-loss with trust_remote_code and a specific config field dropped by @strict are not the same concrete bug." + }, + { + "left": "issue:43493", + "right": "issue:43994", + "accept": true, + "reason": "Both are about SigLIP2 producing wrong/nonsensical outputs, and the titles point to the same underlying implementation discrepancy." 
+ }, + { + "left": "issue:43742", + "right": "issue:44291", + "accept": false, + "reason": "MobileLLM loading failure and init_empty_weights argument mismatch are unrelated." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays appropriately conservative about the cluster being heterogeneous. The two accepted soft edges are plausible duplicate pairs from the titles, and the rejected pairs are mostly cautious rather than overstated. No clear evidence of overclaiming." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 17, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:34567", + "issue:36331", + "issue:41762", + "issue:43295", + "issue:43452", + "issue:43525", + "issue:43531", + "issue:43577", + "issue:43582", + "issue:43643", + "issue:43653", + "issue:43688", + "issue:43697", + "issue:43720", + "issue:43742", + "issue:43749", + "issue:43782", + "issue:43819", + "issue:44062", + "issue:44291", + "issue:44466", + "issue:44484", + "issue:44488", + "issue:44514", + "issue:44561", + "issue:44568", + "issue:44589", + "issue:44625", + "issue:44704", + "issue:44792", + "issue:44811", + "issue:44821", + "issue:44843", + "issue:44877", + "issue:44898", + "issue:44936", + "issue:44964", + "issue:45005", + "issue:45042", + "issue:45081", + "issue:45216", + "issue:45290", + "issue:45356", + "issue:45381", + "issue:45405", + "issue:45440", + "issue:45600" + ], + "soft_pairs": [ + "issue:43582|issue:43782", + "issue:44062|issue:45081", + "issue:34567|issue:36331", + "issue:43452|issue:44488", + "issue:44484|issue:44704", + "issue:44291|issue:44821", + 
"issue:44792|issue:44964", + "issue:43643|issue:44561", + "issue:45405|issue:45600", + "issue:43697|issue:45216", + "issue:43688|issue:43749", + "issue:45042|issue:45081", + "issue:44514|issue:45381", + "issue:43742|issue:44589", + "issue:43531|issue:44821", + "issue:44514|issue:45290", + "issue:43452|issue:43577", + "issue:43452|issue:44843", + "issue:44625|issue:44877", + "issue:43688|issue:45440", + "issue:41762|issue:44936", + "issue:43720|issue:44898", + "issue:43295|issue:45356", + "issue:43653|issue:44568", + "issue:43525|issue:43643", + "issue:43819|issue:44811", + "issue:44466|issue:45005" + ], + "prepared_review_unit_hash": "7aaba8370e6ac5b7574ac8a1bef706a7aa06ad21c3236a8775e0ca1e9de382c9", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 27, + "serialized_chars": 35537, + "estimated_input_tokens": 8885, + "estimated_eval_tokens": 18026 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is highly heterogeneous: the soft-similarity pairs mostly share only broad Transformer themes (tokenizers, loading, processors, training) but not the same concrete bug. I found no safe duplicate merges among the proposed edges. 
The broadest/highest-activity issue is FSDP_CPU_RAM_EFFICIENT_LOADING, but it is not actually a duplicate center for the rest of the cluster.", + "confidence": 0.93, + "canonical_issue_reason": "issue:43749 has the strongest discussion/inbound activity and the broadest impact, so it is the best representative issue in an otherwise unrelated set.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43749 is the most prominent issue here by activity and scope; it is the safest global representative even though the surrounding items are not true duplicates.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43582", + "right": "issue:43782", + "accept": false, + "reason": "Different failures in different loading paths; Apple Silicon allocator warmup TypeError is unrelated to Qwen3VL weight_only loading." + }, + { + "left": "issue:44062", + "right": "issue:45081", + "accept": false, + "reason": "Both are tokenizer-related, but one is an AddedToken special-arg bug and the other is a Mistral regex/backend_tokenizer crash." + }, + { + "left": "issue:34567", + "right": "issue:36331", + "accept": false, + "reason": "Trainer metrics bookkeeping vs a changed compute_loss signature are separate bugs." + }, + { + "left": "issue:43452", + "right": "issue:44488", + "accept": false, + "reason": "gguf_file/from_pretrained breakage is a different loading issue than the cjvt/sleng-bert model load failure." + }, + { + "left": "issue:44484", + "right": "issue:44704", + "accept": false, + "reason": "max_shard_size behavior and AutoProcessor cached_file kwargs passthrough are unrelated." + }, + { + "left": "issue:44291", + "right": "issue:44821", + "accept": false, + "reason": "init_empty_weights/_is_hf_initialized and AutoImageProcessor-from-URL are different code paths and failures." 
+ }, + { + "left": "issue:44792", + "right": "issue:44964", + "accept": false, + "reason": "Both involve multimodal models, but the failing models and concrete code paths differ." + }, + { + "left": "issue:43643", + "right": "issue:44561", + "accept": false, + "reason": "Both mention trust_remote_code, but one is missing config fields and the other is removal of is_torch_fx_available." + }, + { + "left": "issue:45405", + "right": "issue:45600", + "accept": false, + "reason": "A PEFT version pin issue and removed Sam3LiteText auto-mapping references are unrelated." + }, + { + "left": "issue:43697", + "right": "issue:45216", + "accept": false, + "reason": "Different regressions: output drift in RTDetrV2 vs save_pretrained checkpoint serialization for Qwen3.5." + }, + { + "left": "issue:43688", + "right": "issue:43749", + "accept": false, + "reason": "Auxiliary loss normalization and FSDP CPU RAM efficient loading are unrelated bugs." + }, + { + "left": "issue:45042", + "right": "issue:45081", + "accept": false, + "reason": "Dependency requirement regression in image processors is not the same as the Mistral regex crash." + }, + { + "left": "issue:44514", + "right": "issue:45381", + "accept": false, + "reason": "Both are Qwen2.5-VL issues, but one is apply_chat_template batching/padding and the other is video vision_position_ids." + }, + { + "left": "issue:43742", + "right": "issue:44589", + "accept": false, + "reason": "KeyError loading MobileLLM-125M and Float8 storage lookup failure are different loading bugs." + }, + { + "left": "issue:43531", + "right": "issue:44821", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior and AutoImageProcessor URL loading are unrelated." + }, + { + "left": "issue:44514", + "right": "issue:45290", + "accept": false, + "reason": "Same function name, but different inputs and failure modes; not the same concrete chat-template bug." 
+ }, + { + "left": "issue:43452", + "right": "issue:43577", + "accept": false, + "reason": "gguf_file loading breakage and Blip2 dtype propagation are separate issues." + }, + { + "left": "issue:43452", + "right": "issue:44843", + "accept": false, + "reason": "gguf_file/from_pretrained breakage is unrelated to the offline Mistral regex model_info() call." + }, + { + "left": "issue:44625", + "right": "issue:44877", + "accept": false, + "reason": "Qwen3.5 num_labels propagation and granite_speech strict config loading are different config bugs." + }, + { + "left": "issue:43720", + "right": "issue:44898", + "accept": false, + "reason": "BitNet packed-weight loading and Perceiver image resolution handling do not share the same bug." + }, + { + "left": "issue:43295", + "right": "issue:45356", + "accept": false, + "reason": "processor.tokenizer/image input regression and Kimi-K2.5 codec/fix_mistral_regex regression are different tokenizer issues." + }, + { + "left": "issue:43653", + "right": "issue:44568", + "accept": false, + "reason": "Both involve special tokens, but they affect different tokenizers and different symptoms." + }, + { + "left": "issue:43525", + "right": "issue:43643", + "accept": false, + "reason": "Missing pad_token_id on Llama4Config is unrelated to trust_remote_code config field loss." + }, + { + "left": "issue:43819", + "right": "issue:44811", + "accept": false, + "reason": "DAC latent reconstruction mismatch and Whisper batch_decode skip_special_tokens are unrelated." + }, + { + "left": "issue:44466", + "right": "issue:45005", + "accept": false, + "reason": "Both mention tied weights, but one is device-dependent lm_head serialization and the other is translation-model save issues; not the same concrete bug." + }, + { + "left": "issue:43749", + "right": "issue:44589", + "accept": false, + "reason": "FSDP CPU RAM efficient loading and Float8 storage lookup are different serialization/loading failures." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 18, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:24643", + "issue:28282", + "issue:29127", + "issue:30064", + "issue:32090", + "issue:33357", + "issue:33666", + "issue:34567", + "issue:35141", + "issue:38175", + "issue:41720", + "issue:43122", + "issue:43295", + "issue:43329", + "issue:43334", + "issue:43452", + "issue:43504", + "issue:43531", + "issue:43550", + "issue:43572", + "issue:43577", + "issue:43646", + "issue:43653", + "issue:43688", + "issue:43697", + "issue:43720", + "issue:43742", + "issue:43749", + "issue:43906", + "issue:43927", + "issue:44361", + "issue:44442", + "issue:44451", + "issue:44464", + "issue:44479", + "issue:44554", + "issue:44610", + "issue:44625", + "issue:44743", + "issue:44843", + "issue:44936", + "issue:44977", + "issue:44991", + "issue:45325", + "issue:45356", + "issue:45588", + "issue:45701", + "issue:45704" + ], + "soft_pairs": [ + "issue:24643|issue:29127", + "issue:44464|issue:44743", + "issue:41720|issue:43906", + "issue:43577|issue:43697", + "issue:38175|issue:43295", + "issue:43688|issue:45704", + "issue:44843|issue:44991", + "issue:43646|issue:43749", + "issue:44361|issue:44442", + "issue:43688|issue:45701", + "issue:43653|issue:44361", + "issue:44479|issue:44936", + "issue:28282|issue:32090", + "issue:44977|issue:45356", + "issue:28282|issue:33357", + "issue:30064|issue:33666", + "issue:28282|issue:35141", + "issue:43122|issue:43906", + "issue:43577|issue:45588", + "issue:43577|issue:44554", + "issue:43653|issue:43927", + "issue:43531|issue:43742", + "issue:43329|issue:45325", + 
"issue:44464|issue:44977", + "issue:44479|issue:44625", + "issue:43452|issue:44451", + "issue:43550|issue:44361", + "issue:43504|issue:43720", + "issue:43504|issue:43550", + "issue:32090|issue:34567", + "issue:43504|issue:44610", + "issue:43334|issue:43572", + "issue:32090|issue:33666" + ], + "prepared_review_unit_hash": "73a0eba4b02b728f156a6faed1cd7d75337919175b9622b568c0299655a00fa2", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 33, + "serialized_chars": 37744, + "estimated_input_tokens": 9436, + "estimated_eval_tokens": 19128 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a false positive: the issues share only superficial regression/model-loading language, but they cover distinct bugs across tokenization, multimodal processing, training, device mapping, and config handling. None of the soft pairs look like the same underlying issue.", + "confidence": 0.99, + "canonical_issue_reason": "No clear canonical duplicate exists; the items are too heterogeneous to collapse onto one underlying bug.", + "canonical_pr_reason": null, + "best_issue_reason": "If one issue must stand in for the cluster, issue 43646 is the broadest umbrella regression ('Transformers 5.0.0 breaks custom model initialization'), but it is not a duplicate of the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:24643", + "right": "issue:29127", + "accept": false, + "reason": "DeepSpeed weight-shape training error vs LayoutLMv3 box-content validation; unrelated failure modes." 
+ }, + { + "left": "issue:44464", + "right": "issue:44743", + "accept": false, + "reason": "Both mention generation behavior, but one is chunked generation with compiled forward and the other is recurrent-state reset in modular_qwen3_5; different code paths." + }, + { + "left": "issue:41720", + "right": "issue:43906", + "accept": false, + "reason": "Qwen3 auto device-mapping cuda assert vs an isolated reproduction of a separate issue; no shared concrete bug." + }, + { + "left": "issue:43577", + "right": "issue:43697", + "accept": false, + "reason": "Blip2 dtype staying float32 vs RTDetrV2 output divergence; different model families and defects." + }, + { + "left": "issue:38175", + "right": "issue:43295", + "accept": false, + "reason": "Siglip2 zero-probabilities issue vs v4.57.5 custom processor/tokenizer regression; not the same path." + }, + { + "left": "issue:43688", + "right": "issue:45704", + "accept": false, + "reason": "Aux-loss normalization bug in MoE models vs T5/apex RMSNorm memory leak; unrelated." + }, + { + "left": "issue:44843", + "right": "issue:44991", + "accept": false, + "reason": "HF Hub offline/tokenizer patch issue vs tokenizer loading regression for a specific model; different root cause." + }, + { + "left": "issue:43646", + "right": "issue:43749", + "accept": false, + "reason": "Generic custom-model initialization regression vs FSDP CPU RAM-efficient loading breakage; not the same fix." + }, + { + "left": "issue:44361", + "right": "issue:44442", + "accept": false, + "reason": "MLukeTokenizer task AttributeError vs AutoTokenizer failing to load FastSpeech2ConformerTokenizer; separate tokenizer bugs." + }, + { + "left": "issue:43688", + "right": "issue:45701", + "accept": false, + "reason": "Auxiliary-loss normalization bug vs general tokenization changes across versions; no shared implementation defect." 
+ }, + { + "left": "issue:43653", + "right": "issue:44361", + "accept": false, + "reason": "BigBirdTokenizer special-token registration bug vs MLukeTokenizer task AttributeError; different tokenizer subsystems." + }, + { + "left": "issue:44479", + "right": "issue:44936", + "accept": false, + "reason": "Video-input regression for VLMs vs trainer.evaluate() failing after trainer.train(); unrelated." + }, + { + "left": "issue:28282", + "right": "issue:32090", + "accept": false, + "reason": "Missing PyTorch import error vs Trainer _gpu_broadcast_one NoneType error; different environment/runtime problems." + }, + { + "left": "issue:44977", + "right": "issue:45356", + "accept": false, + "reason": "Qwen3.5 flash-attention generation bug vs Kimi-K2.5 tokenizer codec/regex regression; distinct model and layer." + }, + { + "left": "issue:28282", + "right": "issue:33357", + "accept": false, + "reason": "PyTorch-not-found ImportError vs MacOS bus error on CLIP community model; unrelated failures." + }, + { + "left": "issue:30064", + "right": "issue:33666", + "accept": false, + "reason": "Void segmentation-map image processor bug vs Qwen2-VL multi-GPU training request; not the same issue." + }, + { + "left": "issue:28282", + "right": "issue:35141", + "accept": false, + "reason": "PyTorch missing import vs output-embedding reinitialized after resize_token_embeddings; different domains." + }, + { + "left": "issue:43122", + "right": "issue:43906", + "accept": false, + "reason": "Tokenizer version change behavior vs reproduction of a different issue; no concrete overlap." + }, + { + "left": "issue:43577", + "right": "issue:45588", + "accept": false, + "reason": "Blip2 dtype propagation bug vs flash_attention integration crash on s_aux=None; different model/integration paths." + }, + { + "left": "issue:43577", + "right": "issue:44554", + "accept": false, + "reason": "Blip2 dtype issue vs MPS attention correctness when value-head dim differs; not the same code-path problem." 
+ }, + { + "left": "issue:43653", + "right": "issue:43927", + "accept": false, + "reason": "BigBird mask token special-token registration vs DiaConfig losing custom token IDs; both token-related but distinct bugs." + }, + { + "left": "issue:43531", + "right": "issue:43742", + "accept": false, + "reason": "Qwen3-MoE sliding_window issue vs MobileLLM loading key error; different model-loading/runtime defects." + }, + { + "left": "issue:43329", + "right": "issue:45325", + "accept": false, + "reason": "Undefined video-patch helpers in multimodal token counting vs Qwen2.5-VL rope-index temporal scaling bug; different logic." + }, + { + "left": "issue:44464", + "right": "issue:44977", + "accept": false, + "reason": "Chunked generation inconsistency with compiled forward vs Qwen3.5 flash-attention generation bug; not the same failure." + }, + { + "left": "issue:44479", + "right": "issue:44625", + "accept": false, + "reason": "Video input regression for several VLMs vs Qwen3.5 num_labels propagation bug; unrelated multimodal vs config issue." + }, + { + "left": "issue:43452", + "right": "issue:44451", + "accept": false, + "reason": "gguf_file tokenizer/model loading breakage vs a specific tokenizer load failure for ScandiBERT; separate loading paths." + }, + { + "left": "issue:43550", + "right": "issue:44361", + "accept": false, + "reason": "Bamba torch.compile/SDPA failure vs MLukeTokenizer AttributeError; unrelated model execution and tokenizer issues." + }, + { + "left": "issue:43504", + "right": "issue:43720", + "accept": false, + "reason": "Beit legacy-field load failure vs BitNet packed-weight unpacking during accelerate loading; different model-loading mechanisms." + }, + { + "left": "issue:43504", + "right": "issue:43550", + "accept": false, + "reason": "Beit pretrained-load issue vs Bamba torch.compile/SDPA failure; no shared underlying bug." 
+ }, + { + "left": "issue:32090", + "right": "issue:34567", + "accept": false, + "reason": "Trainer _gpu_broadcast_one NoneType error vs TrainerState num_input_tokens_seen not updating; different trainer behaviors." + }, + { + "left": "issue:43504", + "right": "issue:44610", + "accept": false, + "reason": "Beit legacy field load failure vs OmDet-Turbo processor/model input-size mismatch; unrelated." + }, + { + "left": "issue:43334", + "right": "issue:43572", + "accept": false, + "reason": "Qwen3-VL missing pad_token_id on text config vs StableLmConfig missing pad_token_idx after 5.0; similar theme but different model/config fields and fixes." + }, + { + "left": "issue:32090", + "right": "issue:33666", + "accept": false, + "reason": "NoneType broadcast error in Trainer vs Qwen2-VL multi-GPU training request; different areas entirely." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 19, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:33357", + "issue:33666", + "issue:36010", + "issue:36331", + "issue:39692", + "issue:41720", + "issue:43257", + "issue:43262", + "issue:43278", + "issue:43295", + "issue:43299", + "issue:43493", + "issue:43525", + "issue:43540", + "issue:43572", + "issue:43606", + "issue:43646", + "issue:43688", + "issue:43716", + "issue:43720", + "issue:43742", + "issue:43756", + "issue:43824", + "issue:43825", + "issue:43844", + "issue:43927", + "issue:44361", + "issue:44423", + "issue:44464", + "issue:44466", + "issue:44479", + "issue:44488", + "issue:44589", + "issue:44625", + "issue:44734", + "issue:44821", + "issue:44843", + "issue:44849", + "issue:44913", + 
"issue:44964", + "issue:44977", + "issue:45084", + "issue:45200", + "issue:45229", + "issue:45357", + "issue:45405", + "issue:45520", + "issue:45584" + ], + "soft_pairs": [ + "issue:43525|issue:43572", + "issue:43278|issue:43688", + "issue:41720|issue:44464", + "issue:44849|issue:45357", + "issue:43756|issue:44913", + "issue:43493|issue:43825", + "issue:43742|issue:44821", + "issue:43540|issue:43716", + "issue:43262|issue:45084", + "issue:33666|issue:36010", + "issue:43720|issue:43927", + "issue:43606|issue:43927", + "issue:43257|issue:43299", + "issue:43278|issue:43844", + "issue:44488|issue:44843", + "issue:43493|issue:43844", + "issue:44964|issue:45405", + "issue:39692|issue:43493", + "issue:43720|issue:44361", + "issue:43540|issue:44977", + "issue:33666|issue:36331", + "issue:43756|issue:44479", + "issue:45200|issue:45229", + "issue:44625|issue:45584", + "issue:43824|issue:44589", + "issue:43646|issue:44466", + "issue:44423|issue:44734", + "issue:44589|issue:45520", + "issue:39692|issue:43295", + "issue:33357|issue:33666" + ], + "prepared_review_unit_hash": "5aa2bf896172acaeb5598d7615155775165f4a7700c7d14beeaf266a97b0c504", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36738, + "estimated_input_tokens": 9185, + "estimated_eval_tokens": 18626 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a grab bag of unrelated model-specific regressions and API issues. 
A few pairs share a broad theme (loading/config regressions), but none look like the same concrete bug or change closely enough to merge as duplicates.", + "confidence": 0.66, + "canonical_issue_reason": "Issue 43646 is the broadest, most representative regression report in the set (v5 custom model initialization breakage), so it is the best single issue to anchor the cluster even though the rest are largely unrelated.", + "canonical_pr_reason": null, + "best_issue_reason": "43646 is the strongest standalone issue for triage because it describes a general, version-wide init/load regression rather than a model-specific symptom.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43525", + "right": "issue:43572", + "accept": false, + "reason": "Both are config attribute regressions, but they affect different model families and different missing fields; not the same underlying bug." + }, + { + "left": "issue:43278", + "right": "issue:43688", + "accept": false, + "reason": "Different failure modes: dtype drift in evaluation vs auxiliary-loss normalization; only a loose training-related theme." + }, + { + "left": "issue:41720", + "right": "issue:44464", + "accept": false, + "reason": "Unrelated code paths and symptoms: device-mapping CUDA assert vs compiled-forward chunked generation mismatch." + }, + { + "left": "issue:44849", + "right": "issue:45357", + "accept": false, + "reason": "One is an output_hidden_states bug, the other is bad save_pretrained key serialization; not the same defect." + }, + { + "left": "issue:43756", + "right": "issue:44913", + "accept": false, + "reason": "Both mention RoPE/config behavior, but one is a model architecture omission and the other is a config reload bug." + }, + { + "left": "issue:43493", + "right": "issue:43825", + "accept": false, + "reason": "Different subsystems entirely: SigLIP2 implementation discrepancy vs pipeline task-support messaging." 
+ }, + { + "left": "issue:43742", + "right": "issue:44821", + "accept": false, + "reason": "Different loading failures with different triggers; no shared concrete code path." + }, + { + "left": "issue:43540", + "right": "issue:43716", + "accept": false, + "reason": "Both are multimodal issues, but one is video processing and the other is image-preprocessor dtype mismatch." + }, + { + "left": "issue:43262", + "right": "issue:45084", + "accept": false, + "reason": "Audio sampling-rate default bug vs template-compilation error; unrelated behavior." + }, + { + "left": "issue:33666", + "right": "issue:36010", + "accept": false, + "reason": "Qwen2-VL multi-GPU training and a GenerationMixin import error are unrelated." + }, + { + "left": "issue:43720", + "right": "issue:43927", + "accept": false, + "reason": "Packed-weight loading bug vs config token-id persistence bug; different objects and failure points." + }, + { + "left": "issue:43606", + "right": "issue:43927", + "accept": false, + "reason": "CPU offload device mismatch is unrelated to DiaConfig token ID loss." + }, + { + "left": "issue:43257", + "right": "issue:43299", + "accept": false, + "reason": "Both involve Qwen3 MoE loading, but one is accelerate/deepspeed weight conversion and the other is a v5 model-loading regression; not clearly the same bug." + }, + { + "left": "issue:43278", + "right": "issue:43844", + "accept": false, + "reason": "Different training issues with different symptoms; no evidence of one shared root cause." + }, + { + "left": "issue:44488", + "right": "issue:44843", + "accept": false, + "reason": "Specific model load failure vs offline-mode hub call bug; only loosely related through tokenizer loading." + }, + { + "left": "issue:43493", + "right": "issue:43844", + "accept": false, + "reason": "SigLIP2 implementation discrepancy and ZeRO-3 gradient inflation are unrelated." 
+ }, + { + "left": "issue:44964", + "right": "issue:45405", + "accept": false, + "reason": "Model-loading failure vs dependency-version bump; not the same underlying issue." + }, + { + "left": "issue:39692", + "right": "issue:43493", + "accept": false, + "reason": "Doc-example errors and implementation mismatch are different problem types, even if both touch SigLIP2." + }, + { + "left": "issue:43720", + "right": "issue:44361", + "accept": false, + "reason": "Different tokenizer/model-loading failures with no shared concrete code path." + }, + { + "left": "issue:43540", + "right": "issue:44977", + "accept": false, + "reason": "Video-input processing bug vs flash-attention generation bug; unrelated." + }, + { + "left": "issue:33666", + "right": "issue:36331", + "accept": false, + "reason": "Multi-GPU training and trainer compute_loss signature mismatch are different bugs." + }, + { + "left": "issue:43756", + "right": "issue:44479", + "accept": false, + "reason": "Different Qwen/Smollm model regressions with different mechanics and failure modes." + }, + { + "left": "issue:45200", + "right": "issue:45229", + "accept": false, + "reason": "One is a missing default for text-only fine-tuning, the other is GPU OOM in multi-GPU inference; not duplicates." + }, + { + "left": "issue:44625", + "right": "issue:45584", + "accept": false, + "reason": "Different model families and different bugs: num_labels propagation vs Whisper empty-transcription generation." + }, + { + "left": "issue:43824", + "right": "issue:44589", + "accept": false, + "reason": "ImportError for a specific model class and missing Float8 storage are unrelated." + }, + { + "left": "issue:43646", + "right": "issue:44466", + "accept": false, + "reason": "Both are v5 regressions, but one is custom model initialization and the other is tied-weight serialization depending on device." 
+ }, + { + "left": "issue:44423", + "right": "issue:44734", + "accept": false, + "reason": "Both hit serve paths, but the concrete crashes and code paths differ." + }, + { + "left": "issue:44589", + "right": "issue:45520", + "accept": false, + "reason": "Float8 storage lookup failure is unrelated to the flash_attn import_utils KeyError on Python 3.13." + }, + { + "left": "issue:39692", + "right": "issue:43295", + "accept": false, + "reason": "Documentation example problems and a processor.tokenizer regression are distinct issues." + }, + { + "left": "issue:33357", + "right": "issue:33666", + "accept": false, + "reason": "MacOS CLIP bus error and Qwen2-VL multi-GPU training are unrelated." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 20, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:15354", + "issue:29942", + "issue:30333", + "issue:37428", + "issue:38175", + "issue:39401", + "issue:43232", + "issue:43278", + "issue:43334", + "issue:43504", + "issue:43525", + "issue:43540", + "issue:43572", + "issue:43575", + "issue:43582", + "issue:43606", + "issue:43653", + "issue:43701", + "issue:43844", + "issue:43927", + "issue:43957", + "issue:43994", + "issue:44112", + "issue:44291", + "issue:44315", + "issue:44361", + "issue:44464", + "issue:44493", + "issue:44521", + "issue:44554", + "issue:44568", + "issue:44792", + "issue:44849", + "issue:44964", + "issue:44977", + "issue:45072", + "issue:45084", + "issue:45198", + "issue:45200", + "issue:45290", + "issue:45325", + "issue:45356", + "issue:45405", + "issue:45446", + "issue:45520", + "issue:45584", + "issue:45701" + ], + 
"soft_pairs": [ + "issue:43572|issue:44291", + "issue:43504|issue:43606", + "issue:44521|issue:45290", + "issue:43504|issue:43653", + "issue:43927|issue:44361", + "issue:43575|issue:45405", + "issue:43994|issue:44493", + "issue:44792|issue:45405", + "issue:44464|issue:45584", + "issue:43278|issue:43701", + "issue:43606|issue:44112", + "issue:29942|issue:30333", + "issue:44554|issue:45446", + "issue:44568|issue:45701", + "issue:15354|issue:38175", + "issue:43844|issue:44964", + "issue:43540|issue:45200", + "issue:45084|issue:45520", + "issue:43582|issue:44977", + "issue:43232|issue:44849", + "issue:39401|issue:44568", + "issue:45072|issue:45198", + "issue:37428|issue:43957", + "issue:44315|issue:44849", + "issue:44554|issue:45356", + "issue:43334|issue:43525", + "issue:45200|issue:45325" + ], + "prepared_review_unit_hash": "8e7769aa2f211f6a408c5273bf11086792e3936b6010139a1b9d98c10a364e6a", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 27, + "serialized_chars": 35292, + "estimated_input_tokens": 8823, + "estimated_eval_tokens": 17902 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are a grab bag of unrelated Transformers bug reports across different models, tokenizers, generation paths, and CI regressions. 
The soft pairs mostly share broad vocabulary like \"pad_token\" or \"loading\" but do not describe the same concrete bug/change, so none should be merged as duplicates.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43572", + "right": "issue:44291", + "accept": false, + "reason": "Both involve loading/config initialization, but one is about missing `pad_token_idx` after a 5.0 update and the other is an `init_empty_weights`/`_is_hf_initialized` argument regression; different failure modes and code paths." + }, + { + "left": "issue:43504", + "right": "issue:43606", + "accept": false, + "reason": "Different models and bugs: BEiT pretrained preset loading with a legacy field vs. `suno/bark-small` device mismatch under CPU offload." + }, + { + "left": "issue:44521", + "right": "issue:45290", + "accept": false, + "reason": "Both mention chat templating, but one is about all-zero assistant masks for multimodal inputs and the other is a crash on assistant tool-call messages without content; not the same defect." + }, + { + "left": "issue:43504", + "right": "issue:43653", + "accept": false, + "reason": "One is a BEiT model-loading legacy-field issue; the other is a BigBird tokenizer special-token registration/empty decode bug." + }, + { + "left": "issue:43927", + "right": "issue:44361", + "accept": false, + "reason": "Both are config/token-related, but `DiaConfig` loses custom token IDs on save/load, while `MLukeTokenizer` raises an AttributeError on tasks; distinct artifacts and symptoms." + }, + { + "left": "issue:43575", + "right": "issue:45405", + "accept": false, + "reason": "OOM during tensor-parallel loading of a specific model is unrelated to a package-version pin on `MIN_PEFT_VERSION`." 
+ }, + { + "left": "issue:43994", + "right": "issue:44493", + "accept": false, + "reason": "`siglip2` nonsensical outputs with AutoModel/pipeline is not the same as the generic unexpected position-id key issue affecting many models." + }, + { + "left": "issue:44792", + "right": "issue:45405", + "accept": false, + "reason": "A janus test failure is unrelated to a PEFT minimum-version bump." + }, + { + "left": "issue:44464", + "right": "issue:45584", + "accept": false, + "reason": "Chunked generation inconsistency with compiled forward is a generation/compilation bug; empty transcription after `align_special_tokens` is a Whisper-specific generation edge case." + }, + { + "left": "issue:43278", + "right": "issue:43701", + "accept": false, + "reason": "BF16-to-FP32 dtype change in evaluation is unrelated to a `resume_from_checkpoint` key mismatch." + }, + { + "left": "issue:43606", + "right": "issue:44112", + "accept": false, + "reason": "Different CI failures in different models: CPU offload device mismatch vs. stale device override test in GraniteSpeech." + }, + { + "left": "issue:29942", + "right": "issue:30333", + "accept": false, + "reason": "Flash Attention 2 test failures and MLFlow reporting of failed jobs are unrelated." + }, + { + "left": "issue:44554", + "right": "issue:45446", + "accept": false, + "reason": "An MPS attention correctness issue is not the same as a PyTorch version-check bug for `AuxRequest` import in flex_attention." + }, + { + "left": "issue:44568", + "right": "issue:45701", + "accept": false, + "reason": "`add_special_tokens=True` not adding BOS/EOS for one tokenizer is different from a broad version-dependent tokenization change report." + }, + { + "left": "issue:15354", + "right": "issue:38175", + "accept": false, + "reason": "GeneratorExp unsupported in `torch.jit.script` is unrelated to zero probabilities in `siglip2-base-patch16-224`." 
+ }, + { + "left": "issue:43844", + "right": "issue:44964", + "accept": false, + "reason": "DeepSpeed ZeRO-3 gradient growth and loading `microsoft/Phi-4-multimodal-instruct` are separate problems with different code paths." + }, + { + "left": "issue:43540", + "right": "issue:45200", + "accept": false, + "reason": "Video-input processing failure in Qwen3OmniMoe is unrelated to Gemma 4 requiring `mm_token_type_ids` to default to zeros." + }, + { + "left": "issue:45084", + "right": "issue:45520", + "accept": false, + "reason": "Template-node compilation error and Python 3.13 `flash_attn` import KeyError are unrelated failures." + }, + { + "left": "issue:43582", + "right": "issue:44977", + "accept": false, + "reason": "Apple Silicon allocator warmup TypeError is not the same bug as Qwen3.5 flash-attention generation issues." + }, + { + "left": "issue:43232", + "right": "issue:44849", + "accept": false, + "reason": "`_update_model_kwargs_for_generation` after `sync_gpus` concerns generation state handling; the Qwen3.5 issue is about `output_hidden_states=True` behavior." + }, + { + "left": "issue:39401", + "right": "issue:44568", + "accept": false, + "reason": "Wrong `offset_mapping` in Qwen3 tokenizer is a tokenizer-span bug, not the BOS/EOS insertion bug in mDeBERTa." + }, + { + "left": "issue:45072", + "right": "issue:45198", + "accept": false, + "reason": "Dtype mismatches in CI for SwitchTransformers/TimmWrapperModel are unrelated to Wav2Vec2 save/tokenization failures." + }, + { + "left": "issue:37428", + "right": "issue:43957", + "accept": false, + "reason": "ImportError for `_flash_supports_window_size` and meta-device loading breakage are different regressions affecting different subsystems." + }, + { + "left": "issue:44315", + "right": "issue:44849", + "accept": false, + "reason": "`model_init` not applying Liger Kernel is a training-setup issue, not the same as Qwen3.5 hidden-state output behavior." 
+ }, + { + "left": "issue:44554", + "right": "issue:45356", + "accept": false, + "reason": "Different attention/tokenization concerns: MPS attention correctness vs. Kimi-K2.5 tokenizer codec handling and warning message regression." + }, + { + "left": "issue:43334", + "right": "issue:43525", + "accept": false, + "reason": "Qwen3-VL missing `pad_token_id` and Llama4 missing `pad_token_id` are similar symptoms but different configs/models and likely separate fixes." + }, + { + "left": "issue:45200", + "right": "issue:45325", + "accept": false, + "reason": "Gemma 4 multimodal token type IDs defaulting to zeros is unrelated to Qwen2.5-VL rope-index scaling for still images." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 21, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:37428", + "issue:38175", + "issue:42915", + "issue:43278", + "issue:43295", + "issue:43388", + "issue:43450", + "issue:43493", + "issue:43526", + "issue:43572", + "issue:43575", + "issue:43653", + "issue:43701", + "issue:43819", + "issue:43824", + "issue:43827", + "issue:43844", + "issue:43856", + "issue:43873", + "issue:43901", + "issue:43957", + "issue:43975", + "issue:43994", + "issue:44112", + "issue:44206", + "issue:44360", + "issue:44442", + "issue:44462", + "issue:44464", + "issue:44466", + "issue:44514", + "issue:44521", + "issue:44561", + "issue:44610", + "issue:44779", + "issue:44792", + "issue:44898", + "issue:44936", + "issue:44987", + "issue:45083", + "issue:45127", + "issue:45198", + "issue:45245", + "issue:45290", + "issue:45381", + "issue:45412", + "issue:45563", + "issue:45584" + ], + 
"soft_pairs": [ + "issue:43295|issue:44610", + "issue:44466|issue:45127", + "issue:44898|issue:45412", + "issue:43493|issue:43701", + "issue:44792|issue:45563", + "issue:37428|issue:43824", + "issue:43388|issue:43526", + "issue:43873|issue:44464", + "issue:43572|issue:44987", + "issue:43844|issue:44936", + "issue:43572|issue:43957", + "issue:43824|issue:43975", + "issue:44514|issue:44521", + "issue:44360|issue:44561", + "issue:43295|issue:43450", + "issue:38175|issue:45412", + "issue:43295|issue:43873", + "issue:42915|issue:43824", + "issue:44442|issue:45198", + "issue:43295|issue:43994", + "issue:43827|issue:43901", + "issue:38175|issue:43493", + "issue:43278|issue:43856", + "issue:44206|issue:45584", + "issue:43653|issue:44112", + "issue:43450|issue:43493", + "issue:45127|issue:45245", + "issue:44462|issue:44561", + "issue:44561|issue:44779", + "issue:43819|issue:45083", + "issue:45290|issue:45381", + "issue:43450|issue:43575" + ], + "prepared_review_unit_hash": "1fd012d66153b0084bb0eaee880342cfc57d061ff8991fb33265d59b1549833a", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 32, + "serialized_chars": 37340, + "estimated_input_tokens": 9335, + "estimated_eval_tokens": 18926 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a loose grab-bag of mostly unrelated bug reports with only superficial overlap around Transformers v5 regressions, tokenizers, and multimodal processors. 
None of the soft pairs look like the same concrete underlying bug/change, so I would not merge any of them.", + "confidence": 0.19, + "canonical_issue_reason": "issue:43295 is the broadest, most reusable regression report in the set: it describes a v4.57.5 API break affecting custom model/processor code, which makes it a reasonable duplicate bucket for adjacent processor-regression reports.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43295 is the most generally framed, high-impact issue here and the best candidate to serve as a duplicate target for nearby API/regression complaints.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43295", + "right": "issue:44610", + "accept": false, + "reason": "Both are processor-related regressions, but one is a custom code/API break and the other is a wrong image size for OmDet-Turbo; different bugs and fixes." + }, + { + "left": "issue:44466", + "right": "issue:45127", + "accept": false, + "reason": "One is device-dependent serialization of tied weights; the other is LoRA merge collapse with extended vocab. Different code paths." + }, + { + "left": "issue:44898", + "right": "issue:45412", + "accept": false, + "reason": "Perceiver interpolation failure and RT-DETR memory leak are unrelated model bugs." + }, + { + "left": "issue:43493", + "right": "issue:43701", + "accept": false, + "reason": "SigLIP2 output discrepancy and resume-from-checkpoint key mismatch are not the same underlying issue." + }, + { + "left": "issue:44792", + "right": "issue:45563", + "accept": false, + "reason": "A Janus image-generation test failure and a stale generate() warning are unrelated." + }, + { + "left": "issue:37428", + "right": "issue:43824", + "accept": false, + "reason": "Both are ImportErrors, but for different symbols in different modules; no shared concrete code-path bug is evident." 
+ }, + { + "left": "issue:43388", + "right": "issue:43526", + "accept": false, + "reason": "Both involve labels, but one drops tuple elements in gather_for_metrics while the other truncates labels in BeitImageProcessorFast." + }, + { + "left": "issue:43873", + "right": "issue:44464", + "accept": false, + "reason": "Quantization/offloading behavior and compiled-forward chunked generation are different bugs." + }, + { + "left": "issue:43572", + "right": "issue:44987", + "accept": false, + "reason": "Missing pad_token_idx in StableLmConfig and loading physical-intelligence/fast are unrelated failures." + }, + { + "left": "issue:43844", + "right": "issue:44936", + "accept": false, + "reason": "ZeRO-3 gradient growth and trainer.evaluate() failure after train are different training/runtime problems." + }, + { + "left": "issue:43572", + "right": "issue:43957", + "accept": false, + "reason": "Config regression for StableLm and meta-device loading breakage are distinct issues." + }, + { + "left": "issue:43824", + "right": "issue:43975", + "accept": false, + "reason": "Model import/export error and DeepSeek detokenization regression are unrelated." + }, + { + "left": "issue:44514", + "right": "issue:44521", + "accept": false, + "reason": "Both touch apply_chat_template, but one is a batched padding=False crash and the other returns all-zero assistant masks; not the same concrete bug." + }, + { + "left": "issue:44360", + "right": "issue:44561", + "accept": false, + "reason": "A missing ReLU in DSA indexer and removal of is_torch_fx_available are unrelated." + }, + { + "left": "issue:43295", + "right": "issue:43450", + "accept": false, + "reason": "Processor.tokenizer regression and batched video-shape bug affect different paths and fixes." + }, + { + "left": "issue:38175", + "right": "issue:45412", + "accept": false, + "reason": "SigLIP2 zero probabilities and RT-DETR memory release are unrelated model issues." 
+ }, + { + "left": "issue:43295", + "right": "issue:43873", + "accept": false, + "reason": "Custom processor API breakage and quantization/offloading behavior are different bugs." + }, + { + "left": "issue:42915", + "right": "issue:43824", + "accept": false, + "reason": "Qwen3Moe FP8 failure and Qwen2_5_VL import error are unrelated." + }, + { + "left": "issue:44442", + "right": "issue:45198", + "accept": false, + "reason": "FastSpeech2ConformerTokenizer loading and Wav2Vec2 save/tokenization failures are different speech-tokenizer bugs." + }, + { + "left": "issue:43819", + "right": "issue:45083", + "accept": false, + "reason": "DAC latent/STE mismatch and qwen3_omni_moe helper behavior are unrelated." + }, + { + "left": "issue:43295", + "right": "issue:43994", + "accept": false, + "reason": "Processor.tokenizer regression and SigLIP2 nonsensical outputs are not the same bug." + }, + { + "left": "issue:43827", + "right": "issue:43901", + "accept": false, + "reason": "One is docs still mentioning pipeline(), the other is docs still mentioning return_all_scores; related only at a documentation level, not the same change." + }, + { + "left": "issue:38175", + "right": "issue:43493", + "accept": false, + "reason": "Zero probabilities in SigLIP2 and HF-vs-JAX discrepancy are different manifestations and likely different fixes." + }, + { + "left": "issue:43278", + "right": "issue:43856", + "accept": false, + "reason": "Embedding dtype drift in evaluation and Qwen3 MoE memory inefficiency are unrelated." + }, + { + "left": "issue:44206", + "right": "issue:45584", + "accept": false, + "reason": "Unsupported center argument in LaserFeatureExtractor and Whisper empty-transcription failure are different code paths." + }, + { + "left": "issue:43653", + "right": "issue:44112", + "accept": false, + "reason": "BigBirdTokenizer special-token registration and GraniteSpeech CI flakiness are unrelated." 
+ }, + { + "left": "issue:43450", + "right": "issue:43493", + "accept": false, + "reason": "Batched video processor shape bug and SigLIP2 implementation discrepancy are different issues." + }, + { + "left": "issue:45127", + "right": "issue:45245", + "accept": false, + "reason": "LoRA merge collapse and category-count limit are unrelated." + }, + { + "left": "issue:44462", + "right": "issue:44561", + "accept": false, + "reason": "AutoTokenizer ignoring tokenizer.json and removal of is_torch_fx_available are different regressions." + }, + { + "left": "issue:44561", + "right": "issue:44779", + "accept": false, + "reason": "trust_remote_code breakage from a removed helper and DeepSeek tokenizer output changes are unrelated." + }, + { + "left": "issue:43819", + "right": "issue:45083", + "accept": false, + "reason": "Duplicate of the earlier pairing: DAC.from_latents mismatch and qwen3_omni_moe helper behavior are not the same bug." + }, + { + "left": "issue:45290", + "right": "issue:45381", + "accept": false, + "reason": "Chat-template crash with tool calls and wrong video vision_position_ids are different multimodal bugs." + }, + { + "left": "issue:43450", + "right": "issue:43575", + "accept": false, + "reason": "Video batch-shape bug and Qwen2-57B TP OOM are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 22, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:29127", + "issue:29942", + "issue:32090", + "issue:37428", + "issue:38175", + "issue:39692", + "issue:40444", + "issue:43262", + "issue:43278", + "issue:43335", + "issue:43425", + "issue:43450", + "issue:43504", + "issue:43526", + "issue:43550", + "issue:43606", + "issue:43611", + "issue:43653", + "issue:43720", + "issue:43723", + "issue:43819", + "issue:43825", + "issue:43931", + "issue:44112", + "issue:44292", + "issue:44351", + "issue:44448", + "issue:44493", + "issue:44554", + "issue:44568", + "issue:44610", + "issue:44743", + "issue:44779", + "issue:44855", + "issue:44857", + "issue:44977", + "issue:45072", + "issue:45083", + "issue:45200", + "issue:45229", + "issue:45310", + "issue:45381", + "issue:45478", + "issue:45491", + "issue:45520", + "issue:45588", + "issue:45593" + ], + "soft_pairs": [ + "issue:37428|issue:43425", + "issue:44610|issue:44857", + "issue:43819|issue:44112", + "issue:44857|issue:45593", + "issue:43278|issue:43825", + "issue:43262|issue:43450", + "issue:45200|issue:45588", + "issue:43606|issue:45083", + "issue:45310|issue:45478", + "issue:44448|issue:44743", + "issue:29942|issue:32090", + "issue:37428|issue:44351", + "issue:43335|issue:43550", + "issue:39692|issue:43526", + "issue:43611|issue:43931", + "issue:43819|issue:44493", + "issue:45200|issue:45381", + "issue:43335|issue:43653", + "issue:29127|issue:32090", + "issue:44292|issue:45520", + "issue:44448|issue:44779", + "issue:43335|issue:45083", + "issue:44855|issue:45083", + 
"issue:39692|issue:44493", + "issue:43335|issue:43504", + "issue:43550|issue:44112", + "issue:38175|issue:44493", + "issue:43723|issue:44554", + "issue:44977|issue:45229", + "issue:44568|issue:45491", + "issue:43335|issue:45072", + "issue:40444|issue:43526", + "issue:43335|issue:45593", + "issue:43720|issue:44112", + "issue:44448|issue:44977" + ], + "prepared_review_unit_hash": "afe43b7a3fa0947d1c04b55f7eef29233597301c101d106f9fef07373a4a78d1", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 35, + "serialized_chars": 37750, + "estimated_input_tokens": 9438, + "estimated_eval_tokens": 19132 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most pairs are related only at a broad subsystem level and should not be merged. The only clear duplicate in this cluster is the repeated Qwen3.5 MoE `from_pretrained` regression (`issue:45310` / `issue:45478`).", + "confidence": 0.94, + "canonical_issue_reason": "`issue:45310` is the earliest report of the same Qwen3.5 MoE `from_pretrained` failure and appears to be the primary discussion thread for that bug.", + "canonical_pr_reason": null, + "best_issue_reason": "`issue:45310` is the best canonical issue candidate because it is the first/central report of the exact same error as `issue:45478`.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:37428", + "right": "issue:43425", + "accept": false, + "reason": "Both are import/compatibility complaints, but they point to different failures: a missing flash-attention symbol vs broad Torch 2.10 incompatibility." 
+ }, + { + "left": "issue:44610", + "right": "issue:44857", + "accept": false, + "reason": "Different concrete bugs: OmDet-Turbo input-size mismatch versus LwDetrImageLoss AMP/CUDA crash." + }, + { + "left": "issue:43819", + "right": "issue:44112", + "accept": false, + "reason": "Unrelated code paths: DAC latent reconstruction mismatch vs a GraniteSpeech CI device-override test." + }, + { + "left": "issue:44857", + "right": "issue:45593", + "accept": false, + "reason": "Both touch detection training, but one is an AMP/CUDA loss crash and the other is missing auxiliary losses when denoising is off." + }, + { + "left": "issue:43278", + "right": "issue:43825", + "accept": false, + "reason": "Different problems: dtype drift in evaluation versus a pipeline error-message regression about translation support." + }, + { + "left": "issue:43262", + "right": "issue:43450", + "accept": false, + "reason": "Audio chat-template sampling-rate handling is not the same bug as batched video processor shape handling." + }, + { + "left": "issue:45200", + "right": "issue:45588", + "accept": false, + "reason": "Different models and failures: Gemma 4 token-type defaults vs flash-attention crash on sink-less models." + }, + { + "left": "issue:43606", + "right": "issue:45083", + "accept": false, + "reason": "CPU offload device mismatch in Bark is unrelated to the qwen3_omni_moe feature-length helper behavior." + }, + { + "left": "issue:45310", + "right": "issue:45478", + "accept": true, + "reason": "Same titled Qwen3.5 MoE `from_pretrained` error with the same transformers>=5.4.0 regression." + }, + { + "left": "issue:44448", + "right": "issue:44743", + "accept": false, + "reason": "Pegasus v4/v5 output drift and qwen3_5 recurrent-state reset are distinct correctness issues." + }, + { + "left": "issue:29942", + "right": "issue:32090", + "accept": false, + "reason": "Flash Attention 2 test failures and a Trainer `_gpu_broadcast_one` NoneType error are different bugs." 
+ }, + { + "left": "issue:37428", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but they involve different missing symbols and different root causes." + }, + { + "left": "issue:43335", + "right": "issue:43550", + "accept": false, + "reason": "SwitchTransformers sparse-layer construction and Bamba torch.compile/SDPA failure are unrelated code-paths." + }, + { + "left": "issue:39692", + "right": "issue:43526", + "accept": false, + "reason": "SigLIP2 docs/quantization problems are not the same as BeitImageProcessorFast `reduce_labels` returning only one label." + }, + { + "left": "issue:43611", + "right": "issue:43931", + "accept": false, + "reason": "General v5 `base_model_prefix` loading breakage is different from Qwen3-VL weight-shape mismatch." + }, + { + "left": "issue:43819", + "right": "issue:44493", + "accept": false, + "reason": "DAC latent reconstruction mismatch and unexpected position-id keys are separate model behaviors." + }, + { + "left": "issue:45200", + "right": "issue:45381", + "accept": false, + "reason": "Both are Gemma/Qwen-VL-related, but one is `mm_token_type_ids` defaults and the other is video `vision_position_ids`." + }, + { + "left": "issue:43335", + "right": "issue:43653", + "accept": false, + "reason": "Sparse-layer config bug and BigBird mask-token registration are different tokenizer/model issues." + }, + { + "left": "issue:29127", + "right": "issue:32090", + "accept": false, + "reason": "LayoutLMv3 box-content error messaging is unrelated to the Trainer GPU broadcast NoneType failure." + }, + { + "left": "issue:44292", + "right": "issue:45520", + "accept": false, + "reason": "Qwen-3-8B-NVFP4 runtime failure and Python 3.13 `flash_attn` import lookup are different problems." + }, + { + "left": "issue:44448", + "right": "issue:44779", + "accept": false, + "reason": "Pegasus output differences and Deepseek tokenizer regressions are separate model/tokenizer issues." 
+ }, + { + "left": "issue:43335", + "right": "issue:45083", + "accept": false, + "reason": "SwitchTransformers sparse-layer config and qwen3_omni_moe feature-length helper behavior do not share the same bug." + }, + { + "left": "issue:44855", + "right": "issue:45083", + "accept": false, + "reason": "Python 3.13 `@torch.jit.script` parsing failure is unrelated to the qwen3_omni_moe helper function." + }, + { + "left": "issue:39692", + "right": "issue:44493", + "accept": false, + "reason": "SigLIP2 docs/quantization errors are not the same as widespread unexpected position-id key warnings." + }, + { + "left": "issue:43335", + "right": "issue:43504", + "accept": false, + "reason": "Sparse-layer creation and BEiT semantic-segmentation legacy-field loading are different bugs." + }, + { + "left": "issue:43550", + "right": "issue:44112", + "accept": false, + "reason": "Bamba torch.compile/SDPA failure and GraniteSpeech stale CI device override are unrelated." + }, + { + "left": "issue:38175", + "right": "issue:44493", + "accept": false, + "reason": "Unexpected zero probabilities in SigLIP2 and unexpected position-id keys are different symptoms with different causes." + }, + { + "left": "issue:43723", + "right": "issue:44554", + "accept": false, + "reason": "Tokenizer loading in v5 is unrelated to an MPS attention correctness issue." + }, + { + "left": "issue:44977", + "right": "issue:45229", + "accept": false, + "reason": "Qwen3.5 flash-attention generation issues and Gemma4 multi-GPU CUDA OOM are not the same failure." + }, + { + "left": "issue:44568", + "right": "issue:45491", + "accept": false, + "reason": "mDeBERTa special-token handling and Gemma3 NaN embeddings from sliding-window attention are different bugs." + }, + { + "left": "issue:43335", + "right": "issue:45072", + "accept": false, + "reason": "SwitchTransformers sparse-layer config and dtype mismatches in bfloat16 inference are unrelated." 
+ }, + { + "left": "issue:40444", + "right": "issue:43526", + "accept": false, + "reason": "Qwen2.5-VL iterable-dataset multi-image finetuning failure is not the same as BeitImageProcessorFast label reduction." + }, + { + "left": "issue:43335", + "right": "issue:45593", + "accept": false, + "reason": "SwitchTransformers sparse-layer creation and D-FINE auxiliary-loss suppression are distinct issues." + }, + { + "left": "issue:43720", + "right": "issue:44112", + "accept": false, + "reason": "BitNet packed-weight unpacking during accelerate loading is unrelated to the GraniteSpeech CI device-override failure." + }, + { + "left": "issue:44448", + "right": "issue:44977", + "accept": false, + "reason": "Pegasus v4/v5 output drift and Qwen3.5 flash-attention generation are different model-specific regressions." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The canonical match is well grounded: `issue:45310` and `issue:45478` have the same title, same version context, and `45310` is earlier. The soft-edge verdicts are generally conservative and supported by the packet metadata, with reasons that mostly rely on clearly different titles/codes paths rather than assuming deeper evidence. I don\u2019t see an overstatement that would warrant rejection." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 23, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:25251", + "issue:29942", + "issue:32090", + "issue:33357", + "issue:34689", + "issue:35141", + "issue:37428", + "issue:41720", + "issue:43065", + "issue:43066", + "issue:43278", + "issue:43329", + "issue:43412", + "issue:43421", + "issue:43550", + "issue:43606", + "issue:43653", + "issue:43701", + "issue:43873", + "issue:43927", + "issue:43976", + "issue:43994", + "issue:44186", + "issue:44448", + "issue:44451", + "issue:44462", + "issue:44466", + "issue:44493", + "issue:44743", + "issue:44857", + "issue:44977", + "issue:44987", + "issue:45072", + "issue:45083", + "issue:45198", + "issue:45229", + "issue:45245", + "issue:45372", + "issue:45406", + "issue:45412", + "issue:45440", + "issue:45459", + "issue:45491", + "issue:45520", + "issue:45563", + "issue:45588", + "issue:45698" + ], + "soft_pairs": [ + "issue:43066|issue:44462", + "issue:34689|issue:35141", + "issue:44977|issue:45563", + "issue:43927|issue:45698", + "issue:45372|issue:45588", + "issue:37428|issue:44987", + "issue:43606|issue:44857", + "issue:44857|issue:45072", + "issue:43701|issue:45083", + "issue:43976|issue:45440", + "issue:45245|issue:45563", + "issue:43412|issue:45588", + "issue:43550|issue:45083", + "issue:43873|issue:44493", + "issue:29942|issue:35141", + "issue:43873|issue:45229", + "issue:43278|issue:43329", + "issue:45372|issue:45459", + "issue:45372|issue:45406", + "issue:32090|issue:33357", + "issue:41720|issue:45229", + "issue:43065|issue:43994", + "issue:44448|issue:44466", + "issue:43065|issue:45412", + "issue:44451|issue:45520", + 
"issue:25251|issue:32090", + "issue:43653|issue:44743", + "issue:44857|issue:45588", + "issue:43421|issue:45491", + "issue:44186|issue:45198", + "issue:43994|issue:45412" + ], + "prepared_review_unit_hash": "1f53ae72546a6296445f14374aab2a46e7f8f765b42b2089ba9c98d72d20e5fa", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 31, + "serialized_chars": 36509, + "estimated_input_tokens": 9128, + "estimated_eval_tokens": 18512 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is heterogeneous and does not form a real duplicate cluster. The soft edges are mostly broad thematic similarities (tokenizers, flash-attn, save/load, dtype/offload), but they do not look like the same underlying bug or change.", + "confidence": 0.86, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43066", + "right": "issue:44462", + "accept": false, + "reason": "Both touch tokenizer loading, but one is a v5 decoder-type issue and the other is AutoTokenizer ignoring tokenizer.json; different root causes." + }, + { + "left": "issue:34689", + "right": "issue:35141", + "accept": false, + "reason": "Different failures: model loading for Llama 3.2 vision vs output embedding reinit after resize in post_init." + }, + { + "left": "issue:44977", + "right": "issue:45563", + "accept": false, + "reason": "Generation with flash-attention vs a stale paged-generate warning; not the same bug." 
+ }, + { + "left": "issue:43927", + "right": "issue:45698", + "accept": false, + "reason": "Both involve save/load behavior, but one loses custom token IDs and the other loads the wrong custom module." + }, + { + "left": "issue:45372", + "right": "issue:45588", + "accept": false, + "reason": "Different subsystems and symptoms: Gemma 4 processor import/loading vs flash_attention.py crashing on s_aux=None." + }, + { + "left": "issue:37428", + "right": "issue:44987", + "accept": false, + "reason": "Shared flash-attn context, but one is a missing internal import and the other is a model load failure." + }, + { + "left": "issue:43606", + "right": "issue:44857", + "accept": false, + "reason": "Both are device/dtype crashes, but they affect different models and code paths." + }, + { + "left": "issue:44857", + "right": "issue:45072", + "accept": false, + "reason": "Both mention dtype mismatches, but the affected models and failure mechanics are unrelated." + }, + { + "left": "issue:43701", + "right": "issue:45083", + "accept": false, + "reason": "Checkpoint key mismatch is unrelated to the qwen3_omni_moe feature-extract length helper bug." + }, + { + "left": "issue:43976", + "right": "issue:45440", + "accept": false, + "reason": "Environment compatibility issue vs model implementation divergence; not the same underlying defect." + }, + { + "left": "issue:45245", + "right": "issue:45563", + "accept": false, + "reason": "Unrelated bugs: category-count runtime limit vs a stale generate warning." + }, + { + "left": "issue:43412", + "right": "issue:45588", + "accept": false, + "reason": "Export-time guard/erfinv problem is unrelated to the sink-less flash_attention AttributeError." + }, + { + "left": "issue:43550", + "right": "issue:45083", + "accept": false, + "reason": "Different code paths: torch.compile+SDPA on Bamba vs a helper-function behavior issue." 
+ }, + { + "left": "issue:43873", + "right": "issue:44493", + "accept": false, + "reason": "Quantization/offload behavior and unexpected position-id keys are different bugs." + }, + { + "left": "issue:29942", + "right": "issue:35141", + "accept": false, + "reason": "FlashAttention test failures and embedding resize reinitialization are unrelated regressions." + }, + { + "left": "issue:43873", + "right": "issue:45229", + "accept": false, + "reason": "Both are memory-related at a high level, but offloading/quantization and multi-GPU OOM are distinct problems." + }, + { + "left": "issue:43278", + "right": "issue:43329", + "accept": false, + "reason": "Training/evaluate dtype drift is unrelated to undefined video-token helper variables." + }, + { + "left": "issue:45372", + "right": "issue:45459", + "accept": false, + "reason": "Both are tokenizer/processor loading adjacent, but one is a missing dependency import and the other is exception masking." + }, + { + "left": "issue:45372", + "right": "issue:45406", + "accept": false, + "reason": "Both affect Gemma 4 processor loading, but they fail at different points with different causes." + }, + { + "left": "issue:32090", + "right": "issue:33357", + "accept": false, + "reason": "Trainer distributed broadcast error and MacOS bus error are unrelated." + }, + { + "left": "issue:41720", + "right": "issue:45229", + "accept": false, + "reason": "Both are CUDA/multi-GPU failures, but one is a cudaErrorAssert and the other is OOM." + }, + { + "left": "issue:43065", + "right": "issue:43994", + "accept": false, + "reason": "Different model-specific bugs: dummy Conv2d in Sam3PixelDecoder vs nonsensical outputs in Siglip2." + }, + { + "left": "issue:44448", + "right": "issue:44466", + "accept": false, + "reason": "Both are v4/v5 regressions, but one changes generation output and the other is serialization of lm_head.weight." 
+ }, + { + "left": "issue:43065", + "right": "issue:45412", + "accept": false, + "reason": "One is a placeholder module issue, the other is a memory leak on RT-DETR; unrelated." + }, + { + "left": "issue:44451", + "right": "issue:45520", + "accept": false, + "reason": "Model load failure for ScandiBERT vs Python 3.13 flash_attn import KeyError are different problems." + }, + { + "left": "issue:25251", + "right": "issue:32090", + "accept": false, + "reason": "Pipeline output shape regression and a trainer broadcast type error are unrelated." + }, + { + "left": "issue:43653", + "right": "issue:44743", + "accept": false, + "reason": "Tokenizer special-token registration bug vs recurrent cache state reset bug; different subsystems." + }, + { + "left": "issue:44857", + "right": "issue:45588", + "accept": false, + "reason": "Both are runtime crashes, but one is AMP/dtype-related and the other is a flash_attention auxiliary-state None crash." + }, + { + "left": "issue:43421", + "right": "issue:45491", + "accept": false, + "reason": "Special-token post-processor updates and Gemma3 NaN embeddings are unrelated." + }, + { + "left": "issue:44186", + "right": "issue:45198", + "accept": false, + "reason": "Different tokenizer/model families and different failure modes." + }, + { + "left": "issue:43994", + "right": "issue:45412", + "accept": false, + "reason": "Inference correctness regression and a model memory leak are not the same bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 24, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:29127", + "issue:29942", + "issue:30064", + "issue:32090", + "issue:33290", + "issue:33357", + "issue:34567", + "issue:35141", + "issue:36010", + "issue:39692", + "issue:42915", + "issue:42994", + "issue:43232", + "issue:43257", + "issue:43278", + "issue:43334", + "issue:43381", + "issue:43388", + "issue:43526", + "issue:43531", + "issue:43653", + "issue:43701", + "issue:43756", + "issue:43761", + "issue:43824", + "issue:43931", + "issue:44079", + "issue:44186", + "issue:44206", + "issue:44265", + "issue:44279", + "issue:44361", + "issue:44442", + "issue:44464", + "issue:44479", + "issue:44610", + "issue:44871", + "issue:44977", + "issue:45072", + "issue:45103", + "issue:45198", + "issue:45229", + "issue:45245", + "issue:45372", + "issue:45412", + "issue:45440", + "issue:45491", + "issue:45593" + ], + "soft_pairs": [ + "issue:42915|issue:42994", + "issue:43278|issue:43381", + "issue:43756|issue:45440", + "issue:44361|issue:45198", + "issue:44206|issue:44479", + "issue:44265|issue:45198", + "issue:44871|issue:44977", + "issue:43257|issue:43278", + "issue:44610|issue:45412", + "issue:29942|issue:30064", + "issue:43824|issue:44279", + "issue:45103|issue:45372", + "issue:45440|issue:45593", + "issue:29127|issue:34567", + "issue:33357|issue:39692", + "issue:32090|issue:33290", + "issue:43334|issue:43531", + "issue:45072|issue:45229", + "issue:43526|issue:44186", + "issue:43653|issue:44442", + "issue:43381|issue:43701", + "issue:44464|issue:45491", + "issue:44977|issue:45245", + 
"issue:29942|issue:36010", + "issue:29127|issue:33357", + "issue:42915|issue:43931", + "issue:29127|issue:35141", + "issue:43232|issue:43388", + "issue:43761|issue:44079" + ], + "prepared_review_unit_hash": "c06e97cb6588923f6962a1fbc7af4e35cfee6a9fbb66eccdec22620b83e0bf37", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 29, + "serialized_chars": 36446, + "estimated_input_tokens": 9112, + "estimated_eval_tokens": 18480 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is highly heterogeneous: the soft pairs span unrelated bugs in tokenizers, generation, training, vision processors, model loading, and docs. I rejected every soft edge because none look like the same underlying defect or fixable in one PR.", + "confidence": 0.98, + "canonical_issue_reason": "No single canonical issue stands out; the items are not duplicates and do not share one underlying bug or code path.", + "canonical_pr_reason": null, + "best_issue_reason": "No global best issue either: the cluster is too mixed to nominate one issue as a representative duplicate target.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42915", + "right": "issue:42994", + "accept": false, + "reason": "Different problems: FineGrainedFP8Config failure in Qwen3MoE vs quantized model saving." + }, + { + "left": "issue:43278", + "right": "issue:43381", + "accept": false, + "reason": "Embedding dtype drift in eval is unrelated to gradient checkpointing being disallowed in eval mode." 
+ }, + { + "left": "issue:43756", + "right": "issue:45440", + "accept": false, + "reason": "Smollm3 RoPE-layer mismatch and DeepseekV3 implementation divergence are different model-specific issues." + }, + { + "left": "issue:44361", + "right": "issue:45198", + "accept": false, + "reason": "MLukeTokenizer task AttributeError and Wav2Vec2 save/tokenization failures affect different tokenization paths." + }, + { + "left": "issue:44206", + "right": "issue:44479", + "accept": false, + "reason": "Unsupported `center` arg in LasrFeatureExtractor is unrelated to Qwen video-input regression." + }, + { + "left": "issue:44265", + "right": "issue:45198", + "accept": false, + "reason": "`torch.export` with `torch_compilable_check` is a different failure mode than Wav2Vec2 save_pretrained/tokenization." + }, + { + "left": "issue:44871", + "right": "issue:44977", + "accept": false, + "reason": "Gemma eos_token_id mismatch is unrelated to Qwen3.5 flash-attention generation issues." + }, + { + "left": "issue:43257", + "right": "issue:43278", + "accept": false, + "reason": "MoE weight conversion under accelerate+deepspeed is not the same as eval-time embedding dtype changes." + }, + { + "left": "issue:44610", + "right": "issue:45412", + "accept": false, + "reason": "Processor input-size mismatch and RT-DETR memory not being released are different bugs." + }, + { + "left": "issue:29942", + "right": "issue:30064", + "accept": false, + "reason": "Flash Attention 2 test failures and void segmentation-map processing are unrelated." + }, + { + "left": "issue:43824", + "right": "issue:44279", + "accept": false, + "reason": "Missing `Qwen2_5_VLForConditionalGeneration` import is not the same as a generic dependency issue." + }, + { + "left": "issue:45103", + "right": "issue:45372", + "accept": false, + "reason": "Auto-docstring annotation handling and mistral_common ReasoningEffort import failures are unrelated." 
+ }, + { + "left": "issue:45440", + "right": "issue:45593", + "accept": false, + "reason": "DeepseekV3 implementation divergence does not match D-FINE auxiliary-loss handling." + }, + { + "left": "issue:29127", + "right": "issue:34567", + "accept": false, + "reason": "LayoutLMv3 error clarity and TrainerState token-count tracking are different concerns." + }, + { + "left": "issue:33357", + "right": "issue:39692", + "accept": false, + "reason": "MacOS CLIP bus error and SigLIP2 documentation mistakes are not the same bug." + }, + { + "left": "issue:32090", + "right": "issue:33290", + "accept": false, + "reason": "`_gpu_broadcast_one` NoneType error in Trainer is unrelated to Adafactor OOM in deepspeed." + }, + { + "left": "issue:43334", + "right": "issue:43531", + "accept": false, + "reason": "Qwen3-VL pad_token_id load error and Qwen3-MoE sliding_window behavior are separate model issues." + }, + { + "left": "issue:45072", + "right": "issue:45229", + "accept": false, + "reason": "dtype mismatches in inference are unrelated to Gemma4 multi-GPU OOM." + }, + { + "left": "issue:43526", + "right": "issue:44186", + "accept": false, + "reason": "BeitImageProcessorFast label reduction bug and LayoutLMv2Tokenizer padding/truncation crash differ in scope and code path." + }, + { + "left": "issue:43653", + "right": "issue:44442", + "accept": false, + "reason": "BigBirdTokenizer special-token registration and AutoTokenizer loading FastSpeech2ConformerTokenizer are distinct tokenizer bugs." + }, + { + "left": "issue:43381", + "right": "issue:43701", + "accept": false, + "reason": "Gradient checkpointing in eval mode and resume_from_checkpoint key mismatch are unrelated." + }, + { + "left": "issue:44464", + "right": "issue:45491", + "accept": false, + "reason": "Compiled-forward chunked generation inconsistency is not the same as Gemma3 NaN embeddings from sliding-window padding." 
+ }, + { + "left": "issue:44977", + "right": "issue:45245", + "accept": false, + "reason": "Qwen3.5 flash-attention generation and category-count overflow are different failures." + }, + { + "left": "issue:29942", + "right": "issue:36010", + "accept": false, + "reason": "Flash Attention 2 CI failures are unrelated to the GenerationMixin import change." + }, + { + "left": "issue:29127", + "right": "issue:33357", + "accept": false, + "reason": "LayoutLMv3 error-message clarity and CLIP MacOS bus error do not share a code path." + }, + { + "left": "issue:42915", + "right": "issue:43931", + "accept": false, + "reason": "Qwen3MoE FP8 config and Qwen3-VL weight-shape mismatch are different model-loading bugs." + }, + { + "left": "issue:29127", + "right": "issue:35141", + "accept": false, + "reason": "Improving a LayoutLMv3 error message is unrelated to embedding reinitialization after resize." + }, + { + "left": "issue:43232", + "right": "issue:43388", + "accept": false, + "reason": "Generation kwargs sync issue and metric-gathering label truncation are separate trainer paths." + }, + { + "left": "issue:43761", + "right": "issue:44079", + "accept": false, + "reason": "CLIPVisionModel hidden_states regression and ModelOutput key assignment are different layers of the stack." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 25, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:33290", + "issue:34567", + "issue:34689", + "issue:39692", + "issue:41628", + "issue:42907", + "issue:43262", + "issue:43316", + "issue:43441", + "issue:43452", + "issue:43493", + "issue:43525", + "issue:43526", + "issue:43575", + "issue:43611", + "issue:43646", + "issue:43673", + "issue:43701", + "issue:43723", + "issue:43761", + "issue:43827", + "issue:43873", + "issue:43931", + "issue:43994", + "issue:44077", + "issue:44188", + "issue:44206", + "issue:44387", + "issue:44442", + "issue:44479", + "issue:44610", + "issue:44743", + "issue:44811", + "issue:44857", + "issue:44898", + "issue:44977", + "issue:45003", + "issue:45072", + "issue:45216", + "issue:45290", + "issue:45372", + "issue:45440", + "issue:45479", + "issue:45491", + "issue:45584", + "issue:45588", + "issue:45600", + "issue:45698" + ], + "soft_pairs": [ + "issue:43262|issue:44206", + "issue:43441|issue:45588", + "issue:43526|issue:44857", + "issue:45003|issue:45440", + "issue:43723|issue:45479", + "issue:44442|issue:45072", + "issue:43526|issue:44610", + "issue:44743|issue:44811", + "issue:43452|issue:45588", + "issue:43316|issue:43525", + "issue:43646|issue:44077", + "issue:44479|issue:45290", + "issue:43761|issue:44188", + "issue:45588|issue:45600", + "issue:44977|issue:45584", + "issue:43575|issue:43701", + "issue:43931|issue:45216", + "issue:42907|issue:45698", + "issue:39692|issue:45491", + "issue:43575|issue:44387", + "issue:43994|issue:44206", + "issue:43873|issue:45588", + "issue:34689|issue:43611", + 
"issue:43673|issue:44743", + "issue:44442|issue:44898", + "issue:33290|issue:34567", + "issue:43493|issue:43827", + "issue:41628|issue:45372" + ], + "prepared_review_unit_hash": "f43e9c893ed6354abeb45208208a10adb6e8b0d7ccb0a72bc64646abf945ae3b", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 28, + "serialized_chars": 36167, + "estimated_input_tokens": 9042, + "estimated_eval_tokens": 18340 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is highly heterogeneous: it mixes unrelated Transformers bugs around tokenizers, vision/audio processors, model loading, flash-attention, quantization, and save/load regressions. None of the soft pairs look like the same underlying defect, so no duplicates are accepted.", + "confidence": 0.92, + "canonical_issue_reason": "No true duplicate canonical exists for the whole cluster; if one issue must represent it, issue 43873 is the broadest and most active recent regression report, but it is not a duplicate of the others.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43873 is the best overall representative because it is open, broad, and has the most cluster-like activity/inbound references; it is still only a representative, not a merge point for the other issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43262", + "right": "issue:44206", + "accept": false, + "reason": "Audio chat-template sampling-rate bug vs. LasrFeatureExtractor center-arg crash; different components and failure modes." 
+ }, + { + "left": "issue:43441", + "right": "issue:45588", + "accept": false, + "reason": "Both mention flash attention, but one is a Ministral/Transformers v5 RC generation bug and the other is an AttributeError on sink-less models; not the same code path." + }, + { + "left": "issue:43526", + "right": "issue:44857", + "accept": false, + "reason": "BeitImageProcessorFast label reduction bug vs. LwDetrImageLoss AMP crash; unrelated processor vs. loss issues." + }, + { + "left": "issue:45003", + "right": "issue:45440", + "accept": false, + "reason": "sys.modules access in modeling_utils is unrelated to DeepseekV3MoE divergence from remote implementation." + }, + { + "left": "issue:43723", + "right": "issue:45479", + "accept": false, + "reason": "Tokenizer loading regression vs. zero-loss sequence-classification bug; different APIs and root causes." + }, + { + "left": "issue:44442", + "right": "issue:45072", + "accept": false, + "reason": "FastSpeech2ConformerTokenizer loading failure vs. bfloat16 dtype mismatch in model inference; not the same bug." + }, + { + "left": "issue:43526", + "right": "issue:44610", + "accept": false, + "reason": "Both are processor-related, but one is reduce_labels returning a single label and the other is an image size mismatch in OmDet-Turbo." + }, + { + "left": "issue:44743", + "right": "issue:44811", + "accept": false, + "reason": "Qwen3.5 recurrent-state reset in cached generation is unrelated to Whisper batch_decode skipping special tokens." + }, + { + "left": "issue:43452", + "right": "issue:45588", + "accept": false, + "reason": "gguf_file breaks tokenizer/model loading, whereas this is a flash-attention AttributeError on s_aux=None; different subsystems." + }, + { + "left": "issue:43316", + "right": "issue:43525", + "accept": false, + "reason": "Two API discrepancies, but for different configs and missing fields; not one shared bug." 
+ }, + { + "left": "issue:43646", + "right": "issue:44077", + "accept": false, + "reason": "General custom model init regression vs. patchtsmixer post_init API restriction; similar theme, different concrete issue." + }, + { + "left": "issue:44479", + "right": "issue:45290", + "accept": false, + "reason": "Video-input regression for Qwen models vs. apply_chat_template crash on tool-call assistant messages; unrelated code paths." + }, + { + "left": "issue:43761", + "right": "issue:44188", + "accept": false, + "reason": "CLIPVisionModel hidden_states regression vs. torch.compile attention-kernel divergence; distinct defects." + }, + { + "left": "issue:45588", + "right": "issue:45600", + "accept": false, + "reason": "Flash-attention crash on sink-less models vs. auto_mappings.py referencing removed configs; different failure causes." + }, + { + "left": "issue:44977", + "right": "issue:45584", + "accept": false, + "reason": "Qwen3.5 flash-attention generation bug vs. Whisper empty-transcription generation failure after align_special_tokens; not the same model or path." + }, + { + "left": "issue:43575", + "right": "issue:43701", + "accept": false, + "reason": "Tensor-parallel load OOM vs. resume_from_checkpoint key mismatch; unrelated training/loading problems." + }, + { + "left": "issue:43931", + "right": "issue:45216", + "accept": false, + "reason": "Qwen3-VL shape mismatch on load vs. Qwen3.5 save_pretrained checkpoint corruption; load-time vs save-time issues." + }, + { + "left": "issue:42907", + "right": "issue:45698", + "accept": false, + "reason": "Dequantized save failure vs. wrong custom module loaded after save_pretrained; both involve persistence, but not the same bug." + }, + { + "left": "issue:39692", + "right": "issue:45491", + "accept": false, + "reason": "SigLIP2 docs-example errors vs. Gemma3 NaN embeddings with mixed-length batching; unrelated models and symptoms." 
+ }, + { + "left": "issue:43575", + "right": "issue:44387", + "accept": false, + "reason": "Both end in OOM, but one is tensor-parallel loading and the other is increased reserved memory under int4 quantization; different root causes." + }, + { + "left": "issue:43994", + "right": "issue:44206", + "accept": false, + "reason": "SigLIP2 nonsensical outputs vs. LasrFeatureExtractor crash on unsupported center arg; not the same underlying defect." + }, + { + "left": "issue:43873", + "right": "issue:45588", + "accept": false, + "reason": "Quantization/offloading behavior issue vs. flash-attention AttributeError; no concrete shared code path." + }, + { + "left": "issue:34689", + "right": "issue:43611", + "accept": false, + "reason": "Both are model-loading regressions, but one is a Llama 3.2 vision model breakage and the other is a base_model_prefix-related v5 loading regression." + }, + { + "left": "issue:43673", + "right": "issue:44743", + "accept": false, + "reason": "Missing GenerationMixin cache during chunked_prefill vs. recurrent state reset in modular_qwen3_5; distinct generation bugs." + }, + { + "left": "issue:44442", + "right": "issue:44898", + "accept": false, + "reason": "Tokenizer loading failure vs. Perceiver non-default-resolution image-classification failure; different model families and APIs." + }, + { + "left": "issue:33290", + "right": "issue:34567", + "accept": false, + "reason": "DeepSpeed adafactor OOM vs. TrainerState num_input_tokens_seen not updating; unrelated training subsystems." + }, + { + "left": "issue:43493", + "right": "issue:43827", + "accept": false, + "reason": "SigLIP2 implementation mismatch vs. docs still referencing pipeline() after removals; model behavior vs documentation." + }, + { + "left": "issue:41628", + "right": "issue:45372", + "accept": false, + "reason": "AutoImageProcessor import failure vs. ReasoningEffort import failure breaking Gemma 4 processor loading; different import paths and packages." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 26, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:16998", + "issue:22355", + "issue:25251", + "issue:28282", + "issue:33290", + "issue:34689", + "issue:35141", + "issue:36010", + "issue:41950", + "issue:42175", + "issue:43065", + "issue:43295", + "issue:43316", + "issue:43381", + "issue:43388", + "issue:43404", + "issue:43441", + "issue:43475", + "issue:43540", + "issue:43643", + "issue:43688", + "issue:43716", + "issue:43720", + "issue:43742", + "issue:43749", + "issue:43761", + "issue:43825", + "issue:43856", + "issue:43873", + "issue:43931", + "issue:44077", + "issue:44162", + "issue:44190", + "issue:44442", + "issue:44479", + "issue:44496", + "issue:44514", + "issue:44617", + "issue:44792", + "issue:44855", + "issue:44871", + "issue:44877", + "issue:44913", + "issue:45127", + "issue:45137", + "issue:45200", + "issue:45584", + "issue:45656" + ], + "soft_pairs": [ + "issue:43381|issue:43873", + "issue:44077|issue:44479", + "issue:33290|issue:34689", + "issue:33290|issue:35141", + "issue:43643|issue:44913", + "issue:44190|issue:44496", + "issue:43749|issue:43931", + "issue:43720|issue:44442", + "issue:43873|issue:44871", + "issue:33290|issue:45656", + "issue:43316|issue:44877", + "issue:44162|issue:44792", + "issue:34689|issue:36010", + "issue:44855|issue:45137", + "issue:43540|issue:44514", + "issue:43381|issue:43856", + "issue:42175|issue:43316", + "issue:43404|issue:43441", + "issue:41950|issue:43475", + "issue:43316|issue:43716", + "issue:43388|issue:43688", + "issue:43381|issue:43742", + "issue:16998|issue:22355", + 
"issue:25251|issue:28282", + "issue:43295|issue:43825", + "issue:43065|issue:44617", + "issue:43761|issue:44479", + "issue:44871|issue:45584", + "issue:45127|issue:45200" + ], + "prepared_review_unit_hash": "d2b7d1500c9632866b5191fac037f301dde312f975b52aaa9f05dcafa5bde905", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 29, + "serialized_chars": 36315, + "estimated_input_tokens": 9079, + "estimated_eval_tokens": 18414 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the soft pairs mostly share only broad Transformers subsystems or model families, not the same concrete bug/change. I would not merge any of the soft edges.", + "confidence": 0.9, + "canonical_issue_reason": "issue:43749 is the broadest, most high-traffic regression in the set (many comments and inbound refs) and is the best anchor if a single issue must represent the cluster, though the items are not true duplicates.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43749 has the strongest signs of being the most impactful and widely discussed issue in this list, so it is the best general issue candidate.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43381", + "right": "issue:43873", + "accept": false, + "reason": "Different bugs: eval-mode gradient checkpointing vs quantization/offloading behavior." + }, + { + "left": "issue:44077", + "right": "issue:44479", + "accept": false, + "reason": "Different model/tooling problems; no shared code-path or symptom." 
+ }, + { + "left": "issue:33290", + "right": "issue:34689", + "accept": false, + "reason": "Deepspeed Adafactor OOM is unrelated to Llama 3.2 model-loading regression." + }, + { + "left": "issue:33290", + "right": "issue:35141", + "accept": false, + "reason": "Training OOM vs embedding reinit bug; unrelated failure modes." + }, + { + "left": "issue:43643", + "right": "issue:44913", + "accept": false, + "reason": "Remote-code config field loss is unrelated to GPTNeoX rotary_pct reload behavior." + }, + { + "left": "issue:44190", + "right": "issue:44496", + "accept": false, + "reason": "Dataset-loading script issue vs model auto-detection/config error; different surfaces." + }, + { + "left": "issue:43749", + "right": "issue:43931", + "accept": false, + "reason": "Different loading regressions: FSDP CPU RAM efficient loading vs shape-mismatch on Qwen3-VL." + }, + { + "left": "issue:43720", + "right": "issue:44442", + "accept": false, + "reason": "BitNet accelerate-loading packing bug is unrelated to tokenizer class loading." + }, + { + "left": "issue:43873", + "right": "issue:44871", + "accept": false, + "reason": "Quantization/offloading issue vs Gemma eos_token configuration mismatch." + }, + { + "left": "issue:33290", + "right": "issue:45656", + "accept": false, + "reason": "Both involve deepspeed/optimizer, but the concrete bug is different (OOM vs double optimizer step)." + }, + { + "left": "issue:43316", + "right": "issue:44877", + "accept": false, + "reason": "API discrepancy in Gemma3TextConfig is not the same as strict config rejection for granite_speech." + }, + { + "left": "issue:44162", + "right": "issue:44792", + "accept": false, + "reason": "ESM2 model breakage and janus image-generation test failure are unrelated." + }, + { + "left": "issue:34689", + "right": "issue:36010", + "accept": false, + "reason": "Model-loading regression and GenerationMixin import error are different import paths and fixes." 
+ }, + { + "left": "issue:44855", + "right": "issue:45137", + "accept": false, + "reason": "Python 3.13 TorchScript parse error is unrelated to DeepSpeed ZeRO3 deque underflow." + }, + { + "left": "issue:43540", + "right": "issue:44514", + "accept": false, + "reason": "Both are Qwen video-related, but one is video input processing and the other is batched chat-template padding behavior." + }, + { + "left": "issue:43381", + "right": "issue:43856", + "accept": false, + "reason": "Gradient-checkpointing eval-mode bug is unrelated to Qwen3 MoE memory usage." + }, + { + "left": "issue:42175", + "right": "issue:43316", + "accept": false, + "reason": "Backend packaging dependency issue is unrelated to Gemma3TextConfig API inconsistency." + }, + { + "left": "issue:43404", + "right": "issue:43441", + "accept": false, + "reason": "Mistral3 lm_head tying bug and Ministral FlashAttention failure are distinct code paths." + }, + { + "left": "issue:41950", + "right": "issue:43475", + "accept": false, + "reason": "Video-classification pipeline processor lookup is unrelated to Sam3VisionEncoderOutput missing attribute." + }, + { + "left": "issue:43316", + "right": "issue:43716", + "accept": false, + "reason": "Config API mismatch and image-preprocessor dtype mismatch are different problems." + }, + { + "left": "issue:43388", + "right": "issue:43688", + "accept": false, + "reason": "Metrics label-truncation bug is unrelated to auxiliary-loss normalization." + }, + { + "left": "issue:43381", + "right": "issue:43742", + "accept": false, + "reason": "Gradient-checkpointing eval-mode issue is unrelated to MobileLLM key error on load." + }, + { + "left": "issue:16998", + "right": "issue:22355", + "accept": false, + "reason": "Model max length question and missing transformers.onnx module are unrelated." + }, + { + "left": "issue:25251", + "right": "issue:28282", + "accept": false, + "reason": "Pipeline top_k output-shape bug is unrelated to missing PyTorch import." 
+ }, + { + "left": "issue:43295", + "right": "issue:43825", + "accept": false, + "reason": "Custom processor/tokenizer regression is unrelated to the v5 translation-task error message." + }, + { + "left": "issue:43065", + "right": "issue:44617", + "accept": false, + "reason": "Dummy Conv2d implementation question is unrelated to Sam3Video OOM." + }, + { + "left": "issue:43761", + "right": "issue:44479", + "accept": false, + "reason": "Hidden-states regression in CLIPVisionModel is unrelated to Qwen video-input regression." + }, + { + "left": "issue:44871", + "right": "issue:45584", + "accept": false, + "reason": "EOS-token config inconsistency is unrelated to Whisper empty-transcription generation failure." + }, + { + "left": "issue:45127", + "right": "issue:45200", + "accept": false, + "reason": "LoRA merge collapse with tied embeddings is unrelated to Gemma 4 mm_token_type_ids defaults." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 27, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:32090", + "issue:35141", + "issue:41720", + "issue:42617", + "issue:43065", + "issue:43295", + "issue:43452", + "issue:43550", + "issue:43575", + "issue:43606", + "issue:43646", + "issue:43701", + "issue:43742", + "issue:43761", + "issue:43827", + "issue:43906", + "issue:43931", + "issue:43994", + "issue:44060", + "issue:44077", + "issue:44190", + "issue:44242", + "issue:44464", + "issue:44479", + "issue:44496", + "issue:44521", + "issue:44534", + "issue:44610", + "issue:44617", + "issue:44734", + "issue:44792", + "issue:44871", + "issue:44913", + "issue:44933", + "issue:44936", + 
"issue:44964", + "issue:44977", + "issue:45127", + "issue:45245", + "issue:45290", + "issue:45357", + "issue:45362", + "issue:45381", + "issue:45406", + "issue:45563", + "issue:45584", + "issue:45600", + "issue:45698" + ], + "soft_pairs": [ + "issue:44077|issue:44534", + "issue:44936|issue:45698", + "issue:44734|issue:45406", + "issue:43295|issue:44871", + "issue:43550|issue:43701", + "issue:43606|issue:43701", + "issue:43646|issue:44913", + "issue:43452|issue:44496", + "issue:43761|issue:44242", + "issue:43931|issue:44060", + "issue:43575|issue:43742", + "issue:44617|issue:45584", + "issue:45563|issue:45584", + "issue:42617|issue:43575", + "issue:43906|issue:45290", + "issue:44521|issue:45381", + "issue:44479|issue:44734", + "issue:44933|issue:45362", + "issue:32090|issue:35141", + "issue:41720|issue:43575", + "issue:43065|issue:44610", + "issue:44464|issue:44871", + "issue:44464|issue:45563", + "issue:43827|issue:43994", + "issue:44792|issue:44977", + "issue:44534|issue:44913", + "issue:41720|issue:45245", + "issue:44190|issue:44964", + "issue:44792|issue:45600", + "issue:45127|issue:45357" + ], + "prepared_review_unit_hash": "dd11201ce7ea6765bf191ef8b2fd6b72b0af9db110fc6a561e5700dfe8074836", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36570, + "estimated_input_tokens": 9143, + "estimated_eval_tokens": 18542 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the soft pairs mostly share broad Transformers/model-loading keywords but describe different failures, models, or code paths. 
I don\u2019t see a safe canonical issue or any PR candidate here.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44077", + "right": "issue:44534", + "accept": false, + "reason": "Different bugs: optional post_init API change vs non-persistent buffer corruption." + }, + { + "left": "issue:44936", + "right": "issue:45698", + "accept": false, + "reason": "Different code paths: train/evaluate state bug vs wrong custom module loaded after save_pretrained." + }, + { + "left": "issue:44734", + "right": "issue:45406", + "accept": false, + "reason": "One is KV-cache indexing in serve responses; the other is a missing _tokenizer attribute on Gemma4Processor." + }, + { + "left": "issue:43295", + "right": "issue:44871", + "accept": false, + "reason": "Different regressions: processor.tokenizer access/images handling vs eos_token_id mismatch." + }, + { + "left": "issue:43550", + "right": "issue:43701", + "accept": false, + "reason": "Unrelated failures: torch.compile+SDPA on Bamba vs resume_from_checkpoint key mismatch." + }, + { + "left": "issue:43606", + "right": "issue:43701", + "accept": false, + "reason": "Device-mismatch with CPU offload is unrelated to checkpoint key mapping." + }, + { + "left": "issue:43646", + "right": "issue:44913", + "accept": false, + "reason": "Custom model initialization breakage is not the same as GPTNeoXConfig rotary_pct reload drift." + }, + { + "left": "issue:43452", + "right": "issue:44496", + "accept": false, + "reason": "gguf_file loading issue is different from an unrecognized-model config.json error." + }, + { + "left": "issue:43761", + "right": "issue:44242", + "accept": false, + "reason": "CLIP hidden_states regression and MoE load-balancing loss omission are different issues." 
+ }, + { + "left": "issue:43931", + "right": "issue:44060", + "accept": false, + "reason": "Weight-shape mismatch on model load is not the same as a bad tied-weights warning." + }, + { + "left": "issue:43575", + "right": "issue:43742", + "accept": false, + "reason": "tp OOM on Qwen2-57B is unrelated to a MobileLLM loading KeyError." + }, + { + "left": "issue:44617", + "right": "issue:45584", + "accept": false, + "reason": "CUDA OOM in Sam3Video is unrelated to Whisper empty-transcription generation failure." + }, + { + "left": "issue:45563", + "right": "issue:45584", + "accept": false, + "reason": "A stale warning about num_return_sequences is not the same as a generation crash." + }, + { + "left": "issue:42617", + "right": "issue:43575", + "accept": false, + "reason": "3d_parallel.py runtime failure and tensor-parallel OOM are distinct problems." + }, + { + "left": "issue:43906", + "right": "issue:45290", + "accept": false, + "reason": "A reproduction of an older issue is not enough to show the same apply_chat_template/tool-call crash." + }, + { + "left": "issue:44521", + "right": "issue:45381", + "accept": false, + "reason": "Both are multimodal, but assistant_masks being zero is a different bug from vision_position_ids being wrong." + }, + { + "left": "issue:44479", + "right": "issue:44734", + "accept": false, + "reason": "Video-input regression in Qwen VL models is unrelated to KV-cache tensor indexing in serve." + }, + { + "left": "issue:44933", + "right": "issue:45362", + "accept": false, + "reason": "Missing image_utils import and Qwen3.5 chat crash are unrelated failures." + }, + { + "left": "issue:32090", + "right": "issue:35141", + "accept": false, + "reason": "Trainer broadcast TypeError is not the same as output embedding reinitialization after resize." + }, + { + "left": "issue:41720", + "right": "issue:43575", + "accept": false, + "reason": "Auto device-mapping cudaErrorAssert on A800 is a different symptom than tp OOM." 
+ }, + { + "left": "issue:43065", + "right": "issue:44610", + "accept": false, + "reason": "Dummy Conv2d in Sam3PixelDecoder and OmDet-Turbo size mismatch are different model/processor bugs." + }, + { + "left": "issue:44464", + "right": "issue:44871", + "accept": false, + "reason": "Chunked generation with compiled forward is unrelated to eos_token_id config inconsistency." + }, + { + "left": "issue:44464", + "right": "issue:45563", + "accept": false, + "reason": "Inconsistent compiled generation outputs and a stale warning are not the same bug." + }, + { + "left": "issue:43827", + "right": "issue:43994", + "accept": false, + "reason": "Docs still referencing pipeline() is unrelated to SigLIP2 producing nonsensical outputs." + }, + { + "left": "issue:44792", + "right": "issue:44977", + "accept": false, + "reason": "Janus image-generation test failure does not match Qwen3.5 flash-attention generation issues." + }, + { + "left": "issue:44534", + "right": "issue:44913", + "accept": false, + "reason": "Non-persistent buffer junk in v5 is unrelated to GPTNeoX rotary_pct reload behavior." + }, + { + "left": "issue:41720", + "right": "issue:45245", + "accept": false, + "reason": "Qwen3 auto device mapping crash and category-count limit error are different failures." + }, + { + "left": "issue:44190", + "right": "issue:44964", + "accept": false, + "reason": "Local dataset loading in a trainer script is unrelated to loading Phi-4-multimodal-instruct." + }, + { + "left": "issue:44792", + "right": "issue:45600", + "accept": false, + "reason": "A janus test failure is not the same as CI breakage from removed Sam3LiteText configs." + }, + { + "left": "issue:45127", + "right": "issue:45357", + "accept": false, + "reason": "LoRA merge collapse with tied embeddings is unrelated to incorrect visual-encoder keys on save_pretrained." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 28, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:36246", + "issue:38175", + "issue:38617", + "issue:39692", + "issue:41628", + "issue:41950", + "issue:42175", + "issue:43065", + "issue:43116", + "issue:43295", + "issue:43335", + "issue:43388", + "issue:43404", + "issue:43479", + "issue:43540", + "issue:43550", + "issue:43644", + "issue:43650", + "issue:43761", + "issue:43976", + "issue:44016", + "issue:44077", + "issue:44079", + "issue:44112", + "issue:44186", + "issue:44190", + "issue:44220", + "issue:44279", + "issue:44466", + "issue:44561", + "issue:44683", + "issue:44734", + "issue:44743", + "issue:44855", + "issue:44913", + "issue:44938", + "issue:45216", + "issue:45245", + "issue:45276", + "issue:45375", + "issue:45406", + "issue:45446", + "issue:45542", + "issue:45584", + "issue:45588", + "issue:45600", + "issue:45632" + ], + "soft_pairs": [ + "issue:44938|issue:45542", + "issue:43388|issue:44079", + "issue:43761|issue:44743", + "issue:44855|issue:45588", + "issue:43335|issue:44855", + "issue:43976|issue:44855", + "issue:44077|issue:44079", + "issue:43644|issue:44913", + "issue:43550|issue:44190", + "issue:43540|issue:44734", + "issue:43650|issue:44220", + "issue:44112|issue:45600", + "issue:43116|issue:44190", + "issue:45276|issue:45584", + "issue:39692|issue:44190", + "issue:41950|issue:44016", + "issue:43479|issue:45375", + "issue:44734|issue:45632", + "issue:36246|issue:41628", + "issue:43295|issue:45276", + "issue:43404|issue:45216", + "issue:45245|issue:45584", + "issue:36246|issue:38617", + 
"issue:44016|issue:44220", + "issue:44561|issue:45632", + "issue:44186|issue:44734", + "issue:42175|issue:44279", + "issue:38175|issue:43065", + "issue:44683|issue:45446", + "issue:45406|issue:45632", + "issue:44466|issue:44913" + ], + "prepared_review_unit_hash": "a87b4e1cf8ccdd44767a454b4e6327dfe42944d022fc0708e4f71b3e4e71daaf", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 31, + "serialized_chars": 36639, + "estimated_input_tokens": 9160, + "estimated_eval_tokens": 18576 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is heterogeneous: the soft-similarity pairs mostly share broad transformer/regression vocabulary but do not describe the same concrete bug or change. No single issue stands out as a reliable duplicate canonical.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44938", + "right": "issue:45542", + "accept": false, + "reason": "Both mention runtime/backend failures, but one is a Python import/load problem and the other is a TensorFlow backend packaging issue; different code paths." + }, + { + "left": "issue:43388", + "right": "issue:44079", + "accept": false, + "reason": "One is a metrics label-truncation bug and the other is a ModelOutput key-assignment bug; unrelated internals." + }, + { + "left": "issue:43761", + "right": "issue:44743", + "accept": false, + "reason": "CLIPVision hidden-state regression and Qwen recurrent-state reset are different model-specific failures." 
+ }, + { + "left": "issue:44855", + "right": "issue:45588", + "accept": false, + "reason": "Python import parsing error vs flash-attention auxiliary-state AttributeError; no shared underlying defect." + }, + { + "left": "issue:43335", + "right": "issue:44855", + "accept": false, + "reason": "SwitchTransformers layer construction bug is unrelated to the DebertaV2 Python 3.13 import parsing issue." + }, + { + "left": "issue:43976", + "right": "issue:44855", + "accept": false, + "reason": "General Python-version compatibility complaint vs a specific IndentationError on import; not the same bug." + }, + { + "left": "issue:44077", + "right": "issue:44079", + "accept": false, + "reason": "One concerns post_init validation policy, the other ModelOutput key handling; different utilities and fixes." + }, + { + "left": "issue:43644", + "right": "issue:44913", + "accept": false, + "reason": "Non-persistent buffer initialization and GPTNeoX config reload persistence are separate regressions." + }, + { + "left": "issue:43550", + "right": "issue:44190", + "accept": false, + "reason": "torch.compile/SDPA failure in Bamba is unrelated to local dataset loading in an image-classification script." + }, + { + "left": "issue:43540", + "right": "issue:44734", + "accept": false, + "reason": "Video-input processing in Qwen3OmniMoe and KV-cache indexing in transformers serve are different failures." + }, + { + "left": "issue:43650", + "right": "issue:44220", + "accept": false, + "reason": "The first issue is non-actionable/empty content, the second is an audio feature extraction bug; no duplicate relationship." + }, + { + "left": "issue:44112", + "right": "issue:45600", + "accept": false, + "reason": "A stale CI test in GraniteSpeech and a broken auto_mappings entry for Sam3LiteText are separate maintenance issues." 
+ }, + { + "left": "issue:43116", + "right": "issue:44190", + "accept": false, + "reason": "Multi-label classification example output bug and local dataset loading failure are different scripts and failure modes." + }, + { + "left": "issue:45276", + "right": "issue:45584", + "accept": false, + "reason": "Resize-token-embeddings propagation bug in Gemma4 is unrelated to Whisper empty-transcription generation after token alignment." + }, + { + "left": "issue:39692", + "right": "issue:44190", + "accept": false, + "reason": "Documentation example errors with SigLIP2 are not the same as a no-trainer image-classification dataset-loading problem." + }, + { + "left": "issue:41950", + "right": "issue:44016", + "accept": false, + "reason": "Video-classification pipeline processor lookup and a notebook syntax error are not the same underlying issue." + }, + { + "left": "issue:43479", + "right": "issue:45375", + "accept": false, + "reason": "Both are multimodal config bugs, but they affect different model families and different missing/defaulted fields." + }, + { + "left": "issue:44734", + "right": "issue:45632", + "accept": false, + "reason": "Serve KV-cache continuation indexing and trust_remote_code cache-path collisions are unrelated server/import behaviors." + }, + { + "left": "issue:36246", + "right": "issue:41628", + "accept": false, + "reason": "Both are import errors, but one is a missing model-specific image processor export and the other is a missing top-level AutoImageProcessor export." + }, + { + "left": "issue:43295", + "right": "issue:45276", + "accept": false, + "reason": "Processor/tokenizer regression in v4.57.5 and Gemma4 embedding-resize propagation are different code paths." + }, + { + "left": "issue:43404", + "right": "issue:45216", + "accept": false, + "reason": "Mistral3 tied-weight serialization and Qwen3.5 save_pretrained corruption are both checkpointing-related but not the same concrete defect." 
+ }, + { + "left": "issue:45245", + "right": "issue:45584", + "accept": false, + "reason": "Category-cardinality error and Whisper empty-transcription behavior are unrelated model/runtime failures." + }, + { + "left": "issue:36246", + "right": "issue:38617", + "accept": false, + "reason": "Different missing symbols from different modules; these are separate import breakages." + }, + { + "left": "issue:44016", + "right": "issue:44220", + "accept": false, + "reason": "A notebook syntax error is unrelated to the fbank feature extraction bug." + }, + { + "left": "issue:44561", + "right": "issue:45632", + "accept": false, + "reason": "Removal of an internal API affecting trust_remote_code and a local-model cache collision are different regressions." + }, + { + "left": "issue:44186", + "right": "issue:44734", + "accept": false, + "reason": "Tokenizer padding/truncation crash and serve KV-cache tensor indexing are different subsystems." + }, + { + "left": "issue:42175", + "right": "issue:44279", + "accept": false, + "reason": "TensorFlow backend installation issue and a generic dependency issue are too vague and not the same underlying bug." + }, + { + "left": "issue:38175", + "right": "issue:43065", + "accept": false, + "reason": "SigLIP2 zero probabilities and Sam3PixelDecoder dummy Conv2d are unrelated model bugs." + }, + { + "left": "issue:44683", + "right": "issue:45446", + "accept": false, + "reason": "Compiled flex_attention failure on new torch and an incorrect version check for AuxRequest import are separate flex_attention problems." + }, + { + "left": "issue:45406", + "right": "issue:45632", + "accept": false, + "reason": "Gemma4Processor missing _tokenizer in serve and trust_remote_code cache-path collisions are unrelated failures." + }, + { + "left": "issue:44466", + "right": "issue:44913", + "accept": false, + "reason": "Serialization of tied lm_head weights and GPTNeoX rotary_pct reload are distinct configuration/serialization bugs." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 29, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:41950", + "issue:42907", + "issue:43295", + "issue:43299", + "issue:43381", + "issue:43441", + "issue:43479", + "issue:43525", + "issue:43526", + "issue:43531", + "issue:43577", + "issue:43643", + "issue:43673", + "issue:43824", + "issue:43854", + "issue:43901", + "issue:43976", + "issue:44016", + "issue:44062", + "issue:44075", + "issue:44112", + "issue:44242", + "issue:44246", + "issue:44279", + "issue:44336", + "issue:44351", + "issue:44368", + "issue:44462", + "issue:44496", + "issue:44521", + "issue:44933", + "issue:44964", + "issue:44987", + "issue:45003", + "issue:45020", + "issue:45042", + "issue:45081", + "issue:45200", + "issue:45216", + "issue:45278", + "issue:45325", + "issue:45335", + "issue:45357", + "issue:45405", + "issue:45584", + "issue:45593", + "issue:45600" + ], + "soft_pairs": [ + "issue:44246|issue:45003", + "issue:43643|issue:44496", + "issue:42907|issue:43441", + "issue:44016|issue:44062", + "issue:44964|issue:45600", + "issue:43295|issue:45335", + "issue:43299|issue:45216", + "issue:43976|issue:44987", + "issue:43673|issue:44336", + "issue:44112|issue:44964", + "issue:43824|issue:44933", + "issue:43531|issue:45600", + "issue:44496|issue:45042", + "issue:43976|issue:44279", + "issue:43854|issue:44964", + "issue:43526|issue:43901", + "issue:45335|issue:45584", + "issue:43299|issue:45357", + "issue:44075|issue:44368", + "issue:44521|issue:45325", + "issue:43381|issue:45593", + "issue:44351|issue:45278", + "issue:43577|issue:45216", + 
"issue:43976|issue:45405", + "issue:43901|issue:44496", + "issue:41950|issue:45020", + "issue:44242|issue:45200", + "issue:43479|issue:43525", + "issue:44462|issue:45081" + ], + "prepared_review_unit_hash": "fbacfe924c3393a2e28278f83448a387e9f8529c918368b62cad824c44a38a0d", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 29, + "serialized_chars": 35785, + "estimated_input_tokens": 8947, + "estimated_eval_tokens": 18150 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is heterogeneous: most items are separate Transformers regressions around loading, configs, imports, or generation, but not the same underlying bug. I rejected every soft edge as a non-duplicate. If one issue must serve as the umbrella representative, issue 45020 is the broadest and most central to the version-regression/loading theme.", + "confidence": 0.73, + "canonical_issue_reason": "Issue 45020 is the best umbrella representative because it is broad, open-ended, and closest to the recurring theme of recent Transformers regressions breaking model loading/remote_code behavior. That said, the set is not a true duplicate cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45020 is the strongest global issue candidate: it has the broadest scope and best matches the common regression theme seen across several other reports, even though the individual bugs are distinct.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44246", + "right": "issue:45003", + "accept": false, + "reason": "Import slowness vs unsafe sys.modules access; related area, but different failure modes." 
+ }, + { + "left": "issue:43643", + "right": "issue:44496", + "accept": false, + "reason": "Both involve config/model loading, but one is missing fields under trust_remote_code and the other is model-type resolution." + }, + { + "left": "issue:42907", + "right": "issue:43441", + "accept": false, + "reason": "Same model family, but save_pretrained on dequantized checkpoints vs a FlashAttention load/runtime failure." + }, + { + "left": "issue:44016", + "right": "issue:44062", + "accept": false, + "reason": "Notebook syntax error vs AddedToken keyword duplication; unrelated bugs." + }, + { + "left": "issue:44964", + "right": "issue:45600", + "accept": false, + "reason": "Phi-4 multimodal loading issue vs auto_mappings referencing removed configs; different code paths." + }, + { + "left": "issue:43295", + "right": "issue:45335", + "accept": false, + "reason": "Processor/tokenizer regression vs t5gemma embedding resize bug." + }, + { + "left": "issue:43299", + "right": "issue:45216", + "accept": false, + "reason": "Both are Qwen-related, but one is loading failure and the other is incorrect save_pretrained checkpoint contents." + }, + { + "left": "issue:43976", + "right": "issue:44987", + "accept": false, + "reason": "Python/version compatibility issue vs a specific model-loading failure." + }, + { + "left": "issue:43673", + "right": "issue:44336", + "accept": false, + "reason": "Generation cache regression vs ANSI formatting in loading_report; unrelated." + }, + { + "left": "issue:44112", + "right": "issue:44964", + "accept": false, + "reason": "CI test/device override failure vs model loading failure; not the same bug." + }, + { + "left": "issue:43824", + "right": "issue:44933", + "accept": false, + "reason": "Different missing imports from different modules and different APIs." + }, + { + "left": "issue:43531", + "right": "issue:45600", + "accept": false, + "reason": "MoE sliding_window bug vs removed config references in auto_mappings." 
+ }, + { + "left": "issue:44496", + "right": "issue:45042", + "accept": false, + "reason": "Model/config resolution failure vs image processor torchvision dependency regression." + }, + { + "left": "issue:43976", + "right": "issue:44279", + "accept": false, + "reason": "Python support/version issue vs a generic dependency complaint." + }, + { + "left": "issue:43854", + "right": "issue:44964", + "accept": false, + "reason": "Different models and likely different root causes for the loading failure." + }, + { + "left": "issue:43526", + "right": "issue:43901", + "accept": false, + "reason": "ImageProcessorFast label reduction bug vs docs/behavior mismatch for return_all_scores." + }, + { + "left": "issue:45335", + "right": "issue:45584", + "accept": false, + "reason": "Embedding resize bug vs Whisper empty-transcription generation bug." + }, + { + "left": "issue:43299", + "right": "issue:45357", + "accept": false, + "reason": "Both involve Qwen3.5, but one is loading and the other is incorrect visual-encoder key saving." + }, + { + "left": "issue:44075", + "right": "issue:44368", + "accept": false, + "reason": "Optimizer argument handling vs LoRA warning in Qwen3.5; unrelated." + }, + { + "left": "issue:44521", + "right": "issue:45325", + "accept": false, + "reason": "Assistant mask generation bug vs rope/position-id scaling bug." + }, + { + "left": "issue:43381", + "right": "issue:45593", + "accept": false, + "reason": "Gradient checkpointing eval-mode restriction vs D-FINE auxiliary-loss omission." + }, + { + "left": "issue:44351", + "right": "issue:45278", + "accept": false, + "reason": "One specific HybridCache import error vs a broad collection of import regressions; not enough evidence of one fix." + }, + { + "left": "issue:43577", + "right": "issue:45216", + "accept": false, + "reason": "BLIP2 dtype propagation bug vs Qwen3.5 save_pretrained checkpoint bug." 
+ }, + { + "left": "issue:43976", + "right": "issue:45405", + "accept": false, + "reason": "Python compatibility issue vs unreleased peft minimum-version bump." + }, + { + "left": "issue:43901", + "right": "issue:44496", + "accept": false, + "reason": "Docs mismatch vs model recognition failure; different bugs." + }, + { + "left": "issue:41950", + "right": "issue:45020", + "accept": false, + "reason": "Video-classification processor lookup bug vs remote_code breakage; only loosely related by model loading." + }, + { + "left": "issue:44242", + "right": "issue:45200", + "accept": false, + "reason": "MoE load-balancing loss omission vs mm_token_type_ids defaulting; different training-path bugs." + }, + { + "left": "issue:43479", + "right": "issue:43525", + "accept": false, + "reason": "Both are config-related, but they affect different models and different missing/default fields." + }, + { + "left": "issue:44462", + "right": "issue:45081", + "accept": false, + "reason": "Tokenizer.json precedence issue vs Mistral regex patch crash; distinct tokenizer-loading bugs." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 30, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:38175", + "issue:39692", + "issue:41950", + "issue:42175", + "issue:42994", + "issue:43065", + "issue:43122", + "issue:43262", + "issue:43295", + "issue:43299", + "issue:43317", + "issue:43441", + "issue:43450", + "issue:43650", + "issue:43653", + "issue:43673", + "issue:43723", + "issue:43761", + "issue:43784", + "issue:43854", + "issue:43901", + "issue:43976", + "issue:44016", + "issue:44162", + "issue:44261", + "issue:44263", + "issue:44279", + "issue:44291", + "issue:44297", + "issue:44462", + "issue:44466", + "issue:44496", + "issue:44521", + "issue:44617", + "issue:44623", + "issue:44671", + "issue:44683", + "issue:44749", + "issue:44779", + "issue:44877", + "issue:44898", + "issue:44987", + "issue:45276", + "issue:45290", + "issue:45397", + "issue:45405", + "issue:45542", + "issue:45593" + ], + "soft_pairs": [ + "issue:44683|issue:45542", + "issue:44496|issue:44877", + "issue:44297|issue:44462", + "issue:44279|issue:45405", + "issue:43299|issue:43854", + "issue:41950|issue:43650", + "issue:44462|issue:44779", + "issue:39692|issue:43901", + "issue:43295|issue:44779", + "issue:38175|issue:44617", + "issue:44016|issue:44291", + "issue:43450|issue:44279", + "issue:44261|issue:44263", + "issue:43723|issue:44162", + "issue:42994|issue:43317", + "issue:43065|issue:44898", + "issue:43441|issue:44683", + "issue:43650|issue:45290", + "issue:44466|issue:45276", + "issue:43262|issue:44521", + "issue:44623|issue:45397", + "issue:44623|issue:44749", + "issue:43784|issue:45542", + 
"issue:43673|issue:43761", + "issue:44671|issue:44779", + "issue:42175|issue:43976", + "issue:43122|issue:43650", + "issue:44987|issue:45542", + "issue:43653|issue:45593" + ], + "prepared_review_unit_hash": "94758f60afa7dba9f63aceae22719ea2335bd5a5b88076861cff68382ef3c34f", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 29, + "serialized_chars": 36269, + "estimated_input_tokens": 9068, + "estimated_eval_tokens": 18392 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous and does not contain any convincing duplicate pairs. The soft edges mostly link unrelated model-loading, tokenizer, processor, quantization, attention, or documentation issues. No PRs are present.", + "confidence": 0.96, + "canonical_issue_reason": "Issue 44521 is the best representative because it is a concrete, user-facing regression in a common multimodal API (`apply_chat_template`) and is still open; the other issues are mostly isolated model- or environment-specific reports.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44521 is the strongest standalone issue to anchor the cluster: it describes a clear bug, has multiple inbound references, and affects a broadly used multimodal chat-template path.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44683", + "right": "issue:45542", + "accept": false, + "reason": "Flex-attention compilation on torch>=2.9 and missing TensorFlow backend from install extras are unrelated failures." 
+ }, + { + "left": "issue:44496", + "right": "issue:44877", + "accept": false, + "reason": "One is an unrecognized model/config loading error; the other is strict config rejection for granite_speech. Different code paths and causes." + }, + { + "left": "issue:44297", + "right": "issue:44462", + "accept": false, + "reason": "Tokenizer save metadata mismatch and AutoTokenizer ignoring tokenizer.json are different serialization vs loading bugs." + }, + { + "left": "issue:44279", + "right": "issue:45405", + "accept": false, + "reason": "A generic dependency issue is not the same as bumping MIN_PEFT_VERSION to an unreleased release." + }, + { + "left": "issue:43299", + "right": "issue:43854", + "accept": false, + "reason": "These are model-specific loading problems for different models, not the same underlying bug." + }, + { + "left": "issue:41950", + "right": "issue:43650", + "accept": false, + "reason": "Video pipeline processor lookup and a vague 'ADD THE DATA' issue are not the same concrete problem." + }, + { + "left": "issue:44462", + "right": "issue:44779", + "accept": false, + "reason": "Both involve tokenizers, but one is about repository tokenizer.json being ignored and the other is a Deepseek tokenization regression." + }, + { + "left": "issue:39692", + "right": "issue:43901", + "accept": false, + "reason": "Both are documentation-related, but they concern different examples and different API mismatches." + }, + { + "left": "issue:43295", + "right": "issue:44779", + "accept": false, + "reason": "Custom model processor/tokenizer regression and Deepseek tokenizer incorrect output are distinct bugs." + }, + { + "left": "issue:38175", + "right": "issue:44617", + "accept": false, + "reason": "Unexpected zero probabilities in SigLIP2 and Sam3Video CUDA OOM are unrelated symptoms and subsystems." + }, + { + "left": "issue:44016", + "right": "issue:44291", + "accept": false, + "reason": "Notebook syntax error and init_empty_weights argument handling are unrelated." 
+ }, + { + "left": "issue:43450", + "right": "issue:44279", + "accept": false, + "reason": "Batched video processor shape handling is unrelated to a dependency issue." + }, + { + "left": "issue:44261", + "right": "issue:44263", + "accept": false, + "reason": "These touch different code paths: MLA layernorm precision vs GlmMoeDsaIndexer torch.split behavior." + }, + { + "left": "issue:43723", + "right": "issue:44162", + "accept": false, + "reason": "AutoTokenizer loading failure and ESM2 being broken are different model/loading issues." + }, + { + "left": "issue:42994", + "right": "issue:43317", + "accept": false, + "reason": "Quantized model saving failure and dequantized model loading with device_map offload are opposite stages and not the same bug." + }, + { + "left": "issue:43065", + "right": "issue:44898", + "accept": false, + "reason": "Dummy Conv2d in Sam3PixelDecoder and Perceiver non-default resolution failure are different model-specific defects." + }, + { + "left": "issue:43441", + "right": "issue:44683", + "accept": false, + "reason": "Both mention attention, but Ministral-3 FlashAttention failure and torch>=2.9 flex_attention compilation failure are not the same concrete code-path problem." + }, + { + "left": "issue:43650", + "right": "issue:45290", + "accept": false, + "reason": "Unrelated issues: one is not a real technical report, the other is an apply_chat_template crash on tool-call messages." + }, + { + "left": "issue:44466", + "right": "issue:45276", + "accept": false, + "reason": "Serialization inconsistency for lm_head.weight and resize_token_embeddings not updating embeddings are different behaviors." + }, + { + "left": "issue:43262", + "right": "issue:44521", + "accept": false, + "reason": "Both involve apply_chat_template, but one is audio sample-rate defaulting and the other is multimodal assistant mask generation." 
+ }, + { + "left": "issue:44623", + "right": "issue:45397", + "accept": false, + "reason": "processor.save_pretrained missing files and gemma-4 zero3 from_pretrained are different load/save failures." + }, + { + "left": "issue:44623", + "right": "issue:44749", + "accept": false, + "reason": "Missing processor save files and Chinese tokenizer/filtering slowdown are unrelated." + }, + { + "left": "issue:43784", + "right": "issue:45542", + "accept": false, + "reason": "Sentence-transformers import NameError and missing TensorFlow backend due to pip extras are unrelated environment/import issues." + }, + { + "left": "issue:43673", + "right": "issue:43761", + "accept": false, + "reason": "Chunked-prefill cache handling and CLIPVisionModel hidden_states propagation are different model/runtime bugs." + }, + { + "left": "issue:44671", + "right": "issue:44779", + "accept": false, + "reason": "CamemBERT masked LM prediction regressions and Deepseek tokenizer regressions affect different models and layers." + }, + { + "left": "issue:42175", + "right": "issue:43976", + "accept": false, + "reason": "Missing TensorFlow backend in a torch install and Python version incompatibility in Transformers 5.1.0 are unrelated dependency/environment issues." + }, + { + "left": "issue:43122", + "right": "issue:43650", + "accept": false, + "reason": "Tokenizer output changes across versions and 'ADD THE DATA' are not the same issue." + }, + { + "left": "issue:44987", + "right": "issue:45542", + "accept": false, + "reason": "Model loading failure in transformers>=5.1.0 and missing TensorFlow backend from install extras are unrelated." + }, + { + "left": "issue:43653", + "right": "issue:45593", + "accept": false, + "reason": "BigBirdTokenizer special-token registration and D-FINE auxiliary-loss behavior are unrelated model areas." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 31, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:36246", + "issue:41628", + "issue:41950", + "issue:42491", + "issue:42757", + "issue:43295", + "issue:43299", + "issue:43352", + "issue:43475", + "issue:43525", + "issue:43526", + "issue:43531", + "issue:43673", + "issue:43824", + "issue:43881", + "issue:43901", + "issue:43976", + "issue:43994", + "issue:44016", + "issue:44162", + "issue:44188", + "issue:44220", + "issue:44242", + "issue:44246", + "issue:44295", + "issue:44297", + "issue:44336", + "issue:44351", + "issue:44361", + "issue:44373", + "issue:44462", + "issue:44485", + "issue:44496", + "issue:44561", + "issue:44589", + "issue:44617", + "issue:44661", + "issue:44857", + "issue:45003", + "issue:45200", + "issue:45250", + "issue:45356", + "issue:45362", + "issue:45397", + "issue:45542", + "issue:45561", + "issue:45563", + "issue:45593" + ], + "soft_pairs": [ + "issue:43901|issue:43994", + "issue:44361|issue:45593", + "issue:43824|issue:43976", + "issue:43475|issue:44016", + "issue:44297|issue:45397", + "issue:43525|issue:43881", + "issue:45561|issue:45563", + "issue:44589|issue:45362", + "issue:43352|issue:45250", + "issue:44188|issue:44336", + "issue:41628|issue:43976", + "issue:43295|issue:44617", + "issue:42491|issue:43299", + "issue:44336|issue:44373", + "issue:41950|issue:44220", + "issue:43901|issue:44246", + "issue:44242|issue:45593", + "issue:44295|issue:45397", + "issue:44462|issue:45356", + "issue:44661|issue:45542", + "issue:45003|issue:45561", + "issue:43531|issue:45362", + "issue:44246|issue:44496", + 
"issue:36246|issue:42757", + "issue:36246|issue:44351", + "issue:43673|issue:44561", + "issue:44336|issue:44485", + "issue:43526|issue:45003", + "issue:44162|issue:45200", + "issue:44242|issue:44857" + ], + "prepared_review_unit_hash": "dd78eee91f823a0d7865dff04c4720e6e161b93603e7f484daaa65e9e1639c26", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36322, + "estimated_input_tokens": 9081, + "estimated_eval_tokens": 18418 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly false-positive similarity: it mixes unrelated import/export errors, model-loading regressions, pipeline bugs, docs/test issues, and performance complaints. None of the soft-linked pairs look like the same underlying defect tightly enough to merge.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue is safe here; the items are too heterogeneous to collapse into one duplicate bucket.", + "canonical_pr_reason": null, + "best_issue_reason": "No clear best issue for the whole cluster; at most there are a few small topical groups, but they do not form one duplicate chain.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43901", + "right": "issue:43994", + "accept": false, + "reason": "Docs mismatch for return_all_scores is unrelated to a SigLIP2 model output-quality bug." + }, + { + "left": "issue:44361", + "right": "issue:45593", + "accept": false, + "reason": "MLukeTokenizer task AttributeError and D-FINE auxiliary-loss behavior are different models and different failures." 
+ }, + { + "left": "issue:43824", + "right": "issue:43976", + "accept": false, + "reason": "A missing Qwen2.5-VL export and a Python version compatibility issue are unrelated import problems." + }, + { + "left": "issue:43475", + "right": "issue:44016", + "accept": false, + "reason": "SAM3 video attribute error is unrelated to a notebook syntax error." + }, + { + "left": "issue:44297", + "right": "issue:45397", + "accept": false, + "reason": "Tokenizer_class metadata mismatch is not the same bug as gemma-4 zero3 from_pretrained failure." + }, + { + "left": "issue:43525", + "right": "issue:43881", + "accept": false, + "reason": "Both are model-loading failures, but for different model families and likely different code paths." + }, + { + "left": "issue:45561", + "right": "issue:45563", + "accept": false, + "reason": "A pytest-xdist file race and a stale generation warning are unrelated testing/runtime issues." + }, + { + "left": "issue:44589", + "right": "issue:45362", + "accept": false, + "reason": "Float8 storage lookup failure is unrelated to Qwen3.5 chat crashes." + }, + { + "left": "issue:43352", + "right": "issue:45250", + "accept": false, + "reason": "A specific model lacking Flash Attention 2 support is not the same as the general Flash Attention topic." + }, + { + "left": "issue:44188", + "right": "issue:44336", + "accept": false, + "reason": "torch.compile kernel divergence and stray ANSI codes in loading_report are unrelated." + }, + { + "left": "issue:41628", + "right": "issue:43976", + "accept": false, + "reason": "AutoImageProcessor import failure and Python version support regression do not share the same root cause." + }, + { + "left": "issue:43295", + "right": "issue:44617", + "accept": false, + "reason": "processor/tokenizer regression is unrelated to a CUDA OOM in SAM3 Video." 
+ }, + { + "left": "issue:42491", + "right": "issue:43299", + "accept": false, + "reason": "Both mention Qwen3 MoE, but one is a LoRA compatibility break and the other is a VL loading regression; too different to merge." + }, + { + "left": "issue:44336", + "right": "issue:44373", + "accept": false, + "reason": "ANSI output in a utility and a wrong docstring are not duplicates." + }, + { + "left": "issue:41950", + "right": "issue:44220", + "accept": false, + "reason": "video-classification using image processors is unrelated to _torch_extract_fbank_features()." + }, + { + "left": "issue:43901", + "right": "issue:44246", + "accept": false, + "reason": "A docs/behavior mismatch is unrelated to intermittent import latency." + }, + { + "left": "issue:44242", + "right": "issue:45593", + "accept": false, + "reason": "Missing load-balancing loss and missing auxiliary losses are both loss-related, but for different models and flags." + }, + { + "left": "issue:44295", + "right": "issue:45397", + "accept": false, + "reason": "position_ids buffer access and gemma-4 zero3 loading are separate bugs." + }, + { + "left": "issue:44462", + "right": "issue:45356", + "accept": false, + "reason": "AutoTokenizer ignoring tokenizer.json is not the same as the Kimi-K2.5 codec regression." + }, + { + "left": "issue:44661", + "right": "issue:45542", + "accept": false, + "reason": "TOKENIZER_MAPPING_NAMES handling and missing TensorFlow backend setup are unrelated." + }, + { + "left": "issue:45003", + "right": "issue:45561", + "accept": false, + "reason": "Unsafe sys.modules access and an xdist captured_info race are different failure modes." + }, + { + "left": "issue:43531", + "right": "issue:45362", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior and Qwen3.5 chat crashes are not the same defect." + }, + { + "left": "issue:44246", + "right": "issue:44496", + "accept": false, + "reason": "Import slowness and unrecognized model config are unrelated." 
+ }, + { + "left": "issue:36246", + "right": "issue:42757", + "accept": false, + "reason": "Qwen2.5-VL image processor import and huggingface_hub is_offline_mode import are different API export issues." + }, + { + "left": "issue:36246", + "right": "issue:44351", + "accept": false, + "reason": "Different missing symbols from different modules; same symptom, not same bug." + }, + { + "left": "issue:43673", + "right": "issue:44561", + "accept": false, + "reason": "Chunked prefill cache handling and removal of is_torch_fx_available are unrelated regressions." + }, + { + "left": "issue:44336", + "right": "issue:44485", + "accept": false, + "reason": "ANSI escaping in loading_report is unrelated to GLM-5 RoPE implementation discussion." + }, + { + "left": "issue:43526", + "right": "issue:45003", + "accept": false, + "reason": "BeitImageProcessorFast label reduction and sys.modules access are unrelated code paths." + }, + { + "left": "issue:44162", + "right": "issue:45200", + "accept": false, + "reason": "ESM2 breakage and Gemma 4 mm_token_type_ids defaulting are separate model bugs." + }, + { + "left": "issue:44242", + "right": "issue:44857", + "accept": false, + "reason": "Missing router loss and LwDetrImageLoss AMP crash are not the same issue." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 32, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:36246", + "issue:41628", + "issue:41950", + "issue:42886", + "issue:43065", + "issue:43066", + "issue:43232", + "issue:43257", + "issue:43262", + "issue:43408", + "issue:43441", + "issue:43475", + "issue:43540", + "issue:43596", + "issue:43653", + "issue:43673", + "issue:43901", + "issue:44060", + "issue:44077", + "issue:44162", + "issue:44246", + "issue:44295", + "issue:44336", + "issue:44368", + "issue:44466", + "issue:44496", + "issue:44734", + "issue:44749", + "issue:44779", + "issue:44871", + "issue:44877", + "issue:44977", + "issue:44987", + "issue:44995", + "issue:45003", + "issue:45137", + "issue:45200", + "issue:45229", + "issue:45292", + "issue:45335", + "issue:45375", + "issue:45397", + "issue:45459", + "issue:45563", + "issue:45584", + "issue:45588", + "issue:45656", + "issue:45701" + ], + "soft_pairs": [ + "issue:44246|issue:44871", + "issue:36246|issue:43596", + "issue:44295|issue:44749", + "issue:43066|issue:44779", + "issue:43441|issue:43540", + "issue:44877|issue:45375", + "issue:44987|issue:45459", + "issue:42886|issue:45229", + "issue:44779|issue:45292", + "issue:41628|issue:41950", + "issue:45292|issue:45584", + "issue:43065|issue:43408", + "issue:45292|issue:45701", + "issue:43232|issue:44336", + "issue:43901|issue:45003", + "issue:44995|issue:45200", + "issue:43257|issue:45137", + "issue:44060|issue:44077", + "issue:44496|issue:45003", + "issue:43475|issue:43540", + "issue:43653|issue:44162", + "issue:43262|issue:44246", + "issue:44995|issue:45588", + 
"issue:44368|issue:45563", + "issue:44368|issue:44977", + "issue:44466|issue:45335", + "issue:43673|issue:44734", + "issue:45397|issue:45656" + ], + "prepared_review_unit_hash": "7d7774c6553aa55c4c8c3f0f09a10b23bdb9a3d289db3215e6d727d91230e469", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 28, + "serialized_chars": 36189, + "estimated_input_tokens": 9048, + "estimated_eval_tokens": 18352 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster does not look like a true duplicate set: the issues span many unrelated Transformers regressions (tokenizers, generation/cache, model configs, DeepSpeed, SAM3, Gemma, Qwen, etc.). I\u2019d treat them as separate bugs rather than one merged triage bucket.", + "confidence": 0.27, + "canonical_issue_reason": "If one issue must represent the group, issue #44987 is the strongest standalone report: it is open, concrete, user-facing, and clearly tied to a version-regression during model loading. However, the set is too diverse for a confident single canonical issue.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #44987 is the best overall triage candidate because it describes a reproducible failure on a specific package/version boundary and is still open for action. It is more concrete than the broader warning/config reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44246", + "right": "issue:44871", + "accept": false, + "reason": "Unrelated problems: import latency versus Gemma eos-token configuration mismatch. No shared code path or concrete bug." 
+ }, + { + "left": "issue:36246", + "right": "issue:43596", + "accept": false, + "reason": "Different failures in different subsystems: Qwen2.5-VL import error versus DeepSpeed ZeRO3/BertModel indexing error." + }, + { + "left": "issue:44295", + "right": "issue:44749", + "accept": false, + "reason": "One is about position_ids buffer access, the other about tokenizer/filtering slowdown after upgrade. Not the same bug." + }, + { + "left": "issue:43066", + "right": "issue:44779", + "accept": false, + "reason": "Both concern tokenizers in v5, but one is a wrong decoder-type warning and the other is incorrect DeepSeek tokenization results. Too broad to merge." + }, + { + "left": "issue:43441", + "right": "issue:43540", + "accept": false, + "reason": "Different model families and failures: Ministral flash-attention issue versus Qwen3OmniMoe video input validation error." + }, + { + "left": "issue:44877", + "right": "issue:45375", + "accept": false, + "reason": "Both involve config strictness, but they target different models and different missing fields; not the same underlying bug." + }, + { + "left": "issue:44987", + "right": "issue:45459", + "accept": false, + "reason": "Different failure modes: model loading regression versus protobuf-related tokenizer error masking. No common code-path bug shown." + }, + { + "left": "issue:42886", + "right": "issue:45229", + "accept": false, + "reason": "Offline tokenizer cache loading and Gemma4 multi-GPU OOM are unrelated." + }, + { + "left": "issue:44779", + "right": "issue:45292", + "accept": false, + "reason": "Tokenizer correctness regression versus resize_token_embeddings not updating output embeddings; separate issues." + }, + { + "left": "issue:41628", + "right": "issue:41950", + "accept": false, + "reason": "Importing AutoImageProcessor from transformers and a video-classification pipeline using image processors are related only at a broad API level, not the same concrete bug." 
+ }, + { + "left": "issue:45292", + "right": "issue:45584", + "accept": false, + "reason": "Embedding-resize behavior and Whisper empty-transcription generation failure are unrelated." + }, + { + "left": "issue:43065", + "right": "issue:43408", + "accept": false, + "reason": "Both mention SAM3, but one is a dummy Conv2d implementation detail and the other is a model-type warning. Different bugs." + }, + { + "left": "issue:45292", + "right": "issue:45701", + "accept": false, + "reason": "Resize-token-embedding behavior and version-dependent tokenization changes do not point to the same defect." + }, + { + "left": "issue:43232", + "right": "issue:44336", + "accept": false, + "reason": "Generation kwargs syncing and ANSI escape codes in loading_report are unrelated." + }, + { + "left": "issue:43901", + "right": "issue:45003", + "accept": false, + "reason": "Docs mismatch for text classification versus unsafe sys.modules access in modeling_utils. Different layers and different problems." + }, + { + "left": "issue:44995", + "right": "issue:45200", + "accept": false, + "reason": "GlmMoeDsa stale indexer cache on second forward pass is unrelated to Gemma4 text-only fine-tuning token-type defaults." + }, + { + "left": "issue:43257", + "right": "issue:45137", + "accept": false, + "reason": "Both involve DeepSpeed/ZeRO3, but one is Qwen3 MOE weight conversion and the other is a deque underflow error. Not the same bug." + }, + { + "left": "issue:44060", + "right": "issue:44077", + "accept": false, + "reason": "Different model-family warnings: tied-weights warning versus a patchtsmixer post_init validation change." + }, + { + "left": "issue:44496", + "right": "issue:45003", + "accept": false, + "reason": "Unrecognized model/config loading failure and unsafe sys.modules access are not the same underlying issue." 
+ }, + { + "left": "issue:43475", + "right": "issue:43540", + "accept": false, + "reason": "SAM3 vision encoder output attribute error versus Qwen3OmniMoe video-input ValueError. Different code paths." + }, + { + "left": "issue:43653", + "right": "issue:44162", + "accept": false, + "reason": "BigBirdTokenizer special-token registration issue versus ESM2 broken report. Not the same bug." + }, + { + "left": "issue:43262", + "right": "issue:44246", + "accept": false, + "reason": "Audio chat-template sampling-rate default and import performance are unrelated." + }, + { + "left": "issue:44995", + "right": "issue:45588", + "accept": false, + "reason": "Different failures in different code paths: stale cache on second forward pass versus flash-attention AttributeError on s_aux=None." + }, + { + "left": "issue:44368", + "right": "issue:45563", + "accept": false, + "reason": "Both are warnings, but one is tied-word-embeddings config warnings and the other is stale num_return_sequences warning. Not the same defect." + }, + { + "left": "issue:44368", + "right": "issue:44977", + "accept": false, + "reason": "Qwen3.5 flash-attention generation failure is not the same as a tied-weights warning emitted during fine-tuning." + }, + { + "left": "issue:44466", + "right": "issue:45335", + "accept": false, + "reason": "Both involve embedding weights, but one is serialization inconsistency and the other is resize_token_embeddings not propagating to decoder.embed_tokens. Separate bugs." + }, + { + "left": "issue:43673", + "right": "issue:44734", + "accept": false, + "reason": "Generation cache issues in different contexts (chunked_prefill vs transformers serve KV continuation) without evidence of the same code-path bug." + }, + { + "left": "issue:45397", + "right": "issue:45656", + "accept": false, + "reason": "Gemma4 zero3 from_pretrained and optimizer step being called twice with deepspeed are distinct training/runtime issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 33, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:28282", + "issue:30990", + "issue:41628", + "issue:42222", + "issue:42371", + "issue:42673", + "issue:42831", + "issue:42994", + "issue:43232", + "issue:43475", + "issue:43482", + "issue:43525", + "issue:43643", + "issue:43646", + "issue:43673", + "issue:43704", + "issue:43873", + "issue:43931", + "issue:44038", + "issue:44162", + "issue:44188", + "issue:44222", + "issue:44297", + "issue:44361", + "issue:44368", + "issue:44623", + "issue:44661", + "issue:44749", + "issue:44869", + "issue:44933", + "issue:44945", + "issue:45081", + "issue:45092", + "issue:45103", + "issue:45137", + "issue:45200", + "issue:45230", + "issue:45278", + "issue:45310", + "issue:45356", + "issue:45362", + "issue:45375", + "issue:45381", + "issue:45397", + "issue:45459", + "issue:45478", + "issue:45600", + "issue:45656" + ], + "soft_pairs": [ + "issue:44933|issue:45459", + "issue:44869|issue:45356", + "issue:45137|issue:45600", + "issue:43931|issue:44368", + "issue:43232|issue:43673", + "issue:43673|issue:45278", + "issue:44297|issue:44623", + "issue:43646|issue:44162", + "issue:44361|issue:44749", + "issue:44623|issue:45656", + "issue:28282|issue:30990", + "issue:44749|issue:45310", + "issue:42994|issue:43482", + "issue:42371|issue:45092", + "issue:44038|issue:44623", + "issue:44749|issue:45656", + "issue:42673|issue:43704", + "issue:43475|issue:43643", + "issue:45230|issue:45397", + "issue:45375|issue:45478", + "issue:43475|issue:45381", + "issue:41628|issue:44933", + "issue:45310|issue:45375", + 
"issue:45081|issue:45103", + "issue:42994|issue:43525", + "issue:43475|issue:44661", + "issue:45362|issue:45375", + "issue:42831|issue:44222", + "issue:43873|issue:44945", + "issue:43232|issue:44188", + "issue:42222|issue:45200" + ], + "prepared_review_unit_hash": "7ab605b108461052637d9c48a6b6e88052730bbf6b4b698ff8b4ea919d34a1d9", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 31, + "serialized_chars": 36720, + "estimated_input_tokens": 9180, + "estimated_eval_tokens": 18616 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a mix of related-but-distinct Transformers v5 regressions and model-specific breakages (imports, loading, generation, tokenizer, quantization, and save/load issues). I don\u2019t see any safe duplicate merges, and there are no PRs to evaluate.", + "confidence": 0.74, + "canonical_issue_reason": "issue:45278 is the best umbrella representative: it is broad, recent, open, and explicitly covers the v5 upgrade/import-regression theme that many of the other reports orbit around.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45278 is the most suitable cluster representative because it captures the widest user-facing symptom set without overfitting to one model family or one narrow code path.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44933", + "right": "issue:45459", + "accept": false, + "reason": "Both involve import-related failures, but one is a missing import from image_utils while the other is tokenizer error masking when protobuf is absent; different bug paths." 
+ }, + { + "left": "issue:44869", + "right": "issue:45356", + "accept": false, + "reason": "Both are tokenizer-adjacent, but Whisper timestamp decoding crash and Kimi-K2.5 codec/fix_mistral_regex regression are separate code paths and symptoms." + }, + { + "left": "issue:45137", + "right": "issue:45600", + "accept": false, + "reason": "DeepSpeed ZeRO3 deque underflow and auto_mappings CI breakage are unrelated failures; same general release period only." + }, + { + "left": "issue:43931", + "right": "issue:44368", + "accept": false, + "reason": "Qwen3-VL weight-shape mismatch is a model-loading error, while the other is a warning about tie_word_embeddings during LoRA fine-tuning." + }, + { + "left": "issue:43232", + "right": "issue:43673", + "accept": false, + "reason": "These both mention generation internals, but sync_gpus state handling and missing GenerationMixin cache in chunked prefill are different bugs." + }, + { + "left": "issue:43673", + "right": "issue:45278", + "accept": false, + "reason": "Missing generation cache in chunked prefill is not the same as broad import errors after upgrading Transformers." + }, + { + "left": "issue:44297", + "right": "issue:44623", + "accept": false, + "reason": "Tokenizer.save_pretrained metadata mismatch and processor.save_pretrained missing files are both save-related, but they affect different serialization layers." + }, + { + "left": "issue:43646", + "right": "issue:44162", + "accept": false, + "reason": "Custom model initialization breakage under v5 and ESM2 being broken are both migration regressions, but not the same underlying failure." + }, + { + "left": "issue:44361", + "right": "issue:44749", + "accept": false, + "reason": "MLukeTokenizer AttributeError and Chinese slowdown after upgrading to 5.3.0 are unrelated; one is a hard error, the other is performance regression." 
+ }, + { + "left": "issue:44623", + "right": "issue:45656", + "accept": false, + "reason": "processor.save_pretrained missing files and deepspeed optimizer step called twice are unrelated subsystems and symptoms." + }, + { + "left": "issue:28282", + "right": "issue:30990", + "accept": false, + "reason": "AutoModel missing PyTorch is an environment/import error, while Sentence Transformers getting stuck loading is a runtime hang; not the same bug." + }, + { + "left": "issue:44749", + "right": "issue:45310", + "accept": false, + "reason": "General slowdown after upgrade and Qwen3.5 Moe from_pretrained error are different regressions with different observable failures." + }, + { + "left": "issue:42994", + "right": "issue:43482", + "accept": false, + "reason": "Quantized model saving failure and Qwen2.5-GGUF loading failure are opposite directions of the save/load pipeline and do not share a concrete bug." + }, + { + "left": "issue:42371", + "right": "issue:45092", + "accept": false, + "reason": "TF32 settings warning and old InternVL2 remote-code/meta-init incompatibility are unrelated." + }, + { + "left": "issue:44038", + "right": "issue:44623", + "accept": false, + "reason": "Qwen3-VL-Moe v5 bug and missing processor save files touch different model/component paths; not a single fixable defect." + }, + { + "left": "issue:44749", + "right": "issue:45656", + "accept": false, + "reason": "Performance slowdown after upgrade and optimizer double-step under deepspeed are distinct training-loop issues." + }, + { + "left": "issue:42673", + "right": "issue:43704", + "accept": false, + "reason": "Same title text, but the later report is a separate instance and the evidence here does not show a single shared root cause or fixable code path." 
+ }, + { + "left": "issue:43475", + "right": "issue:43643", + "accept": false, + "reason": "SAM 3 video missing fpn_position_embeddings and trust_remote_code AutoConfig missing fields are both attribute problems, but in different models and initialization flows." + }, + { + "left": "issue:45230", + "right": "issue:45397", + "accept": false, + "reason": "Generic bug report and gemma-4 ZeRO3 from_pretrained failure do not describe the same underlying issue." + }, + { + "left": "issue:45375", + "right": "issue:45478", + "accept": false, + "reason": "Qwen3_5MoeVisionConfig missing a config field and Qwen3.5 Moe from_pretrained error overlap on model family, but the reported failures and fix scope differ." + }, + { + "left": "issue:43475", + "right": "issue:45381", + "accept": false, + "reason": "SAM 3 video output attribute error and qwen2.5-vl vision_position_ids regression are unrelated model-specific bugs." + }, + { + "left": "issue:41628", + "right": "issue:44933", + "accept": false, + "reason": "AutoImageProcessor import failure and image_utils nonexistent import are both import issues, but they concern different symbols and likely different breakages." + }, + { + "left": "issue:45310", + "right": "issue:45375", + "accept": false, + "reason": "Both involve Qwen3.5/Moe, but one is a from_pretrained error under transformers>=5.4.0 and the other is a missing config field silently dropped by @strict." + }, + { + "left": "issue:45081", + "right": "issue:45103", + "accept": false, + "reason": "Mistral regex patch crash and auto_docstring annotation handling crash are unrelated utility regressions." + }, + { + "left": "issue:42994", + "right": "issue:43525", + "accept": false, + "reason": "Quantized model saving failure and Llama4Config missing pad_token_id are different parts of the model lifecycle." 
+ }, + { + "left": "issue:43475", + "right": "issue:44661", + "accept": false, + "reason": "SAM 3 video output attribute error and add-new-model-like failure in TOKENIZER_MAPPING_NAMES are unrelated model-registration issues." + }, + { + "left": "issue:45362", + "right": "issue:45375", + "accept": false, + "reason": "Qwen3.5-35B chat crash and missing deepstack_visual_indexes are both Qwen-family issues, but not the same bug or code path." + }, + { + "left": "issue:42831", + "right": "issue:44222", + "accept": false, + "reason": "FineGrainedFP8 accuracy regression and FP8 save_pretrained for moe are both FP8-related, but one is numerical behavior and the other is serialization." + }, + { + "left": "issue:43873", + "right": "issue:44945", + "accept": false, + "reason": "Quantization offloading and incorrect output with pipeline parallelism are different distributed-inference problems." + }, + { + "left": "issue:43232", + "right": "issue:44188", + "accept": false, + "reason": "Generation kwargs update after sync_gpus and attention-kernel divergence under torch.compile are different generation/compile bugs." + }, + { + "left": "issue:42222", + "right": "issue:45200", + "accept": false, + "reason": "SAM 3 vitpose breakage and Gemma 4 mm_token_type_ids defaulting are unrelated model-specific issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 34, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:30333", + "issue:30990", + "issue:33290", + "issue:35141", + "issue:36010", + "issue:41628", + "issue:42175", + "issue:42898", + "issue:42994", + "issue:43232", + "issue:43278", + "issue:43377", + "issue:43479", + "issue:43504", + "issue:43526", + "issue:43673", + "issue:43723", + "issue:43784", + "issue:43824", + "issue:43874", + "issue:43976", + "issue:44038", + "issue:44060", + "issue:44079", + "issue:44164", + "issue:44188", + "issue:44222", + "issue:44242", + "issue:44351", + "issue:44373", + "issue:44448", + "issue:44496", + "issue:44568", + "issue:44623", + "issue:44734", + "issue:44779", + "issue:44792", + "issue:44869", + "issue:44933", + "issue:44964", + "issue:45092", + "issue:45103", + "issue:45125", + "issue:45137", + "issue:45310", + "issue:45356", + "issue:45459", + "issue:45561" + ], + "soft_pairs": [ + "issue:41628|issue:44351", + "issue:44222|issue:45310", + "issue:42994|issue:44164", + "issue:44038|issue:45310", + "issue:43526|issue:45561", + "issue:36010|issue:41628", + "issue:44496|issue:45561", + "issue:43673|issue:44188", + "issue:43377|issue:44242", + "issue:43504|issue:45561", + "issue:43232|issue:44792", + "issue:42898|issue:44568", + "issue:30990|issue:35141", + "issue:43479|issue:44079", + "issue:43479|issue:44869", + "issue:43976|issue:44933", + "issue:42175|issue:44933", + "issue:44964|issue:45137", + "issue:44188|issue:44373", + "issue:44623|issue:45310", + "issue:43784|issue:43824", + "issue:43723|issue:44568", + "issue:43278|issue:45137", + 
"issue:44060|issue:45125", + "issue:42898|issue:45356", + "issue:44734|issue:45103", + "issue:43874|issue:45092", + "issue:42898|issue:44779", + "issue:30333|issue:33290", + "issue:42898|issue:44448", + "issue:41628|issue:45459" + ], + "prepared_review_unit_hash": "67c65a1377f5048a4986df966a930298283a49ea9145a92b5e04ba330b544ecf", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 31, + "serialized_chars": 36977, + "estimated_input_tokens": 9245, + "estimated_eval_tokens": 18746 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a loose similarity bucket, not a true duplicate set: the issues span unrelated import errors, tokenizer regressions, model-loading bugs, training/runtime failures, and test infra problems. I found no soft pair that looks like the same underlying bug or change.", + "confidence": 0.93, + "canonical_issue_reason": "issue:42898 is the broadest and most central of the v5 tokenizer-behavior regression reports, so it is the best umbrella representative for this mostly tokenizer/regression-themed subset.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:42898 is the most representative issue in the group by scope and recurrence of nearby v5-tokenization similarities, even though it is not a duplicate of the other reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41628", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but for different missing symbols (`AutoImageProcessor` vs `HybridCache`) and different failure modes." 
+ }, + { + "left": "issue:44222", + "right": "issue:45310", + "accept": false, + "reason": "Both involve Qwen3.5/MoE serialization/loading, but one is FP8 `save_pretrained` and the other is a `from_pretrained` load error; not the same bug." + }, + { + "left": "issue:42994", + "right": "issue:44164", + "accept": false, + "reason": "Both touch save/load paths, but one is quantized model saving and the other is `extra_state` handling; different concrete issues." + }, + { + "left": "issue:44038", + "right": "issue:45310", + "accept": false, + "reason": "Same broad model family, but different versions and different failure surfaces (`Qwen3-VL-Moe` vs `Qwen3.5 Moe` load error)." + }, + { + "left": "issue:43526", + "right": "issue:45561", + "accept": false, + "reason": "Completely unrelated: BEiT label reduction bug versus a pytest-xdist captured-file race." + }, + { + "left": "issue:36010", + "right": "issue:41628", + "accept": false, + "reason": "Both are import failures, but they concern different API symbols and separate regressions." + }, + { + "left": "issue:44496", + "right": "issue:45561", + "accept": false, + "reason": "Unrecognized model/config loading has nothing to do with a pytest-xdist file race." + }, + { + "left": "issue:43673", + "right": "issue:44188", + "accept": false, + "reason": "Both are generation/runtime bugs, but one is missing generation cache in chunked prefill and the other is divergent attention kernels under `torch.compile`." + }, + { + "left": "issue:43377", + "right": "issue:44242", + "accept": false, + "reason": "Different model subsystems and different bugs: padding-mask correctness in MIMI vs missing load-balancing loss gating." + }, + { + "left": "issue:43504", + "right": "issue:45561", + "accept": false, + "reason": "Model preset loading with a legacy field is unrelated to the test-worker file race." 
+ }, + { + "left": "issue:43232", + "right": "issue:44792", + "accept": false, + "reason": "Different code paths: generation-kwargs update after `sync_gpus` versus a Janus image-generation test failure." + }, + { + "left": "issue:42898", + "right": "issue:44568", + "accept": false, + "reason": "Both are tokenizer regressions in v5, but they are different concrete bugs: cleanup-space behavior versus missing BOS/EOS insertion for a specific tokenizer." + }, + { + "left": "issue:30990", + "right": "issue:35141", + "accept": false, + "reason": "Unrelated issues: sentence-transformers loading hang versus token-embedding reinitialization after resize." + }, + { + "left": "issue:43479", + "right": "issue:44079", + "accept": false, + "reason": "One is multimodal config initialization from `None`; the other is `ModelOutput` key assignment when values were previously `None`." + }, + { + "left": "issue:43479", + "right": "issue:44869", + "accept": false, + "reason": "Different model areas and symptoms: multimodal config defaults vs Whisper timestamp decode crash." + }, + { + "left": "issue:43976", + "right": "issue:44933", + "accept": false, + "reason": "Python version compatibility and a missing image_utils import are unrelated bugs." + }, + { + "left": "issue:42175", + "right": "issue:44933", + "accept": false, + "reason": "Backend dependency installation does not match a nonexistent import from `image_utils`." + }, + { + "left": "issue:44964", + "right": "issue:45137", + "accept": false, + "reason": "Phi-4 multimodal loading failure and DeepSpeed ZeRO3 deque underflow are unrelated." + }, + { + "left": "issue:44188", + "right": "issue:44373", + "accept": false, + "reason": "A torch.compile attention-kernel divergence bug is unrelated to a docstring issue." + }, + { + "left": "issue:44623", + "right": "issue:45310", + "accept": false, + "reason": "Missing files in `processor.save_pretrained` is a different save-path problem than a Qwen3.5 MoE load error." 
+ }, + { + "left": "issue:43784", + "right": "issue:43824", + "accept": false, + "reason": "Both are import errors, but for different missing names in different packages (`nn` vs `Qwen2_5_VLForConditionalGeneration`)." + }, + { + "left": "issue:43723", + "right": "issue:44568", + "accept": false, + "reason": "Generic tokenizer loading failure and a specific BOS/EOS tokenization regression are not the same bug." + }, + { + "left": "issue:43278", + "right": "issue:45137", + "accept": false, + "reason": "Embedding dtype drift during evaluation is unrelated to a ZeRO3 deque error." + }, + { + "left": "issue:44060", + "right": "issue:45125", + "accept": false, + "reason": "Incorrect tied-weights warning logic and missing tensor-parallel plan metadata are different model implementation bugs." + }, + { + "left": "issue:42898", + "right": "issue:45356", + "accept": false, + "reason": "Both are tokenizer regressions in nearby versions, but one concerns cleanup-space behavior and the other codec handling/warning behavior in a specific tokenizer." + }, + { + "left": "issue:44734", + "right": "issue:45103", + "accept": false, + "reason": "Different layers entirely: KV-cache continuation crash versus an auto-docstring AttributeError with future annotations." + }, + { + "left": "issue:43874", + "right": "issue:45092", + "accept": false, + "reason": "Both are multimodal/model-init issues, but they affect different models and different failure mechanisms." + }, + { + "left": "issue:30333", + "right": "issue:33290", + "accept": false, + "reason": "MLflow reporting after training failure and DeepSpeed OOM are distinct training-time problems." + }, + { + "left": "issue:42898", + "right": "issue:44448", + "accept": false, + "reason": "Both relate to v5 output regressions, but Pegasus v4/v5 output mismatch is not the same concrete tokenizer cleanup bug." 
+ }, + { + "left": "issue:41628", + "right": "issue:45459", + "accept": false, + "reason": "Different import-related failures: missing top-level export versus exception masking when protobuf is absent." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 35, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:22355", + "issue:28282", + "issue:30333", + "issue:30990", + "issue:34689", + "issue:35141", + "issue:36331", + "issue:38175", + "issue:41628", + "issue:42757", + "issue:42831", + "issue:42886", + "issue:42898", + "issue:42994", + "issue:43295", + "issue:43479", + "issue:43644", + "issue:43723", + "issue:43756", + "issue:43824", + "issue:44038", + "issue:44060", + "issue:44206", + "issue:44222", + "issue:44261", + "issue:44263", + "issue:44295", + "issue:44297", + "issue:44360", + "issue:44373", + "issue:44442", + "issue:44483", + "issue:44484", + "issue:44485", + "issue:44514", + "issue:44623", + "issue:44871", + "issue:44913", + "issue:44977", + "issue:44991", + "issue:45092", + "issue:45103", + "issue:45278", + "issue:45310", + "issue:45357", + "issue:45406", + "issue:45459", + "issue:45656" + ], + "soft_pairs": [ + "issue:44295|issue:45656", + "issue:42898|issue:43295", + "issue:44222|issue:44297", + "issue:44297|issue:45656", + "issue:43479|issue:43644", + "issue:44442|issue:45459", + "issue:42831|issue:44623", + "issue:44222|issue:44623", + "issue:44206|issue:45103", + "issue:41628|issue:42757", + "issue:22355|issue:28282", + "issue:44263|issue:44360", + "issue:44297|issue:45310", + "issue:44222|issue:44484", + "issue:30333|issue:36331", + "issue:43723|issue:44977", + 
"issue:44263|issue:45092", + "issue:44483|issue:45406", + "issue:30333|issue:34689", + "issue:28282|issue:30333", + "issue:30333|issue:35141", + "issue:44295|issue:44623", + "issue:44373|issue:44485", + "issue:44261|issue:45092", + "issue:44514|issue:45103", + "issue:43723|issue:44871", + "issue:43723|issue:44991", + "issue:45092|issue:45357", + "issue:43723|issue:44442", + "issue:43479|issue:44913", + "issue:30990|issue:38175", + "issue:42886|issue:42994", + "issue:41628|issue:43824", + "issue:42831|issue:45310", + "issue:42831|issue:44038", + "issue:44263|issue:44373", + "issue:42757|issue:45278", + "issue:43756|issue:44060" + ], + "prepared_review_unit_hash": "aa86882df512c92b1b789055e5a65928337238463578b94675d3f7928ffec2f0", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 38, + "serialized_chars": 38848, + "estimated_input_tokens": 9712, + "estimated_eval_tokens": 19680 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: most items are separate Transformers regressions, import errors, tokenizer/load/save issues, and a few unrelated model-specific bugs. 
None of the soft edges look like true duplicate pairs; no PRs are present.", + "confidence": 0.84, + "canonical_issue_reason": "Issue 43723 is the broadest, most central report in the set: a concrete v5 tokenizer-loading regression that best represents the recurring tokenizer/load failure theme.", + "canonical_pr_reason": null, + "best_issue_reason": "43723 is the most useful representative issue because it has a clear repro, a specific regression description, and sits closest to the few other tokenizer-related reports without being too narrow.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44295", + "right": "issue:45656", + "accept": false, + "reason": "Unrelated bugs: buffer read/access error vs optimizer stepping twice under deepspeed." + }, + { + "left": "issue:42898", + "right": "issue:43295", + "accept": false, + "reason": "Different regression surfaces: tokenization cleanup behavior vs processor.tokenizer/image handling." + }, + { + "left": "issue:44222", + "right": "issue:44297", + "accept": false, + "reason": "FP8 save_pretrained/moe issue is unrelated to tokenizer_class metadata mismatch on save." + }, + { + "left": "issue:44297", + "right": "issue:45656", + "accept": false, + "reason": "Tokenizer config mismatch and deepspeed optimizer behavior are unrelated." + }, + { + "left": "issue:43479", + "right": "issue:43644", + "accept": false, + "reason": "Different initialization bugs: multimodal config defaults vs non-persistent buffer filling." + }, + { + "left": "issue:44442", + "right": "issue:45459", + "accept": false, + "reason": "Tokenizer class loading failure and protobuf exception masking have different root causes." + }, + { + "left": "issue:42831", + "right": "issue:44623", + "accept": false, + "reason": "FP8 accuracy regression is unrelated to processor.save_pretrained missing files." 
+ }, + { + "left": "issue:44222", + "right": "issue:44623", + "accept": false, + "reason": "Both involve saving, but one is FP8/moe model saving and the other is processor file persistence." + }, + { + "left": "issue:44206", + "right": "issue:45103", + "accept": false, + "reason": "Feature extractor crash from unsupported arg is unrelated to auto_docstring annotations failure." + }, + { + "left": "issue:41628", + "right": "issue:42757", + "accept": false, + "reason": "Both are import errors, but they are for different missing symbols with different fixes." + }, + { + "left": "issue:22355", + "right": "issue:28282", + "accept": false, + "reason": "Missing transformers.onnx and missing PyTorch are separate environment/import problems." + }, + { + "left": "issue:44263", + "right": "issue:44360", + "accept": false, + "reason": "Same subsystem, but different DSA indexer bugs with different concrete failures." + }, + { + "left": "issue:44297", + "right": "issue:45310", + "accept": false, + "reason": "Tokenizer_class mismatch on save is not the same bug as Qwen3.5-Moe from_pretrained failure." + }, + { + "left": "issue:44222", + "right": "issue:44484", + "accept": false, + "reason": "FP8/moe save failure and max_shard_size question are unrelated save_pretrained topics." + }, + { + "left": "issue:30333", + "right": "issue:36331", + "accept": false, + "reason": "MLflow reporting and trainer loss signature errors are unrelated training issues." + }, + { + "left": "issue:43723", + "right": "issue:44977", + "accept": false, + "reason": "Generic tokenizer-load regression and flash-attention generation failure are different code paths." + }, + { + "left": "issue:44263", + "right": "issue:45092", + "accept": false, + "reason": "DSA indexer torch.split behavior is unrelated to InternVL2 meta-init incompatibility." 
+ }, + { + "left": "issue:44483", + "right": "issue:45406", + "accept": false, + "reason": "Chat/completions request rejection and Gemma4Processor _tokenizer attribute error are different serving bugs." + }, + { + "left": "issue:30333", + "right": "issue:34689", + "accept": false, + "reason": "MLFlow reporting and Llama 3.2 vision model-loading regression are unrelated." + }, + { + "left": "issue:28282", + "right": "issue:30333", + "accept": false, + "reason": "AutoModel missing PyTorch and MLFlow job status reporting are unrelated." + }, + { + "left": "issue:30333", + "right": "issue:35141", + "accept": false, + "reason": "Training-job reporting and embedding reinitialization bug are unrelated." + }, + { + "left": "issue:44295", + "right": "issue:44623", + "accept": false, + "reason": "Reading position_ids from a buffer and processor save_pretrained missing files are unrelated." + }, + { + "left": "issue:44373", + "right": "issue:44485", + "accept": false, + "reason": "Docstring issue for position_ids is unrelated to GLM-5 RoPE implementation." + }, + { + "left": "issue:44261", + "right": "issue:45092", + "accept": false, + "reason": "Layernorm eps precision issue is unrelated to old InternVL2 meta-init compatibility." + }, + { + "left": "issue:44514", + "right": "issue:45103", + "accept": false, + "reason": "Batched apply_chat_template crash is unrelated to auto_docstring future-annotations crash." + }, + { + "left": "issue:43723", + "right": "issue:44871", + "accept": false, + "reason": "Both are transformer loading/save-adjacent, but one is tokenizer loading and the other is eos_token_id configuration." + }, + { + "left": "issue:43723", + "right": "issue:44991", + "accept": false, + "reason": "Both mention tokenizer loading, but they are different model-specific failures and not the same bug." 
+ }, + { + "left": "issue:45092", + "right": "issue:45357", + "accept": false, + "reason": "Different Qwen/InternVL regressions with different failure modes and code paths." + }, + { + "left": "issue:43723", + "right": "issue:44442", + "accept": false, + "reason": "Generic tokenizer-load regression and FastSpeech2ConformerTokenizer failure are different concrete issues." + }, + { + "left": "issue:43479", + "right": "issue:44913", + "accept": false, + "reason": "Multimodal config defaulting and GPTNeoX rotary_pct reload are unrelated config bugs." + }, + { + "left": "issue:30990", + "right": "issue:38175", + "accept": false, + "reason": "Sentence Transformers loading hang and SigLIP2 zero-probability issue are unrelated." + }, + { + "left": "issue:42886", + "right": "issue:42994", + "accept": false, + "reason": "Offline tokenizer cache loading and quantized model saving are different failure modes." + }, + { + "left": "issue:41628", + "right": "issue:43824", + "accept": false, + "reason": "Different missing imports for different models; not the same underlying export bug." + }, + { + "left": "issue:42831", + "right": "issue:45310", + "accept": false, + "reason": "FineGrainedFP8 accuracy and Qwen3.5-Moe from_pretrained error are unrelated." + }, + { + "left": "issue:42831", + "right": "issue:44038", + "accept": false, + "reason": "FP8 accuracy regression and Qwen3-VL-Moe bug are distinct issues." + }, + { + "left": "issue:44263", + "right": "issue:44373", + "accept": false, + "reason": "A DSA return-value bug and a docstring bug are not duplicates." + }, + { + "left": "issue:42757", + "right": "issue:45278", + "accept": false, + "reason": "Missing is_offline_mode is one import error; the other is a broad upgrade report with many import failures." + }, + { + "left": "issue:43756", + "right": "issue:44060", + "accept": false, + "reason": "Smollm3 RoPE-layer bug and Qwen3-Next tied-weights warning are unrelated model-specific changes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 36, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:29127", + "issue:30990", + "issue:33357", + "issue:36296", + "issue:39401", + "issue:41628", + "issue:42371", + "issue:42831", + "issue:42886", + "issue:42907", + "issue:43066", + "issue:43116", + "issue:43122", + "issue:43232", + "issue:43408", + "issue:43452", + "issue:43475", + "issue:43576", + "issue:43784", + "issue:43937", + "issue:43976", + "issue:44038", + "issue:44117", + "issue:44222", + "issue:44261", + "issue:44263", + "issue:44295", + "issue:44297", + "issue:44360", + "issue:44373", + "issue:44485", + "issue:44488", + "issue:44514", + "issue:44521", + "issue:44625", + "issue:44704", + "issue:44743", + "issue:44779", + "issue:44829", + "issue:44945", + "issue:44964", + "issue:45200", + "issue:45245", + "issue:45278", + "issue:45290", + "issue:45593", + "issue:45600" + ], + "soft_pairs": [ + "issue:43232|issue:45593", + "issue:43576|issue:43784", + "issue:43122|issue:44521", + "issue:42886|issue:43066", + "issue:42371|issue:44263", + "issue:44117|issue:44704", + "issue:30990|issue:33357", + "issue:43976|issue:44945", + "issue:44704|issue:45290", + "issue:42907|issue:44945", + "issue:43122|issue:44514", + "issue:44295|issue:44297", + "issue:43232|issue:44743", + "issue:39401|issue:44488", + "issue:43408|issue:43475", + "issue:43232|issue:45245", + "issue:42907|issue:43937", + "issue:41628|issue:45278", + "issue:43784|issue:45600", + "issue:44360|issue:44485", + "issue:39401|issue:44521", + "issue:44261|issue:44373", + "issue:44625|issue:44829", + 
"issue:43116|issue:45200", + "issue:43122|issue:44779", + "issue:29127|issue:30990", + "issue:42371|issue:42831", + "issue:44038|issue:44297", + "issue:42907|issue:44964", + "issue:44038|issue:44295", + "issue:36296|issue:43116", + "issue:42907|issue:43452", + "issue:44038|issue:44222" + ], + "prepared_review_unit_hash": "afe4d0dcae6791e96074eb5bbc4b7b6adc2ec030c07b76822342ac80d246bb27", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 33, + "serialized_chars": 36560, + "estimated_input_tokens": 9140, + "estimated_eval_tokens": 18536 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is heterogeneous: most items are unrelated Transformers bugs or model-specific regressions. None of the suggested soft pairs look like the same underlying issue, so there is no clear duplicate cluster here.", + "confidence": 0.87, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43232", + "right": "issue:45593", + "accept": false, + "reason": "Generation state sync bug vs D-FINE auxiliary-loss behavior; different models and code paths." + }, + { + "left": "issue:43576", + "right": "issue:43784", + "accept": false, + "reason": "Broken `transformers env` CLI is unrelated to a `sentence-transformers` import NameError." + }, + { + "left": "issue:43122", + "right": "issue:44521", + "accept": false, + "reason": "Tokenizer version drift vs multimodal chat-template mask generation; not the same bug." 
+ }, + { + "left": "issue:42886", + "right": "issue:43066", + "accept": false, + "reason": "Offline cache loading failure vs tokenizer decoder type mismatch in v5; different failures." + }, + { + "left": "issue:42371", + "right": "issue:44263", + "accept": false, + "reason": "TF32 API guidance and a GLM indexer split-value issue are unrelated." + }, + { + "left": "issue:44117", + "right": "issue:44704", + "accept": false, + "reason": "`TOKENIZER_MAPPING_NAMES`/`from_pretrained` logic is different from `AutoProcessor` kwargs propagation." + }, + { + "left": "issue:30990", + "right": "issue:33357", + "accept": false, + "reason": "Sentence-Transformers loading hang vs Mac bus error with CLIP; distinct symptoms and causes." + }, + { + "left": "issue:43976", + "right": "issue:44945", + "accept": false, + "reason": "Python-version compatibility regression is unrelated to pipeline-parallel output correctness." + }, + { + "left": "issue:44704", + "right": "issue:45290", + "accept": false, + "reason": "Processor cached-file kwargs bug is unrelated to chat-template crashes with tool-call messages." + }, + { + "left": "issue:42907", + "right": "issue:44945", + "accept": false, + "reason": "Dequantized model save failure is not the same as pipeline-parallel generation corruption." + }, + { + "left": "issue:43122", + "right": "issue:44514", + "accept": false, + "reason": "Both involve tokenization, but one is version-to-version tokenization drift and the other is a batched chat-template crash." + }, + { + "left": "issue:44295", + "right": "issue:44297", + "accept": false, + "reason": "Position-id buffer read error vs tokenizer-class metadata mismatch; separate save/load issues." + }, + { + "left": "issue:43232", + "right": "issue:44743", + "accept": false, + "reason": "Generation kwargs update after GPU sync is not the same as recurrent-state reset in modular Qwen3.5." 
+ }, + { + "left": "issue:39401", + "right": "issue:44488", + "accept": false, + "reason": "Wrong offset mapping for Qwen3 tokenizer is unrelated to loading `cjvt/sleng-bert`." + }, + { + "left": "issue:43408", + "right": "issue:43475", + "accept": false, + "reason": "Model-type warning vs missing `fpn_position_embeddings` attribute; related family, but not the same defect." + }, + { + "left": "issue:43232", + "right": "issue:45245", + "accept": false, + "reason": "Generation keyword handling and category-count runtime limits are unrelated." + }, + { + "left": "issue:42907", + "right": "issue:43937", + "accept": false, + "reason": "Saving dequantized models is unrelated to invalid `GenerationConfig` for GLM-5." + }, + { + "left": "issue:41628", + "right": "issue:45278", + "accept": false, + "reason": "A specific missing import is not the same as a broad collection of import errors after upgrade." + }, + { + "left": "issue:43784", + "right": "issue:45600", + "accept": false, + "reason": "Import NameError in sentence-transformers is not the same as stale `auto_mappings.py` references." + }, + { + "left": "issue:44360", + "right": "issue:44485", + "accept": false, + "reason": "DSA indexer missing ReLU is unrelated to GLM-5 RoPE implementation." + }, + { + "left": "issue:39401", + "right": "issue:44521", + "accept": false, + "reason": "Tokenizer offset mapping bug is unrelated to multimodal assistant-mask generation." + }, + { + "left": "issue:44261", + "right": "issue:44373", + "accept": false, + "reason": "A precision/config issue in MLA is unrelated to a docstring correction." + }, + { + "left": "issue:44625", + "right": "issue:44829", + "accept": false, + "reason": "`num_labels` propagation in Qwen3.5 is unrelated to flash-attention-3 training degeneration." + }, + { + "left": "issue:43116", + "right": "issue:45200", + "accept": false, + "reason": "Empty multi-label classification results in an example script is unrelated to Gemma 4 multimodal token-type defaults." 
+ }, + { + "left": "issue:43122", + "right": "issue:44779", + "accept": false, + "reason": "Different tokenization across versions is not the same as DeepSeek tokenizer incorrect results." + }, + { + "left": "issue:29127", + "right": "issue:30990", + "accept": false, + "reason": "LayoutLMv3 error messaging and a loading hang have no common underlying bug." + }, + { + "left": "issue:42371", + "right": "issue:42831", + "accept": false, + "reason": "TF32 control API changes are unrelated to FineGrainedFP8 accuracy issues." + }, + { + "left": "issue:44038", + "right": "issue:44297", + "accept": false, + "reason": "Qwen3-VL-Moe bug and tokenizer save metadata mismatch are different subsystems." + }, + { + "left": "issue:36296", + "right": "issue:43116", + "accept": false, + "reason": "Tensor-parallel training bug and multi-label example-script failure are unrelated." + }, + { + "left": "issue:42907", + "right": "issue:44964", + "accept": false, + "reason": "Saving dequantized Ministrals/Devstrals is unrelated to loading Phi-4 multimodal instruct." + }, + { + "left": "issue:44038", + "right": "issue:44222", + "accept": false, + "reason": "Qwen3-VL-Moe bug and FP8 `save_pretrained` issue are different model/serialization problems." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 37, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:22355", + "issue:29127", + "issue:33453", + "issue:38617", + "issue:39401", + "issue:42222", + "issue:42371", + "issue:42673", + "issue:42757", + "issue:42907", + "issue:43064", + "issue:43116", + "issue:43122", + "issue:43278", + "issue:43441", + "issue:43493", + "issue:43673", + "issue:43784", + "issue:43824", + "issue:43825", + "issue:43827", + "issue:43874", + "issue:43931", + "issue:43937", + "issue:43976", + "issue:43992", + "issue:44038", + "issue:44075", + "issue:44117", + "issue:44222", + "issue:44351", + "issue:44485", + "issue:44704", + "issue:44779", + "issue:44936", + "issue:44945", + "issue:44977", + "issue:45072", + "issue:45083", + "issue:45216", + "issue:45278", + "issue:45341", + "issue:45446", + "issue:45478", + "issue:45561", + "issue:45563", + "issue:45588" + ], + "soft_pairs": [ + "issue:43824|issue:44351", + "issue:45216|issue:45478", + "issue:43992|issue:44704", + "issue:42757|issue:44351", + "issue:42907|issue:44222", + "issue:43122|issue:44977", + "issue:43278|issue:44945", + "issue:43784|issue:45446", + "issue:38617|issue:45278", + "issue:43937|issue:44485", + "issue:42222|issue:43441", + "issue:42673|issue:45072", + "issue:43064|issue:43673", + "issue:22355|issue:29127", + "issue:42371|issue:43874", + "issue:43116|issue:43493", + "issue:44075|issue:44936", + "issue:44038|issue:45478", + "issue:43976|issue:45446", + "issue:44222|issue:45478", + "issue:43116|issue:43825", + "issue:43937|issue:45563", + "issue:33453|issue:39401", + 
"issue:45083|issue:45446", + "issue:39401|issue:44779", + "issue:43931|issue:44945", + "issue:44117|issue:45588", + "issue:43827|issue:45446", + "issue:43937|issue:44977", + "issue:45341|issue:45561" + ], + "prepared_review_unit_hash": "4f9f7727cea5bf907e223b232598c62be9dd51ae9afbe55e718ec94a617e5b17", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 30, + "serialized_chars": 35671, + "estimated_input_tokens": 8918, + "estimated_eval_tokens": 18092 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a loose mix of Transformers v5 regression reports (imports, tokenization, model loading, training, and docs), but the paired items are not the same concrete bug/change. I would not merge any soft edges; treat them as separate issues.", + "confidence": 0.84, + "canonical_issue_reason": "issue:45278 is the broadest umbrella in the set ('Many import errors after update from 4.57.0 to 5.5.0') and best represents the shared migration-regression theme.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45278 is the best global representative because it is the most general, high-level issue and subsumes the import-error subgroup better than the more specific reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43824", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but they involve different missing symbols and different code paths." + }, + { + "left": "issue:45216", + "right": "issue:45478", + "accept": false, + "reason": "Both concern Qwen3.5 save/load behavior, but one is save_pretrained corruption and the other is from_pretrained failure; not the same bug." 
+ }, + { + "left": "issue:43992", + "right": "issue:44704", + "accept": false, + "reason": "Unrelated areas: UMT5 weight loading vs AutoProcessor cached_file kwargs." + }, + { + "left": "issue:42757", + "right": "issue:44351", + "accept": false, + "reason": "Different missing imports from different libraries/modules; only the symptom overlaps." + }, + { + "left": "issue:42907", + "right": "issue:44222", + "accept": false, + "reason": "Both involve saving quantized models, but the model families and failure modes differ." + }, + { + "left": "issue:43122", + "right": "issue:44977", + "accept": false, + "reason": "Both are tokenizer regressions, but for different models and likely different tokenizer code paths." + }, + { + "left": "issue:43278", + "right": "issue:44945", + "accept": false, + "reason": "Dtype mismatch during eval is unrelated to incorrect output under pipeline parallelism." + }, + { + "left": "issue:43784", + "right": "issue:45446", + "accept": false, + "reason": "Import-time NameError in sentence-transformers is unrelated to the flex_attention version check bug." + }, + { + "left": "issue:38617", + "right": "issue:45278", + "accept": false, + "reason": "38617 is one specific import failure, while 45278 is a broad umbrella; not the same concrete issue." + }, + { + "left": "issue:43937", + "right": "issue:44485", + "accept": false, + "reason": "GLM-5 generation config validation and RoPE implementation are different bugs." + }, + { + "left": "issue:42222", + "right": "issue:43441", + "accept": false, + "reason": "Different models and different breakages: vitpose import breakage vs FlashAttention failure." + }, + { + "left": "issue:42673", + "right": "issue:45072", + "accept": false, + "reason": "VRAM leak in Qwen3ForCausalLM does not match dtype mismatch in bfloat16 inference." 
+ }, + { + "left": "issue:43064", + "right": "issue:43673", + "accept": false, + "reason": "Training/optimizer state corruption under FSDP2+PEFT is unrelated to missing generation cache in chunked prefill." + }, + { + "left": "issue:22355", + "right": "issue:29127", + "accept": false, + "reason": "A missing module import and a LayoutLMv3 error-message issue are unrelated." + }, + { + "left": "issue:42371", + "right": "issue:43874", + "accept": false, + "reason": "TF32 API guidance and GLM46V image-patch counting are different issues." + }, + { + "left": "issue:43116", + "right": "issue:43493", + "accept": false, + "reason": "Example-script classification output and SigLIP2 implementation discrepancy are unrelated." + }, + { + "left": "issue:44075", + "right": "issue:44936", + "accept": false, + "reason": "Unused SGD args and train-then-evaluate failure are different training bugs." + }, + { + "left": "issue:44038", + "right": "issue:45478", + "accept": false, + "reason": "Both hit Qwen MoE models, but the model/version and failure modes differ enough that this is not one bug." + }, + { + "left": "issue:43976", + "right": "issue:45446", + "accept": false, + "reason": "Python version compatibility and a PyTorch import version check are unrelated." + }, + { + "left": "issue:44222", + "right": "issue:45478", + "accept": false, + "reason": "FP8 save_pretrained issues for moe models are not the same as Qwen3.5 from_pretrained errors." + }, + { + "left": "issue:43116", + "right": "issue:43825", + "accept": false, + "reason": "Classification example output and pipeline translation error messaging are unrelated." + }, + { + "left": "issue:43937", + "right": "issue:45563", + "accept": false, + "reason": "GLM-5 config validation does not match a stale warning in paged generate." + }, + { + "left": "issue:33453", + "right": "issue:39401", + "accept": false, + "reason": "Both are tokenizer regressions, but the reported behavior and models differ." 
+ }, + { + "left": "issue:45083", + "right": "issue:45446", + "accept": false, + "reason": "Feature-extraction helper behavior in qwen3_omni_moe is unrelated to the AuxRequest import check." + }, + { + "left": "issue:39401", + "right": "issue:44779", + "accept": false, + "reason": "Both are tokenizer correctness regressions, but on different models and likely different root causes." + }, + { + "left": "issue:43931", + "right": "issue:44945", + "accept": false, + "reason": "Weight-shape mismatch during model loading is unrelated to pipeline-parallel output corruption." + }, + { + "left": "issue:44117", + "right": "issue:45588", + "accept": false, + "reason": "Tokenizer mapping fallback and flash_attention AttributeError are unrelated code paths." + }, + { + "left": "issue:43827", + "right": "issue:45446", + "accept": false, + "reason": "Docs still referencing pipeline() and a flex_attention import check are not the same issue." + }, + { + "left": "issue:43937", + "right": "issue:44977", + "accept": false, + "reason": "Different model families and different failure modes; only both are v5 regressions." + }, + { + "left": "issue:45341", + "right": "issue:45561", + "accept": false, + "reason": "A general testing_utils bug and an xdist race on captured_info.txt are not the same concrete defect." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 38, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:38617", + "issue:39401", + "issue:41628", + "issue:42175", + "issue:42222", + "issue:42371", + "issue:42617", + "issue:42673", + "issue:43012", + "issue:43066", + "issue:43097", + "issue:43122", + "issue:43408", + "issue:43421", + "issue:43450", + "issue:43577", + "issue:43582", + "issue:43673", + "issue:43742", + "issue:43874", + "issue:43906", + "issue:43937", + "issue:43986", + "issue:44075", + "issue:44117", + "issue:44220", + "issue:44261", + "issue:44291", + "issue:44410", + "issue:44483", + "issue:44484", + "issue:44493", + "issue:44496", + "issue:44829", + "issue:44933", + "issue:45071", + "issue:45081", + "issue:45216", + "issue:45278", + "issue:45310", + "issue:45341", + "issue:45405", + "issue:45446", + "issue:45459", + "issue:45464", + "issue:45478", + "issue:45542", + "issue:45656" + ], + "soft_pairs": [ + "issue:44291|issue:45071", + "issue:45405|issue:45446", + "issue:42673|issue:43906", + "issue:43450|issue:43986", + "issue:39401|issue:43421", + "issue:44829|issue:45216", + "issue:38617|issue:41628", + "issue:44410|issue:45310", + "issue:44117|issue:45081", + "issue:44484|issue:45310", + "issue:42371|issue:44261", + "issue:42222|issue:45542", + "issue:43408|issue:43742", + "issue:43937|issue:44496", + "issue:42673|issue:43582", + "issue:42673|issue:43577", + "issue:42175|issue:42222", + "issue:38617|issue:44220", + "issue:44933|issue:45278", + "issue:44483|issue:45464", + "issue:44075|issue:45656", + "issue:42617|issue:45341", + "issue:43012|issue:43577", + 
"issue:43066|issue:43122", + "issue:43673|issue:45341", + "issue:44484|issue:45478", + "issue:44410|issue:45478", + "issue:43874|issue:44261", + "issue:42222|issue:45459", + "issue:43097|issue:44493" + ], + "prepared_review_unit_hash": "1de26b1ace99fe7eeda176802cf714c5e145e674982ae79185f17a9027dd7f9b", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 30, + "serialized_chars": 36525, + "estimated_input_tokens": 9132, + "estimated_eval_tokens": 18520 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is a mixed bag of unrelated Transformers issues. The soft-similarity pairs are mostly same-subsystem, not same-bug duplicates, so I would not merge any of them.", + "confidence": 0.73, + "canonical_issue_reason": "issue:45278 is the broadest umbrella in the set: an open report about widespread import errors after upgrading to 5.5.0, which makes it the best candidate to anchor any import-regression subset.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45278 is the strongest global representative among these items because it describes a wide regression rather than a single narrow symptom.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44291", + "right": "issue:45071", + "accept": false, + "reason": "Both are v5-related breakages, but one is an unexpected model-loading argument and the other is a config type-checking issue." + }, + { + "left": "issue:45405", + "right": "issue:45446", + "accept": false, + "reason": "Completely different problems: a PEFT version pin mismatch vs a PyTorch version guard for flex_attention." 
+ }, + { + "left": "issue:42673", + "right": "issue:43906", + "accept": false, + "reason": "One is a VRAM leak in dataloader threads; the other is just a reproduction of a separate issue." + }, + { + "left": "issue:43450", + "right": "issue:43986", + "accept": false, + "reason": "Video processor shape handling and AutoProcessor dependency-crash behavior are different bugs." + }, + { + "left": "issue:39401", + "right": "issue:43421", + "accept": false, + "reason": "Different tokenizer internals: offset_mapping correctness vs runtime special-token/post-processor synchronization." + }, + { + "left": "issue:44829", + "right": "issue:45216", + "accept": false, + "reason": "Unrelated failures: flash_attention_3 training degeneration vs save_pretrained checkpoint corruption." + }, + { + "left": "issue:38617", + "right": "issue:41628", + "accept": false, + "reason": "Both are import errors, but they are for different symbols and different code paths." + }, + { + "left": "issue:44410", + "right": "issue:45310", + "accept": false, + "reason": "Different model families and different failures; no shared code-path bug is evident." + }, + { + "left": "issue:44117", + "right": "issue:45081", + "accept": false, + "reason": "One is a generic from_pretrained mapping assumption; the other is a Mistral regex patch crash." + }, + { + "left": "issue:44484", + "right": "issue:45310", + "accept": false, + "reason": "Save_pretrained shard sizing and Qwen3.5 MoE from_pretrained loading are unrelated." + }, + { + "left": "issue:42371", + "right": "issue:44261", + "accept": false, + "reason": "TF32 API guidance and an rms_norm_eps precision mismatch are not the same bug." + }, + { + "left": "issue:42222", + "right": "issue:45542", + "accept": false, + "reason": "A broken vitpose model implementation is unrelated to a TensorFlow backend installation issue." 
+ }, + { + "left": "issue:43408", + "right": "issue:43742", + "accept": false, + "reason": "Model-type mismatch warning vs key error while loading a different model family." + }, + { + "left": "issue:43937", + "right": "issue:44496", + "accept": false, + "reason": "Both are loading/config errors, but one is invalid GenerationConfig and the other is missing model_type." + }, + { + "left": "issue:42673", + "right": "issue:43582", + "accept": false, + "reason": "VRAM leak in multi-threading and Apple Silicon allocator TypeError are different failures." + }, + { + "left": "issue:42673", + "right": "issue:43577", + "accept": false, + "reason": "Different symptom and code path: memory leak vs dtype propagation bug in BLIP2 loading." + }, + { + "left": "issue:42175", + "right": "issue:42222", + "accept": false, + "reason": "Package backend dependency issue and vitpose model breakage are unrelated." + }, + { + "left": "issue:38617", + "right": "issue:44220", + "accept": false, + "reason": "ImportError for a missing symbol and an audio feature extraction bug are unrelated." + }, + { + "left": "issue:44933", + "right": "issue:45278", + "accept": false, + "reason": "45278 is a broad import-regression umbrella, but 44933 is one specific missing import; not enough evidence they are the same exact bug." + }, + { + "left": "issue:44483", + "right": "issue:45464", + "accept": false, + "reason": "Both touch chat/completions, but one is API request acceptance and the other is streaming inference on a specific model." + }, + { + "left": "issue:44075", + "right": "issue:45656", + "accept": false, + "reason": "Different optimizer issues: ignored SGD args vs double optimizer.step under deepspeed." + }, + { + "left": "issue:42617", + "right": "issue:45341", + "accept": false, + "reason": "A runtime launcher failure and a minor testing_utils bug do not match." 
+ }, + { + "left": "issue:43012", + "right": "issue:43577", + "accept": false, + "reason": "Both mention precision/dtype, but one is a compile-time warning and the other is incorrect model dtype loading." + }, + { + "left": "issue:43066", + "right": "issue:43122", + "accept": false, + "reason": "Both are tokenizer regressions in v5, but one is decoder-type metadata and the other is changed tokenization output." + }, + { + "left": "issue:43673", + "right": "issue:45341", + "accept": false, + "reason": "Generation cache behavior and a testing_utils bug are unrelated." + }, + { + "left": "issue:44484", + "right": "issue:45478", + "accept": false, + "reason": "save_pretrained shard-size behavior is unrelated to a Qwen3.5 MoE loading error." + }, + { + "left": "issue:44410", + "right": "issue:45478", + "accept": false, + "reason": "Different model architectures and different missing-component failures." + }, + { + "left": "issue:43874", + "right": "issue:44261", + "accept": false, + "reason": "A missing image-patch helper and a layernorm epsilon precision issue are unrelated." + }, + { + "left": "issue:42222", + "right": "issue:45459", + "accept": false, + "reason": "Vitpose model breakage and protobuf-hidden tokenizer errors are different subsystems and bugs." + }, + { + "left": "issue:43097", + "right": "issue:44493", + "accept": false, + "reason": "Config parameter removal and unexpected checkpoint keys are not the same underlying regression." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 39, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:38617", + "issue:41628", + "issue:42757", + "issue:43012", + "issue:43064", + "issue:43066", + "issue:43097", + "issue:43335", + "issue:43352", + "issue:43408", + "issue:43441", + "issue:43452", + "issue:43502", + "issue:43531", + "issue:43824", + "issue:43825", + "issue:43873", + "issue:43874", + "issue:43906", + "issue:43931", + "issue:43976", + "issue:43986", + "issue:43992", + "issue:44038", + "issue:44062", + "issue:44117", + "issue:44261", + "issue:44263", + "issue:44315", + "issue:44351", + "issue:44368", + "issue:44380", + "issue:44410", + "issue:44484", + "issue:44485", + "issue:44655", + "issue:44704", + "issue:44829", + "issue:44987", + "issue:45003", + "issue:45071", + "issue:45103", + "issue:45245", + "issue:45341", + "issue:45372", + "issue:45375", + "issue:45542", + "issue:45588" + ], + "soft_pairs": [ + "issue:43441|issue:44829", + "issue:44263|issue:44485", + "issue:43012|issue:43408", + "issue:41628|issue:45071", + "issue:45071|issue:45542", + "issue:43931|issue:44410", + "issue:43986|issue:44484", + "issue:45003|issue:45341", + "issue:43986|issue:45341", + "issue:43931|issue:44038", + "issue:44117|issue:44987", + "issue:43097|issue:43452", + "issue:43335|issue:45071", + "issue:43408|issue:44368", + "issue:43874|issue:44263", + "issue:43976|issue:45071", + "issue:44829|issue:45245", + "issue:43986|issue:45372", + "issue:43531|issue:45341", + "issue:38617|issue:42757", + "issue:43064|issue:44315", + "issue:44261|issue:44485", + "issue:44410|issue:45375", + 
"issue:44038|issue:44410", + "issue:38617|issue:44351", + "issue:44062|issue:44117", + "issue:44704|issue:45103", + "issue:43097|issue:45588", + "issue:43986|issue:44117", + "issue:43502|issue:43992", + "issue:43824|issue:43986", + "issue:43097|issue:43873", + "issue:43066|issue:43906", + "issue:43352|issue:44380", + "issue:43825|issue:44655", + "issue:43992|issue:44484" + ], + "prepared_review_unit_hash": "b923bfd846f8c020aab9409ca5dfe325d0f97601557164f0edfeb5f392a1868c", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 36, + "serialized_chars": 38491, + "estimated_input_tokens": 9623, + "estimated_eval_tokens": 19502 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items form a loose semantic cluster of unrelated Transformers regressions and import errors. None of the soft-edge pairs look like the same underlying bug or change, so I would not merge any of them as duplicates.", + "confidence": 0.87, + "canonical_issue_reason": "No single issue is a safe canonical: the cluster spans unrelated import errors, model-loading regressions, training bugs, and warning/behavior changes.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is sufficiently central or representative to serve as a true cluster anchor; the topics are too heterogeneous.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43441", + "right": "issue:44829", + "accept": false, + "reason": "Both involve FlashAttention, but one is a support error for Ministral-3 and the other is degenerate training with FA3; different failures." 
+ }, + { + "left": "issue:44263", + "right": "issue:44485", + "accept": false, + "reason": "GLM MoE indexer split behavior and GLM-5 RoPE implementation are unrelated code paths." + }, + { + "left": "issue:43012", + "right": "issue:43408", + "accept": false, + "reason": "A bfloat16 precision warning is not the same bug as the sam3_video/sam3_tracker model-type warning." + }, + { + "left": "issue:41628", + "right": "issue:45071", + "accept": false, + "reason": "AutoImageProcessor import failure and PretrainedConfig type-checking regression are different APIs and root causes." + }, + { + "left": "issue:45071", + "right": "issue:45542", + "accept": false, + "reason": "PretrainedConfig type checking and tensorboard-only tf backend detection are unrelated." + }, + { + "left": "issue:43931", + "right": "issue:44410", + "accept": false, + "reason": "Qwen3-VL weight-shape mismatch and qwen3next missing projections are distinct model bugs." + }, + { + "left": "issue:43986", + "right": "issue:44484", + "accept": false, + "reason": "AutoProcessor crash without torchvision and max_shard_size behavior in save_pretrained are unrelated." + }, + { + "left": "issue:45003", + "right": "issue:45341", + "accept": false, + "reason": "Unsafe sys.modules access and a testing_utils bug are different problems." + }, + { + "left": "issue:43986", + "right": "issue:45341", + "accept": false, + "reason": "AutoProcessor loading without torchvision is unrelated to testing_utils behavior." + }, + { + "left": "issue:43931", + "right": "issue:44038", + "accept": false, + "reason": "These are different Qwen3-VL / Qwen3-VL-Moe loading issues with different symptoms." + }, + { + "left": "issue:44117", + "right": "issue:44987", + "accept": false, + "reason": "TOKENIZER_MAPPING_NAMES returning None is a tokenizer registry bug, not the physical-intelligence/fast loading failure." 
+ }, + { + "left": "issue:43097", + "right": "issue:43452", + "accept": false, + "reason": "A removed config option is not the same as gguf_file breaking tokenizer/model loading." + }, + { + "left": "issue:43335", + "right": "issue:45071", + "accept": false, + "reason": "SwitchTransformers sparse-layer construction and PretrainedConfig type checking are unrelated." + }, + { + "left": "issue:43408", + "right": "issue:44368", + "accept": false, + "reason": "Both are warning-style issues, but they target different configs and different warning conditions." + }, + { + "left": "issue:43874", + "right": "issue:44263", + "accept": false, + "reason": "Missing get_number_of_image_patches and torch.split return-values are different GLM bugs." + }, + { + "left": "issue:43976", + "right": "issue:45071", + "accept": false, + "reason": "Python version compatibility and PretrainedConfig type checking are unrelated." + }, + { + "left": "issue:44829", + "right": "issue:45245", + "accept": false, + "reason": "FlashAttention-3 training degeneration and category-cardinality limits are unrelated." + }, + { + "left": "issue:43986", + "right": "issue:45372", + "accept": false, + "reason": "AutoProcessor without torchvision and a missing ReasoningEffort import in mistral_common are different dependency issues." + }, + { + "left": "issue:43531", + "right": "issue:45341", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior and a testing_utils bug do not share the same code path." + }, + { + "left": "issue:38617", + "right": "issue:42757", + "accept": false, + "reason": "These are two separate import errors for different missing symbols." + }, + { + "left": "issue:43064", + "right": "issue:44315", + "accept": false, + "reason": "FSDP2/PEFT optimizer-state corruption and Liger-kernel application with model_init are different training setup bugs." 
+ }, + { + "left": "issue:44261", + "right": "issue:44485", + "accept": false, + "reason": "An MLA rms_norm_eps precision issue is not the same as the GLM-5 RoPE implementation bug." + }, + { + "left": "issue:44410", + "right": "issue:45375", + "accept": false, + "reason": "Missing qwen3next projections and a missing deepstack_visual_indexes field are different config/model issues." + }, + { + "left": "issue:44038", + "right": "issue:44410", + "accept": false, + "reason": "These involve different Qwen3-family models and different missing components." + }, + { + "left": "issue:38617", + "right": "issue:44351", + "accept": false, + "reason": "layer_type_validation and HybridCache are different missing imports, not one underlying defect." + }, + { + "left": "issue:44062", + "right": "issue:44117", + "accept": false, + "reason": "AddedToken special-argument duplication and TOKENIZER_MAPPING_NAMES returning None are unrelated tokenizer bugs." + }, + { + "left": "issue:44704", + "right": "issue:45103", + "accept": false, + "reason": "Kwargs propagation in AutoProcessor and the auto_docstring annotations crash are unrelated." + }, + { + "left": "issue:43097", + "right": "issue:45588", + "accept": false, + "reason": "Removed tie_embeddings support and a flash_attention.py s_aux=None crash are different failures." + }, + { + "left": "issue:43986", + "right": "issue:44117", + "accept": false, + "reason": "AutoProcessor without torchvision and tokenizer mapping returning None are unrelated." + }, + { + "left": "issue:43502", + "right": "issue:43992", + "accept": false, + "reason": "local_files_only still making API requests and missing embed_tokens.weight are different loading bugs." + }, + { + "left": "issue:43824", + "right": "issue:43986", + "accept": false, + "reason": "A missing Qwen2_5_VL import and a torchvision-related AutoProcessor crash are not the same issue." 
+ }, + { + "left": "issue:43097", + "right": "issue:43873", + "accept": false, + "reason": "A removed config API and quantization/offloading behavior are unrelated." + }, + { + "left": "issue:43066", + "right": "issue:43906", + "accept": false, + "reason": "Wrong tokenizer decoder type and an isolated reproduction of another issue are not the same bug." + }, + { + "left": "issue:43352", + "right": "issue:44380", + "accept": false, + "reason": "Nemotron FlashAttention 2 support and GPT2 attention-scaling being ignored are different backend bugs." + }, + { + "left": "issue:43825", + "right": "issue:44655", + "accept": false, + "reason": "A pipeline translation error message and saving Pipeline objects are unrelated." + }, + { + "left": "issue:43992", + "right": "issue:44484", + "accept": false, + "reason": "Missing encoder embed weights and shard-size defaults are different save/load behaviors." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 40, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:36010", + "issue:36683", + "issue:38617", + "issue:42757", + "issue:42886", + "issue:42907", + "issue:42994", + "issue:43012", + "issue:43064", + "issue:43066", + "issue:43097", + "issue:43208", + "issue:43299", + "issue:43352", + "issue:43381", + "issue:43475", + "issue:43502", + "issue:43525", + "issue:43526", + "issue:43550", + "issue:43582", + "issue:43618", + "issue:43756", + "issue:43761", + "issue:43824", + "issue:43867", + "issue:43901", + "issue:43937", + "issue:43976", + "issue:44038", + "issue:44077", + "issue:44220", + "issue:44230", + "issue:44263", + "issue:44368", + 
"issue:44380", + "issue:44483", + "issue:44485", + "issue:44492", + "issue:44514", + "issue:44655", + "issue:44683", + "issue:44938", + "issue:45216", + "issue:45290", + "issue:45381", + "issue:45440", + "issue:45479" + ], + "soft_pairs": [ + "issue:43097|issue:43901", + "issue:43012|issue:44368", + "issue:43012|issue:43550", + "issue:42886|issue:43381", + "issue:42907|issue:44655", + "issue:44380|issue:45479", + "issue:43208|issue:43550", + "issue:43352|issue:43976", + "issue:43064|issue:44380", + "issue:43618|issue:44380", + "issue:43208|issue:43526", + "issue:44483|issue:44514", + "issue:36683|issue:43582", + "issue:43618|issue:43761", + "issue:43012|issue:44683", + "issue:42757|issue:43824", + "issue:44038|issue:44230", + "issue:44483|issue:45290", + "issue:43299|issue:43867", + "issue:44483|issue:45381", + "issue:38617|issue:43502", + "issue:43756|issue:44485", + "issue:43901|issue:44655", + "issue:36683|issue:43475", + "issue:42994|issue:44655", + "issue:43867|issue:45216", + "issue:44220|issue:44655", + "issue:43352|issue:43937", + "issue:44263|issue:45440", + "issue:44077|issue:44380", + "issue:43066|issue:45381", + "issue:36683|issue:44077", + "issue:43976|issue:44938", + "issue:36010|issue:42757", + "issue:36683|issue:43525", + "issue:43064|issue:44492" + ], + "prepared_review_unit_hash": "40ba120921392bf9dc8fcea88b29e82d1f5004b85ce136a3e8c5caa90d5e1de8", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 36, + "serialized_chars": 38175, + "estimated_input_tokens": 9544, + "estimated_eval_tokens": 19344 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly a heterogeneous set of Transformers issues with no broad duplicate cluster. 
The only clear duplicate-like pair is the CLIP vision forward/output regression around missing returned fields.", + "confidence": 0.88, + "canonical_issue_reason": "Issue:43761 is the best anchor for the only true duplicate-like subcluster: it is a concrete CLIP v5 regression with a clear repro and broader missing-output scope.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue:43761 is the most suitable representative of the accepted CLIP regression subcluster; the rest of the items are largely unrelated and should not be merged into one canonical issue.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43097", + "right": "issue:43901", + "accept": false, + "reason": "Unrelated: removed model config API vs docs mismatch for pipeline scores." + }, + { + "left": "issue:43012", + "right": "issue:44368", + "accept": false, + "reason": "Both mention warnings, but they are different warnings on different code paths." + }, + { + "left": "issue:43012", + "right": "issue:43550", + "accept": false, + "reason": "Precision warning during compile is not the same as an SDPA compile failure." + }, + { + "left": "issue:42886", + "right": "issue:43381", + "accept": false, + "reason": "Cache/offline loading bug and eval-mode gradient checkpointing are unrelated." + }, + { + "left": "issue:42907", + "right": "issue:44655", + "accept": false, + "reason": "Both involve saving, but one is quantized/dequantized model saving and the other is Pipeline save_pretrained." + }, + { + "left": "issue:44380", + "right": "issue:45479", + "accept": false, + "reason": "Different bugs: attention scaling backend regression vs sequence-classification zero-loss issue." + }, + { + "left": "issue:43208", + "right": "issue:43550", + "accept": false, + "reason": "xLSTM training bugs and Bamba torch.compile/SDPA failure are unrelated." 
+ }, + { + "left": "issue:43352", + "right": "issue:43976", + "accept": false, + "reason": "FlashAttention support issue and Python-version compatibility bug are not the same defect." + }, + { + "left": "issue:43064", + "right": "issue:44380", + "accept": false, + "reason": "Different failures: FSDP2/PEFT optimizer state corruption vs GPT2 attention scaling being ignored." + }, + { + "left": "issue:43618", + "right": "issue:44380", + "accept": false, + "reason": "CLIP output regression and GPT2 attention scaling are unrelated code paths." + }, + { + "left": "issue:43208", + "right": "issue:43526", + "accept": false, + "reason": "xLSTM training bugs and BEiT fast processor label reduction are unrelated." + }, + { + "left": "issue:44483", + "right": "issue:44514", + "accept": false, + "reason": "Chat-completions request rejection and apply_chat_template crash are different layers of the stack." + }, + { + "left": "issue:36683", + "right": "issue:43582", + "accept": false, + "reason": "Gemma3Config missing vocab_size and Apple Silicon allocator TypeError are unrelated." + }, + { + "left": "issue:43618", + "right": "issue:43761", + "accept": true, + "reason": "Both are CLIP v5 regressions in forward output assembly where requested outputs are missing from returned objects." + }, + { + "left": "issue:43012", + "right": "issue:44683", + "accept": false, + "reason": "Different compile-related problems: a warning versus a torch>=2.9 flex_attention failure." + }, + { + "left": "issue:42757", + "right": "issue:43824", + "accept": false, + "reason": "Missing import from huggingface_hub and missing import from transformers are separate export issues." + }, + { + "left": "issue:44038", + "right": "issue:44230", + "accept": false, + "reason": "Qwen3-VL-Moe loading bug and fp8 support request are not the same bug." 
+ }, + { + "left": "issue:44483", + "right": "issue:45290", + "accept": false, + "reason": "Server request validation failure and chat-template tool-call crash are different issues." + }, + { + "left": "issue:43299", + "right": "issue:43867", + "accept": false, + "reason": "Qwen3VL MoE loading regression and sorted-state_dict load error are unrelated." + }, + { + "left": "issue:44483", + "right": "issue:45381", + "accept": false, + "reason": "Chat-completions API rejection and Qwen2.5-VL vision_position_ids bug are unrelated." + }, + { + "left": "issue:38617", + "right": "issue:43502", + "accept": false, + "reason": "ImportError for layer_type_validation and unexpected network calls in local_files_only are unrelated." + }, + { + "left": "issue:43756", + "right": "issue:44485", + "accept": false, + "reason": "Different model-specific RoPE/implementation complaints; not the same underlying bug." + }, + { + "left": "issue:43901", + "right": "issue:44655", + "accept": false, + "reason": "Docs mentioning return_all_scores and inability to save Pipelines are unrelated." + }, + { + "left": "issue:36683", + "right": "issue:43475", + "accept": false, + "reason": "Gemma3Config vocab_size and SAM3 vision encoder output attribute error are unrelated." + }, + { + "left": "issue:42994", + "right": "issue:44655", + "accept": false, + "reason": "Both are save failures, but they affect different objects and code paths." + }, + { + "left": "issue:43867", + "right": "issue:45216", + "accept": false, + "reason": "State_dict loading error and saved-checkpoint regression are separate serialization issues." + }, + { + "left": "issue:44220", + "right": "issue:44655", + "accept": false, + "reason": "Feature extraction bug and Pipeline save_pretrained failure are unrelated." + }, + { + "left": "issue:43352", + "right": "issue:43937", + "accept": false, + "reason": "FlashAttention support and invalid GenerationConfig are unrelated." 
+ }, + { + "left": "issue:44263", + "right": "issue:45440", + "accept": false, + "reason": "Different MoE model bugs in different implementations; not one shared defect." + }, + { + "left": "issue:44077", + "right": "issue:44380", + "accept": false, + "reason": "PatchTSMixer post_init validation and GPT2 attention scaling are unrelated." + }, + { + "left": "issue:43066", + "right": "issue:45381", + "accept": false, + "reason": "Wrong tokenizer decoder type and Qwen2.5-VL video position IDs are unrelated." + }, + { + "left": "issue:36010", + "right": "issue:42757", + "accept": false, + "reason": "Two separate import errors from different modules/packages." + }, + { + "left": "issue:36683", + "right": "issue:43525", + "accept": false, + "reason": "Gemma3Config missing vocab_size and Llama4Config missing pad_token_id are different config regressions." + }, + { + "left": "issue:43064", + "right": "issue:44492", + "accept": false, + "reason": "FSDP2 optimizer-state corruption and cache strategy typo are unrelated." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays mostly conservative. The one accepted soft edge, issue:43618 \u2194 issue:43761, is reasonably supported by the titles as related CLIP v5 output-field regressions. The rest of the soft-pair rejections are cautious and do not overclaim duplicate status. No mergeability concerns apply here because these are issue pairs, not PR pairs." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 41, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:28282", + "issue:33453", + "issue:36296", + "issue:36683", + "issue:41669", + "issue:42890", + "issue:43010", + "issue:43064", + "issue:43352", + "issue:43502", + "issue:43575", + "issue:43618", + "issue:43723", + "issue:43867", + "issue:43931", + "issue:43976", + "issue:43992", + "issue:44062", + "issue:44075", + "issue:44077", + "issue:44112", + "issue:44188", + "issue:44242", + "issue:44246", + "issue:44355", + "issue:44360", + "issue:44380", + "issue:44484", + "issue:44556", + "issue:44855", + "issue:44857", + "issue:44938", + "issue:44964", + "issue:45071", + "issue:45216", + "issue:45278", + "issue:45310", + "issue:45341", + "issue:45357", + "issue:45375", + "issue:45381", + "issue:45440", + "issue:45478", + "issue:45561", + "issue:45588", + "issue:45593", + "issue:45600" + ], + "soft_pairs": [ + "issue:44188|issue:44380", + "issue:43352|issue:45593", + "issue:43618|issue:45216", + "issue:36683|issue:44242", + "issue:43010|issue:45278", + "issue:43352|issue:43931", + "issue:43992|issue:45310", + "issue:41669|issue:44246", + "issue:43867|issue:45375", + "issue:36683|issue:43064", + "issue:44355|issue:44855", + "issue:42890|issue:45561", + "issue:44938|issue:45071", + "issue:43867|issue:45357", + "issue:43992|issue:45478", + "issue:43010|issue:43976", + "issue:44360|issue:45440", + "issue:44938|issue:45381", + "issue:43618|issue:44077", + "issue:43867|issue:44484", + "issue:36296|issue:43575", + "issue:43618|issue:44857", + "issue:43618|issue:45588", + "issue:28282|issue:33453", + "issue:42890|issue:44964", + 
"issue:42890|issue:44112", + "issue:42890|issue:45600", + "issue:43502|issue:44062", + "issue:43723|issue:44556", + "issue:42890|issue:45341", + "issue:43010|issue:44075" + ], + "prepared_review_unit_hash": "9884cba6560567158fd1c2fa85fc845193aa00a956eb01f3003f2085e1d87061", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 31, + "serialized_chars": 36063, + "estimated_input_tokens": 9016, + "estimated_eval_tokens": 18288 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is still mostly heterogeneous import/load-regression noise, with 45278 remaining the best broad anchor. I\u2019m downgrading the 41669\u219444246 soft edge: the packet supports shared import-performance concern, but not that they are the same underlying bug.", + "confidence": 0.72, + "canonical_issue_reason": "Issue 45278 is the best open cluster anchor because it is broad, current, and covers the recurring import/load regression theme without being as narrow as the other reports.", + "canonical_pr_reason": null, + "best_issue_reason": "45278 is the strongest global triage point: it is open, broad, and can absorb nearby upgrade/import failures better than the narrower model-specific bugs.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44188", + "right": "issue:44380", + "accept": false, + "reason": "Different attention bugs: torch.compile branching divergence vs GPT2 scaling config being ignored under SDPA/FlashAttention." + }, + { + "left": "issue:43352", + "right": "issue:45593", + "accept": false, + "reason": "Unrelated problems: FlashAttention2 support gating vs D-FINE auxiliary-loss behavior." 
+ }, + { + "left": "issue:43618", + "right": "issue:45216", + "accept": false, + "reason": "Different models and failures: CLIPOutput attentions assignment vs Qwen3.5 save_pretrained regression." + }, + { + "left": "issue:36683", + "right": "issue:44242", + "accept": false, + "reason": "Gemma3Config vocab_size missing is unrelated to MoE load-balancing loss not being added." + }, + { + "left": "issue:43010", + "right": "issue:45278", + "accept": false, + "reason": "Cache update no_grad semantics are unrelated to broad import/load errors after upgrade." + }, + { + "left": "issue:43352", + "right": "issue:43931", + "accept": false, + "reason": "FlashAttention2 support vs Qwen3-VL weight-shape mismatch are different loading failures." + }, + { + "left": "issue:43992", + "right": "issue:45310", + "accept": false, + "reason": "Both are from_pretrained failures, but they affect different models and different failure modes." + }, + { + "left": "issue:41669", + "right": "issue:44246", + "accept": false, + "reason": "Both touch import performance, but the packet does not establish that 44246 is the same import-* regression as 41669." + }, + { + "left": "issue:43867", + "right": "issue:45375", + "accept": false, + "reason": "State-dict loading error vs missing config field dropped by strict are different root causes." + }, + { + "left": "issue:36683", + "right": "issue:43064", + "accept": false, + "reason": "Gemma config attribute bug is unrelated to the FSDP2+PEFT optimizer state problem." + }, + { + "left": "issue:44355", + "right": "issue:44855", + "accept": false, + "reason": "Compiled Python-file errors are not the same as the Python 3.13 torch.jit parse/IndentationError issue." + }, + { + "left": "issue:42890", + "right": "issue:45561", + "accept": false, + "reason": "Seed-related flaky test and xdist file-race are different CI/test failures." 
+ }, + { + "left": "issue:44938", + "right": "issue:45071", + "accept": false, + "reason": "Python 3.14 import failure and PretrainedConfig type-checking break are distinct compatibility issues." + }, + { + "left": "issue:43867", + "right": "issue:45357", + "accept": false, + "reason": "Model load error and save_pretrained key regression are opposite directions of the serialization path, not the same bug." + }, + { + "left": "issue:43992", + "right": "issue:45478", + "accept": false, + "reason": "Same broad from_pretrained area, but different model family and different missing-key/error behavior." + }, + { + "left": "issue:43010", + "right": "issue:43976", + "accept": false, + "reason": "Cache update no_grad semantics do not match a Python-version support failure." + }, + { + "left": "issue:44360", + "right": "issue:45440", + "accept": false, + "reason": "DSA indexer activation bug is unrelated to DeepSeekV3 MoE implementation divergence." + }, + { + "left": "issue:44938", + "right": "issue:45381", + "accept": false, + "reason": "Python import failure in 3.14 and qwen2.5-vl position-id bug are unrelated." + }, + { + "left": "issue:43618", + "right": "issue:44077", + "accept": false, + "reason": "CLIPOutput attentions assignment is unrelated to patchtsmixer post_init being disallowed." + }, + { + "left": "issue:43867", + "right": "issue:44484", + "accept": false, + "reason": "Checkpoint loading error and a save_pretrained shard-size question are unrelated." + }, + { + "left": "issue:36296", + "right": "issue:43575", + "accept": false, + "reason": "Tensor-parallel training bug and tp load OOM for a specific large model are not the same underlying issue." + }, + { + "left": "issue:43618", + "right": "issue:44857", + "accept": false, + "reason": "Attentions field regression is unrelated to the float16 AMP crash in LwDetrImageLoss." 
+ }, + { + "left": "issue:43618", + "right": "issue:45588", + "accept": false, + "reason": "CLIPOutput attentions assignment and sink-less flash_attention crash are different code paths." + }, + { + "left": "issue:28282", + "right": "issue:33453", + "accept": false, + "reason": "PyTorch missing ImportError and tokenizer-loading regression are different loading problems." + }, + { + "left": "issue:42890", + "right": "issue:44964", + "accept": false, + "reason": "Flaky integration test and Phi-4 multimodal load failure are unrelated." + }, + { + "left": "issue:42890", + "right": "issue:44112", + "accept": false, + "reason": "Seed instability and GraniteSpeech stale device override are different CI issues." + }, + { + "left": "issue:42890", + "right": "issue:45600", + "accept": false, + "reason": "Seed-related failure is unrelated to auto_mappings referencing removed configs." + }, + { + "left": "issue:43502", + "right": "issue:44062", + "accept": false, + "reason": "Local-files-only network leak and AddedToken keyword conflict are different bugs." + }, + { + "left": "issue:43723", + "right": "issue:44556", + "accept": false, + "reason": "Both involve loading, but one is tokenizer loading and the other is checkpoint reloading after a version upgrade; they are not the same bug." + }, + { + "left": "issue:43010", + "right": "issue:44075", + "accept": false, + "reason": "no_grad on cache updates is unrelated to SGD argument handling." + } + ] + }, + "evaluator_result": { + "accept": false, + "feedback": "The cluster-level framing is mostly grounded, but one soft-edge acceptance is too strong for the evidence available: 41669 vs 44246 may both concern import performance, yet the reason asserts a specific causal link (\u201c44246 matching the import slow-down caused by the import-* issue\u201d) that is not established by the packet. To keep the verdicts conservative, this should be downgraded unless the issue bodies explicitly tie them together. 
The rest of the non-merge decisions look reasonably cautious." + }, + "evaluator_used": true, + "retried": true, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 42, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:30333", + "issue:30990", + "issue:31795", + "issue:33453", + "issue:34689", + "issue:36296", + "issue:38617", + "issue:41628", + "issue:42548", + "issue:42757", + "issue:42831", + "issue:43010", + "issue:43012", + "issue:43065", + "issue:43232", + "issue:43299", + "issue:43502", + "issue:43576", + "issue:43673", + "issue:43824", + "issue:43827", + "issue:43874", + "issue:43901", + "issue:44222", + "issue:44230", + "issue:44261", + "issue:44273", + "issue:44315", + "issue:44351", + "issue:44355", + "issue:44360", + "issue:44368", + "issue:44387", + "issue:44485", + "issue:44492", + "issue:44556", + "issue:44655", + "issue:44749", + "issue:44908", + "issue:44961", + "issue:45003", + "issue:45071", + "issue:45125", + "issue:45375", + "issue:45468", + "issue:45478", + "issue:45542" + ], + "soft_pairs": [ + "issue:44222|issue:45125", + "issue:43010|issue:43065", + "issue:44387|issue:44556", + "issue:42548|issue:44351", + "issue:45125|issue:45478", + "issue:43010|issue:43232", + "issue:45125|issue:45375", + "issue:44355|issue:44749", + "issue:43502|issue:44485", + "issue:30990|issue:31795", + "issue:30333|issue:33453", + "issue:44315|issue:44360", + "issue:43010|issue:44368", + "issue:43502|issue:44222", + "issue:43299|issue:44556", + "issue:43502|issue:44961", + "issue:44355|issue:44655", + "issue:42831|issue:43874", + "issue:30333|issue:36296", + "issue:44222|issue:44230", + "issue:44355|issue:45071", + "issue:33453|issue:34689", + "issue:42757|issue:43576", 
+ "issue:43827|issue:44961", + "issue:33453|issue:36296", + "issue:43576|issue:45071", + "issue:43576|issue:43824", + "issue:43673|issue:44492", + "issue:38617|issue:43576", + "issue:44961|issue:45468", + "issue:31795|issue:38617", + "issue:43576|issue:44351", + "issue:44908|issue:44961", + "issue:41628|issue:42548", + "issue:44485|issue:44961", + "issue:36296|issue:43901", + "issue:36296|issue:43012", + "issue:41628|issue:44355", + "issue:44261|issue:44360", + "issue:44273|issue:45003", + "issue:44273|issue:45542" + ], + "prepared_review_unit_hash": "8dd138fa3b7ad89bd5ff17bf8e5c3d6a65f9dda1c9b60b02f494186f95fe699f", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 47, + "item_count": 47, + "soft_pair_count": 41, + "serialized_chars": 38809, + "estimated_input_tokens": 9703, + "estimated_eval_tokens": 19662 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the soft links mostly connect issues that share broad keywords like loading, imports, v5 regressions, cache/generation, or quantization, but they do not look like the same underlying bug or change.", + "confidence": 0.93, + "canonical_issue_reason": "No single true duplicate hub stands out. 
If forced to anchor the cluster, issue:43576 is the broadest v5 regression report with some inbound references, but the items are mostly unrelated.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43576 is the most usable cluster anchor because it is a concise, version-specific regression report with some external references; however, it is not a strong canonical duplicate representative.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44222", + "right": "issue:45125", + "accept": false, + "reason": "Both involve FP8/MoE, but one is about save_pretrained and the other about missing tensor-parallel plan metadata; different bugs." + }, + { + "left": "issue:43010", + "right": "issue:43065", + "accept": false, + "reason": "Cache update no_grad behavior and a dummy Conv2d in Sam3PixelDecoder are unrelated code-paths." + }, + { + "left": "issue:44387", + "right": "issue:44556", + "accept": false, + "reason": "Int4 memory/OOM behavior and checkpoint reload breakage after v5 upgrade are different failure modes." + }, + { + "left": "issue:42548", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but for different symbols and likely different missing exports." + }, + { + "left": "issue:45125", + "right": "issue:45478", + "accept": false, + "reason": "Tensor-parallel plan metadata and a generic from_pretrained error are not the same defect." + }, + { + "left": "issue:43010", + "right": "issue:43232", + "accept": false, + "reason": "Decorating cache update with no_grad and post-sync_gpus generation kwargs handling are different generation bugs." + }, + { + "left": "issue:45125", + "right": "issue:45375", + "accept": false, + "reason": "Missing _tp_plan for tensor parallelism and a missing config field in a vision config are separate issues." + }, + { + "left": "issue:44355", + "right": "issue:44749", + "accept": false, + "reason": "Compiled Python file errors and a post-upgrade slowdown are unrelated." 
+ }, + { + "left": "issue:43502", + "right": "issue:44485", + "accept": false, + "reason": "local_files_only network leakage and GLM-5 RoPE implementation are unrelated." + }, + { + "left": "issue:30990", + "right": "issue:31795", + "accept": false, + "reason": "Stuck loading and documentation confusion are not the same bug." + }, + { + "left": "issue:30333", + "right": "issue:33453", + "accept": false, + "reason": "MLflow reporting on failed training jobs and tokenizer loading regression are unrelated." + }, + { + "left": "issue:44315", + "right": "issue:44360", + "accept": false, + "reason": "Liger Kernel application and DSA indexer activation logic are different subsystems and defects." + }, + { + "left": "issue:43010", + "right": "issue:44368", + "accept": false, + "reason": "Cache no_grad decoration and a tie_word_embeddings warning are unrelated." + }, + { + "left": "issue:43502", + "right": "issue:44222", + "accept": false, + "reason": "Network calls despite local_files_only and FP8 save_pretrained are different bugs." + }, + { + "left": "issue:43299", + "right": "issue:44556", + "accept": false, + "reason": "Qwen3VL MoE loading failure and generic checkpoint reload regression after upgrade are not clearly the same code-path." + }, + { + "left": "issue:43502", + "right": "issue:44961", + "accept": false, + "reason": "local_files_only behavior and an unrelated 'racoon' issue do not match." + }, + { + "left": "issue:44355", + "right": "issue:44655", + "accept": false, + "reason": "Compiled file execution errors and pipeline save_pretrained failure are different defects." + }, + { + "left": "issue:42831", + "right": "issue:43874", + "accept": false, + "reason": "FineGrainedFP8 accuracy and GLM46V image processor attribute errors are unrelated." + }, + { + "left": "issue:30333", + "right": "issue:36296", + "accept": false, + "reason": "MLflow reporting on failed jobs and tensor parallel training bug are different issues." 
+ }, + { + "left": "issue:44222", + "right": "issue:44230", + "accept": false, + "reason": "Both mention FP8/MoE, but one is save_pretrained bug and the other is inference support; not mergeable as one fix." + }, + { + "left": "issue:44355", + "right": "issue:45071", + "accept": false, + "reason": "Compiled file errors and PretrainedConfig type checking breakage are unrelated." + }, + { + "left": "issue:33453", + "right": "issue:34689", + "accept": false, + "reason": "Tokenizer loading regression and Llama 3.2 vision model loading failure are different loading paths." + }, + { + "left": "issue:42757", + "right": "issue:43576", + "accept": false, + "reason": "Missing is_offline_mode import and broken transformers env command are different regressions." + }, + { + "left": "issue:43827", + "right": "issue:44961", + "accept": false, + "reason": "Pipeline-doc references and an unrelated 'racoon' issue do not match." + }, + { + "left": "issue:33453", + "right": "issue:36296", + "accept": false, + "reason": "Tokenizer loading and tensor parallel training are unrelated." + }, + { + "left": "issue:43576", + "right": "issue:45071", + "accept": false, + "reason": "Broken env command and PretrainedConfig type checking are different failures." + }, + { + "left": "issue:43576", + "right": "issue:43824", + "accept": false, + "reason": "Broken env command and Qwen2_5_VL import error are unrelated." + }, + { + "left": "issue:43673", + "right": "issue:44492", + "accept": false, + "reason": "Generation cache missing during chunked_prefill and a cache-strategy typo are different scopes." + }, + { + "left": "issue:38617", + "right": "issue:43576", + "accept": false, + "reason": "ImportError from configuration_utils and broken env command are not the same bug." + }, + { + "left": "issue:44961", + "right": "issue:45468", + "accept": false, + "reason": "A placeholder 'racoon' issue and Gemma-4 audio positional encoding are unrelated." 
+ }, + { + "left": "issue:31795", + "right": "issue:38617", + "accept": false, + "reason": "Docs confusion around forward args and an import error for layer_type_validation are unrelated." + }, + { + "left": "issue:43576", + "right": "issue:44351", + "accept": false, + "reason": "Broken env command and missing HybridCache import are different regressions." + }, + { + "left": "issue:44908", + "right": "issue:44961", + "accept": false, + "reason": "Scheduler kwargs handling and an unrelated 'racoon' issue do not match." + }, + { + "left": "issue:41628", + "right": "issue:42548", + "accept": false, + "reason": "AutoImageProcessor and PreTrainedModel import failures are separate missing-export issues." + }, + { + "left": "issue:44485", + "right": "issue:44961", + "accept": false, + "reason": "GLM-5 RoPE implementation and an unrelated 'racoon' issue are not the same." + }, + { + "left": "issue:36296", + "right": "issue:43901", + "accept": false, + "reason": "Tensor parallel training and TextClassificationPipeline docs are unrelated." + }, + { + "left": "issue:36296", + "right": "issue:43012", + "accept": false, + "reason": "Tensor parallel training and bfloat16 compile warning are different bugs." + }, + { + "left": "issue:41628", + "right": "issue:44355", + "accept": false, + "reason": "AutoImageProcessor import failure and compiled Python file errors are unrelated." + }, + { + "left": "issue:44261", + "right": "issue:44360", + "accept": false, + "reason": "Missing rms_norm_eps in MLA q_a_layernorm and DSA indexer ReLU absence are different precision/logic issues." + }, + { + "left": "issue:44273", + "right": "issue:45003", + "accept": false, + "reason": "Lazy loading problems and unsafe sys.modules access are related only superficially; not the same concrete bug." + }, + { + "left": "issue:44273", + "right": "issue:45542", + "accept": false, + "reason": "Lazy loading and undefined tf backend error from tensorboard-only installs are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 43, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:31795", + "issue:33453", + "issue:34689", + "issue:37428", + "issue:41669", + "issue:42371", + "issue:42890", + "issue:43352", + "issue:43408", + "issue:43502", + "issue:43519", + "issue:43576", + "issue:43606", + "issue:43673", + "issue:43704", + "issue:43723", + "issue:43827", + "issue:43976", + "issue:44038", + "issue:44077", + "issue:44246", + "issue:44261", + "issue:44263", + "issue:44273", + "issue:44295", + "issue:44297", + "issue:44315", + "issue:44351", + "issue:44360", + "issue:44393", + "issue:44485", + "issue:44492", + "issue:44556", + "issue:44623", + "issue:44655", + "issue:44704", + "issue:44829", + "issue:44861", + "issue:44908", + "issue:44945", + "issue:45071", + "issue:45092", + "issue:45125", + "issue:45230", + "issue:45310", + "issue:45341", + "issue:45468", + "issue:45478" + ], + "soft_pairs": [ + "issue:44623|issue:45230", + "issue:43576|issue:44556", + "issue:41669|issue:43673", + "issue:44038|issue:45125", + "issue:44393|issue:45071", + "issue:44556|issue:45092", + "issue:42371|issue:44492", + "issue:42890|issue:44908", + "issue:44704|issue:44908", + "issue:44246|issue:44273", + "issue:43408|issue:44315", + "issue:43976|issue:44393", + "issue:43352|issue:44393", + "issue:43576|issue:44908", + "issue:44908|issue:45071", + "issue:44297|issue:45230", + "issue:44295|issue:45230", + "issue:41669|issue:44351", + "issue:41669|issue:45341", + "issue:44492|issue:44945", + "issue:41669|issue:44492", + "issue:44393|issue:45478", + "issue:44315|issue:44829", + 
"issue:43827|issue:44908", + "issue:31795|issue:33453", + "issue:44077|issue:44861", + "issue:41669|issue:44908", + "issue:43723|issue:44393", + "issue:43576|issue:44273", + "issue:44655|issue:44861", + "issue:44360|issue:45468", + "issue:44273|issue:44908", + "issue:45230|issue:45310", + "issue:43519|issue:44485", + "issue:41669|issue:43704", + "issue:43519|issue:44908", + "issue:31795|issue:34689", + "issue:31795|issue:37428", + "issue:44315|issue:45092", + "issue:43704|issue:44485", + "issue:44485|issue:45468", + "issue:42371|issue:43704", + "issue:43502|issue:43519", + "issue:43519|issue:44623", + "issue:43519|issue:43704", + "issue:44263|issue:45468", + "issue:44261|issue:45468", + "issue:43606|issue:45468", + "issue:43704|issue:45341" + ], + "prepared_review_unit_hash": "47a84ab8025d0858b2c554575c02e24bef94bc3152f49262c167b730ac7a415d", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 48, + "item_count": 48, + "soft_pair_count": 49, + "serialized_chars": 41785, + "estimated_input_tokens": 10447, + "estimated_eval_tokens": 21150 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is dominated by title-level similarity around Transformers v5/loading/tokenizer/model issues, but the pairs do not look like the same underlying bug or change. 
I rejected all soft edges and used one concrete v5 loading regression as the best representative issue.", + "confidence": 0.71, + "canonical_issue_reason": "issue:44556 is the most concrete and representative loading-regression report in the set: it has a specific upgrade path (v4.57 to v5.2/v5.3) and a clear failure mode, unlike the many generic or loosely related items.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44556 is the strongest anchor for duplicate triage because it is specific, reproducible, and sits in the common compatibility-breakage theme seen across several nearby titles.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44623", + "right": "issue:45230", + "accept": false, + "reason": "Different bugs: processor save_pretrained missing files vs a generic bug report with no concrete matching failure mode." + }, + { + "left": "issue:43576", + "right": "issue:44556", + "accept": false, + "reason": "Both mention v5 breakage, but one is an env command issue and the other is checkpoint reload compatibility; different codepaths." + }, + { + "left": "issue:41669", + "right": "issue:43673", + "accept": false, + "reason": "Import-star performance/regression is unrelated to GenerationMixin cache behavior during chunked_prefill." + }, + { + "left": "issue:44038", + "right": "issue:45125", + "accept": false, + "reason": "Same model family, but one is a general Qwen3-VL-Moe bug and the other is missing _tp_plan for tensor parallelism; not the same issue." + }, + { + "left": "issue:44393", + "right": "issue:45071", + "accept": false, + "reason": "Qwen3-VL bounding-box output error is unrelated to PretrainedConfig type-checking breakage." + }, + { + "left": "issue:44556", + "right": "issue:45092", + "accept": false, + "reason": "Both are v5 model-loading incompatibilities, but one is a checkpoint reload regression and the other is remote-code/meta-init incompatibility for a specific model." 
+ }, + { + "left": "issue:42371", + "right": "issue:44492", + "accept": false, + "reason": "TF32 API changes and a cache-strategy typo are unrelated." + }, + { + "left": "issue:42890", + "right": "issue:44908", + "accept": false, + "reason": "Missing set_seed in tests is unrelated to inverse_sqrt scheduler kwargs handling." + }, + { + "left": "issue:44704", + "right": "issue:44908", + "accept": false, + "reason": "AutoProcessor kwargs forwarding and scheduler kwargs propagation are different bugs in different subsystems." + }, + { + "left": "issue:44246", + "right": "issue:44273", + "accept": false, + "reason": "Import slowness and lazy-loading failure are related only at a broad theme level; not the same concrete bug." + }, + { + "left": "issue:43408", + "right": "issue:44315", + "accept": false, + "reason": "Model-type warning vs Liger Kernel not applied with model_init; different mechanisms and symptoms." + }, + { + "left": "issue:43976", + "right": "issue:44393", + "accept": false, + "reason": "Python version compatibility issue is unrelated to Qwen3-VL output correctness." + }, + { + "left": "issue:43352", + "right": "issue:44393", + "accept": false, + "reason": "Flash Attention 2 support gap is unrelated to Qwen3-VL hallucination/error output." + }, + { + "left": "issue:43576", + "right": "issue:44908", + "accept": false, + "reason": "v5 env command breakage and scheduler kwargs bug are unrelated." + }, + { + "left": "issue:44908", + "right": "issue:45071", + "accept": false, + "reason": "Scheduler kwargs handling and PretrainedConfig type-checking are separate issues." + }, + { + "left": "issue:44297", + "right": "issue:45230", + "accept": false, + "reason": "Tokenizer_class mismatch in save_pretrained is not the same as a generic bug report." + }, + { + "left": "issue:44295", + "right": "issue:45230", + "accept": false, + "reason": "position_ids buffer read error is a distinct model-tensor issue, not the generic bug report." 
+ }, + { + "left": "issue:41669", + "right": "issue:44351", + "accept": false, + "reason": "Import-star cleanup/performance is unrelated to missing HybridCache import." + }, + { + "left": "issue:41669", + "right": "issue:45341", + "accept": false, + "reason": "Import-time cleanup issue is unrelated to a small testing_utils bug." + }, + { + "left": "issue:44492", + "right": "issue:44945", + "accept": false, + "reason": "Cache-strategy typo and incorrect pipeline-parallel output are different failures." + }, + { + "left": "issue:41669", + "right": "issue:44492", + "accept": false, + "reason": "Import-star performance issue does not match a cache-strategy typo." + }, + { + "left": "issue:44393", + "right": "issue:45478", + "accept": false, + "reason": "Different Qwen3.5 Moe bug: bounding-box hallucination vs from_pretrained error." + }, + { + "left": "issue:44315", + "right": "issue:44829", + "accept": false, + "reason": "Liger Kernel application with model_init is unrelated to flash_attention_3 causing degenerate training." + }, + { + "left": "issue:43827", + "right": "issue:44908", + "accept": false, + "reason": "Docs still referencing pipeline() is unrelated to scheduler kwargs handling." + }, + { + "left": "issue:31795", + "right": "issue:33453", + "accept": false, + "reason": "Documentation confusion and tokenizer-loading regression are not the same bug." + }, + { + "left": "issue:44077", + "right": "issue:44861", + "accept": false, + "reason": "Optional post_init allowance and _get_tied_weight_keys AttributeError are separate model-internal issues." + }, + { + "left": "issue:41669", + "right": "issue:44908", + "accept": false, + "reason": "Import performance regression is unrelated to scheduler behavior." + }, + { + "left": "issue:43723", + "right": "issue:44393", + "accept": false, + "reason": "Tokenizer loading in v5 and Qwen3-VL bounding-box output errors are unrelated." 
+ }, + { + "left": "issue:43576", + "right": "issue:44273", + "accept": false, + "reason": "Broken env command and lazy-loading malfunction are different problems, even if both touch v5-era internals." + }, + { + "left": "issue:44655", + "right": "issue:44861", + "accept": false, + "reason": "Pipeline save_pretrained support and tied-weight key handling are unrelated." + }, + { + "left": "issue:44360", + "right": "issue:45468", + "accept": false, + "reason": "DSA indexer activation issue and Gemma-4 audio positional encoding are unrelated." + }, + { + "left": "issue:44273", + "right": "issue:44908", + "accept": false, + "reason": "Lazy loading failure does not match inverse_sqrt scheduler kwargs behavior." + }, + { + "left": "issue:45230", + "right": "issue:45310", + "accept": false, + "reason": "Generic bug report is not a concrete duplicate of the Qwen3.5 Moe from_pretrained error." + }, + { + "left": "issue:43519", + "right": "issue:44485", + "accept": false, + "reason": "Qwen3VL timestamp calculation and GLM-5 RoPE implementation are different model-specific math bugs." + }, + { + "left": "issue:41669", + "right": "issue:43704", + "accept": false, + "reason": "Import-star slowdown and VRAM leak in multi-threaded dataloader use are unrelated." + }, + { + "left": "issue:43519", + "right": "issue:44908", + "accept": false, + "reason": "Timestamp calculation in a processor and scheduler kwargs handling are unrelated." + }, + { + "left": "issue:31795", + "right": "issue:34689", + "accept": false, + "reason": "Documentation confusion about forward args is unrelated to Llama 3.2 model-loading regression." + }, + { + "left": "issue:31795", + "right": "issue:37428", + "accept": false, + "reason": "Forward-doc confusion is unrelated to a flash-attention import error." + }, + { + "left": "issue:44315", + "right": "issue:45092", + "accept": false, + "reason": "Liger Kernel/model_init and remote-code meta-initialization incompatibility are not the same bug." 
+ }, + { + "left": "issue:43704", + "right": "issue:44485", + "accept": false, + "reason": "VRAM leak in dataloader threads and GLM-5 RoPE implementation are unrelated." + }, + { + "left": "issue:44485", + "right": "issue:45468", + "accept": false, + "reason": "Different model internals: RoPE implementation vs audio relative positional encoding." + }, + { + "left": "issue:42371", + "right": "issue:43704", + "accept": false, + "reason": "TF32 API guidance is unrelated to a VRAM leak." + }, + { + "left": "issue:43502", + "right": "issue:43519", + "accept": false, + "reason": "local_files_only network requests and Qwen3VL timestamp math are different failure modes." + }, + { + "left": "issue:43519", + "right": "issue:44623", + "accept": false, + "reason": "Timestamp calculation bug and processor save_pretrained missing files are unrelated." + }, + { + "left": "issue:43519", + "right": "issue:43704", + "accept": false, + "reason": "Timestamp calculation in a processor is unrelated to a multi-threaded VRAM leak." + }, + { + "left": "issue:44263", + "right": "issue:45468", + "accept": false, + "reason": "torch.split return semantics in GlmMoeDsaIndexer are unrelated to Gemma-4 audio positional encoding." + }, + { + "left": "issue:44261", + "right": "issue:45468", + "accept": false, + "reason": "Layernorm epsilon precision discussion is unrelated to audio relative positional encoding." + }, + { + "left": "issue:43606", + "right": "issue:45468", + "accept": false, + "reason": "CPU offload device mismatch and Gemma-4 audio positional encoding are unrelated." + }, + { + "left": "issue:43704", + "right": "issue:45341", + "accept": false, + "reason": "VRAM leak in multi-threaded loading does not match a testing_utils bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 14, + "review_unit_index": 44, + "review_unit_count": 44, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-381", + "nodes": [ + "issue:30333", + "issue:31515", + "issue:31795", + "issue:33453", + "issue:36296", + "issue:38617", + "issue:42913", + "issue:43066", + "issue:43298", + "issue:43525", + "issue:43572", + "issue:43618", + "issue:44230", + "issue:44492", + "issue:44556", + "issue:44623", + "issue:44861", + "issue:44908", + "issue:44998", + "issue:45290", + "issue:45310", + "issue:45341" + ], + "soft_pairs": [ + "issue:42913|issue:43066", + "issue:44908|issue:44998", + "issue:44998|issue:45341", + "issue:44861|issue:45290", + "issue:44492|issue:44998", + "issue:43618|issue:44998", + "issue:44556|issue:44998", + "issue:43525|issue:44861", + "issue:43572|issue:44861", + "issue:31515|issue:38617", + "issue:44230|issue:44623", + "issue:31515|issue:36296", + "issue:30333|issue:31515", + "issue:31515|issue:31795", + "issue:44230|issue:45310", + "issue:31515|issue:33453", + "issue:43298|issue:44230" + ], + "prepared_review_unit_hash": "370d30b279fbc797fdf89d3a7c11882f1eec603a436ee61e061469792d51ad1f", + "component_budget": { + "node_count": 381, + "item_count": 381, + "soft_pair_count": 1427, + "serialized_chars": 735824, + "estimated_input_tokens": 183956, + "estimated_eval_tokens": 368168 + }, + "budget": { + "node_count": 22, + "item_count": 22, + "soft_pair_count": 17, + "serialized_chars": 17392, + "estimated_input_tokens": 4348, + "estimated_eval_tokens": 8952 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + 
"summary": "This cluster is mostly heterogeneous. There is a loose tokenizer-v5 theme, but none of the soft pairs look like the same underlying bug/change strongly enough to merge. No PRs are present.", + "confidence": 0.34, + "canonical_issue_reason": "If a representative is needed, issue 42913 is the broadest tokenizer-v5 regression report; however, the cluster does not look like one true duplicate set.", + "canonical_pr_reason": null, + "best_issue_reason": "42913 is the best cluster representative because it is the most general and likely to subsume nearby tokenizer-v5 complaints, even though it is not a confirmed duplicate of the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42913", + "right": "issue:43066", + "accept": false, + "reason": "Both concern tokenizer behavior in v5, but one is a broad v4\u2192v5 behavior regression and the other is a specific decoder-type mismatch; not clearly the same bug." + }, + { + "left": "issue:44908", + "right": "issue:44998", + "accept": false, + "reason": "Scheduler kwargs handling is unrelated to an unrelated/non-technical issue." + }, + { + "left": "issue:44998", + "right": "issue:45341", + "accept": false, + "reason": "No shared code path or symptom; these are unrelated issues." + }, + { + "left": "issue:44861", + "right": "issue:45290", + "accept": false, + "reason": "Different subsystems and different failure modes: tied-weights attribute handling vs chat template crash." + }, + { + "left": "issue:44492", + "right": "issue:44998", + "accept": false, + "reason": "A typo in cache strategies is unrelated to the other issue." + }, + { + "left": "issue:43618", + "right": "issue:44998", + "accept": false, + "reason": "CLIP output attentions regression is unrelated." + }, + { + "left": "issue:44556", + "right": "issue:44998", + "accept": false, + "reason": "Checkpoint reload compatibility after v5 upgrade is unrelated to the other issue." 
+ }, + { + "left": "issue:43525", + "right": "issue:44861", + "accept": false, + "reason": "Missing pad_token_id on Llama4Config and tied-weight key handling are different bugs." + }, + { + "left": "issue:43572", + "right": "issue:44861", + "accept": false, + "reason": "Missing pad_token_idx on StableLmConfig is a separate config regression." + }, + { + "left": "issue:31515", + "right": "issue:38617", + "accept": false, + "reason": "Slow checkpoint loading and an ImportError from configuration_utils are different code paths." + }, + { + "left": "issue:44230", + "right": "issue:44623", + "accept": false, + "reason": "FP8/Qwen3-VL support and missing processor save files are not the same bug." + }, + { + "left": "issue:31515", + "right": "issue:36296", + "accept": false, + "reason": "Checkpoint loading speed and tensor-parallel training are unrelated." + }, + { + "left": "issue:30333", + "right": "issue:31515", + "accept": false, + "reason": "MLFlow job-status reporting is unrelated to checkpoint loading performance." + }, + { + "left": "issue:31515", + "right": "issue:31795", + "accept": false, + "reason": "Loading performance and documentation confusion are not the same underlying issue." + }, + { + "left": "issue:44230", + "right": "issue:45310", + "accept": false, + "reason": "Both mention Qwen3.5 MoE, but one is FP8 support and the other is a from_pretrained error; not clearly one concrete bug." + }, + { + "left": "issue:31515", + "right": "issue:33453", + "accept": false, + "reason": "Checkpoint loading slowdown and tokenizer-loading regression are separate issues." + }, + { + "left": "issue:43298", + "right": "issue:44230", + "accept": false, + "reason": "Continuous batching audio-model support and FP8 inference support are different feature gaps." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 3, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-26350-2", + "nodes": [ + "issue:26350", + "issue:33260" + ], + "soft_pairs": [ + "issue:26350|issue:33260" + ], + "prepared_review_unit_hash": "efe8ec252c55ddf8c41f0561f93d8e2b515b3f69970a2821f528f9294ebd8913", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1589, + "estimated_input_tokens": 398, + "estimated_eval_tokens": 1052 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1589, + "estimated_input_tokens": 398, + "estimated_eval_tokens": 1052 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are two different community feature requests in the same broad area, but they target unrelated capabilities: Flash Attention 2 support vs GGUF support. They should not be treated as duplicates.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:26350", + "right": "issue:33260", + "accept": false, + "reason": "Different underlying changes/features; one requests Flash Attention 2 support and the other requests GGUF support, so they are not the same bug or fix." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 4, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-27088-4", + "nodes": [ + "issue:27088", + "issue:43834", + "issue:44365", + "issue:44370" + ], + "soft_pairs": [ + "issue:27088|issue:44370", + "issue:27088|issue:44365", + "issue:27088|issue:43834", + "issue:43834|issue:44370", + "issue:43834|issue:44365", + "issue:44365|issue:44370" + ], + "prepared_review_unit_hash": "70b8053e33e3b6bed84eafd239d5700deb77468f622d38e82e7c06d6fe306e3d", + "component_budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4455, + "estimated_input_tokens": 1114, + "estimated_eval_tokens": 2484 + }, + "budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4455, + "estimated_input_tokens": 1114, + "estimated_eval_tokens": 2484 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All four items are documentation-translation issues, but only #27088 is concrete (Turkish). The others are generic placeholder/template issues with no language specified, so they are not safe duplicate merges. No PRs are present.", + "confidence": 0.93, + "canonical_issue_reason": "#27088 is the only specific, actionable issue: it names Turkish docs translation explicitly. 
The other issues are generic placeholders ([i18n-] / ) and do not identify the same concrete problem.", + "canonical_pr_reason": null, + "best_issue_reason": "#27088 is the best representative because it is specific, user-facing, and clearly scoped to one translation task. The others look like template instances rather than a concrete duplicate target.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:27088", + "right": "issue:44370", + "accept": false, + "reason": "Both are translation-related, but #44370 is a generic placeholder issue and does not identify Turkish docs or the same concrete work item." + }, + { + "left": "issue:27088", + "right": "issue:44365", + "accept": false, + "reason": "#27088 is specifically Turkish; #44365 is only a template stub, so there is not enough evidence that they are the same underlying issue." + }, + { + "left": "issue:27088", + "right": "issue:43834", + "accept": false, + "reason": "#43834 is a generic language placeholder issue, not a concrete Turkish translation request, so this is not a safe duplicate." + }, + { + "left": "issue:43834", + "right": "issue:44370", + "accept": false, + "reason": "These are near-identical templates, but without actual language details they cannot be confirmed as the same underlying issue." + }, + { + "left": "issue:43834", + "right": "issue:44365", + "accept": false, + "reason": "Same generic i18n template wording only; no concrete language or task details to justify duplicate merging." + }, + { + "left": "issue:44365", + "right": "issue:44370", + "accept": false, + "reason": "Both are placeholder issues with identical template titles, but the available data is too generic to prove they refer to one specific bug/change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 5, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-36817-14", + "nodes": [ + "issue:36817", + "issue:37015", + "issue:41115", + "issue:41211", + "issue:42503", + "issue:42738", + "issue:43671", + "issue:43837", + "issue:43909", + "issue:44121", + "issue:44205", + "issue:45027", + "issue:45177", + "issue:45322" + ], + "soft_pairs": [ + "issue:36817|issue:41211", + "issue:36817|issue:43671", + "issue:37015|issue:43837", + "issue:37015|issue:44121", + "issue:37015|issue:43909", + "issue:37015|issue:42503", + "issue:37015|issue:45027", + "issue:41115|issue:43837", + "issue:41115|issue:44121", + "issue:41115|issue:42503", + "issue:41115|issue:45027", + "issue:41211|issue:43671", + "issue:42503|issue:43837", + "issue:42503|issue:43671", + "issue:42503|issue:42738", + "issue:42738|issue:45027", + "issue:43671|issue:43837", + "issue:43671|issue:45027", + "issue:43671|issue:45177", + "issue:43671|issue:44121", + "issue:43671|issue:43909", + "issue:43837|issue:43909", + "issue:43837|issue:45322", + "issue:43837|issue:44121", + "issue:43909|issue:44205", + "issue:44121|issue:45322", + "issue:43837|issue:45027", + "issue:43837|issue:45177" + ], + "prepared_review_unit_hash": "bc5136440272b7bab1e0c331202c49a792e339b34f33eec72b4b7ba62d35bab0", + "component_budget": { + "node_count": 14, + "item_count": 14, + "soft_pair_count": 28, + "serialized_chars": 17647, + "estimated_input_tokens": 4412, + "estimated_eval_tokens": 9080 + }, + "budget": { + "node_count": 14, + "item_count": 14, + "soft_pair_count": 28, + "serialized_chars": 17647, + "estimated_input_tokens": 4412, + 
"estimated_eval_tokens": 9080 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The cluster is mostly broad model-request issues that look superficially similar but refer to different models, modalities, or feature additions. They do not appear to be duplicates of the same underlying change.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:36817", + "right": "issue:41211", + "accept": false, + "reason": "Different model requests: EuroBert config support vs DEIMv2 architecture support. Same broad \u2018add model\u2019 theme, but not the same change." + }, + { + "left": "issue:36817", + "right": "issue:43671", + "accept": false, + "reason": "EuroBert config vs Qwen3-TTS support are unrelated models and tasks." + }, + { + "left": "issue:37015", + "right": "issue:43837", + "accept": false, + "reason": "NeoBERT addition and Qwen3-ASR support target different models and modalities." + }, + { + "left": "issue:37015", + "right": "issue:44121", + "accept": false, + "reason": "NeoBERT vs OpenAI weight-sparse transformer are different model families with different implementation needs." + }, + { + "left": "issue:37015", + "right": "issue:43909", + "accept": false, + "reason": "NeoBERT and LFM2.5 Audio are unrelated model additions." + }, + { + "left": "issue:37015", + "right": "issue:42503", + "accept": false, + "reason": "NeoBERT and ModernVBERT are different models; no evidence they are the same underlying request." + }, + { + "left": "issue:37015", + "right": "issue:45027", + "accept": false, + "reason": "NeoBERT vs Voxtral TTS support are distinct model integrations." 
+ }, + { + "left": "issue:41115", + "right": "issue:43837", + "accept": false, + "reason": "MiniCPM3 architecture support and Qwen3-ASR support are different model-specific requests." + }, + { + "left": "issue:41115", + "right": "issue:44121", + "accept": false, + "reason": "MiniCPM3 architecture work is not the same as adding the OpenAI sparse transformer model." + }, + { + "left": "issue:41115", + "right": "issue:42503", + "accept": false, + "reason": "MiniCPM3 and ModernVBERT are separate model additions." + }, + { + "left": "issue:41115", + "right": "issue:45027", + "accept": false, + "reason": "MiniCPM3 architecture request and Voxtral TTS support are unrelated." + }, + { + "left": "issue:41211", + "right": "issue:43671", + "accept": false, + "reason": "DEIMv2 addition and Qwen3-TTS support are different model requests." + }, + { + "left": "issue:42503", + "right": "issue:43837", + "accept": false, + "reason": "ModernVBERT models and Qwen3-ASR support are unrelated additions." + }, + { + "left": "issue:42503", + "right": "issue:43671", + "accept": false, + "reason": "ModernVBERT and Qwen3-TTS are different models and tasks." + }, + { + "left": "issue:42503", + "right": "issue:42738", + "accept": false, + "reason": "ModernVBERT models vs BERT-like models with RoPE is only a broad architectural similarity, not the same concrete request." + }, + { + "left": "issue:42738", + "right": "issue:45027", + "accept": false, + "reason": "BERT-like RoPE support and Voxtral TTS support are unrelated feature requests." + }, + { + "left": "issue:43671", + "right": "issue:43837", + "accept": false, + "reason": "Both are Qwen3 requests, but TTS and ASR are different modalities and separate integrations." + }, + { + "left": "issue:43671", + "right": "issue:45027", + "accept": false, + "reason": "Qwen3-TTS and Voxtral TTS are different models; same modality but not the same underlying change." 
+ }, + { + "left": "issue:43671", + "right": "issue:45177", + "accept": false, + "reason": "Qwen3-TTS support and DeepSeek-OCR2 are different model requests." + }, + { + "left": "issue:43671", + "right": "issue:44121", + "accept": false, + "reason": "Qwen3-TTS support is unrelated to the OpenAI sparse transformer request." + }, + { + "left": "issue:43671", + "right": "issue:43909", + "accept": false, + "reason": "Qwen3-TTS and LFM2.5 Audio are distinct audio model requests." + }, + { + "left": "issue:43837", + "right": "issue:43909", + "accept": false, + "reason": "Qwen3-ASR and LFM2.5 Audio are different audio models with separate support paths." + }, + { + "left": "issue:43837", + "right": "issue:45322", + "accept": false, + "reason": "Qwen3-ASR support and EUPE encoder support are unrelated model additions." + }, + { + "left": "issue:43837", + "right": "issue:44121", + "accept": false, + "reason": "Qwen3-ASR and the OpenAI weight-sparse transformer are different model integrations." + }, + { + "left": "issue:43909", + "right": "issue:44205", + "accept": false, + "reason": "LFM2.5 Audio and SAM3-LiteText are different models and different use cases." + }, + { + "left": "issue:44121", + "right": "issue:45322", + "accept": false, + "reason": "OpenAI sparse transformer support and EUPE encoder support are unrelated requests." + }, + { + "left": "issue:43837", + "right": "issue:45027", + "accept": false, + "reason": "Qwen3-ASR and Voxtral-4B-TTS are different speech models and not the same change." + }, + { + "left": "issue:43837", + "right": "issue:45177", + "accept": false, + "reason": "Qwen3-ASR support and DeepSeek-OCR2 addition are different model requests." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 6, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-37934-6", + "nodes": [ + "issue:37934", + "issue:42936", + "issue:43240", + "issue:43668", + "issue:43813", + "issue:44031" + ], + "soft_pairs": [ + "issue:37934|issue:43668", + "issue:42936|issue:43240", + "issue:42936|issue:44031", + "issue:42936|issue:43813", + "issue:42936|issue:43668", + "issue:43240|issue:43668", + "issue:43668|issue:44031", + "issue:43668|issue:43813" + ], + "prepared_review_unit_hash": "442133c41799bc70f07079ff922d28c5bb1714fa3695e3bd6605d8cdea8356a2", + "component_budget": { + "node_count": 6, + "item_count": 6, + "soft_pair_count": 8, + "serialized_chars": 6292, + "estimated_input_tokens": 1573, + "estimated_eval_tokens": 3402 + }, + "budget": { + "node_count": 6, + "item_count": 6, + "soft_pair_count": 8, + "serialized_chars": 6292, + "estimated_input_tokens": 1573, + "estimated_eval_tokens": 3402 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "This cluster is heterogeneous: the issues span unrelated areas (model normalization, Mask2Former loss behavior, config typing, a typo, and tokenizer warning behavior). 
None of the soft edges look like the same underlying bug.", + "confidence": 0.98, + "canonical_issue_reason": "No canonical issue fits well here; the items are about different code paths and bug classes, so the cluster should not be merged around a single issue.", + "canonical_pr_reason": null, + "best_issue_reason": "No single issue is a good global representative because there is no common underlying defect across the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:37934", + "right": "issue:43668", + "accept": false, + "reason": "Both mention normalization-related terms, but one is about Llama4TextL2Norm semantics and the other is a ModernBERT config type hint; these are unrelated bugs." + }, + { + "left": "issue:42936", + "right": "issue:43240", + "accept": false, + "reason": "Mask2Former ignore_value being unused and kwargs not reaching a loss function are different implementation problems in different paths." + }, + { + "left": "issue:42936", + "right": "issue:44031", + "accept": false, + "reason": "Mask2Former loss handling is unrelated to tokenizer regex warning behavior." + }, + { + "left": "issue:42936", + "right": "issue:43813", + "accept": false, + "reason": "A loss-argument issue and a typo in a quantization attribute name are distinct defects." + }, + { + "left": "issue:42936", + "right": "issue:43668", + "accept": false, + "reason": "Different models and different bug types: unused ignore_value vs incorrect config type hint." + }, + { + "left": "issue:43240", + "right": "issue:43668", + "accept": false, + "reason": "Passing kwargs to loss calculation and a ModernBERT config annotation issue do not share the same code-path bug." + }, + { + "left": "issue:43668", + "right": "issue:44031", + "accept": false, + "reason": "Incorrect type hint in a config class is not the same as a tokenizer warning regression." 
+ }, + { + "left": "issue:43668", + "right": "issue:43813", + "accept": false, + "reason": "These are both small correctness issues, but one is a type-hint typo and the other is a specific attribute-name typo in a different subsystem." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 7, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41084-2", + "nodes": [ + "issue:41084", + "issue:43037" + ], + "soft_pairs": [ + "issue:41084|issue:43037" + ], + "prepared_review_unit_hash": "30394be29ea882871b54591c1ada2d1c55688a6749e24e7428d4a6df5c5a9f8f", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1479, + "estimated_input_tokens": 370, + "estimated_eval_tokens": 996 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1479, + "estimated_input_tokens": 370, + "estimated_eval_tokens": 996 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The two issues are not duplicates: one is about block decoding behavior, the other is a feature request for DeepSeek v3.2 support. They do not describe the same underlying problem or change.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41084", + "right": "issue:43037", + "accept": false, + "reason": "Titles and likely scope are unrelated; one concerns decoding logic, the other model/version support. 
No shared code path or concrete change is evident." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 8, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43598-2", + "nodes": [ + "issue:43598", + "issue:43600" + ], + "soft_pairs": [ + "issue:43598|issue:43600" + ], + "prepared_review_unit_hash": "084d21f372647b656322b45061660af13d1b7074da130a8bf7908d060292f9f4", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1584, + "estimated_input_tokens": 396, + "estimated_eval_tokens": 1048 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1584, + "estimated_input_tokens": 396, + "estimated_eval_tokens": 1048 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These issues are only loosely related by custom-trainer support, but they target different Trainer behaviors: unused-column removal vs FLOPS estimation. 
They do not look like duplicates.", + "confidence": 0.93, + "canonical_issue_reason": "No single canonical issue for a duplicate cluster here; the two issues describe different code paths and fixes.", + "canonical_pr_reason": null, + "best_issue_reason": "If one issue must be kept as the most representative of the shared theme, #43598 is slightly broader around Trainer customizability, but it is not a duplicate of #43600.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43598", + "right": "issue:43600", + "accept": false, + "reason": "Both mention custom trainers, but one is about remove_unused_column behavior and the other is about FLOPS estimation. Different symptoms, different logic, not the same underlying bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 9, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43684-2", + "nodes": [ + "issue:43684", + "issue:44405" + ], + "soft_pairs": [ + "issue:43684|issue:44405" + ], + "prepared_review_unit_hash": "4669445ee80485228bb5b3538ac449de7ab686347b5276ce0d47503d06dc5516", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1612, + "estimated_input_tokens": 403, + "estimated_eval_tokens": 1062 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1612, + "estimated_input_tokens": 403, + "estimated_eval_tokens": 1062 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These issues are related only at a very broad AutoModel level, but 
they request different capabilities for different Qwen variants and different auto classes, so they should not be treated as duplicates.", + "confidence": 0.97, + "canonical_issue_reason": "Issue 43684 is the broader model-registration request, but it is not the same underlying problem as 44405, which asks for sequence-classification support for a different Qwen config.", + "canonical_pr_reason": null, + "best_issue_reason": "43684 is the more general representative of the cluster because it concerns core AutoModel registration rather than a narrower task-specific head.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43684", + "right": "issue:44405", + "accept": false, + "reason": "Different model families/configs and different auto-class targets: Qwen3-Omni registration for AutoModel/AutoModelForConditionalGeneration vs Qwen3.5 sequence-classification support for AutoModelForSequenceClassification." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 10, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43835-4", + "nodes": [ + "issue:43835", + "issue:43845", + "issue:43846", + "issue:43859" + ], + "soft_pairs": [ + "issue:43835|issue:43845", + "issue:43835|issue:43846", + "issue:43835|issue:43859", + "issue:43845|issue:43846", + "issue:43845|issue:43859", + "issue:43846|issue:43859" + ], + "prepared_review_unit_hash": "8245c5995f6aa4febfd425f0c4349118b263030b57df21d912164379bc318198", + "component_budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4267, + "estimated_input_tokens": 1067, + "estimated_eval_tokens": 2390 + }, + "budget": { + 
"node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4267, + "estimated_input_tokens": 1067, + "estimated_eval_tokens": 2390 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are low-information closed issues with generic titles. The three \"huggingface\" issues are superficially similar, but there\u2019s no body evidence tying them to the same concrete bug, so none of the soft edges should be merged.", + "confidence": 0.61, + "canonical_issue_reason": "issue:43845 is the best cluster representative because it is one of the repeated \"huggingface\" titles and is the earliest of that set, but the signal is weak.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43845 is the most plausible cluster anchor due to title repetition and earliest timestamp among the similar issues, though the underlying problem is not described.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43835", + "right": "issue:43845", + "accept": false, + "reason": "Different titles and no description evidence beyond generic similarity." + }, + { + "left": "issue:43835", + "right": "issue:43846", + "accept": false, + "reason": "Different titles and no concrete shared bug details." + }, + { + "left": "issue:43835", + "right": "issue:43859", + "accept": false, + "reason": "No evidence these refer to the same underlying problem." + }, + { + "left": "issue:43845", + "right": "issue:43846", + "accept": false, + "reason": "Same title, but bodies are too empty to confirm a shared bug or change." + }, + { + "left": "issue:43845", + "right": "issue:43859", + "accept": false, + "reason": "Title match is not enough without concrete issue details." + }, + { + "left": "issue:43846", + "right": "issue:43859", + "accept": false, + "reason": "Title match alone does not justify duplicate merging." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 11, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44371-5", + "nodes": [ + "issue:44371", + "issue:44418", + "issue:44572", + "issue:44573", + "issue:44937" + ], + "soft_pairs": [ + "issue:44371|issue:44572", + "issue:44371|issue:44573", + "issue:44371|issue:44418", + "issue:44371|issue:44937", + "issue:44418|issue:44572", + "issue:44418|issue:44573", + "issue:44572|issue:44573", + "issue:44572|issue:44937", + "issue:44573|issue:44937" + ], + "prepared_review_unit_hash": "ac94fdb4e05974ccac3e512c12417c2fee565f1fd710b09c02d50c1f76a9e5a0", + "component_budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 9, + "serialized_chars": 5794, + "estimated_input_tokens": 1449, + "estimated_eval_tokens": 3154 + }, + "budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 9, + "serialized_chars": 5794, + "estimated_input_tokens": 1449, + "estimated_eval_tokens": 3154 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "No convincing duplicate cluster here: four items are spam-like placeholders and one is a generic documentation suggestion, so none look like the same underlying issue.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44371", + "right": "issue:44572", + "accept": false, + "reason": "Both are spam-like placeholders with no substantive shared bug or change to merge." 
+ }, + { + "left": "issue:44371", + "right": "issue:44573", + "accept": false, + "reason": "Same spam-only pattern; no concrete issue evidence that they describe the same problem." + }, + { + "left": "issue:44371", + "right": "issue:44418", + "accept": false, + "reason": "\"\" is unrelated to a documentation enhancement suggestion." + }, + { + "left": "issue:44371", + "right": "issue:44937", + "accept": false, + "reason": "Spam placeholder and a separate company-name themed title; no shared underlying issue." + }, + { + "left": "issue:44418", + "right": "issue:44572", + "accept": false, + "reason": "Generic documentation request does not match a spam placeholder." + }, + { + "left": "issue:44418", + "right": "issue:44573", + "accept": false, + "reason": "Generic documentation request does not match a spam placeholder." + }, + { + "left": "issue:44572", + "right": "issue:44573", + "accept": false, + "reason": "Identical spam-like titles are not enough to treat them as the same underlying bug or change." + }, + { + "left": "issue:44572", + "right": "issue:44937", + "accept": false, + "reason": "No concrete overlap beyond both being low-information titles." + }, + { + "left": "issue:44573", + "right": "issue:44937", + "accept": false, + "reason": "No concrete overlap beyond both being low-information titles." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 12, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45145-2", + "nodes": [ + "issue:45145", + "issue:45151" + ], + "soft_pairs": [ + "issue:45145|issue:45151" + ], + "prepared_review_unit_hash": "a87d75b407b4aada8de82e2efd0ea7f9ccf8e033875f6890435a6e205fc1f762", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1646, + "estimated_input_tokens": 412, + "estimated_eval_tokens": 1080 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1646, + "estimated_input_tokens": 412, + "estimated_eval_tokens": 1080 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two closed issues appear to be duplicates of the same report: they have the same title, very similar body size, and were opened on the same day with no distinguishing links or files.", + "confidence": 0.99, + "canonical_issue_reason": "Issue #45145 is the better canonical because it was created first and already has an inbound reference, so it is the more established duplicate target.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #45145 is the best representative of this cluster for the same reason: earliest created and slightly more anchored in the discussion graph.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45145", + "right": "issue:45151", + "accept": true, + "reason": "Same title and essentially the same report; no evidence of different underlying 
bugs or distinct scopes." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet: the issues share the exact title, have very similar body lengths, were opened the same day, and there are no distinguishing links or files. The canonical-choice rationale is also supported by the earlier creation time and inbound reference on issue 45145. The soft-edge verdict is conservative enough given the available evidence." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 13, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "cache", + "cache_hit": true, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45295-2", + "nodes": [ + "issue:45295", + "issue:45373" + ], + "soft_pairs": [], + "prepared_review_unit_hash": "f5fd95fada2f530ab0999e343c17c16266cde0c7182bb22509972ca44827cd49", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 0, + "serialized_chars": 1286, + "estimated_input_tokens": 322, + "estimated_eval_tokens": 900 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 0, + "serialized_chars": 1286, + "estimated_input_tokens": 322, + "estimated_eval_tokens": 900 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two issues describe the same missing Gemma 4 sequence-classification support; the later, more specific open issue is the better cluster representative.", + "confidence": 0.94, + "canonical_issue_reason": "issue #45373 is the clearest canonical tracker: it is open, explicitly names the missing class `Gemma4ForSequenceClassification`, and states the module-level gap relative to Gemma 2/3.", + "canonical_pr_reason": 
null, + "best_issue_reason": "issue #45373 best captures the concrete request and likely remaining work, while #45295 is a shorter earlier duplicate with less detail and is already closed.", + "best_pr_reason": null, + "soft_edge_verdicts": [] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 14, + "component_count": 14, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45672-2", + "nodes": [ + "issue:45672", + "issue:45674" + ], + "soft_pairs": [ + "issue:45672|issue:45674" + ], + "prepared_review_unit_hash": "29dda7dfd42d507eef3d67685c35319bfb1f1f2bf2d3e446fa50455ff63c40ee", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1667, + "estimated_input_tokens": 417, + "estimated_eval_tokens": 1090 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1667, + "estimated_input_tokens": 417, + "estimated_eval_tokens": 1090 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two open issues in the same 4-bit inference area, but they describe different failures: one is a Gemma4/audio-module torch.finfo() TypeError on uint8 weights, the other is a BitsAndBytesConfig llm_int8_skip_modules regression that removes the default lm_head exclusion and triggers an AssertionError. 
They do not look like the same underlying bug.", + "confidence": 0.93, + "canonical_issue_reason": "issue:45674 is the better representative because it is broader, has more discussion/activity, and describes a clearer configuration regression in 4-bit inference.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45674 is the strongest cluster member by activity and scope; issue:45672 is a separate model/module-specific crash rather than the same defect.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45672", + "right": "issue:45674", + "accept": false, + "reason": "Reject: the symptoms and root causes differ. One is a torch.finfo() TypeError on uint8 weights in Gemma4 audio modules; the other is a BitsAndBytesConfig exclusion bug causing an AssertionError when llm_int8_skip_modules is set." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 1, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-39895-12", + "nodes": [ + "pull_request:39895", + "pull_request:41855", + "pull_request:42668", + "pull_request:43251", + "pull_request:43254", + "pull_request:43424", + "pull_request:43823", + "pull_request:43995", + "pull_request:44827", + "pull_request:45170", + "pull_request:45213", + "pull_request:45655" + ], + "soft_pairs": [ + "pull_request:39895|pull_request:43424", + "pull_request:41855|pull_request:43823", + "pull_request:41855|pull_request:43424", + "pull_request:42668|pull_request:43823", + "pull_request:43251|pull_request:43823", + "pull_request:43251|pull_request:44827", + "pull_request:43424|pull_request:44827", + "pull_request:43424|pull_request:43823", + 
"pull_request:43823|pull_request:45655", + "pull_request:43823|pull_request:43995", + "pull_request:43995|pull_request:45213", + "pull_request:43995|pull_request:44827", + "pull_request:44827|pull_request:45170", + "pull_request:44827|pull_request:45655", + "pull_request:43823|pull_request:44827", + "pull_request:44827|pull_request:45213", + "pull_request:43823|pull_request:45213", + "pull_request:43251|pull_request:43254" + ], + "prepared_review_unit_hash": "f9441d5883b454267bdbf39d5351877f4e96c1a6754ca6b075426ff33747f68d", + "component_budget": { + "node_count": 12, + "item_count": 12, + "soft_pair_count": 18, + "serialized_chars": 32875, + "estimated_input_tokens": 8219, + "estimated_eval_tokens": 16694 + }, + "budget": { + "node_count": 12, + "item_count": 12, + "soft_pair_count": 18, + "serialized_chars": 32875, + "estimated_input_tokens": 8219, + "estimated_eval_tokens": 16694 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Only one soft duplicate pair is strong enough to merge: the two `loss_utils.py` PRs that both extend `fixed_cross_entropy` with extra `cross_entropy` kwargs. 
The rest are broad thematic similarities (new models, processor changes, tests, or unrelated refactors) and should stay separate.", + "confidence": 0.9, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 43251 is the best canonical representative: it explicitly targets issue 43240 and implements the scoped fix in `fixed_cross_entropy` by forwarding `weight` and `label_smoothing` to `nn.functional.cross_entropy`.", + "best_issue_reason": null, + "best_pr_reason": "PR 43251 is the strongest standalone candidate in this cluster because it is tightly focused, issue-linked, and clearly describes the intended behavior change.", + "soft_edge_verdicts": [ + { + "left": "pull_request:39895", + "right": "pull_request:43424", + "accept": false, + "reason": "Both are unrelated feature/test PRs; they do not touch the same code path or implement the same change." + }, + { + "left": "pull_request:41855", + "right": "pull_request:43823", + "accept": false, + "reason": "Tokenizer bugfix vs. adding a new model; different scopes and no shared underlying change." + }, + { + "left": "pull_request:41855", + "right": "pull_request:43424", + "accept": false, + "reason": "Missing tokenizer methods is unrelated to executorch dynamic-shape export tests." + }, + { + "left": "pull_request:42668", + "right": "pull_request:43823", + "accept": false, + "reason": "Processor robustness work and adding MobileLLM are different PRs in different areas." + }, + { + "left": "pull_request:43251", + "right": "pull_request:43823", + "accept": false, + "reason": "Loss-function kwargs fix and new model addition are unrelated." + }, + { + "left": "pull_request:43251", + "right": "pull_request:44827", + "accept": false, + "reason": "The Mistral4 work is model/test/integration related; it is not the same change as forwarding kwargs in `fixed_cross_entropy`." 
+ }, + { + "left": "pull_request:43424", + "right": "pull_request:44827", + "accept": false, + "reason": "Executorch export tests and Mistral4 fixes are unrelated code paths." + }, + { + "left": "pull_request:43424", + "right": "pull_request:43823", + "accept": false, + "reason": "New model support and dynamic export testing are not the same underlying change." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45655", + "accept": false, + "reason": "A new model PR and a `config` resolution ordering fix are unrelated." + }, + { + "left": "pull_request:43823", + "right": "pull_request:43995", + "accept": false, + "reason": "New MobileLLM model work does not match Falcon output-interface refactoring." + }, + { + "left": "pull_request:43823", + "right": "pull_request:44827", + "accept": false, + "reason": "These are different model additions/fixes with no shared concrete bug." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45213", + "accept": false, + "reason": "A model addition and a model-creation skill draft are not the same mergeable code change." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45655", + "accept": false, + "reason": "Model addition vs. class annotation/config resolution fix are unrelated." + }, + { + "left": "pull_request:43995", + "right": "pull_request:44827", + "accept": false, + "reason": "Falcon output-collection refactoring and Mistral4 test fixes are different issues." + }, + { + "left": "pull_request:43995", + "right": "pull_request:45213", + "accept": false, + "reason": "Model refactoring and the model-creation skill draft are unrelated." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45170", + "accept": false, + "reason": "Mistral4 test/integration fixes and `layrnorm` renaming are separate model-family changes." 
+ }, + { + "left": "pull_request:44827", + "right": "pull_request:45655", + "accept": false, + "reason": "Mistral4 fixes and global `config` resolution ordering are different problems." + }, + { + "left": "pull_request:43823", + "right": "pull_request:44827", + "accept": false, + "reason": "They touch different models and do not represent one concrete bug or patch." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45213", + "accept": false, + "reason": "Mistral4 fixes and the model-creation skill draft are unrelated." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45213", + "accept": false, + "reason": "A model implementation PR and a workflow/skill draft are not duplicates." + }, + { + "left": "pull_request:43251", + "right": "pull_request:43254", + "accept": true, + "reason": "Both PRs fix the same `fixed_cross_entropy` code path by adding supported kwargs to `nn.functional.cross_entropy`; they are plausibly mergeable into one PR." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The analysis is grounded in the packet and stays conservative. The only accepted soft pair, PR 43251 and PR 43254, is clearly the same `loss_utils.py` fix for `fixed_cross_entropy` and is plausibly mergeable into one PR. The remaining pairs are appropriately treated as unrelated thematic similarities rather than duplicates." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 2, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-40546-2", + "nodes": [ + "pull_request:40546", + "pull_request:42781" + ], + "soft_pairs": [ + "pull_request:40546|pull_request:42781" + ], + "prepared_review_unit_hash": "311a7069f5fcab50d3073c27aaa643342b9c0c1bfc1b6e1dbcb944e8ed40b542", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 7225, + "estimated_input_tokens": 1807, + "estimated_eval_tokens": 3870 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 7225, + "estimated_input_tokens": 1807, + "estimated_eval_tokens": 3870 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs work on VibeVoice, but they are not the same change: #40546 is the core VibeVoice implementation, while #42781 adds VibeVoice Realtime plus acoustic-tokenizer and generation updates. 
The file overlap is mostly shared model scaffolding, not a single concrete bugfix or mergeable duplicate PR.", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": "#40546 is the stronger canonical PR because it is the main VibeVoice implementation, has substantial review activity, and is not a draft.", + "best_issue_reason": null, + "best_pr_reason": "#40546 is the best overall PR for this cluster because it is the foundational, more mature VibeVoice submission; #42781 is a draft extension focused on realtime support rather than the same patch.", + "soft_edge_verdicts": [ + { + "left": "pull_request:40546", + "right": "pull_request:42781", + "accept": false, + "reason": "Reject: they share the same model family and some common files, but #42781 adds realtime-specific functionality and acoustic-tokenizer work, so it is not the same underlying change as #40546." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 3, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-40755-2", + "nodes": [ + "pull_request:40755", + "pull_request:41224" + ], + "soft_pairs": [ + "pull_request:40755|pull_request:41224" + ], + "prepared_review_unit_hash": "68cabd18897882362474ecaa9f87a52d53472d0fdbed8f9215a7efe7d3eecc30", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4675, + "estimated_input_tokens": 1169, + "estimated_eval_tokens": 2594 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4675, + "estimated_input_tokens": 1169, + "estimated_eval_tokens": 2594 + }, + "overflow_policy": 
"truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The cluster contains two unrelated pull requests: one adds TimesFM covariate forecasting, the other adds DINOv3 image classification support. They target different models and different code paths, so this is not a duplicate cluster.", + "confidence": 0.99, + "canonical_issue_reason": null, + "canonical_pr_reason": "No canonical PR: the PRs address different models and implement unrelated features in separate code paths.", + "best_issue_reason": null, + "best_pr_reason": "Neither PR is a good global representative for duplicate triage because they do not share an underlying bug or change.", + "soft_edge_verdicts": [ + { + "left": "pull_request:40755", + "right": "pull_request:41224", + "accept": false, + "reason": "Reject: these PRs are about different model families and unrelated features, not the same concrete fix or change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 4, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "cache", + "cache_hit": true, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41356-2", + "nodes": [ + "pull_request:41356", + "pull_request:44339" + ], + "soft_pairs": [ + "pull_request:41356|pull_request:44339" + ], + "prepared_review_unit_hash": "aead1308c167b0bae6ff11bf9358340baa5e76f816618383418b505a10a9cd01", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6050, + "estimated_input_tokens": 1513, + "estimated_eval_tokens": 3282 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6050, + "estimated_input_tokens": 1513, + 
"estimated_eval_tokens": 3282 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two DEIMv2 pull requests target the same model family, but they are not the same change: PR 41356 is an early/basic model + image processor add, while PR 44339 is a much broader follow-up that adds loss code, auto-mappings, conversion tooling, modular files, and more integration work.", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44339 is the more complete and representative DEIMv2 implementation; PR 41356 looks like an earlier partial version of the same model bring-up.", + "best_issue_reason": null, + "best_pr_reason": "PR 44339 is the strongest single artifact for the cluster because it subsumes the basic model work and includes the broader integration needed for a full Transformers addition.", + "soft_edge_verdicts": [ + { + "left": "pull_request:41356", + "right": "pull_request:44339", + "accept": false, + "reason": "Same model area, but not the same concrete change: 44339 expands the implementation substantially and adds new code paths beyond the basic PR 41356. They look like related iterations, not mergeable duplicates." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 5, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41797-2", + "nodes": [ + "pull_request:41797", + "pull_request:45186" + ], + "soft_pairs": [ + "pull_request:41797|pull_request:45186" + ], + "prepared_review_unit_hash": "7d2d6f5f03e6dfee5016ae50ab821407ce59d5d70df41600e9d93598f6058ee4", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6723, + "estimated_input_tokens": 1681, + "estimated_eval_tokens": 3618 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6723, + "estimated_input_tokens": 1681, + "estimated_eval_tokens": 3618 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs are unrelated model-addition changes that happen to share the Transformers model-registration scaffolding. 
They add different models (DeepSeekOCR vs Isaac) with different codepaths, tests, and docs, so they should not be treated as duplicates or merged together.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 41797 is the better representative only in the sense that it has much more review activity and appears more mature, but it is not a duplicate of PR 45186.", + "best_issue_reason": null, + "best_pr_reason": "PR 41797 is the strongest standalone candidate because it has broader discussion/review activity and a larger, more established patch, but it still addresses a different model than PR 45186.", + "soft_edge_verdicts": [ + { + "left": "pull_request:41797", + "right": "pull_request:45186", + "accept": false, + "reason": "Reject: these are separate model integration PRs for different architectures (DeepSeekOCR vs Isaac). The overlapping filenames are expected boilerplate for adding new models, not evidence of the same underlying bug or change, and they would not plausibly be merged into one PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 6, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42975-2", + "nodes": [ + "pull_request:42975", + "pull_request:42976" + ], + "soft_pairs": [ + "pull_request:42975|pull_request:42976" + ], + "prepared_review_unit_hash": "b0576f1a30906b4cd8b3c91bf1e474fb7061594dd793fc2268d1963c6bf11f35", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5935, + "estimated_input_tokens": 1484, + "estimated_eval_tokens": 3224 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5935, + "estimated_input_tokens": 1484, + "estimated_eval_tokens": 3224 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two workflow-maintenance PRs overlap in GitHub Actions files, but they address different upgrade tracks: one for Node 24 compatibility across many actions, the other for latest-version bumps centered on docker/build-push-action. 
They are related, not duplicates.", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 42975 is the broader, more representative workflow-upgrade change and its title matches the main compatibility migration theme across many Actions updates.", + "best_issue_reason": null, + "best_pr_reason": "PR 42975 is the best representative of the cluster because it covers the wider set of workflow action compatibility upgrades rather than a narrower version-bump subset.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42975", + "right": "pull_request:42976", + "accept": false, + "reason": "Shared workflow files are not enough here: 42975 is a Node 24 compatibility sweep, while 42976 mainly bumps docker/build-push-action and a few related actions to latest versions. Different concrete change sets, so they should not be merged as one PR." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 7, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42978-7", + "nodes": [ + "pull_request:42978", + "pull_request:43448", + "pull_request:43451", + "pull_request:44601", + "pull_request:45490", + "pull_request:45626", + "pull_request:45630" + ], + "soft_pairs": [ + "pull_request:42978|pull_request:43448", + "pull_request:42978|pull_request:43451", + "pull_request:42978|pull_request:45490", + "pull_request:43448|pull_request:43451", + "pull_request:43448|pull_request:45490", + "pull_request:43451|pull_request:45490", + "pull_request:44601|pull_request:45626", + "pull_request:45490|pull_request:45626", + "pull_request:45490|pull_request:45630", + "pull_request:45626|pull_request:45630" + ], + 
"prepared_review_unit_hash": "d88b994a84a0c92e362c0389ab91a50c99476b591432faa3818b15d4fe94310c", + "component_budget": { + "node_count": 7, + "item_count": 7, + "soft_pair_count": 10, + "serialized_chars": 23983, + "estimated_input_tokens": 5996, + "estimated_eval_tokens": 12248 + }, + "budget": { + "node_count": 7, + "item_count": 7, + "soft_pair_count": 10, + "serialized_chars": 23983, + "estimated_input_tokens": 5996, + "estimated_eval_tokens": 12248 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All candidate soft edges are false positives. The PRs target different underlying changes: separate model additions (ViT NEPA, Molmo, Molmo2, CTSM, PP-FormulaNet, Kimi2-6) plus one distributed pipeline-parallel infrastructure PR. Shared docs/auto-registration files are expected for model integration and do not indicate duplication.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "No single canonical PR: these are distinct model-addition or infrastructure changes, not duplicate implementations of the same bug or feature.", + "best_issue_reason": null, + "best_pr_reason": "There is no true duplicate cluster to choose from. If forced to name a representative, PR 45626 is the most complete standalone model-support change, but it is still unrelated to the others.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42978", + "right": "pull_request:43448", + "accept": false, + "reason": "Different model adds: ViT NEPA vs Molmo. Shared docs/auto files are routine integration overlap, not the same change." + }, + { + "left": "pull_request:42978", + "right": "pull_request:43451", + "accept": false, + "reason": "Different model adds: ViT NEPA vs Molmo2. No shared bug or concrete code path." + }, + { + "left": "pull_request:42978", + "right": "pull_request:45490", + "accept": false, + "reason": "Different models: ViT NEPA vs CTSM. 
Similar registration/docs work is not duplicate evidence." + }, + { + "left": "pull_request:43448", + "right": "pull_request:43451", + "accept": false, + "reason": "Molmo and Molmo2 are separate models with distinct implementations; not mergeable as one duplicate PR." + }, + { + "left": "pull_request:43448", + "right": "pull_request:45490", + "accept": false, + "reason": "Molmo vs CTSM are unrelated model additions despite both touching auto/docs plumbing." + }, + { + "left": "pull_request:43451", + "right": "pull_request:45490", + "accept": false, + "reason": "Molmo2 and CTSM are different model integrations, not the same underlying feature." + }, + { + "left": "pull_request:44601", + "right": "pull_request:45626", + "accept": false, + "reason": "One is core pipeline-parallel loading infrastructure; the other adds PP-FormulaNet model support. Different code paths and goals." + }, + { + "left": "pull_request:45490", + "right": "pull_request:45626", + "accept": false, + "reason": "CTSM model support and PP-FormulaNet model support are distinct additions, not one concrete bug fix." + }, + { + "left": "pull_request:45490", + "right": "pull_request:45630", + "accept": false, + "reason": "CTSM vs Kimi2-6 are separate model PRs; overlap is only in generic model registration/docs files." + }, + { + "left": "pull_request:45626", + "right": "pull_request:45630", + "accept": false, + "reason": "PP-FormulaNet and Kimi2-6 are different model support PRs with no shared underlying change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 8, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43291-3", + "nodes": [ + "pull_request:43291", + "pull_request:43665", + "pull_request:44815" + ], + "soft_pairs": [ + "pull_request:43291|pull_request:43665", + "pull_request:43291|pull_request:44815", + "pull_request:43665|pull_request:44815" + ], + "prepared_review_unit_hash": "f3112d35e81b028bd9c17fc08841dda22d22618b79d8553240d81e6c0700b526", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7093, + "estimated_input_tokens": 1774, + "estimated_eval_tokens": 3804 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7093, + "estimated_input_tokens": 1774, + "estimated_eval_tokens": 3804 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The three PRs are not duplicates of one another: two are test-only/whitespace-style changes around unrelated models, while one is a substantive FP8 dequant/loading fix. 
No soft pair should be merged.", + "confidence": 0.92, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #44815 is the strongest canonical candidate because it contains the only clearly substantive code-path fix, spanning model loading and FP8 dequantization logic, rather than isolated test edits.", + "best_issue_reason": null, + "best_pr_reason": "PR #44815 is the best standalone PR in the cluster: it addresses a concrete functional bug in dequant/loading behavior and has broader, coherent code changes compared with the other test-focused PRs.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43291", + "right": "pull_request:43665", + "accept": false, + "reason": "Both are test-oriented and touch different model areas (Whisper vs CLIP/ViT); they do not appear to fix the same underlying bug or change." + }, + { + "left": "pull_request:43291", + "right": "pull_request:44815", + "accept": false, + "reason": "Whisper tokenizer/test changes are unrelated to the FP8 dequant/model-loading fix; no shared concrete bug path." + }, + { + "left": "pull_request:43665", + "right": "pull_request:44815", + "accept": false, + "reason": "The PRs target unrelated subsystems and concerns; one is test-only, the other is a functional dequantization fix, so they should not be merged as duplicates." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 9, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43488-3", + "nodes": [ + "pull_request:43488", + "pull_request:43532", + "pull_request:45476" + ], + "soft_pairs": [ + "pull_request:43488|pull_request:43532", + "pull_request:43488|pull_request:45476", + "pull_request:43532|pull_request:45476" + ], + "prepared_review_unit_hash": "9d912f16c57b415bd858a46bfde2461fa451986bb3978c8e1cc2e9b4309aa0b4", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7469, + "estimated_input_tokens": 1868, + "estimated_eval_tokens": 3992 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7469, + "estimated_input_tokens": 1868, + "estimated_eval_tokens": 3992 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are three unrelated open PRs with synthetic/test CI changes and incidental overlap in a Vit test file; they do not look like duplicate fixes or mergeable into one change.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #43532 is the best representative because it has the clearest, most substantive CI/workflow change set and is less clearly artificial than the tiny setup/version tweak in #43488 or the mixed workflow/code edits in #45476.", + "best_issue_reason": null, + "best_pr_reason": "#43532 is the strongest canonical PR candidate for the cluster since it reflects a coherent workflow-oriented change, whereas the other 
PRs are narrower or mix unrelated edits.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43488", + "right": "pull_request:43532", + "accept": false, + "reason": "One is a setup/version/dependency tweak; the other rewrites CI comment/scheduled workflow behavior. They do not fix the same underlying bug or change the same code path." + }, + { + "left": "pull_request:43488", + "right": "pull_request:45476", + "accept": false, + "reason": "#43488 is a packaging/version test PR, while #45476 adds a PR CI workflow and touches tokenizer/test files. The overlap is superficial, not the same change." + }, + { + "left": "pull_request:43532", + "right": "pull_request:45476", + "accept": false, + "reason": "Both touch CI/test-related files, but the concrete changes differ: #43532 changes comment/scheduled workflows and notification/testing utilities, while #45476 adds a PR CI caller and unrelated tokenizer/test edits. Not a single duplicate fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 10, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43612-2", + "nodes": [ + "pull_request:43612", + "pull_request:43613" + ], + "soft_pairs": [ + "pull_request:43612|pull_request:43613" + ], + "prepared_review_unit_hash": "128b1f59a03cda1aafe9a5590fbcd5156207406405d6f5ec53023c26093ae84a", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6083, + "estimated_input_tokens": 1521, + "estimated_eval_tokens": 3298 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6083, + "estimated_input_tokens": 1521, + 
"estimated_eval_tokens": 3298 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are two separate PRs for different segmentation pipelines: one adds Promptable Concept Segmentation, the other adds Promptable Visual Segmentation. They share scaffolding/docs patterns, but the concrete pipeline/model/test changes are distinct, so they should not be deduplicated.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "pull_request:43612", + "right": "pull_request:43613", + "accept": false, + "reason": "Different features and code paths: concept segmentation vs visual segmentation, with different pipeline modules, model docs, and tests. Shared docs/registry updates are generic scaffolding, not evidence of the same underlying change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 11, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43651-2", + "nodes": [ + "pull_request:43651", + "pull_request:43663" + ], + "soft_pairs": [ + "pull_request:43651|pull_request:43663" + ], + "prepared_review_unit_hash": "9960af740ca7aeae75a7dc025a3d6f3cada739dd5395791417ba764d0c5dc59f", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4291, + "estimated_input_tokens": 1073, + "estimated_eval_tokens": 2402 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4291, + 
"estimated_input_tokens": 1073, + "estimated_eval_tokens": 2402 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two independent Trainer PRs touch the same file but address different code paths: gradient-accumulation loss scaling vs. signature/column filtering. They are not duplicates and should not be merged together.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #43651 is the more concrete bugfix, directly changing training_step loss normalization and adding the helper that gates GA scaling behavior.", + "best_issue_reason": null, + "best_pr_reason": "PR #43651 is the best representative of the cluster because it has the clearest standalone behavior change and a tight, testable fix scope.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43651", + "right": "pull_request:43663", + "accept": false, + "reason": "Different underlying changes: #43651 alters gradient accumulation loss scaling in training_step, while #43663 refactors signature-column filtering for model inputs. Shared file alone is not enough for duplicate status." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 12, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43656-3", + "nodes": [ + "pull_request:43656", + "pull_request:43836", + "pull_request:43842" + ], + "soft_pairs": [ + "pull_request:43656|pull_request:43842", + "pull_request:43836|pull_request:43842" + ], + "prepared_review_unit_hash": "3227628139088fb21b84d39e92233a5c46c25618ba27d2b45103a70f11274779", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 5992, + "estimated_input_tokens": 1498, + "estimated_eval_tokens": 3252 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 5992, + "estimated_input_tokens": 1498, + "estimated_eval_tokens": 3252 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "PR #43842 remains the best representative: it is the most focused fix for the `TypeAdapter`/annotation runtime failure in `src/transformers/cli/serve.py`. PR #43836 is a closely related duplicate on the same issue target. 
PR #43656 touches the same file but addresses a different typing/future-annotations path and is not clearly the same bugfix.", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #43842 is the most narrowly targeted fix for the concrete CLI `TypeAdapter` NameError/annotation-evaluation problem, with the clearest issue framing and smallest change set.", + "best_issue_reason": null, + "best_pr_reason": "PR #43842 is the best representative because it directly fixes the specific `TypeAdapter` runtime annotation failure and is the most precise, low-collateral change.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43656", + "right": "pull_request:43842", + "accept": false, + "reason": "They share `src/transformers/cli/serve.py`, but the diffs point to different annotation issues: #43656 is about `TimedModel` typing/future-annotations behavior, while #43842 is specifically about `TypeAdapter` under `TYPE_CHECKING`. Shared file and similarity alone are not enough to treat them as the same fix." + }, + { + "left": "pull_request:43836", + "right": "pull_request:43842", + "accept": true, + "reason": "Both PRs target the same explicit issue (`43824`) and the same `TypeAdapter` annotation/name-resolution problem in `src/transformers/cli/serve.py`, so they look like the same underlying bugfix." + } + ] + }, + "evaluator_result": { + "accept": false, + "feedback": "The summary is mostly grounded, but the soft-edge verdict for `pull_request:43656` vs `pull_request:43842` is too strong. The packet only shows shared filename and a similarity signal; it does not show a shared issue target or a clearly identical bugfix. The two diffs also touch different annotation paths: #43656 changes `TimedModel` typing/future-annotations behavior, while #43842 narrows in on `TypeAdapter` under `TYPE_CHECKING`. That is not enough evidence to confidently say they are the same underlying fix or obviously mergeable into one PR. 
The `43836` vs `43842` acceptance is better supported because both target the same issue and the same `TypeAdapter` annotation problem." + }, + "evaluator_used": true, + "retried": true, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 13, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "cache", + "cache_hit": true, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43779-2", + "nodes": [ + "pull_request:43779", + "pull_request:43816" + ], + "soft_pairs": [ + "pull_request:43779|pull_request:43816" + ], + "prepared_review_unit_hash": "e7c674adcc031aa8883109a5dfc678b4407fb20b10764302092e384d031ba83a", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4362, + "estimated_input_tokens": 1091, + "estimated_eval_tokens": 2438 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4362, + "estimated_input_tokens": 1091, + "estimated_eval_tokens": 2438 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both pull requests appear to implement the same SwanLab integration change: adding run id/resume support in `integration_utils.py` for issue #43698, with one variant also expanding the docs. 
They are strong duplicate PR candidates.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #43816 is the cleaner representative: it carries the same SwanLab id/resume support plus the accompanying documentation updates, making it the more complete version of the shared change.", + "best_issue_reason": null, + "best_pr_reason": "PR #43816 is the best global representative because it appears to include the full feature change and docs for SwanLab run id/resume support, while #43779 is the narrower implementation-focused variant.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43779", + "right": "pull_request:43816", + "accept": true, + "reason": "Same underlying SwanLab callback enhancement for id/resume support in the same file and same issue target; the doc differences do not change the core code-path change, so these are duplicate PRs." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet: both PRs target issue #43698, touch the same file, and the previews indicate the same SwanLab id/resume support with #43816 adding docs. The duplicate/mergeable verdict is conservative enough given the shared target and overlapping code change." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 14, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43996", + "pull_request:43997", + "pull_request:44007", + "pull_request:44010", + "pull_request:44013", + "pull_request:44017", + "pull_request:44018", + "pull_request:44019", + "pull_request:44044", + "pull_request:44056", + "pull_request:44059", + "pull_request:44066", + "pull_request:44068", + "pull_request:44071", + "pull_request:44072", + "pull_request:44073", + "pull_request:44074", + "pull_request:44076", + "pull_request:44085", + "pull_request:44086", + "pull_request:44098", + "pull_request:44101", + "pull_request:44116", + "pull_request:44129", + "pull_request:44154", + "pull_request:44161", + "pull_request:44722" + ], + "soft_pairs": [ + "pull_request:43996|pull_request:44085", + "pull_request:43996|pull_request:44044", + "pull_request:44066|pull_request:44085", + "pull_request:44007|pull_request:44072", + "pull_request:44072|pull_request:44722", + "pull_request:44066|pull_request:44072", + "pull_request:44013|pull_request:44044", + "pull_request:44066|pull_request:44086", + "pull_request:44018|pull_request:44068", + "pull_request:44066|pull_request:44071", + "pull_request:44066|pull_request:44068", + "pull_request:44018|pull_request:44066", + "pull_request:44019|pull_request:44722", + "pull_request:44086|pull_request:44722", + "pull_request:44019|pull_request:44085", + "pull_request:44018|pull_request:44019", + "pull_request:44019|pull_request:44071", + "pull_request:44068|pull_request:44116", + "pull_request:44068|pull_request:44722", + "pull_request:44018|pull_request:44116", + 
"pull_request:44071|pull_request:44722", + "pull_request:44018|pull_request:44086", + "pull_request:44018|pull_request:44085", + "pull_request:44068|pull_request:44086", + "pull_request:44071|pull_request:44085", + "pull_request:44059|pull_request:44086", + "pull_request:44085|pull_request:44086", + "pull_request:44059|pull_request:44722", + "pull_request:44098|pull_request:44101", + "pull_request:44018|pull_request:44071", + "pull_request:44017|pull_request:44722", + "pull_request:44017|pull_request:44161", + "pull_request:44017|pull_request:44073", + "pull_request:44068|pull_request:44071", + "pull_request:44071|pull_request:44086", + "pull_request:43997|pull_request:44073", + "pull_request:44017|pull_request:44071", + "pull_request:44059|pull_request:44071", + "pull_request:44073|pull_request:44101", + "pull_request:44059|pull_request:44068", + "pull_request:44018|pull_request:44059", + "pull_request:44056|pull_request:44101", + "pull_request:44073|pull_request:44161", + "pull_request:44101|pull_request:44161", + "pull_request:44017|pull_request:44018", + "pull_request:43997|pull_request:44056", + "pull_request:44071|pull_request:44076", + "pull_request:44074|pull_request:44101", + "pull_request:43997|pull_request:44018", + "pull_request:44056|pull_request:44161", + "pull_request:43997|pull_request:44161", + "pull_request:44074|pull_request:44161", + "pull_request:44018|pull_request:44161", + "pull_request:43997|pull_request:44071", + "pull_request:44010|pull_request:44071", + "pull_request:44010|pull_request:44073", + "pull_request:44018|pull_request:44129", + "pull_request:43997|pull_request:44074", + "pull_request:43997|pull_request:44129", + "pull_request:44010|pull_request:44018", + "pull_request:44071|pull_request:44129", + "pull_request:43996|pull_request:44161", + "pull_request:43996|pull_request:44017", + "pull_request:44010|pull_request:44044", + "pull_request:44044|pull_request:44074", + "pull_request:44066|pull_request:44722", + 
"pull_request:44085|pull_request:44722", + "pull_request:44085|pull_request:44129", + "pull_request:43996|pull_request:44007", + "pull_request:43996|pull_request:44129", + "pull_request:44044|pull_request:44129", + "pull_request:44044|pull_request:44154" + ], + "prepared_review_unit_hash": "5d93359c60f1b14a49fcf682546f83e8987869c8ffed7a902c1ff5e0cbdeee6b", + "component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 78, + "serialized_chars": 84894, + "estimated_input_tokens": 21224, + "estimated_eval_tokens": 42704 + }, + "budget": { + "node_count": 27, + "item_count": 27, + "soft_pair_count": 72, + "serialized_chars": 75091, + "estimated_input_tokens": 18773, + "estimated_eval_tokens": 37802 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a batch of output-tracing refactors, but most PRs are model-specific and not true duplicates across models. The real duplicates are the same-model GPT-J/CodeGen and GPT-Neo changes; most cross-model soft edges should be rejected.", + "confidence": 0.86, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44722 is the broadest and clearest instance of the GPT-J/CodeGen standardized output-tracing refactor, with the same issue target and the most complete diff in that code path.", + "best_issue_reason": null, + "best_pr_reason": "PR 44722 is the best representative PR for this cluster because it most directly captures the canonical GPT-J/CodeGen output-tracing change and subsumes the narrower GPT-J-only variants.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43996", + "right": "pull_request:44085", + "accept": false, + "reason": "Different model families and code paths; matching the general refactor theme is not enough to merge them." 
+ }, + { + "left": "pull_request:43996", + "right": "pull_request:44044", + "accept": false, + "reason": "Different model families and code paths; same tracking-style refactor is too broad to count as a duplicate." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44085", + "accept": true, + "reason": "Same GPT-J output-tracing code path; 44085 looks like a narrower slice of the same refactor family." + }, + { + "left": "pull_request:44007", + "right": "pull_request:44072", + "accept": false, + "reason": "Different model families and code paths; they only share the standardized output-tracing theme." + }, + { + "left": "pull_request:44072", + "right": "pull_request:44722", + "accept": false, + "reason": "Different model families and code paths; both are output-tracing refactors, but not the same concrete change." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44072", + "accept": false, + "reason": "Different model families and code paths; no shared concrete bug/change." + }, + { + "left": "pull_request:44013", + "right": "pull_request:44044", + "accept": false, + "reason": "Different model families and code paths; same broad refactor pattern only." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44086", + "accept": false, + "reason": "Different model families and code paths; these are unrelated standardized-output refactors." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44068", + "accept": true, + "reason": "Same GPT-Neo output-tracing refactor in the same model file; these look like duplicate/overlapping revisions." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44071", + "accept": false, + "reason": "Different model families and code paths; not the same concrete change." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44068", + "accept": false, + "reason": "GPT-J/CodeGen versus GPT-Neo; same refactor pattern but not the same code path." 
+ }, + { + "left": "pull_request:44018", + "right": "pull_request:44066", + "accept": false, + "reason": "GPT-Neo versus GPT-J/CodeGen; different concrete changes despite similar output-tracing goals." + }, + { + "left": "pull_request:44019", + "right": "pull_request:44722", + "accept": false, + "reason": "ResNet versus GPT-J/CodeGen; same meta-refactor idea, but not the same underlying change." + }, + { + "left": "pull_request:44086", + "right": "pull_request:44722", + "accept": false, + "reason": "Different model families and code paths; not a duplicate change." + }, + { + "left": "pull_request:44019", + "right": "pull_request:44085", + "accept": false, + "reason": "ResNet versus GPT-J; unrelated concrete code paths." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44019", + "accept": false, + "reason": "GPT-Neo and ResNet are different model code paths; same refactor style is insufficient." + }, + { + "left": "pull_request:44019", + "right": "pull_request:44071", + "accept": false, + "reason": "Different model families and code paths." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44116", + "accept": false, + "reason": "GPT-Neo versus Flaubert; not the same underlying change." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44722", + "accept": false, + "reason": "GPT-Neo versus GPT-J/CodeGen; different concrete code paths." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44116", + "accept": false, + "reason": "Different model families and code paths." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44085", + "accept": false, + "reason": "MPT versus GPT-J; not the same change." + }, + { + "left": "pull_request:44085", + "right": "pull_request:44086", + "accept": false, + "reason": "Different model families and code paths; only the standardized output-tracing theme matches." 
+ }, + { + "left": "pull_request:44059", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT-2 and MGP-STR are unrelated concrete code paths." + }, + { + "left": "pull_request:44059", + "right": "pull_request:44722", + "accept": false, + "reason": "GPT-2 versus GPT-J/CodeGen; same broad refactor family, not the same change." + }, + { + "left": "pull_request:44098", + "right": "pull_request:44101", + "accept": false, + "reason": "ViLT versus Flaubert/XLM; different model code paths." + }, + { + "left": "pull_request:44073", + "right": "pull_request:44161", + "accept": false, + "reason": "VisualBert versus LongT5; different model families and code paths." + }, + { + "left": "pull_request:44101", + "right": "pull_request:44161", + "accept": false, + "reason": "Flaubert/XLM versus LongT5; same refactor pattern only." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44018", + "accept": false, + "reason": "Segformer versus GPT-Neo; different concrete code paths." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44056", + "accept": false, + "reason": "RegNet versus MPNet; not the same underlying bug/change." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44161", + "accept": false, + "reason": "MPNet versus LongT5; different model code paths." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44161", + "accept": false, + "reason": "RegNet versus LongT5; unrelated concrete changes." + }, + { + "left": "pull_request:44074", + "right": "pull_request:44101", + "accept": false, + "reason": "TextNet versus Flaubert/XLM; different model code paths." + }, + { + "left": "pull_request:44059", + "right": "pull_request:44071", + "accept": false, + "reason": "GPT-2 versus MPT; not the same concrete change." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44059", + "accept": false, + "reason": "GPT-Neo versus GPT-2; same broad theme, different code paths." 
+ }, + { + "left": "pull_request:44056", + "right": "pull_request:44101", + "accept": false, + "reason": "MPNet versus Flaubert/XLM; different model families." + }, + { + "left": "pull_request:44073", + "right": "pull_request:44161", + "accept": false, + "reason": "VisualBert versus LongT5; unrelated code paths." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44161", + "accept": false, + "reason": "Segformer versus LongT5; not the same change." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44073", + "accept": false, + "reason": "Segformer versus VisualBert; different model code paths." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44071", + "accept": false, + "reason": "GPT-Neo versus MPT; different underlying changes." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT-Neo versus MGP-STR; not a duplicate." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44722", + "accept": false, + "reason": "MPT versus GPT-J/CodeGen; same refactor family only." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44722", + "accept": false, + "reason": "GPT-Neo versus GPT-J/CodeGen; different concrete code paths." + }, + { + "left": "pull_request:44059", + "right": "pull_request:44068", + "accept": false, + "reason": "GPT-2 versus GPT-Neo; different model code paths." + }, + { + "left": "pull_request:44059", + "right": "pull_request:44071", + "accept": false, + "reason": "GPT-2 versus MPT; unrelated changes." + }, + { + "left": "pull_request:44074", + "right": "pull_request:44161", + "accept": false, + "reason": "TextNet versus LongT5; different concrete code paths." + }, + { + "left": "pull_request:44073", + "right": "pull_request:44161", + "accept": false, + "reason": "VisualBert versus LongT5; not the same underlying change." 
+ }, + { + "left": "pull_request:44101", + "right": "pull_request:44161", + "accept": false, + "reason": "Flaubert/XLM versus LongT5; same broad refactor theme only." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44018", + "accept": false, + "reason": "Segformer versus GPT-Neo; different model code paths." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44056", + "accept": false, + "reason": "RegNet versus MPNet; unrelated concrete changes." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44076", + "accept": false, + "reason": "MPT versus ImageGPT; different model families and code paths." + }, + { + "left": "pull_request:44074", + "right": "pull_request:44101", + "accept": false, + "reason": "TextNet versus Flaubert/XLM; not a duplicate change." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44161", + "accept": false, + "reason": "RegNet versus LongT5; different underlying changes." + }, + { + "left": "pull_request:44074", + "right": "pull_request:44161", + "accept": false, + "reason": "TextNet versus LongT5; different code paths." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44071", + "accept": false, + "reason": "RegNet versus MPT; unrelated concrete changes." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44071", + "accept": false, + "reason": "SqueezeBert versus MPT; different model code paths." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44073", + "accept": false, + "reason": "SqueezeBert versus VisualBert; different concrete changes." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44129", + "accept": false, + "reason": "GPT-Neo versus SpeechT5; same refactor family only." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44161", + "accept": false, + "reason": "MPNet versus LongT5; not the same underlying change." 
+ }, + { + "left": "pull_request:43996", + "right": "pull_request:44017", + "accept": false, + "reason": "CVT/FNet versus Segformer; different model families and code paths." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44129", + "accept": false, + "reason": "CVT/FNet versus SpeechT5; unrelated concrete changes." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44044", + "accept": false, + "reason": "SqueezeBert versus DeBERTa v2; different model code paths." + }, + { + "left": "pull_request:44044", + "right": "pull_request:44074", + "accept": false, + "reason": "DeBERTa v2 versus TextNet; different underlying changes." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44722", + "accept": true, + "reason": "Same GPT-J/CodeGen output-tracing refactor, same files and issue target; 44722 is the broader, cleaner revision." + }, + { + "left": "pull_request:44085", + "right": "pull_request:44722", + "accept": true, + "reason": "Both are GPT-J output-tracing refactors on the same code path; 44722 subsumes the narrower GPT-J-only variant." + }, + { + "left": "pull_request:44085", + "right": "pull_request:44129", + "accept": false, + "reason": "GPT-J versus SpeechT5; same meta-pattern, but different concrete code paths." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44007", + "accept": false, + "reason": "CVT/FNet versus RegNet/ResNet/RT-DETR; different model families and code paths." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44129", + "accept": false, + "reason": "CVT/FNet versus SpeechT5; not the same underlying change." + }, + { + "left": "pull_request:44044", + "right": "pull_request:44129", + "accept": false, + "reason": "DeBERTa v2 versus SpeechT5; different model code paths." + }, + { + "left": "pull_request:44044", + "right": "pull_request:44154", + "accept": false, + "reason": "DeBERTa v2 versus Vits; unrelated concrete changes." 
+ } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet, and the accepted soft-edge pairs are supported by shared model/file paths and/or clear subsumption within the same bugfix. The cross-model pairs are rejected conservatively, which fits the evidence." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 14, + "component_count": 24, + "review_unit_index": 2, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43997", + "pull_request:44010", + "pull_request:44024", + "pull_request:44056", + "pull_request:44073", + "pull_request:44074", + "pull_request:44076", + "pull_request:44129" + ], + "soft_pairs": [ + "pull_request:44024|pull_request:44076", + "pull_request:44073|pull_request:44074", + "pull_request:44056|pull_request:44073", + "pull_request:44056|pull_request:44074", + "pull_request:43997|pull_request:44010", + "pull_request:44010|pull_request:44129" + ], + "prepared_review_unit_hash": "84e4b13f017dbba84561f4bc07ea4ac80b0f152a249af3fffd9a02080a83fac4", + "component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 78, + "serialized_chars": 84894, + "estimated_input_tokens": 21224, + "estimated_eval_tokens": 42704 + }, + "budget": { + "node_count": 8, + "item_count": 8, + "soft_pair_count": 6, + "serialized_chars": 17124, + "estimated_input_tokens": 4281, + "estimated_eval_tokens": 8818 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a collection of separate per-model PRs that all refactor output tracing/collection to the new standardized 
capture_outputs/can_return_tuple pattern. They share a theme, but they are not the same concrete change or code path.", + "confidence": 0.93, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44073 is the clearest representative of the shared refactor theme: it explicitly describes standardized output tracing and shows the core capture_outputs/can_return_tuple migration pattern without being tied to an unusual or typoed title.", + "best_issue_reason": null, + "best_pr_reason": "PR 44073 is the best overall representative of the cluster because it most directly matches the common change pattern across the set and is written as a generic output-tracing refactor rather than a model-specific one-off.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44024", + "right": "pull_request:44076", + "accept": false, + "reason": "Both are output-tracing refactors, but they touch different model implementations (FocalNet vs ImageGPT) and different code paths, so they are not the same underlying change." + }, + { + "left": "pull_request:44073", + "right": "pull_request:44074", + "accept": false, + "reason": "These are separate model-specific migrations for VisualBert and TextNet; same refactor style, but not one shared bug or change." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44073", + "accept": false, + "reason": "MPNet and VisualBert are different model code paths; this is a repeated pattern, not a single mergeable PR." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44074", + "accept": false, + "reason": "Different models and different forward/output-tracing implementations; too broad to treat as the same change." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44010", + "accept": false, + "reason": "RegNet and SqueezeBert are unrelated model files, so despite similar standardized-output wording they are not duplicates." 
+ }, + { + "left": "pull_request:44010", + "right": "pull_request:44129", + "accept": false, + "reason": "SqueezeBert and SpeechT5 are distinct model-specific refactors; the shared theme is too broad to merge as one PR." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 15, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43998-13", + "nodes": [ + "pull_request:43998", + "pull_request:43999", + "pull_request:44000", + "pull_request:44001", + "pull_request:44002", + "pull_request:44003", + "pull_request:44004", + "pull_request:44025", + "pull_request:44026", + "pull_request:44027", + "pull_request:44028", + "pull_request:44029", + "pull_request:44030" + ], + "soft_pairs": [ + "pull_request:43998|pull_request:44002", + "pull_request:43998|pull_request:44000", + "pull_request:43998|pull_request:44001", + "pull_request:43998|pull_request:44003", + "pull_request:43998|pull_request:44029", + "pull_request:43999|pull_request:44001", + "pull_request:43999|pull_request:44002", + "pull_request:43999|pull_request:44025", + "pull_request:43999|pull_request:44026", + "pull_request:43999|pull_request:44027", + "pull_request:44000|pull_request:44001", + "pull_request:44000|pull_request:44002", + "pull_request:44000|pull_request:44025", + "pull_request:44000|pull_request:44026", + "pull_request:44001|pull_request:44002", + "pull_request:44001|pull_request:44025", + "pull_request:44001|pull_request:44026", + "pull_request:44002|pull_request:44029", + "pull_request:44002|pull_request:44025", + "pull_request:44001|pull_request:44003", + "pull_request:44002|pull_request:44003", + "pull_request:44000|pull_request:44003", + 
"pull_request:44003|pull_request:44028", + "pull_request:44003|pull_request:44004", + "pull_request:43998|pull_request:44004", + "pull_request:44001|pull_request:44004", + "pull_request:44002|pull_request:44004", + "pull_request:44000|pull_request:44004", + "pull_request:44025|pull_request:44026", + "pull_request:44025|pull_request:44027", + "pull_request:44025|pull_request:44028", + "pull_request:44025|pull_request:44029", + "pull_request:44025|pull_request:44030", + "pull_request:44026|pull_request:44029", + "pull_request:44026|pull_request:44027", + "pull_request:44026|pull_request:44028", + "pull_request:44026|pull_request:44030", + "pull_request:44027|pull_request:44028", + "pull_request:44027|pull_request:44029", + "pull_request:44027|pull_request:44030", + "pull_request:44028|pull_request:44029", + "pull_request:44028|pull_request:44030", + "pull_request:44029|pull_request:44030" + ], + "prepared_review_unit_hash": "28f7a8c43129343984a2a0a4e72541caf876d52f1591591a8f7dac334cb90396", + "component_budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 43, + "serialized_chars": 41331, + "estimated_input_tokens": 10333, + "estimated_eval_tokens": 20922 + }, + "budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 43, + "serialized_chars": 41331, + "estimated_input_tokens": 10333, + "estimated_eval_tokens": 20922 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "This cluster is a batch of independent model-specific output-tracing refactors, not true duplicates. The PRs share a theme and terminology, but they touch different model code paths and should not be merged together as one change.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "No issue artifacts are present. 
For a PR representative, #44003 is the strongest canonical anchor because it has the broadest refactor surface (mamba plus falcon_mamba) and introduces the reusable output-capturing plumbing.", + "best_issue_reason": null, + "best_pr_reason": "#44003 is the most central and substantial output-tracing refactor in the set, so it best represents the cluster overall, even though the other PRs are not duplicates of it.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43998", + "right": "pull_request:44002", + "accept": false, + "reason": "Different model implementations (timm_backbone vs upernet); same refactor theme, not the same underlying change." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44000", + "accept": false, + "reason": "Different code paths and architectures; both are output-tracing cleanups, but not one concrete bug." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44001", + "accept": false, + "reason": "Separate model families with different forward paths; not a mergeable duplicate." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44003", + "accept": false, + "reason": "One is timm_backbone; the other is mamba/falcon_mamba plumbing. Similar motif, different change sets." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44029", + "accept": false, + "reason": "Different model internals and forward signatures; not the same fix." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44001", + "accept": false, + "reason": "mobilenet_v1 and univnet are unrelated model paths; shared refactor language is not enough." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44002", + "accept": false, + "reason": "Independent model refactors in different subsystems; not the same bug or change." 
+ }, + { + "left": "pull_request:43999", + "right": "pull_request:44025", + "accept": false, + "reason": "MobileNetV1 and Depth Anything are separate implementations with different output handling." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44026", + "accept": false, + "reason": "Different architectures and wrappers; similar cleanup, not duplicate work." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44027", + "accept": false, + "reason": "mobilenet_v1 vs speech_encoder_decoder are distinct code paths and model behaviors." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44001", + "accept": false, + "reason": "vision_text_dual_encoder and univnet are unrelated models; same refactor pattern only." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44002", + "accept": false, + "reason": "Different model families; no evidence of a single shared defect." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44025", + "accept": false, + "reason": "Different files, different forward paths, and different model tasks; not one underlying change." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44026", + "accept": false, + "reason": "Vision-text dual encoder vs vision encoder-decoder: analogous refactors, but not the same concrete bug." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44002", + "accept": false, + "reason": "UnivNet and UperNet are unrelated implementations; too broad to be a duplicate." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44025", + "accept": false, + "reason": "Different model code and output semantics; only the high-level refactor theme overlaps." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44026", + "accept": false, + "reason": "Separate encoder/decoder and audio model paths; not mergeable as one PR." 
+ }, + { + "left": "pull_request:44002", + "right": "pull_request:44029", + "accept": false, + "reason": "UperNet and RWKV are different subsystems with unrelated forward logic." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44025", + "accept": false, + "reason": "Different model families; same output-tracing cleanup pattern, but not a duplicate." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44003", + "accept": false, + "reason": "UnivNet vs mamba/falcon_mamba touches different code paths and model internals." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44003", + "accept": false, + "reason": "UperNet and mamba/falcon_mamba are unrelated model implementations." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44003", + "accept": false, + "reason": "Different architecture and files; the similarity is only that both refactor output handling." + }, + { + "left": "pull_request:44003", + "right": "pull_request:44028", + "accept": false, + "reason": "mamba/falcon_mamba vs superpoint are distinct model paths; not the same underlying bug." + }, + { + "left": "pull_request:44003", + "right": "pull_request:44004", + "accept": false, + "reason": "CodeGen and mamba/falcon_mamba have different forward contracts and output plumbing." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44004", + "accept": false, + "reason": "timm_backbone and CodeGen are unrelated code paths; do not collapse into one duplicate." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44004", + "accept": false, + "reason": "UnivNet and CodeGen are different models with different output semantics." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44004", + "accept": false, + "reason": "UperNet and CodeGen are not the same change, only the same refactor style." 
+ }, + { + "left": "pull_request:44025", + "right": "pull_request:44026", + "accept": false, + "reason": "Both are encoder-decoder wrapper refactors, but they target different model families and code paths." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44027", + "accept": false, + "reason": "Depth Anything and Speech Encoder-Decoder are separate implementations; not one concrete fix." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44028", + "accept": false, + "reason": "Different models and output behavior; same tracing refactor theme only." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44029", + "accept": false, + "reason": "Depth Anything and RWKV are unrelated subsystems." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44030", + "accept": false, + "reason": "Depth Anything and DPR are different model stacks; not a duplicate change." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44027", + "accept": false, + "reason": "Vision encoder-decoder vs speech encoder-decoder are analogous wrappers, but they are distinct code paths." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44028", + "accept": false, + "reason": "Different model implementations; no shared concrete bug." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44029", + "accept": false, + "reason": "Vision encoder-decoder and RWKV are unrelated." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44030", + "accept": false, + "reason": "Different model families and forward logic." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44028", + "accept": false, + "reason": "Speech encoder-decoder and SuperPoint are separate model paths; similar refactor language is insufficient." 
+ }, + { + "left": "pull_request:44027", + "right": "pull_request:44029", + "accept": false, + "reason": "Different architectures and output contracts; not mergeable as one PR." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44030", + "accept": false, + "reason": "Speech encoder-decoder vs DPR are unrelated changes." + }, + { + "left": "pull_request:44028", + "right": "pull_request:44029", + "accept": false, + "reason": "SuperPoint and RWKV are distinct code paths with unrelated fixes." + }, + { + "left": "pull_request:44028", + "right": "pull_request:44030", + "accept": false, + "reason": "SuperPoint and DPR are separate model implementations; not the same underlying issue." + }, + { + "left": "pull_request:44029", + "right": "pull_request:44030", + "accept": false, + "reason": "RWKV and DPR are different model families and cannot plausibly be one merged fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 16, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44662-3", + "nodes": [ + "pull_request:44662", + "pull_request:45075", + "pull_request:45401" + ], + "soft_pairs": [ + "pull_request:44662|pull_request:45401", + "pull_request:44662|pull_request:45075", + "pull_request:45075|pull_request:45401" + ], + "prepared_review_unit_hash": "1b8dbc7f0edd04501298069baadfa2b5f0d6289255aee980e140aa758a1d609e", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 10912, + "estimated_input_tokens": 2728, + "estimated_eval_tokens": 5712 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 10912, 
+ "estimated_input_tokens": 2728, + "estimated_eval_tokens": 5712 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All three pull requests are separate model-introduction PRs (PenguinVL, DeepSeek-OCR-2, Voxtral TTS). They share Transformers boilerplate and auto-registration changes, but they do not fix the same bug or implement the same model, so none should be clustered as duplicates.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "No true duplicate PR exists in this set. If a single representative is needed, PR #45075 is the strongest standalone candidate because it is the most mature non-draft model-addition PR with the most review activity and a complete implementation scope.", + "best_issue_reason": null, + "best_pr_reason": "PR #45075 is the best standalone representative: it is non-draft, has substantial review activity, and appears to contain a full end-to-end model addition with docs, tests, conversion, and auto-mapping updates.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44662", + "right": "pull_request:45401", + "accept": false, + "reason": "Different model integrations for different code paths (PenguinVL vs Voxtral TTS). The overlap is generic model-registration/docs boilerplate, not the same underlying change." + }, + { + "left": "pull_request:44662", + "right": "pull_request:45075", + "accept": false, + "reason": "These add different models (PenguinVL vs DeepSeek-OCR-2). Shared files are broad Transformers plumbing, but the concrete implementation and target functionality are unrelated." + }, + { + "left": "pull_request:45075", + "right": "pull_request:45401", + "accept": false, + "reason": "DeepSeek-OCR-2 and Voxtral TTS are distinct model additions with different configs, processors, and tests. Similar auto-mapping/docs edits are insufficient to treat them as the same PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 17, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44979-2", + "nodes": [ + "pull_request:44979", + "pull_request:45363" + ], + "soft_pairs": [ + "pull_request:44979|pull_request:45363" + ], + "prepared_review_unit_hash": "2bae581294da7eba217a756060c607967c100b2d4705bf2d364ef9e5d0f40f18", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4407, + "estimated_input_tokens": 1102, + "estimated_eval_tokens": 2460 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4407, + "estimated_input_tokens": 1102, + "estimated_eval_tokens": 2460 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are related by the broad theme of fusion, but they are not the same change: one introduces a generic Module Fusion API, while the other wires n-to-1 kernel fusion through KernelConfig and related integration points. 
I would not dedupe them.", + "confidence": 0.82, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #44979 is the more foundational change: it adds the new module-fusion abstraction itself, whereas #45363 is a separate kernel-config-driven integration on top of a different code path.", + "best_issue_reason": null, + "best_pr_reason": "PR #44979 is the best representative of this cluster because it is the core API change and is simpler to use as the anchor for related fusion work.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44979", + "right": "pull_request:45363", + "accept": false, + "reason": "They share a fusion theme, but they do not look like one underlying bug/change. #44979 adds a standalone Module Fusion API, while #45363 implements n-to-1 kernel fusion plumbing via KernelConfig and integrations; these would not plausibly be merged into one PR." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 18, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45086-2", + "nodes": [ + "pull_request:45086", + "pull_request:45317" + ], + "soft_pairs": [ + "pull_request:45086|pull_request:45317" + ], + "prepared_review_unit_hash": "9c62f3e8a65a521050fa64cfd12363de8ff6a474c256926aeef82c4b742402f5", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4414, + "estimated_input_tokens": 1104, + "estimated_eval_tokens": 2464 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4414, + "estimated_input_tokens": 1104, + "estimated_eval_tokens": 2464 + }, + "overflow_policy": "truncate_then_skip", + 
"trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs target the same Mistral regex patch bug in the same file and apply the same code change; PR #45317 is the more complete version because it adds tests and has more discussion.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #45317 is the stronger canonical PR: same fix as #45086, but with added test coverage and more review discussion, making it the more complete representative of this change.", + "best_issue_reason": null, + "best_pr_reason": "PR #45317 is best overall because it directly fixes the AttributeError, includes tests, and is the more fully developed implementation of the shared fix.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45086", + "right": "pull_request:45317", + "accept": true, + "reason": "Same underlying bug and same code-path fix in the same file; #45317 is effectively the tested, expanded version of #45086." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 19, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45115-2", + "nodes": [ + "pull_request:45115", + "pull_request:45569" + ], + "soft_pairs": [ + "pull_request:45115|pull_request:45569" + ], + "prepared_review_unit_hash": "b906698b3fb430661c3e6cf78115b214fbfb01fed57d472e9bedaba9bd8d20a1", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6066, + "estimated_input_tokens": 1517, + "estimated_eval_tokens": 3290 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6066, + "estimated_input_tokens": 1517, + "estimated_eval_tokens": 3290 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two open PRs in the NemotronH area overlap on model plumbing, but they pursue different changes: one refactors NemotronH to inherit GraniteMoeHybrid-style structure, while the other splits NemotronH into dedicated dense/sparse model families with docs and tests.", + "confidence": 0.79, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #45569 is the better canonical PR because it is the broader, more complete NemotronH redesign and includes the downstream docs/test/model registration changes that make it the likely final representative of the cluster.", + "best_issue_reason": null, + "best_pr_reason": "PR #45569 best represents the cluster since it subsumes the shared NemotronH modeling work and adds the user-facing/model split changes; #45115 is a narrower 
refactor step and less representative of the overall change set.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45115", + "right": "pull_request:45569", + "accept": false, + "reason": "They touch overlapping NemotronH files, but they are not the same concrete fix/change: #45115 is a structural refactor toward GraniteMoeHybrid compatibility, while #45569 introduces separate NemotronHDense/NemotronHSparse models plus docs/tests and broader API updates. Shared subsystem alone is not enough to merge them." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 20, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45332-2", + "nodes": [ + "pull_request:45332", + "pull_request:45333" + ], + "soft_pairs": [ + "pull_request:45332|pull_request:45333" + ], + "prepared_review_unit_hash": "8159cc6512800f1c6ef20ceef8d250fcfa8591c2704cd8d3cc5059f7805486a5", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5398, + "estimated_input_tokens": 1350, + "estimated_eval_tokens": 2956 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5398, + "estimated_input_tokens": 1350, + "estimated_eval_tokens": 2956 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are two related but distinct pull requests in the heterogeneous-modeling feature area: one adds per-layer config support, the other adds the broader per-layer modeling/cache integration. 
They share plumbing files, but they are not the same concrete change and should not be merged as duplicates.", + "confidence": 0.89, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45332 is the better canonical representative because it is the broader umbrella change: it adds heterogeneous model support across config, cache, masking, and modeling paths. PR 45333 is a narrower config-only piece.", + "best_issue_reason": null, + "best_pr_reason": "PR 45332 is the best overall representative for the cluster because it covers the larger end-to-end heterogeneous support work, while 45333 is only one subsystem slice of that effort.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45332", + "right": "pull_request:45333", + "accept": false, + "reason": "Related feature work, but not the same underlying change: 45333 only adds heterogeneous config handling, while 45332 extends modeling/cache behavior and depends on that config groundwork. They would not plausibly be one PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 21, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45415-2", + "nodes": [ + "pull_request:45415", + "pull_request:45425" + ], + "soft_pairs": [ + "pull_request:45415|pull_request:45425" + ], + "prepared_review_unit_hash": "7bcd7d04285a4253c91d4ef53638d4eddc49ef8d721a3fc934290aaa0ea21a9b", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5514, + "estimated_input_tokens": 1379, + "estimated_eval_tokens": 3014 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5514, + "estimated_input_tokens": 1379, + "estimated_eval_tokens": 3014 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both items are broad typing-focused PRs in Transformers, but they address different changes: one enables/expands type checking across many src/transformers files, while the other adds shared typing helper protocols and related model utility adjustments. 
They are related by theme, not the same underlying change.", + "confidence": 0.9, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45425 is the better canonical PR because it is the more focused change centered on shared typing abstractions and targeted utility updates, whereas PR 45415 is a much broader type-checking sweep across many files.", + "best_issue_reason": null, + "best_pr_reason": "PR 45425 is the best representative of this cluster since it has a narrower, more specific code-change scope; PR 45415 is a separate large-scale typing rollout rather than the same concrete fix.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45415", + "right": "pull_request:45425", + "accept": false, + "reason": "Both are typing-related, but they do not look like the same underlying bug or change. PR 45415 is a wide type-checking/config sweep across many modules, while PR 45425 introduces shared typing protocols and related code-path adjustments. Shared filenames alone are not enough to treat them as duplicates." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 22, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45487-2", + "nodes": [ + "pull_request:45487", + "pull_request:45615" + ], + "soft_pairs": [ + "pull_request:45487|pull_request:45615" + ], + "prepared_review_unit_hash": "848e42b2cea82e09d945d113713b3b9afd3438d236f6bf1e52f32d6be69221cb", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5159, + "estimated_input_tokens": 1290, + "estimated_eval_tokens": 2836 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5159, + "estimated_input_tokens": 1290, + "estimated_eval_tokens": 2836 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The two pull requests are unrelated: one fixes model-parallel/device-placement code in AltCLIP/ChineseCLIP and other models, while the other only adds XPU test expectations for Qianfan OCR.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45487 is the stronger representative of a real code change: it fixes device-placement/model-parallel bugs in model implementations rather than only adjusting tests.", + "best_issue_reason": null, + "best_pr_reason": "PR 45487 is the best PR to represent the cluster because it contains the substantive cross-model fix; PR 45615 is just an XPU expectation update in tests.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45487", + "right": "pull_request:45615", + "accept": false, + "reason": 
"Reject: they do not fix the same concrete code-path problem. 45487 changes model code for AltCLIP/ChineseCLIP device handling, while 45615 only updates Qianfan OCR test outputs for XPU." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 23, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45548-2", + "nodes": [ + "pull_request:45548", + "pull_request:45662" + ], + "soft_pairs": [ + "pull_request:45548|pull_request:45662" + ], + "prepared_review_unit_hash": "5fd759d95527c3d82c5ecf392742443d5f9005d030ff2fca2b2902589260839a", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4508, + "estimated_input_tokens": 1127, + "estimated_eval_tokens": 2510 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4508, + "estimated_input_tokens": 1127, + "estimated_eval_tokens": 2510 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The two PRs both touch expert-parallel loading, but they fix different bugs on different code paths: one is about ZeRO-3/accelerate launch config handling, the other about FSDP2/rank-0 broadcast overwriting experts. 
Not duplicates.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #45662 is the more concrete and self-contained fix: it targets a specific expert-overwrite path during distributed loading and has a focused code change around shard wrapping.", + "best_issue_reason": null, + "best_pr_reason": "PR #45662 is the stronger representative of the cluster because it addresses a clearly defined failure mode in the model loading path, whereas #45548 is a separate configuration/loading compatibility fix.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45548", + "right": "pull_request:45662", + "accept": false, + "reason": "Different underlying defects. #45548 fixes EP + DeepSpeed ZeRO-3 loading/config serialization for accelerate launch, while #45662 fixes EP + FSDP2 experts being overwritten by rank-0 broadcast during loading. Shared subsystem only, not the same bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 24, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45639-2", + "nodes": [ + "pull_request:45639", + "pull_request:45645" + ], + "soft_pairs": [ + "pull_request:45639|pull_request:45645" + ], + "prepared_review_unit_hash": "58bedb4f37eb30626691bdf6c14fc12b35c9b50598d456c17ffbbfe7a09dec37", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4590, + "estimated_input_tokens": 1148, + "estimated_eval_tokens": 2552 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4590, + "estimated_input_tokens": 1148, + "estimated_eval_tokens": 2552 + }, + 
"overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs address the same xdist collision problem around captured_info logging, with 45645 appearing to subsume 45639 by adding the core fix plus CI log handling.", + "confidence": 0.92, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45645 is the better canonical representative because it contains the same core testing_utils xdist-safe fix as 45639 and also includes the extra workflow/log-preservation changes tied to the same bug.", + "best_issue_reason": null, + "best_pr_reason": "PR 45645 is the strongest overall fit: it covers the concrete captured_info xdist collision fix and the related CI/reporting adjustments, making it the most complete version of the change.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45639", + "right": "pull_request:45645", + "accept": true, + "reason": "Same underlying captured_info xdist collision bug; 45645 includes the same testing_utils fix and extends it with CI log aggregation, so the PRs are plausibly mergeable into one change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + } + ] +} diff --git a/snapshots/20260430T060020Z/analysis-runs/hybrid-model-20260430t060020z/manifest.json b/snapshots/20260430T060020Z/analysis-runs/hybrid-model-20260430t060020z/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..b6ee0a8c4e0dde0d78cae8982d820e86acbc18ff --- /dev/null +++ b/snapshots/20260430T060020Z/analysis-runs/hybrid-model-20260430t060020z/manifest.json @@ -0,0 +1,14 @@ +{ + "analysis_id": "hybrid-model-20260430t060020z", + "artifacts": { + "hybrid": "snapshots/20260430T060020Z/analysis-runs/hybrid-model-20260430t060020z/analysis-report-hybrid.json", + "hybrid_reviews": "snapshots/20260430T060020Z/analysis-runs/hybrid-model-20260430t060020z/analysis-report-hybrid.llm-reviews.json" + }, + "channel": "canonical", + "model": null, + "published_at": "2026-04-30T06:08:50Z", + "repo": "huggingface/transformers", + "schema_version": 1, + "snapshot_id": "20260430T060020Z", + "variant": "hybrid" +} diff --git a/snapshots/20260430T060020Z/manifest.json b/snapshots/20260430T060020Z/manifest.json index 088950aca772d03dfabc2534cb589df4dbf0782e..a7a3681032815f22c8b2a7513f47bc7b95fb65d4 100644 --- a/snapshots/20260430T060020Z/manifest.json +++ b/snapshots/20260430T060020Z/manifest.json @@ -32,6 +32,24 @@ "timeline_events": 421 }, "extracted_at": "2026-04-30T06:00:20Z", + "published_analysis": { + "canonical_analysis_id": "hybrid-model-20260430t060020z", + "runs": { + "hybrid-model-20260430t060020z": { + "analysis_id": "hybrid-model-20260430t060020z", + "artifacts": { + "hybrid": "snapshots/20260430T060020Z/analysis-runs/hybrid-model-20260430t060020z/analysis-report-hybrid.json", + "hybrid_reviews": "snapshots/20260430T060020Z/analysis-runs/hybrid-model-20260430t060020z/analysis-report-hybrid.llm-reviews.json" + }, + "channel": "canonical", + 
"manifest_path": "snapshots/20260430T060020Z/analysis-runs/hybrid-model-20260430t060020z/manifest.json", + "model": null, + "published_at": "2026-04-30T06:08:50Z", + "variant": "hybrid" + } + }, + "schema_version": 1 + }, "repo": "huggingface/transformers", "snapshot_id": "20260430T060020Z", "watermark": {