{ "schema_version": "1.0", "repo": "huggingface/transformers", "snapshot_id": "20260415T224019Z", "generated_at": "2026-04-16T00:11:03Z", "model": "gpt-5.4-mini?service_tier=flex", "reviews": [ { "label": "issue", "component_index": 1, "component_count": 12, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "cache", "cache_hit": true, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-13244-3", "nodes": [ "issue:13244", "issue:15980", "issue:16618" ], "soft_pairs": [ "issue:13244|issue:16618", "issue:15980|issue:16618" ], "prepared_review_unit_hash": "8d942e946546db9f31de23cc11f858ff8e4ea87b88b0ffa5a8ec031174d59e13", "component_budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 2, "serialized_chars": 2477, "estimated_input_tokens": 620, "estimated_eval_tokens": 1496 }, "budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 2, "serialized_chars": 2477, "estimated_input_tokens": 620, "estimated_eval_tokens": 1496 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "These three issues do not appear to be duplicates of one another. Two are about model-loading/authentication errors, while one is about Tapas tokenization behavior, which is unrelated.",
"confidence": 0.91, "canonical_issue_reason": "Issue #16618 is the most concrete and broadly framed of the three, centered on a specific model-loading failure for 'bert-base-uncased'.", "canonical_pr_reason": null, "best_issue_reason": "#16618 is the best representative issue because it has the clearest user-facing problem statement and the most discussion activity among the non-duplicate candidates.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:13244", "right": "issue:16618", "accept": false, "reason": "Tapas tokenization behavior and a bert-base-uncased loading failure are different code paths and different user problems; no evidence they share the same underlying bug." }, { "left": "issue:15980", "right": "issue:16618", "accept": false, "reason": "Both mention model loading/authentication-related failures, but #15980 is specifically about a bad error when downloading a private model without login, while #16618 is about failing to load bert-base-uncased; these are not clearly the same bug."
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 1, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:30277", "issue:35545", "issue:36010", "issue:41720", "issue:42915", "issue:43596", "issue:43638", "issue:43716", "issue:43828", "issue:44292", "issue:44322", "issue:44509", "issue:44512", "issue:44805", "issue:44918", "issue:45161", "issue:45237", "issue:45464" ], "soft_pairs": [ "issue:44322|issue:45464", "issue:43716|issue:45237", "issue:43828|issue:45237", "issue:44292|issue:45237", "issue:30277|issue:35545", "issue:35545|issue:42915", "issue:35545|issue:41720", "issue:44509|issue:44512", "issue:43638|issue:44805", "issue:35545|issue:36010", "issue:43596|issue:44805", "issue:44805|issue:45161", "issue:44805|issue:44918" ], "prepared_review_unit_hash": "603fb1645d7d05cbf2077eb660b6e2c50b40cd7f102a5ebe13473e8731454582", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 13, "serialized_chars": 14261, "estimated_input_tokens": 3566, "estimated_eval_tokens": 7388 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is mostly a false-positive mix of unrelated model/runtime issues. The only clear duplicate-like pair is the two v5 docs cleanup issues; the ZeRO3/BERT and Qwen/GPT-OSS items are similar in topic but not the same concrete bug.",
"confidence": 0.73, "canonical_issue_reason": "issue:44509 is the cleaner canonical for the docs-removal cluster: it describes the broader v5 documentation cleanup, while issue:44512 is a narrower variant about the removed run command.", "canonical_pr_reason": null, "best_issue_reason": "issue:44509 best represents the only strong duplicate pair in the set and is the most general issue for that docs cleanup.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44322", "right": "issue:45464", "accept": false, "reason": "Different Qwen3.5 problems: a config attribute error vs a streaming chat/completions failure." }, { "left": "issue:43716", "right": "issue:45237", "accept": false, "reason": "Different models and failure modes: Mistral-3 dtype mismatch vs GPT-OSS AMD GPU runtime failure." }, { "left": "issue:43828", "right": "issue:45237", "accept": false, "reason": "Both are model/runtime errors, but the concrete issue is different: Phi-tiny-MoE autocast dtype mismatch vs GPT-OSS on AMD GPUs." }, { "left": "issue:44292", "right": "issue:45237", "accept": false, "reason": "Different model and symptom: Qwen-3-8B-NVFP4 runtime error vs GPT-OSS AMD GPU failure." }, { "left": "issue:30277", "right": "issue:35545", "accept": false, "reason": "Unrelated bugs: Deepspeed ZeRO3 collective mismatch vs ModernBERT ONNX export error." }, { "left": "issue:35545", "right": "issue:42915", "accept": false, "reason": "Different concrete problems: ONNX export for ModernBERT vs FineGrainedFP8Config failure on Qwen3Moe." }, { "left": "issue:35545", "right": "issue:41720", "accept": false, "reason": "Different failure modes and code paths: ONNX export vs auto device mapping CUDA assert."
}, { "left": "issue:44509", "right": "issue:44512", "accept": true, "reason": "Both are v5 docs cleanup issues about removed/renamed items still mentioned in documentation, so they look like the same underlying documentation change." }, { "left": "issue:43638", "right": "issue:44805", "accept": false, "reason": "Both mention IndexError, but the concrete reports differ too much: ZeRO3/non-pretrained Bert initialization vs a mask/tensor shape mismatch." }, { "left": "issue:35545", "right": "issue:36010", "accept": false, "reason": "Different issues: ModernBERT export error vs missing GenerationMixin import." }, { "left": "issue:43596", "right": "issue:44805", "accept": false, "reason": "Shared ZeRO3/Bert theme is not enough here; the reported errors and contexts are different enough to avoid treating them as duplicates." }, { "left": "issue:44805", "right": "issue:45161", "accept": false, "reason": "Different models and problems: generic mask shape IndexError vs GPT-OSS tensor-parallel support issue." }, { "left": "issue:44805", "right": "issue:44918", "accept": false, "reason": "Different failure modes and contexts: mask/tensor shape IndexError vs Qwen3.5 embedding unpacking with TRL SFT trainer." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 2, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:24643", "issue:30277", "issue:34634", "issue:34928", "issue:35545", "issue:36331", "issue:39290", "issue:43039", "issue:43596", "issue:43638", "issue:43975", "issue:44292", "issue:44457", "issue:44661", "issue:44805", "issue:45081", "issue:45161", "issue:45237" ], "soft_pairs": [ "issue:44661|issue:44805", "issue:30277|issue:43638", "issue:34634|issue:35545", "issue:43596|issue:43638", "issue:43975|issue:44457", "issue:43039|issue:45161", "issue:44292|issue:44457", "issue:34928|issue:44805", "issue:24643|issue:30277", "issue:43975|issue:45237", "issue:43638|issue:45161", "issue:34928|issue:36331", "issue:43596|issue:45161", "issue:39290|issue:45081" ], "prepared_review_unit_hash": "d852aa9d5b4af19659712ea87c875ac541d4d32f0e426c2efa0d35ddab191d5b", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 14, "serialized_chars": 14839, "estimated_input_tokens": 3710, "estimated_eval_tokens": 7676 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These items are mostly unrelated false-positive similarity matches across different model/runtime bugs. The only strong duplicate pair is the two DeepSpeed ZeRO3 + BERT initialization IndexError reports (43596/43638).",
"confidence": 0.91, "canonical_issue_reason": "issue:43596 is the cleanest representative of the one real duplicate cluster: the same DeepSpeed ZeRO3 + BERT init path hitting an index-out-of-bounds error.", "canonical_pr_reason": null, "best_issue_reason": "issue:43596 is the best single issue to keep because it states the concrete failure clearly and covers the broader reproducer; 43638 reads like the same bug in a narrower non-pretrained variant.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44661", "right": "issue:44805", "accept": false, "reason": "Different bugs: tokenizer mapping/model-addition logic versus a tensor-mask shape mismatch. Similar generic error reporting, but no shared code path." }, { "left": "issue:30277", "right": "issue:43638", "accept": false, "reason": "Both involve DeepSpeed/ZeRO3, but one is a collective mismatch and the other is an index-out-of-bounds during BERT init; not the same underlying failure." }, { "left": "issue:34634", "right": "issue:35545", "accept": false, "reason": "Unrelated domains: BarkProcessor voice preset handling versus ModernBERT ONNX export." }, { "left": "issue:43596", "right": "issue:43638", "accept": true, "reason": "Same concrete bug: DeepSpeed ZeRO3 initialization with BERT hitting an index-0-out-of-bounds error, with the second issue describing a narrower variant of the same failure." }, { "left": "issue:43975", "right": "issue:44457", "accept": false, "reason": "Different problems: detokenization for DeepSeek Coder versus LoRA merge/save/reload output mismatch." }, { "left": "issue:43039", "right": "issue:45161", "accept": false, "reason": "Liger kernel cross-entropy routing is unrelated to GPT-OSS tensor-parallel/MoE execution."
}, { "left": "issue:44292", "right": "issue:44457", "accept": false, "reason": "Qwen-3 NVFP4 runtime failure and LoRA merge/reload inconsistency are different code paths and symptoms." }, { "left": "issue:34928", "right": "issue:44805", "accept": false, "reason": "Activation checkpointing with FSDP/accelerate is a different failure mode than a mask/tensor indexing shape mismatch." }, { "left": "issue:24643", "right": "issue:30277", "accept": false, "reason": "Both mention DeepSpeed, but one is a weight-dimension error and the other is a collective synchronization mismatch; not the same bug." }, { "left": "issue:43975", "right": "issue:45237", "accept": false, "reason": "DeepSeek tokenizer detokenization and GPT-OSS AMD GPU runtime issues are unrelated." }, { "left": "issue:43638", "right": "issue:45161", "accept": false, "reason": "Shared DeepSpeed flavor only; BERT ZeRO3 init index error is unrelated to GPT-OSS MoE TP not working." }, { "left": "issue:34928", "right": "issue:36331", "accept": false, "reason": "Training-time activation checkpointing/FSDP mismatch is unrelated to CustomTrainer compute_loss signature breakage." }, { "left": "issue:43596", "right": "issue:45161", "accept": false, "reason": "Different models and failures: BERT ZeRO3 init index error versus GPT-OSS MoE tensor-parallel behavior." }, { "left": "issue:39290", "right": "issue:45081", "accept": false, "reason": "Gemma3 vLLM config attribute error and Mistral regex patch/tokenizer backend crash are different tokenizer/model integration bugs." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 3, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:30277", "issue:34928", "issue:36331", "issue:41720", "issue:41762", "issue:43039", "issue:43425", "issue:43541", "issue:43638", "issue:43716", "issue:43827", "issue:43828", "issue:44292", "issue:44512", "issue:44560", "issue:44805", "issue:44918", "issue:45161" ], "soft_pairs": [ "issue:43828|issue:45161", "issue:43425|issue:44292", "issue:43541|issue:43716", "issue:44918|issue:45161", "issue:34928|issue:41720", "issue:43828|issue:44292", "issue:43716|issue:44292", "issue:44292|issue:44918", "issue:43039|issue:44918", "issue:41762|issue:43638", "issue:43827|issue:44512", "issue:43716|issue:44560", "issue:43716|issue:44918", "issue:43039|issue:43828", "issue:30277|issue:36331", "issue:43039|issue:44805" ], "prepared_review_unit_hash": "a0a7f1881802e4ddbe8568010226fb2bf20542e8127cb332dc2cf64407a40d23", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 16, "serialized_chars": 15284, "estimated_input_tokens": 3821, "estimated_eval_tokens": 7898 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The cluster is heterogeneous: it mixes unrelated model/runtime failures (ZeRO-3, FSDP, autocast, dynamo tracing, device mapping, trainer API changes) plus separate docs cleanup items. I don\u2019t see any pair that clearly shares the same underlying bug/change, so no soft merges should be accepted.",
"confidence": 0.93, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43828", "right": "issue:45161", "accept": false, "reason": "Both mention MoE/TP-type behavior, but one is an autocast dtype mismatch in Phi-tiny-MoE and the other is a GPT-OSS tensor-parallel issue; the failure modes are different." }, { "left": "issue:43425", "right": "issue:44292", "accept": false, "reason": "Torch 2.10 incompatibility is a version-wide support issue, while Qwen-3-8B-NVFP4 is a model/runtime-specific failure; not the same bug." }, { "left": "issue:43541", "right": "issue:43716", "accept": false, "reason": "Mixtral grouped_mm errors during torch dynamo tracing and a Mistral-3 image preprocessor dtype mismatch are different code paths and different symptoms." }, { "left": "issue:44918", "right": "issue:45161", "accept": false, "reason": "TRL SFTTrainer embedding unpacking is unrelated to GPT-OSS tensor-parallel-only failures; they do not look like one underlying bug." }, { "left": "issue:34928", "right": "issue:41720", "accept": false, "reason": "FSDP + activation checkpointing tensor recomputation mismatch is unrelated to Qwen3 auto device mapping causing cudaErrorAssert." }, { "left": "issue:43828", "right": "issue:44292", "accept": false, "reason": "Autocast dtype mismatch and Qwen-3-8B-NVFP4 runtime failure are different model/runtime problems." }, { "left": "issue:43716", "right": "issue:44292", "accept": false, "reason": "Mistral-3 image preprocessor dtype mismatch is not the same underlying issue as Qwen-3-8B-NVFP4 execution failure." }, { "left": "issue:44292", "right": "issue:44918", "accept": false, "reason": "Qwen-3-8B-NVFP4 failure and TRL SFTTrainer input-embedding unpacking failure are distinct bugs."
}, { "left": "issue:43039", "right": "issue:44918", "accept": false, "reason": "Liger Kernel cross_entropy being called is unrelated to Qwen3.5 embedding unpacking in TRL SFTTrainer." }, { "left": "issue:41762", "right": "issue:43638", "accept": false, "reason": "Both are ZeRO-3 index errors, but one occurs when loading Gemma3 and the other during training a non-pretrained Bert model; too different to merge." }, { "left": "issue:43827", "right": "issue:44512", "accept": false, "reason": "Both are docs removals, but they target different deprecated commands/sections; this looks like separate documentation cleanup items." }, { "left": "issue:43716", "right": "issue:44560", "accept": false, "reason": "A dtype mismatch in Mistral-3 image preprocessing is not the same as a StopIteration error in Qwen3-vl video embedding." }, { "left": "issue:43716", "right": "issue:44918", "accept": false, "reason": "Mistral-3 image dtype mismatch and Qwen3.5/TRL embedding unpacking are unrelated failures." }, { "left": "issue:43039", "right": "issue:43828", "accept": false, "reason": "Liger Kernel cross_entropy behavior and an autocast dtype mismatch are different subsystems and different concrete failures." }, { "left": "issue:30277", "right": "issue:36331", "accept": false, "reason": "ZeRO-3 collective mismatch with Jamba-v01 is unrelated to a CustomTrainer.compute_loss signature change." }, { "left": "issue:43039", "right": "issue:44805", "accept": false, "reason": "Liger Kernel cross_entropy invocation is unrelated to a mask-shape IndexError." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 4, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:34928", "issue:36010", "issue:39290", "issue:41093", "issue:41720", "issue:41762", "issue:43039", "issue:43425", "issue:43531", "issue:43596", "issue:43638", "issue:43854", "issue:43866", "issue:43901", "issue:44512", "issue:44560", "issue:44863", "issue:45070" ], "soft_pairs": [ "issue:34928|issue:36010", "issue:41762|issue:43596", "issue:43901|issue:44512", "issue:39290|issue:41720", "issue:43039|issue:44560", "issue:43866|issue:44863", "issue:43854|issue:43866", "issue:43425|issue:45070", "issue:39290|issue:43531", "issue:41093|issue:43638" ], "prepared_review_unit_hash": "c64b238d1ce42abb40907204a284e73f84e6d739f9a8b5d814f62fc18fd0d4ad", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13481, "estimated_input_tokens": 3371, "estimated_eval_tokens": 6998 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "Only one soft edge looks like a true duplicate pair: the two DeepSpeed ZeRO-3 index-out-of-bounds loading failures. The rest are distinct bugs, docs issues, or model-specific regressions that should stay separate.",
"confidence": 0.87, "canonical_issue_reason": "Issue 43596 is the best canonical issue because it describes the generic DeepSpeed ZeRO-3 / index-out-of-bounds failure without tying it to one model family, so it cleanly subsumes the similar Gemma3 report.", "canonical_pr_reason": null, "best_issue_reason": "Issue 43596 is the strongest representative of the cluster: it captures the shared zero-sized tensor / ZeRO-3 loading crash in a model-agnostic way and matches the closest duplicate candidate.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:34928", "right": "issue:36010", "accept": false, "reason": "Unrelated failures: one is an activation-checkpointing/FSDP tensor-size mismatch, the other is a GenerationMixin import error." }, { "left": "issue:41762", "right": "issue:43596", "accept": true, "reason": "Same underlying bug pattern: both report the exact out-of-bounds IndexError during model loading with DeepSpeed ZeRO-3, so they plausibly share one fix." }, { "left": "issue:43901", "right": "issue:44512", "accept": false, "reason": "Both are docs-related, but they cover different documentation problems and different behavior changes; not the same issue." }, { "left": "issue:39290", "right": "issue:41720", "accept": false, "reason": "Different concrete failures: a Gemma3/vLLM config attribute error versus a Qwen3 auto device-mapping CUDA assert." }, { "left": "issue:43039", "right": "issue:44560", "accept": false, "reason": "No shared code-path problem; Liger Kernel cross-entropy dispatch and Qwen3-VL video StopIteration are unrelated." }, { "left": "issue:43866", "right": "issue:44863", "accept": false, "reason": "Both involve model loading, but one is a corrupted checkpoint report and the other is NemotronH checkpoint compatibility; too different to merge."
}, { "left": "issue:43854", "right": "issue:43866", "accept": false, "reason": "Different model families and different failure modes: GLM-4.7-Flash unit-test loading versus an Ovis2 checkpoint corruption report." }, { "left": "issue:43425", "right": "issue:45070", "accept": false, "reason": "Unrelated regressions: Torch 2.10 incompatibility versus a pydantic/PretrainedConfig field issue in v5.4.0." }, { "left": "issue:39290", "right": "issue:43531", "accept": false, "reason": "Both mention sliding_window, but the concrete bugs differ: missing Gemma3TextConfig attribute in vLLM versus Qwen3-MoE sliding_window behavior." }, { "left": "issue:41093", "right": "issue:43638", "accept": false, "reason": "Different index errors with different shapes and model contexts; no evidence they share the same root cause." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 5, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36010", "issue:39290", "issue:41093", "issue:41762", "issue:43296", "issue:43366", "issue:43404", "issue:43541", "issue:43572", "issue:43792", "issue:43828", "issue:43866", "issue:44841", "issue:45070", "issue:45084", "issue:45362", "issue:45464" ], "soft_pairs": [ "issue:45362|issue:45464", "issue:43792|issue:45070", "issue:43404|issue:43866", "issue:43541|issue:43828", "issue:36010|issue:39290", "issue:44841|issue:45084", "issue:41093|issue:41762", "issue:43296|issue:43366", "issue:43572|issue:45070" ], "prepared_review_unit_hash": "085ce3405236c35ba13f9363af5468d8f34f05c70c87f92ef175dbc8da0c859b", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, 
"estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12471, "estimated_input_tokens": 3118, "estimated_eval_tokens": 6492 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This set is heterogeneous: the soft pairs share superficial transformer/model-runtime wording, but each pair points to a different concrete bug or regression. I do not see any true duplicate pairs here.", "confidence": 0.89, "canonical_issue_reason": "issue:45070 is the best cluster anchor because it is the only open item and describes the broadest, most general regression in this set.", "canonical_pr_reason": null, "best_issue_reason": "issue:45070 is the strongest representative overall: open, recent, and phrased as a general framework regression rather than a narrow model-specific failure.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:45362", "right": "issue:45464", "accept": false, "reason": "Both involve Qwen3.5/chat, but one is a generic crash in transformers chat and the other is a streaming chat/completions API failure for a different Qwen3.5 size; not the same bug." }, { "left": "issue:43792", "right": "issue:45070", "accept": false, "reason": "Whisper model loading/runtime failure is unrelated to a pydantic/PretrainedConfig field regression." }, { "left": "issue:43404", "right": "issue:43866", "accept": false, "reason": "Mistral3 weight tying in image-text generation is a model implementation bug; Ovis2 checkpoint corruption is a different artifact integrity issue." }, { "left": "issue:43541", "right": "issue:43828", "accept": false, "reason": "Grouped_mm torch-dynamo tracing failure in Mixtral is a different code path from Phi-tiny-MoE dtype mismatch under autocast." 
}, { "left": "issue:36010", "right": "issue:39290", "accept": false, "reason": "GenerationMixin import error and Gemma3Config missing sliding_window_pattern are separate API/config regressions." }, { "left": "issue:44841", "right": "issue:45084", "accept": false, "reason": "Voxtral processor failure and non-template-node compilation error are distinct processing/compiler issues with no clear shared root cause." }, { "left": "issue:41093", "right": "issue:41762", "accept": false, "reason": "Mask/tensor shape mismatch during indexing and DeepSpeed ZeRO-3 out-of-bounds loading are different runtime failures." }, { "left": "issue:43296", "right": "issue:43366", "accept": false, "reason": "PaddleOCR-VL loading failure with vLLM is not the same as adding GGUF support for gpt-oss architecture." }, { "left": "issue:43572", "right": "issue:45070", "accept": false, "reason": "StableLmConfig missing pad_token_idx is a config-field omission, but it is not the same regression as the pydantic PretrainedConfig field issue." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 6, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:39290", "issue:41093", "issue:43054", "issue:43366", "issue:43531", "issue:43541", "issue:43596", "issue:43742", "issue:43828", "issue:43994", "issue:44291", "issue:44387", "issue:44589", "issue:44661", "issue:45005", "issue:45070", "issue:45161", "issue:45464" ], "soft_pairs": [ "issue:44387|issue:45005", "issue:43541|issue:45161", "issue:43828|issue:45464", "issue:44291|issue:45070", "issue:43531|issue:45070", "issue:43366|issue:45464", "issue:44661|issue:45464", "issue:43541|issue:44589", "issue:39290|issue:43742", "issue:43054|issue:43994", "issue:41093|issue:43596" ], "prepared_review_unit_hash": "85c271f95b3150537c63410f8194bed9c2e5c267b1d0a5d0f1cbc59f2da54804", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 11, "serialized_chars": 13714, "estimated_input_tokens": 3429, "estimated_eval_tokens": 7114 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is mostly a mix of unrelated model-loading/runtime issues. The only plausible duplicate pair is the two SigLIP2 reports, which both describe degraded or nonsensical outputs for the same model family.",
"confidence": 0.88, "canonical_issue_reason": "issue:43994 is the best canonical issue: it names the exact model and concrete user-visible failure with AutoModel/pipeline, making it the clearest representative of the SigLIP2 regression.", "canonical_pr_reason": null, "best_issue_reason": "issue:43994 is the strongest global issue candidate because it is specific, reproducible, and aligns closely with the duplicate SigLIP2 symptom report.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44387", "right": "issue:45005", "accept": false, "reason": "Both are v5 regressions, but one is an int4 quantization OOM and the other is a tied-weights translation-model issue; different failure modes and code paths." }, { "left": "issue:43541", "right": "issue:45161", "accept": false, "reason": "Both mention MoE-related model behavior, but Mixtral grouped_mm tracing and GPT-OSS tensor-parallel support are distinct problems." }, { "left": "issue:43828", "right": "issue:45464", "accept": false, "reason": "Autocast dtype mismatch in Phi-tiny-MoE is unrelated to a chat/completions streaming API failure on Qwen3.5." }, { "left": "issue:44291", "right": "issue:45070", "accept": false, "reason": "init_empty_weights argument handling and pydantic PretrainedConfig field breakage are separate compatibility issues." }, { "left": "issue:43531", "right": "issue:45070", "accept": false, "reason": "Sliding-window behavior for Qwen3-MoE is unrelated to the pydantic PretrainedConfig regression." }, { "left": "issue:43366", "right": "issue:45464", "accept": false, "reason": "GPT-OSS GGUF architecture support and Qwen3.5 streaming inference are different model/API concerns."
}, { "left": "issue:44661", "right": "issue:45464", "accept": false, "reason": "Tokenizer-mapping failure in add-new-model-like is a registry bug, not the same as a streaming inference API error." }, { "left": "issue:43541", "right": "issue:44589", "accept": false, "reason": "Grouped_mm tracing on Mixtral and missing Float8 storage are different runtime errors." }, { "left": "issue:39290", "right": "issue:43742", "accept": false, "reason": "Gemma3 sliding_window_pattern with vLLM and MobileLLM key-error loading are separate model/config issues." }, { "left": "issue:43054", "right": "issue:43994", "accept": true, "reason": "Both reports concern SigLIP2 producing poor or nonsensical outputs; they appear to describe the same underlying regression in model behavior." }, { "left": "issue:41093", "right": "issue:43596", "accept": false, "reason": "Mask-shape mismatch during indexing and zero-sized tensor index-out-of-bounds under deepspeed zero3 are both index errors but clearly different bugs." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 7, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:39186", "issue:40990", "issue:42915", "issue:43404", "issue:43541", "issue:43645", "issue:43873", "issue:43975", "issue:44292", "issue:44360", "issue:44492", "issue:44512", "issue:44530", "issue:44863", "issue:44918", "issue:44960", "issue:45005", "issue:45399" ], "soft_pairs": [ "issue:42915|issue:43541", "issue:40990|issue:44960", "issue:39186|issue:44918", "issue:43404|issue:45005", "issue:43645|issue:45399", "issue:44530|issue:45005", "issue:44863|issue:45005", "issue:43873|issue:45005", "issue:42915|issue:45399", "issue:44360|issue:44512", "issue:43975|issue:44292", "issue:44492|issue:44512" ], "prepared_review_unit_hash": "59df15011923da80efcc4664a7746dcf6b8583ff752d95e15193d44d91290608", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 12, "serialized_chars": 13742, "estimated_input_tokens": 3436, "estimated_eval_tokens": 7128 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These items are mostly unrelated model-specific bugs, docs issues, and one broader v5 tied-weights regression. None of the soft pairs looks like the same underlying bug/change closely enough to merge as duplicates.",
"confidence": 0.52, "canonical_issue_reason": "issue:45005 is the broadest and most umbrella-like bug report in the set, covering a general tied-weights regression across v5 translation models rather than a single model-specific symptom.", "canonical_pr_reason": null, "best_issue_reason": "issue:45005 is the best representative issue because it describes a reusable regression class and is more central than the highly specific runtime/model reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:42915", "right": "issue:43541", "accept": false, "reason": "Both are runtime failures in specialized model paths, but one is FP8/Qwen3Moe and the other is float32/Mixtral during dynamo tracing; different code paths and symptoms." }, { "left": "issue:40990", "right": "issue:44960", "accept": false, "reason": "Perplexity degradation on gpt-oss-20b is unrelated to the generic GLM5 issue; no shared concrete failure mode is evident." }, { "left": "issue:39186", "right": "issue:44918", "accept": false, "reason": "FSDP 2-D weight error and Qwen3.5/TRL embedding unpacking are different stack traces and different subsystems." }, { "left": "issue:43404", "right": "issue:45005", "accept": false, "reason": "Both mention tied weights, but one is a Mistral3 model bug and the other is a broader translation-model regression; not the same concrete issue." }, { "left": "issue:43645", "right": "issue:45399", "accept": false, "reason": "Custom-model notebook initialization in v5 is unrelated to flash-attn2 fallback being blocked by checks." }, { "left": "issue:44530", "right": "issue:45005", "accept": false, "reason": "PagedAttentionCache linear_attention crash on Qwen3.5 is a cache/kernel issue, not a tied-weights regression."
}, { "left": "issue:44863", "right": "issue:45005", "accept": false, "reason": "NemotronH checkpoint-loading failure is a model implementation/load-path problem, distinct from tied-weights handling." }, { "left": "issue:43873", "right": "issue:45005", "accept": false, "reason": "Quantization offloading behavior is a separate feature area from v5 tied-weights issues." }, { "left": "issue:42915", "right": "issue:45399", "accept": false, "reason": "FP8/Qwen3Moe failure and flash-attn2 fallback gating are unrelated bug classes." }, { "left": "issue:44360", "right": "issue:44512", "accept": false, "reason": "A DSA indexer ReLU discussion and a docs typo are clearly different topics." }, { "left": "issue:43975", "right": "issue:44292", "accept": false, "reason": "Incorrect detokenization in v5 and an NVFP4 runtime error for Qwen-3-8B are different symptoms and likely different fixes." }, { "left": "issue:44492", "right": "issue:44512", "accept": false, "reason": "A cache-strategy typo is just documentation/editing; it is not the same as stale docs about the removed run command." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 8, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36331", "issue:38175", "issue:39692", "issue:41553", "issue:41762", "issue:43054", "issue:43644", "issue:43828", "issue:43866", "issue:43872", "issue:43881", "issue:43883", "issue:43950", "issue:44403", "issue:44534", "issue:44589", "issue:44928" ], "soft_pairs": [ "issue:43950|issue:44534", "issue:43644|issue:43950", "issue:43828|issue:44928", "issue:39692|issue:43054", "issue:41553|issue:43883", "issue:43866|issue:44403", "issue:36331|issue:41762", "issue:41553|issue:43881", "issue:43872|issue:44589", "issue:38175|issue:43054" ], "prepared_review_unit_hash": "a57c8ecf8dbd1f9f64d61ab75824ab98b5ceff03c860874c7198261c1356dc1c", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 10, "serialized_chars": 12848, "estimated_input_tokens": 3212, "estimated_eval_tokens": 6680 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is mostly a grab-bag of unrelated Transformers issues; the only strong duplicate subcluster is the Transformers 5.x non-persistent buffer regression. 
The remaining soft pairs are only thematically similar, not the same underlying bug.", "confidence": 0.88, "canonical_issue_reason": "issue:43950 is the clearest and most specific representative of the non-persistent buffer regression: it names the exact failure mode (`from_pretrained()` corrupting `register_buffer(persistent=False)`) and the Transformers 5.x regression context.", "canonical_pr_reason": null, "best_issue_reason": "issue:43950 is the best global issue representative because it is the most precise statement of the only clear duplicate topic in the set and subsumes the broader wording of issue:43644 and issue:44534.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43950", "right": "issue:44534", "accept": true, "reason": "Same underlying bug: both report Transformers v5 filling non-persistent buffers with junk/corruption during load." }, { "left": "issue:43644", "right": "issue:43950", "accept": true, "reason": "Same regression and same failure mode; 43950 is just the more detailed wording of the non-persistent buffer corruption bug." }, { "left": "issue:43828", "right": "issue:44928", "accept": false, "reason": "Both are training/numerics problems, but the concrete failures differ: dtype mismatch under autocast vs NaN/SDPA fallback gradient explosion." }, { "left": "issue:39692", "right": "issue:43054", "accept": false, "reason": "Both involve SigLIP2, but one is a broken docs example and the other is degraded text embedding quality; not the same bug." }, { "left": "issue:41553", "right": "issue:43883", "accept": false, "reason": "Different models and different issues: Voxtral tokenizer error messaging vs Molmo tied-weights attribute failure." }, { "left": "issue:43866", "right": "issue:44403", "accept": false, "reason": "Corrupted Ovis2 checkpoint is not the same as generic loading noise/unnecessary warnings." 
}, { "left": "issue:36331", "right": "issue:41762", "accept": false, "reason": "Unrelated failures: custom trainer API mismatch vs Gemma3 DeepSpeed ZeRO-3 loading index error." }, { "left": "issue:41553", "right": "issue:43881", "accept": false, "reason": "Both are model-loading problems, but they concern different models and different failure paths." }, { "left": "issue:43872", "right": "issue:44589", "accept": false, "reason": "Different incompatibilities: bitsandbytes kwarg mismatch vs missing Float8 storage object." }, { "left": "issue:38175", "right": "issue:43054", "accept": false, "reason": "Same model family, but one is zero probabilities and the other is poor embedding quality; too broad to treat as the same bug." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 9, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:41762", "issue:42491", "issue:42915", "issue:42947", "issue:43054", "issue:43493", "issue:43643", "issue:43782", "issue:43856", "issue:43881", "issue:43883", "issue:44368", "issue:44387", "issue:44403", "issue:44451", "issue:44488", "issue:44960", "issue:45020" ], "soft_pairs": [ "issue:42947|issue:44387", "issue:43881|issue:43883", "issue:43054|issue:43493", "issue:44403|issue:44451", "issue:42947|issue:43856", "issue:43782|issue:44960", "issue:43643|issue:45020", "issue:44403|issue:44488", "issue:42491|issue:44368", "issue:41762|issue:42915" ], "prepared_review_unit_hash": "4c02d4642493f4c33386d78c53c52375e9e3284c2a21d4a1b05c229d172b1833", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, 
"estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13331, "estimated_input_tokens": 3333, "estimated_eval_tokens": 6922 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These issues are mostly separate model-loading or training regressions across unrelated models/subsystems. None of the soft-edge pairs is a clear duplicate based on titles alone; the only near-overlaps are in the remote_code and SigLIP2 areas, but they still read as different concrete bugs.", "confidence": 0.87, "canonical_issue_reason": "issue:45020 is the broadest and most representative issue in the set, covering a recurring remote_code regression pattern that is more general than the other specific loading failures.", "canonical_pr_reason": null, "best_issue_reason": "issue:45020 is the best single issue to keep as a representative umbrella for the cluster because it is the most general and actionable regression report among otherwise unrelated model-specific reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:42947", "right": "issue:44387", "accept": false, "reason": "Different symptoms and contexts: LoRA/gradient checkpointing versus int4 quantization memory/OOM." }, { "left": "issue:43881", "right": "issue:43883", "accept": false, "reason": "Unrelated models and failure modes; one is GLM-4V loading, the other is a Molmo tied-weights attribute error." }, { "left": "issue:43054", "right": "issue:43493", "accept": false, "reason": "Related SigLIP2 topic, but one reports degraded embeddings while the other reports an HF-vs-JAX implementation discrepancy; not clearly the same concrete bug." }, { "left": "issue:44403", "right": "issue:44451", "accept": false, "reason": "One is a general loading-noise complaint, the other is a specific model load failure for ScandiBERT." 
}, { "left": "issue:42947", "right": "issue:43856", "accept": false, "reason": "Both mention training/memory concerns, but the concrete problems differ: checkpointing behavior versus MoE memory efficiency." }, { "left": "issue:43782", "right": "issue:44960", "accept": false, "reason": "No clear overlap: Qwen3VL weight_only loading error versus a generic GLM5 issue." }, { "left": "issue:43643", "right": "issue:45020", "accept": false, "reason": "Both involve remote_code, but one is a specific missing-fields bug in AutoConfig while the other is a broad umbrella report; not enough to prove the same underlying change." }, { "left": "issue:44403", "right": "issue:44488", "accept": false, "reason": "Both concern model loading, but the first is generic noise while the second is a concrete sleng-bert load failure." }, { "left": "issue:42491", "right": "issue:44368", "accept": false, "reason": "Different model families and symptoms; Qwen3_moe LoRA compatibility versus a warning about tie_word_embeddings." }, { "left": "issue:41762", "right": "issue:42915", "accept": false, "reason": "Different model/config combinations and different failure modes; Gemma3 ZeRO-3 indexing error versus Qwen3 MoE FP8 failure." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 10, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:43054", "issue:43278", "issue:43792", "issue:43824", "issue:43827", "issue:43872", "issue:43975", "issue:44220", "issue:44451", "issue:44488", "issue:44509", "issue:44661", "issue:44991", "issue:45020", "issue:45081", "issue:45356", "issue:45399" ], "soft_pairs": [ "issue:43975|issue:44451", "issue:45020|issue:45356", "issue:43824|issue:43872", "issue:44661|issue:45020", "issue:43975|issue:44488", "issue:43054|issue:43278", "issue:44991|issue:45399", "issue:43792|issue:44220", "issue:43827|issue:44509", "issue:45081|issue:45399" ], "prepared_review_unit_hash": "164baca69670bfaa991d58f0f3d84431ca9a368eddcda9cc3e48e48358ca0cb6", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 10, "serialized_chars": 12931, "estimated_input_tokens": 3233, "estimated_eval_tokens": 6722 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "Most pairs are related only at a broad subsystem level and should stay separate. 
The only clear duplicate-like pair is the v5 docs cleanup around removed pipeline tasks.", "confidence": 0.86, "canonical_issue_reason": "Issue 44509 is the best representative for the docs-regression cluster because it states the broader v5 pipeline-removal documentation problem more completely than the narrower 43827 report.", "canonical_pr_reason": null, "best_issue_reason": "Issue 44509 is the strongest global representative among the submitted issues: it is specific, actionable, and closest to a likely dedupe target with broader coverage of the same docs bug.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43975", "right": "issue:44451", "accept": false, "reason": "Both concern model/tokenizer loading behavior, but one is a detokenization regression and the other is a load failure for a different model; not the same underlying bug." }, { "left": "issue:45020", "right": "issue:45356", "accept": false, "reason": "45356 is a specific Kimi-K2.5 tokenizer regression, while 45020 is a broader remote_code breakage report; related area, different failure and fix scope." }, { "left": "issue:43824", "right": "issue:43872", "accept": false, "reason": "Importing a model class and a bitsandbytes Int8Params constructor incompatibility are separate code paths and separate bugs." }, { "left": "issue:44661", "right": "issue:45020", "accept": false, "reason": "`add-new-model-like` mapping logic and remote_code loading regressions are different issues; same general packaging area but not the same defect." }, { "left": "issue:43975", "right": "issue:44488", "accept": false, "reason": "Both are loading-related, but they affect different models and describe different symptoms/root causes." }, { "left": "issue:43054", "right": "issue:43278", "accept": false, "reason": "Both mention embedding quality/dtype, but one is a quality regression for SigLIP2 text embeddings and the other is a training/eval dtype mismatch; not clearly the same bug." 
}, { "left": "issue:44991", "right": "issue:45399", "accept": false, "reason": "Tokenizer loading for a specific model and flash-attn fallback selection are unrelated failures." }, { "left": "issue:43792", "right": "issue:44220", "accept": false, "reason": "Both are audio/Whisper-related, but one is a general 'can't run' report and the other is a specific fbank feature extraction issue; not enough evidence they are the same bug." }, { "left": "issue:43827", "right": "issue:44509", "accept": true, "reason": "Both report v5 docs still mentioning removed text-generation/summarization/translation pipeline tasks; same documentation cleanup bug and plausibly one PR." }, { "left": "issue:45081", "right": "issue:45399", "accept": false, "reason": "Tokenizer regex patching and flash-attn fallback gating are separate mechanisms and separate issues." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 11, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:41553", "issue:42915", "issue:43257", "issue:43404", "issue:43742", "issue:43854", "issue:43866", "issue:43872", "issue:43940", "issue:43950", "issue:44291", "issue:44488", "issue:44863", "issue:44991", "issue:45020", "issue:45305", "issue:45313", "issue:45399" ], "soft_pairs": [ "issue:43404|issue:43950", "issue:42915|issue:45020", "issue:41553|issue:44991", "issue:43257|issue:43866", "issue:43742|issue:45399", "issue:43742|issue:45020", "issue:43872|issue:44291", "issue:43940|issue:45313", "issue:41553|issue:44488", "issue:42915|issue:43872", "issue:43854|issue:44863", "issue:43257|issue:45305" ], "prepared_review_unit_hash": 
"a3380af810b9988bc70f4c67e93ecc845dcd194c378a630fd199737eed15ef9c", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 12, "serialized_chars": 14031, "estimated_input_tokens": 3508, "estimated_eval_tokens": 7272 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "Mostly a heterogeneous set of model-loading regressions and unrelated tokenizer / training / quantization bugs. The only clear duplicate-like pair is the `_is_hf_initialized` loading regression (43872 \u2194 44291); the rest look like distinct issues despite some shared wording around DeepSpeed, Qwen, or tokenizer loading.", "confidence": 0.84, "canonical_issue_reason": "issue:44291 is the broader, cleaner description of the `_is_hf_initialized` TypeError regression; issue:43872 looks like a specific bitsandbytes manifestation of the same underlying problem.", "canonical_pr_reason": null, "best_issue_reason": "issue:44291 is the best representative issue in this set because it captures the shared loading regression without being overly model-specific.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43404", "right": "issue:43950", "accept": false, "reason": "Both concern loading behavior, but one is a tied-weight bug in Mistral3 while the other is silent corruption of non-persistent buffers; different code paths and failure modes." }, { "left": "issue:42915", "right": "issue:45020", "accept": false, "reason": "Qwen3Moe FP8 config failure is a concrete quantization/config issue, while the other is a broad remote_code regression; not the same bug." 
}, { "left": "issue:41553", "right": "issue:44991", "accept": false, "reason": "These are different tokenizer problems on different models: one is a bad error message, the other is an actual tokenizer-loading failure." }, { "left": "issue:43257", "right": "issue:43866", "accept": false, "reason": "Both mention Qwen/Ovis model loading, but the models and symptoms differ; no evidence they stem from the same underlying conversion or checkpoint bug." }, { "left": "issue:43742", "right": "issue:45399", "accept": false, "reason": "A key error when loading MobileLLM and a flash-attn fallback-blocking issue are unrelated code paths." }, { "left": "issue:43742", "right": "issue:45020", "accept": false, "reason": "MobileLLM key errors and remote_code breakage are both general loading failures, but they are not the same concrete defect." }, { "left": "issue:43872", "right": "issue:44291", "accept": true, "reason": "Both describe the same `_is_hf_initialized` constructor/initialization TypeError during model loading; 44291 is the broader report and 43872 a specific downstream manifestation." }, { "left": "issue:43940", "right": "issue:45313", "accept": false, "reason": "Both are DeepSpeed ZeRO-3 weight-loading failures, but they affect different Qwen variants with different missing-weight symptoms; insufficient evidence they are the same bug." }, { "left": "issue:41553", "right": "issue:44488", "accept": false, "reason": "Different tokenizer-loading failures for different models; one is specifically about Voxtral's error message, the other about sleng-bert failing to load." }, { "left": "issue:43854", "right": "issue:44863", "accept": false, "reason": "Both are model loading failures, but one is a unit-test/load issue for GLM-4.7-Flash and the other is NemotronH checkpoint compatibility; too different to merge." 
}, { "left": "issue:43257", "right": "issue:45305", "accept": false, "reason": "Qwen3 MOE weight-conversion/loading under accelerate+deepspeed is not the same as a gradient-averaging bug during training." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 12, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:24643", "issue:30064", "issue:36331", "issue:41093", "issue:43366", "issue:43531", "issue:43828", "issue:43872", "issue:43950", "issue:44479", "issue:44530", "issue:44560", "issue:44805", "issue:44863", "issue:44918", "issue:45084", "issue:45325" ], "soft_pairs": [ "issue:36331|issue:41093", "issue:41093|issue:44805", "issue:44479|issue:44560", "issue:44530|issue:44863", "issue:43872|issue:45084", "issue:24643|issue:30064", "issue:43366|issue:43828", "issue:43366|issue:43531", "issue:44560|issue:44918", "issue:43950|issue:45325" ], "prepared_review_unit_hash": "1afa0d9fa845fc4d383c44e04307b7917d3f83ad6fdac13f4b7bedb25a9e39ec", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 10, "serialized_chars": 12949, "estimated_input_tokens": 3238, "estimated_eval_tokens": 6732 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is mostly heterogeneous, but two soft-linked subgroups look like real duplicates: the mask-shape IndexError pair and the Qwen video regression pair. 
The remaining pairs are unrelated on their actual failure modes.", "confidence": 0.68, "canonical_issue_reason": "If a single issue must anchor the cluster, 41093 is the best representative of the only clear duplicate subgroup: it states the generic mask/tensor shape mismatch and predates the near-identical later report 44805.", "canonical_pr_reason": null, "best_issue_reason": "41093 is the strongest issue representative overall because its title is broad enough to cover the duplicate mask-shape error pattern, and it has more discussion than the later variant.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:36331", "right": "issue:41093", "accept": false, "reason": "Different failures: a trainer API TypeError versus an IndexError in mask/tensor shape alignment." }, { "left": "issue:41093", "right": "issue:44805", "accept": true, "reason": "Same underlying error class: both report the same mask-vs-indexed-tensor shape mismatch IndexError, with only the concrete shapes/examples differing." }, { "left": "issue:44479", "right": "issue:44560", "accept": true, "reason": "Both describe the same Qwen video-input regression in transformers 5.3.0; 44560 is a specific StopIteration manifestation of that video-processing path." }, { "left": "issue:44530", "right": "issue:44863", "accept": false, "reason": "Different model/runtime problems: PagedAttentionCache linear_attention crash versus NemotronH checkpoint loading failure." }, { "left": "issue:43872", "right": "issue:45084", "accept": false, "reason": "Unrelated bugs: bitsandbytes Int8Params constructor incompatibility versus a compile-time template-node error." }, { "left": "issue:24643", "right": "issue:30064", "accept": false, "reason": "Completely different subsystems and symptoms: DeepSpeed training weight-dimension error versus image processor void segmentation map handling." 
}, { "left": "issue:43366", "right": "issue:43828", "accept": false, "reason": "No shared code-path or concrete bug; one is GGUF architecture support, the other is an autocast dtype mismatch in a MoE model." }, { "left": "issue:43366", "right": "issue:43531", "accept": false, "reason": "Different issues in different model families: GGUF gpt-oss support versus Qwen3-MoE sliding_window behavior." }, { "left": "issue:44560", "right": "issue:44918", "accept": false, "reason": "Both touch Qwen/video-adjacent areas, but the concrete failures differ: video StopIteration versus TRL SFT embedding unpacking." }, { "left": "issue:43950", "right": "issue:45325", "accept": false, "reason": "Unrelated regressions: non-persistent buffer corruption in from_pretrained versus a Qwen2.5-VL rope/position_ids scaling bug." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 13, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36010", "issue:38175", "issue:42617", "issue:42915", "issue:43366", "issue:43550", "issue:43645", "issue:43646", "issue:43824", "issue:43950", "issue:43957", "issue:43994", "issue:44560", "issue:44589", "issue:44805", "issue:44877", "issue:44910", "issue:45030" ], "soft_pairs": [ "issue:43645|issue:43646", "issue:43366|issue:44589", "issue:43550|issue:44910", "issue:43824|issue:43957", "issue:42915|issue:43957", "issue:42617|issue:43366", "issue:38175|issue:43994", "issue:43646|issue:43950", "issue:44560|issue:44805", "issue:36010|issue:42915", "issue:44877|issue:45030" ], "prepared_review_unit_hash": "3a14320a5b27e135cc82f2fa1ed99f072b11a85683b7da125efe973e55b94a54", "component_budget": { "node_count": 353, "item_count": 353, 
"soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 11, "serialized_chars": 13749, "estimated_input_tokens": 3438, "estimated_eval_tokens": 7132 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is mostly heterogeneous, with two likely true duplicate pairs: the custom-model initialization regression and the siglip2 wrong-output regression. The other soft-similarity pairs look related only at a broad subsystem level, not the same underlying bug.", "confidence": 0.77, "canonical_issue_reason": "Issue 43646 is the best representative of the clearest duplicate-like subthread: it states the broader Transformers 5.0.0 custom model initialization regression, while 43645 is just a notebook-specific variant.", "canonical_pr_reason": null, "best_issue_reason": "43646 is the strongest overall issue anchor in this set because it is broad, user-facing, and describes a concrete regression in the custom-model initialization path more generally than its sibling report.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43645", "right": "issue:43646", "accept": true, "reason": "Same underlying Transformers 5.0.0 custom-model initialization regression; 43645 appears to be a notebook-specific manifestation of the broader 43646 report." }, { "left": "issue:43366", "right": "issue:44589", "accept": false, "reason": "Different bugs: GGUF gpt-oss architecture support vs a Float8 storage/type error during model handling." }, { "left": "issue:43550", "right": "issue:44910", "accept": false, "reason": "Both involve attention/compiled execution, but they describe different concrete failures on different models and code paths (SDPA torch.compile vs Flash Attention position_ids handling)." 
}, { "left": "issue:43824", "right": "issue:43957", "accept": false, "reason": "Importing a missing exported class is not the same bug as meta-device loading breaking some models; related area, but different failure modes." }, { "left": "issue:42915", "right": "issue:43957", "accept": false, "reason": "Qwen3Moe + FineGrainedFP8Config failure is a different issue from meta-device loading regressions." }, { "left": "issue:42617", "right": "issue:43366", "accept": false, "reason": "Unrelated: a 3d_parallel.py execution problem versus GGUF gpt-oss support." }, { "left": "issue:38175", "right": "issue:43994", "accept": true, "reason": "Same siglip2-base-patch16-224 output regression; one report calls out zero probabilities and the other describes nonsensical outputs via AutoModel/pipeline." }, { "left": "issue:43646", "right": "issue:43950", "accept": false, "reason": "Custom-model initialization regression and non-persistent buffer corruption are distinct bugs in different load paths." }, { "left": "issue:44560", "right": "issue:44805", "accept": false, "reason": "Different concrete failures: Qwen3-vl-embedding video StopIteration versus a tensor/mask shape mismatch." }, { "left": "issue:36010", "right": "issue:42915", "accept": false, "reason": "ImportError for GenerationMixin is unrelated to the Qwen3Moe FineGrainedFP8Config failure." }, { "left": "issue:44877", "right": "issue:45030", "accept": false, "reason": "Both mention config validation, but they concern different models and lack evidence of the same underlying validation bug." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 14, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:38175", "issue:39692", "issue:43317", "issue:43596", "issue:43638", "issue:43644", "issue:43708", "issue:44075", "issue:44451", "issue:44488", "issue:44534", "issue:44661", "issue:44756", "issue:44960", "issue:44977", "issue:45313", "issue:45357" ], "soft_pairs": [ "issue:43638|issue:44661", "issue:43596|issue:44661", "issue:43317|issue:44756", "issue:43644|issue:44534", "issue:45313|issue:45357", "issue:38175|issue:39692", "issue:44960|issue:44977", "issue:44451|issue:44488", "issue:43708|issue:44075" ], "prepared_review_unit_hash": "c0022fe712b448386b2cba2442508c98b9a158ea406c4765a438435e5cf45196", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12427, "estimated_input_tokens": 3107, "estimated_eval_tokens": 6470 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The cluster is mostly heterogeneous. 
Only the two issues about Transformers v5/non-persistent buffers being filled with junk look like the same underlying bug; the rest are topic-adjacent at best but not duplicate-level matches.", "confidence": 0.88, "canonical_issue_reason": "Issue 43644 is the best canonical issue for the only clear duplicate topic: it states the core non-persistent-buffer regression directly and predates the later re-report in 44534.", "canonical_pr_reason": null, "best_issue_reason": "Issue 43644 is the strongest issue representative overall because it most clearly describes the shared regression in the accepted duplicate pair.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43638", "right": "issue:44661", "accept": false, "reason": "Both mention failures, but one is a DeepSpeed ZeRO-3/BertModel initialization IndexError and the other is an add-new-model-like tokenizer mapping failure; different code paths and bugs." }, { "left": "issue:43596", "right": "issue:44661", "accept": false, "reason": "One is a BertModel ZeRO-3 empty-shard IndexError, the other is a tokenizer/model registration issue; too different to be the same bug." }, { "left": "issue:43317", "right": "issue:44756", "accept": false, "reason": "Both are memory/offload related, but one is dequantized model loading with device_map=auto and the other is mmap OOM on Strix Halo; not the same underlying fix." }, { "left": "issue:43644", "right": "issue:44534", "accept": true, "reason": "These describe the same regression: non-persistent buffers being incorrectly filled with junk in Transformers v5/v5.0.0." }, { "left": "issue:45313", "right": "issue:45357", "accept": false, "reason": "One is ZeRO-3 loading failure for Qwen3.5 language_model weights; the other is save_pretrained writing incorrect visual encoder keys. Different directions and bugs." 
}, { "left": "issue:38175", "right": "issue:39692", "accept": false, "reason": "Both involve SigLIP2, but one reports zero probabilities from a model, while the other is a documentation example with model/processor mismatch and quantization errors; not the same issue." }, { "left": "issue:44960", "right": "issue:44977", "accept": false, "reason": "Different models and symptoms: GLM5 vs Qwen3.5 flash-attention generation failure. No shared concrete code-path bug is evident." }, { "left": "issue:44451", "right": "issue:44488", "accept": false, "reason": "Both are model-loading failures for different models, but there is no evidence they stem from the same regression or code path." }, { "left": "issue:43708", "right": "issue:44075", "accept": false, "reason": "Trainer checkpoint resume max_steps miscalculation and SGD args being ignored are unrelated bugs." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 15, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:39692", "issue:41720", "issue:42491", "issue:42915", "issue:43257", "issue:43720", "issue:43854", "issue:43856", "issue:43873", "issue:43931", "issue:43994", "issue:44488", "issue:44661", "issue:44683", "issue:44991", "issue:45276", "issue:45362", "issue:45406" ], "soft_pairs": [ "issue:43856|issue:43873", "issue:41720|issue:42915", "issue:44488|issue:44991", "issue:39692|issue:43994", "issue:43854|issue:43931", "issue:45362|issue:45406", "issue:42491|issue:43720", "issue:42491|issue:45276", "issue:43257|issue:43854", "issue:44661|issue:44683" ], "prepared_review_unit_hash": "5d5dd805bc9ce75a1ccc6487d02a6b7f3d4bc574fbbade386d63ab692f4ffaeb", "component_budget": { "node_count": 
353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13364, "estimated_input_tokens": 3341, "estimated_eval_tokens": 6938 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The soft edges mostly connect thematically similar but distinct bugs. A few share a model family or loading path, but the underlying failures differ enough that they should stay separate.", "confidence": 0.86, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": "No single issue cleanly represents the whole set because these are several unrelated model-loading / quantization / processor bugs. If one had to be picked as the most representative standalone report, issue 43873 is the best candidate only because it is open and has inbound references, but it does not cover the others.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43856", "right": "issue:43873", "accept": false, "reason": "Both mention Qwen3 and memory/quantization, but one is about MoE training memory usage while the other is about offloading behavior under quantization; different concrete bugs." }, { "left": "issue:41720", "right": "issue:42915", "accept": false, "reason": "Both are Qwen3-related failures, but one is an auto device mapping cuda assert and the other is a FineGrainedFP8Config failure for Qwen3Moe; not the same bug." }, { "left": "issue:44488", "right": "issue:44991", "accept": false, "reason": "Both are tokenizer-loading regressions for different models, but the affected models and failure details differ; too broad to treat as one duplicate." 
}, { "left": "issue:39692", "right": "issue:43994", "accept": false, "reason": "Both involve SigLIP2, but 39692 is a docs/example mismatch plus quantization failure, while 43994 is about nonsensical outputs from AutoModel/pipeline; related area, not clearly the same underlying bug." }, { "left": "issue:43854", "right": "issue:43931", "accept": false, "reason": "Different models and different loading failures: GLM-4.7-Flash unit-test loading versus Qwen3-VL weight-shape mismatch." }, { "left": "issue:45362", "right": "issue:45406", "accept": false, "reason": "Both are chat/serve crashes, but they hit different code paths and different model types; not one concrete bug." }, { "left": "issue:42491", "right": "issue:43720", "accept": false, "reason": "Both concern loading-time compatibility, but one is Qwen3 MoE LoRA across hf4.x/hf5.x and the other is BitNet packed-weight handling in accelerate; separate root causes." }, { "left": "issue:42491", "right": "issue:45276", "accept": false, "reason": "Qwen3 MoE LoRA compatibility and Gemma4 embedding resize behavior are unrelated bugs." }, { "left": "issue:43257", "right": "issue:43854", "accept": false, "reason": "Both mention model loading under accelerate/deepspeed, but the reported models and failures are different; no evidence of the same underlying defect." }, { "left": "issue:44661", "right": "issue:44683", "accept": false, "reason": "Tokenizer-mapping model-generation failure and compiled flex_attention torch-version failure are unrelated." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 16, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36010", "issue:39404", "issue:41720", "issue:42491", "issue:42915", "issue:43404", "issue:43454", "issue:43644", "issue:43645", "issue:43856", "issue:43931", "issue:44155", "issue:44164", "issue:44291", "issue:44387", "issue:44756", "issue:45276", "issue:45335" ], "soft_pairs": [ "issue:42915|issue:44756", "issue:43856|issue:44387", "issue:43644|issue:43645", "issue:42491|issue:43931", "issue:45276|issue:45335", "issue:42491|issue:43856", "issue:43404|issue:43454", "issue:36010|issue:39404", "issue:44164|issue:44291", "issue:41720|issue:44155" ], "prepared_review_unit_hash": "3aac8f05baab0aa3779e39c87d89f04343cad6e1b28675da08a78f8bcdd2cc03", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13437, "estimated_input_tokens": 3360, "estimated_eval_tokens": 6976 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This set is mostly a false-positive cluster: the issues share broad Transformers themes (Qwen3, memory, resizing embeddings, loading/saving) but do not describe the same underlying bug or change. No soft pair is safe to merge as a duplicate, though 45276/45335 are the closest in wording.", "confidence": 0.93, "canonical_issue_reason": "No canonical issue: the cluster is heterogeneous, and none of the candidate pairs describe the same concrete bug across the same code path.", "canonical_pr_reason": null, "best_issue_reason": "If a single issue must be kept as the representative, 45276 is the closest anchor because it has the strongest semantic overlap with another item (45335) around resize_token_embeddings; however, it still is not a true duplicate cluster.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:42915", "right": "issue:44756", "accept": false, "reason": "Different problems: Qwen3 MoE LoRA compatibility/versioning versus disabling mmap on Strix Halo to avoid OOM. No shared code-path bug." }, { "left": "issue:43856", "right": "issue:44387", "accept": false, "reason": "Both mention memory, but one is about Qwen3 MoE training efficiency and the other about int4 quantization reserving too much CUDA memory. Different failure modes and fixes." }, { "left": "issue:43644", "right": "issue:43645", "accept": false, "reason": "Separate regressions in Transformers 5.0.0: one about non-persistent buffers being initialized incorrectly, the other about custom models in Jupyter notebooks. Not the same bug." }, { "left": "issue:42491", "right": "issue:43931", "accept": false, "reason": "One is a LoRA/version compatibility issue for qwen3_moe; the other is a shape-mismatch loading error for Qwen3-VL-30B-A3B-Instruct. Different root causes." }, { "left": "issue:45276", "right": "issue:45335", "accept": false, "reason": "Closest pair, but they affect different model families and different embeddings paths (Gemma4 vs T5Gemma, per-layer/output embeddings vs decoder.embed_tokens). Too model-specific to treat as one bug." 
}, { "left": "issue:42491", "right": "issue:43856", "accept": false, "reason": "LoRA compatibility on hf5.x versus memory usage during Qwen3 MoE training are unrelated issues, even though both involve Qwen3/MoE." }, { "left": "issue:43404", "right": "issue:43454", "accept": false, "reason": "Both are weight-tying bugs, but in different model classes and different multimodal pathways (Mistral3ForConditionalGeneration vs AyaVisionConfig). Not mergeable as one fix." }, { "left": "issue:36010", "right": "issue:39404", "accept": false, "reason": "ImportError for GenerationMixin and Whisper pipeline return_language regression are unrelated code paths." }, { "left": "issue:44164", "right": "issue:44291", "accept": false, "reason": "Both concern model save/load behavior, but one is extra_state serialization and the other is init_empty_weights/_is_hf_initialized argument handling. Distinct bugs." }, { "left": "issue:41720", "right": "issue:44155", "accept": false, "reason": "One is a Qwen3 auto device mapping CUDA assert; the other is AudioFlamingo3 batched inference leaking embeddings/tokens between tracks. No underlying shared bug." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 17, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:39692", "issue:42947", "issue:43381", "issue:43425", "issue:43854", "issue:43873", "issue:43957", "issue:44291", "issue:44292", "issue:44387", "issue:44402", "issue:44488", "issue:44756", "issue:44912", "issue:44960", "issue:45216", "issue:45276", "issue:45292" ], "soft_pairs": [ "issue:44402|issue:44488", "issue:43873|issue:44387", "issue:39692|issue:44960", "issue:44387|issue:44756", "issue:43425|issue:43957", "issue:42947|issue:43381", "issue:44292|issue:44912", "issue:43854|issue:45216", "issue:44291|issue:44756", "issue:45276|issue:45292" ], "prepared_review_unit_hash": "55fdc5fd18d85471734110ebdfdd8bd186fce12113b9d555ed505d32a501d1d8", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13313, "estimated_input_tokens": 3329, "estimated_eval_tokens": 6914 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is mostly a set of unrelated issues, with only one strong duplicate pair: the two `resize_token_embeddings` reports. The other soft pairs look like neighboring or thematically similar bugs, but not the same concrete underlying problem.", "confidence": 0.86, "canonical_issue_reason": "No single canonical issue fits the whole set because the items span multiple unrelated bug classes. If forced to pick the strongest duplicate target, `issue:45292` is the best canonical issue because it is the broader open report covering the shared `resize_token_embeddings` failure mode.", "canonical_pr_reason": null, "best_issue_reason": "`issue:45292` is the best overall issue candidate: it is open, broadly phrased, and matches the clearest duplicate pair on the same API behavior. `issue:45276` is a narrower model-specific variant of the same bug.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44402", "right": "issue:44488", "accept": true, "reason": "Both describe the same kind of model-loading failure involving tokenizer special-token ids/vocab-size mismatch in BERT-style models; they plausibly stem from the same loader bug." }, { "left": "issue:43873", "right": "issue:44387", "accept": false, "reason": "Both mention quantization and memory/offloading, but one is about offloading behavior while the other is an int4 CUDA reserved-memory/OOM regression; too broad and not the same concrete bug." }, { "left": "issue:39692", "right": "issue:44960", "accept": false, "reason": "Different models and different failure modes; the only overlap is that both are model-related issues, which is far too broad." }, { "left": "issue:44387", "right": "issue:44756", "accept": false, "reason": "One is a CUDA int4 memory regression, the other is mmap on Strix Halo causing OOM; these are distinct code paths and likely distinct fixes." }, { "left": "issue:43425", "right": "issue:43957", "accept": false, "reason": "Both are compatibility/loading issues, but one is specifically Torch 2.10 incompatibility and the other is a `torch.device(\"meta\")` / model-loading breakage; not the same underlying bug." }, { "left": "issue:42947", "right": "issue:43381", "accept": false, "reason": "Both involve gradient checkpointing, but one is about PEFT LoRA not enabling it effectively during training and the other is about eval-mode usage; different behaviors and fixes." 
}, { "left": "issue:44292", "right": "issue:44912", "accept": false, "reason": "Both are quantized-model loading problems, but they concern different quantization formats, different models, and different symptoms; too weak to treat as one bug." }, { "left": "issue:43854", "right": "issue:45216", "accept": false, "reason": "Completely different reports: GLM-4.7-Flash test loading versus Qwen3.5 checkpoint saving regression." }, { "left": "issue:44291", "right": "issue:44756", "accept": false, "reason": "`init_empty_weights` TypeError from an unexpected argument is unrelated to disabling mmap to avoid OOM." }, { "left": "issue:45276", "right": "issue:45292", "accept": true, "reason": "These are clearly the same underlying bug: `resize_token_embeddings` does not propagate correctly to output embeddings, with `45276` just being a gemma4-specific manifestation." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 18, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:43381", "issue:43645", "issue:43646", "issue:43708", "issue:43957", "issue:44164", "issue:44265", "issue:44402", "issue:44451", "issue:44479", "issue:44743", "issue:44756", "issue:44877", "issue:44898", "issue:44928", "issue:44960", "issue:45292", "issue:45335" ], "soft_pairs": [ "issue:44743|issue:44960", "issue:43381|issue:44928", "issue:43957|issue:44756", "issue:44164|issue:44756", "issue:43646|issue:43708", "issue:45292|issue:45335", "issue:44479|issue:44877", "issue:44402|issue:44451", "issue:43645|issue:44164", "issue:44265|issue:44898" ], "prepared_review_unit_hash": "af8240ee9f92b560787f6742316ab59b361627aa617b622b16e340c6c181949d", "component_budget": { 
"node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13424, "estimated_input_tokens": 3356, "estimated_eval_tokens": 6968 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This set is mostly heterogeneous and does not form one duplicate cluster. The only clear duplicate-style pair is the resize_token_embeddings regression: the generic report and the t5gemma-specific report describe the same underlying failure to propagate resized embeddings. All other soft pairs appear to be distinct bugs touching different code paths or models.", "confidence": 0.88, "canonical_issue_reason": "Issue 45292 is the broader, generic report of the embedding-resize bug; 45335 is a model-specific manifestation of the same problem.", "canonical_pr_reason": null, "best_issue_reason": "45292 is the best representative issue because it states the core regression in a model-agnostic way and subsumes the narrower t5gemma variant.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44743", "right": "issue:44960", "accept": false, "reason": "Different models and failure modes: recurrent cache/state reset in qwen3_5 vs a GLM5 topic with no clear shared code path." }, { "left": "issue:43381", "right": "issue:44928", "accept": false, "reason": "Gradient checkpointing eval-mode behavior is unrelated to the Qwen3.5 SDPA/BF16 NaN regression." }, { "left": "issue:43957", "right": "issue:44756", "accept": false, "reason": "Model loading on torch.device(\"meta\") is a different class of bug from disabling mmap for Strix Halo OOM." }, { "left": "issue:44164", "right": "issue:44756", "accept": false, "reason": "save/from_pretrained extra_state handling is unrelated to mmap/OOM behavior." 
}, { "left": "issue:43646", "right": "issue:43708", "accept": false, "reason": "Custom model initialization in notebooks is unrelated to incorrect max_steps computation during resume_from_checkpoint." }, { "left": "issue:45292", "right": "issue:45335", "accept": true, "reason": "Both report the same resize_token_embeddings regression; 45335 is the t5gemma-specific instance of the broader output embedding mismatch." }, { "left": "issue:44479", "right": "issue:44877", "accept": false, "reason": "Video-input regression for Qwen variants is unrelated to strict config blocking granite_speech loading." }, { "left": "issue:44402", "right": "issue:44451", "accept": false, "reason": "Both mention loading/tokenizer issues, but one is a specific vocab-size mismatch and the other is only a generic inability to load a model; not enough to treat as the same bug." }, { "left": "issue:43645", "right": "issue:44164", "accept": false, "reason": "Breaking custom model initialization in Jupyter is distinct from failing to handle extra_state in save/from_pretrained." }, { "left": "issue:44265", "right": "issue:44898", "accept": false, "reason": "torch.export failures due to torch_compilable_check are a different code path from Perceiver interpolation/resize-position issues." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 19, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:39404", "issue:43257", "issue:43425", "issue:43611", "issue:43645", "issue:43716", "issue:43828", "issue:43901", "issue:43906", "issue:43931", "issue:43957", "issue:44509", "issue:44530", "issue:44589", "issue:44756", "issue:44863", "issue:44912", "issue:45084" ], "soft_pairs": [ "issue:43425|issue:43828", "issue:43257|issue:44863", "issue:43828|issue:43957", "issue:44589|issue:44756", "issue:43906|issue:44530", "issue:43931|issue:44863", "issue:39404|issue:44912", "issue:43425|issue:43716", "issue:43611|issue:43645", "issue:44530|issue:45084", "issue:43901|issue:44509" ], "prepared_review_unit_hash": "dc2fc7fa3cf055e6db105ee3ec1aee99c8d76a426f4d43229d74972f5ac3575c", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 11, "serialized_chars": 13650, "estimated_input_tokens": 3413, "estimated_eval_tokens": 7082 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is not a duplicate set; it contains several unrelated regressions and docs issues around model loading, dtype mismatches, and pipeline changes in Transformers 5.x. None of the soft pairs look like the same underlying bug or change.", "confidence": 0.96, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43425", "right": "issue:43828", "accept": false, "reason": "Both mention dtype/compatibility problems, but one is a Torch 2.10 incompatibility report and the other is an autocast-specific mismatch on a specific MoE model; different failure paths and fixes." }, { "left": "issue:43257", "right": "issue:44863", "accept": false, "reason": "Both are model-loading failures, but they concern different model families and different conversion/implementation issues; not the same concrete bug." }, { "left": "issue:43828", "right": "issue:43957", "accept": false, "reason": "Superficially similar dtype/loading symptoms, but one is an autocast model execution error and the other is a meta-device load regression affecting some models; different code paths." }, { "left": "issue:44589", "right": "issue:44756", "accept": false, "reason": "One is a missing Float8 storage object error, the other is an mmap/OOM workaround on Strix Halo; unrelated root causes." }, { "left": "issue:43906", "right": "issue:44530", "accept": false, "reason": "The first is an isolated reproduction of an earlier issue, while the second is a Qwen3.5 PagedAttentionCache crash with a different invalid group type; not the same bug." }, { "left": "issue:43931", "right": "issue:44863", "accept": false, "reason": "Both are checkpoint loading failures, but the affected models and mismatch causes differ; no evidence they stem from one shared defect." }, { "left": "issue:39404", "right": "issue:44912", "accept": false, "reason": "Whisper pipeline return_language behavior and MXFP4 quantization fallback are unrelated features and failures." 
}, { "left": "issue:43425", "right": "issue:43716", "accept": false, "reason": "Torch version incompatibility is a different problem from the Mistral-3 image preprocessor/model dtype mismatch." }, { "left": "issue:43611", "right": "issue:43645", "accept": false, "reason": "Both are Transformers 5.0 breakages, but one is about base_model_prefix loading and the other about notebook-defined custom model initialization; too broad and not the same fix." }, { "left": "issue:44530", "right": "issue:45084", "accept": false, "reason": "PagedAttentionCache group-type crash and template-node compilation error are distinct compiler/runtime failures." }, { "left": "issue:43901", "right": "issue:44509", "accept": false, "reason": "Both are docs-related, but one is about a stale return_all_scores mention and the other about removed pipeline task names; separate documentation issues." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 20, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:40990", "issue:42947", "issue:43299", "issue:43425", "issue:43596", "issue:43856", "issue:43931", "issue:43994", "issue:44079", "issue:44479", "issue:44589", "issue:44671", "issue:44683", "issue:44756", "issue:44805", "issue:44918", "issue:44991", "issue:45084" ], "soft_pairs": [ "issue:43299|issue:43931", "issue:44479|issue:44805", "issue:43596|issue:44683", "issue:44589|issue:45084", "issue:43856|issue:44756", "issue:43994|issue:44079", "issue:40990|issue:42947", "issue:43425|issue:44918", "issue:44671|issue:44991" ], "prepared_review_unit_hash": "3cacec9122d356cbc5a5b4b226f4cf053946a27eae745cad849f9274b250b8e3", "component_budget": { "node_count": 353, 
"item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 12971, "estimated_input_tokens": 3243, "estimated_eval_tokens": 6742 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The candidate pairs are mostly superficial similarities within Transformers-related bugs, but they describe different failures, models, or code paths. None look like true duplicates or mergeable into one fix.", "confidence": 0.95, "canonical_issue_reason": "No single issue cleanly represents the whole set because the items are unrelated bugs; if one must be chosen as the most generic core-library defect, issue #44079 is the closest fit, but it does not cover the others.", "canonical_pr_reason": null, "best_issue_reason": "Issue #44079 is the best representative only in the sense that it is a concise internal library bug with a clear code-path symptom; however, it is not a true umbrella for the rest of the cluster.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43299", "right": "issue:43931", "accept": false, "reason": "Different failure modes: one is Qwen3VL MoE loading breaking in v5.0.dev0, the other is a weight-shape mismatch for Qwen3-VL-30B-A3B-Instruct." }, { "left": "issue:44479", "right": "issue:44805", "accept": false, "reason": "Both are model/input regressions, but one is a video-input bug across Qwen VL variants while the other is a tensor mask shape mismatch IndexError; not the same code path." }, { "left": "issue:43596", "right": "issue:44683", "accept": false, "reason": "Different subsystems and symptoms: DeepSpeed ZeRO-3/BertModel initialization versus compiled flex_attention compatibility on torch >= 2.9." 
}, { "left": "issue:44589", "right": "issue:45084", "accept": false, "reason": "Unrelated compile/runtime problems: missing Float8 storage object versus Triton/template-node compilation error." }, { "left": "issue:43856", "right": "issue:44756", "accept": false, "reason": "Both mention memory, but one is Qwen3 MoE training inefficiency and the other is a Strix Halo mmap/OOM platform workaround; not the same bug." }, { "left": "issue:43994", "right": "issue:44079", "accept": false, "reason": "Both are model behavior regressions, but one is SigLIP2 inference quality and the other is ModelOutput key assignment logic." }, { "left": "issue:40990", "right": "issue:42947", "accept": false, "reason": "Perplexity on a specific model/data pair and ineffective gradient checkpointing with PEFT LoRA are distinct training/evaluation issues." }, { "left": "issue:43425", "right": "issue:44918", "accept": false, "reason": "Torch 2.10 incompatibility is a versioning/backend issue, while Qwen3.5 embedding unpacking in TRL SFT trainer is a trainer integration bug." }, { "left": "issue:44671", "right": "issue:44991", "accept": false, "reason": "Both are tokenizer/model-loading regressions in v5, but CamemBERT masked-LM predictions and EMBEDDIA/est-roberta tokenizer loading are different concrete failures." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 21, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:32090", "issue:36032", "issue:39404", "issue:40990", "issue:43421", "issue:43425", "issue:43452", "issue:43582", "issue:43632", "issue:43883", "issue:43957", "issue:44164", "issue:44479", "issue:44488", "issue:44671", "issue:44991", "issue:45081", "issue:45161" ], "soft_pairs": [ "issue:39404|issue:44671", "issue:36032|issue:43452", "issue:44991|issue:45081", "issue:43582|issue:43632", "issue:32090|issue:39404", "issue:43957|issue:44164", "issue:44488|issue:45081", "issue:40990|issue:43421", "issue:43883|issue:44488", "issue:43425|issue:45161", "issue:44164|issue:44479" ], "prepared_review_unit_hash": "af5380ae6388f4e8d7f13461d7bef809de4575d1460ba685fc7fc464538c1e05", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 11, "serialized_chars": 13790, "estimated_input_tokens": 3448, "estimated_eval_tokens": 7152 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is a grab bag of unrelated bugs across tokenizers, model loading, distributed training, and model-specific regressions; there is no single clear duplicate canon. All soft pairs are false positives based on superficial similarity only.", "confidence": 0.97, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:39404", "right": "issue:44671", "accept": false, "reason": "Different problems: Whisper pipeline `return_language` regression vs CamemBERT masked-LM prediction changes in v5; different models and code paths." }, { "left": "issue:36032", "right": "issue:43452", "accept": false, "reason": "One is a T5 tokenizer `add_special_tokens` naming conflict, the other is a `gguf_file` loading failure for AutoTokenizer/AutoModelForCausalLM; not the same bug." }, { "left": "issue:44991", "right": "issue:45081", "accept": false, "reason": "Both involve tokenizer loading, but they target different models and failures: EMBEDDIA/est-roberta load breakage vs Mistral regex patch crashing on a missing backend tokenizer attribute." }, { "left": "issue:43582", "right": "issue:43632", "accept": false, "reason": "Unrelated issues: Apple Silicon `caching_allocator_warmup` TypeError vs Transformers v5 breaking `_is_hf_initialized`." }, { "left": "issue:32090", "right": "issue:39404", "accept": false, "reason": "Trainer distributed broadcast `NoneType` error is unrelated to Whisper pipeline `return_language` behavior." }, { "left": "issue:43957", "right": "issue:44164", "accept": false, "reason": "Both are loading/saving related, but one is a `torch.device(\"meta\")` model-loading regression and the other is failure to preserve `extra_state` in save/from_pretrained; different underlying mechanisms." }, { "left": "issue:44488", "right": "issue:45081", "accept": false, "reason": "Different tokenizer/model failures: loading `cjvt/sleng-bert` vs Mistral tokenizer regex patch crash." 
}, { "left": "issue:40990", "right": "issue:43421", "accept": false, "reason": "High perplexity on gpt-oss-20b is a model evaluation/behavior issue, while the other is a tokenizer backend post-processor update bug." }, { "left": "issue:43883", "right": "issue:44488", "accept": false, "reason": "Molmo missing `all_tied_weights_keys` is a model attribute bug, not a tokenizer loading failure for `cjvt/sleng-bert`." }, { "left": "issue:43425", "right": "issue:45161", "accept": false, "reason": "Torch 2.10 incompatibility and GPT-OSS MoE tensor-parallel failure are both environment/model-parallel related, but they are not the same concrete code-path problem." }, { "left": "issue:44164", "right": "issue:44479", "accept": false, "reason": "Serialization `extra_state` handling and Qwen VL video-input regression are distinct bugs with different subsystems." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 22, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:34567", "issue:40444", "issue:42947", "issue:43452", "issue:43482", "issue:43611", "issue:43644", "issue:43716", "issue:43883", "issue:44206", "issue:44220", "issue:44265", "issue:44488", "issue:44610", "issue:44756", "issue:44987", "issue:44991", "issue:45084" ], "soft_pairs": [ "issue:44265|issue:44610", "issue:44488|issue:44987", "issue:43716|issue:45084", "issue:43611|issue:43644", "issue:44206|issue:44220", "issue:42947|issue:44756", "issue:34567|issue:40444", "issue:43883|issue:44991", "issue:43452|issue:43482" ], "prepared_review_unit_hash": "d0af60c85e5d157607ebf8b6fc7719e1ff87a4792be56961951ac8fc7ce198b8", "component_budget": { "node_count": 353, "item_count": 353, 
"soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 12993, "estimated_input_tokens": 3249, "estimated_eval_tokens": 6754 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The set is heterogeneous: it spans unrelated Transformers regressions in loading, audio, vision, export, training stats, and memory behavior. None of the soft pairs look like the same underlying bug, so I would not merge any of them as duplicates.", "confidence": 0.62, "canonical_issue_reason": "issue:43611 is the best single anchor because it is the broadest and most generally described regression in the set, with clear version-scoped impact and substantive discussion activity.", "canonical_pr_reason": null, "best_issue_reason": "issue:43611 is the strongest representative issue overall due to its broad loading-regression framing and likely higher reuse as an umbrella reference than the more model-specific reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44265", "right": "issue:44610", "accept": false, "reason": "Both are multimodal-related failures, but one is torch.export on torch_compilable_check and the other is an OmDet-Turbo processor/image-size mismatch; different code paths and failure modes." }, { "left": "issue:44488", "right": "issue:44987", "accept": false, "reason": "Both are model-loading regressions, but they target different models and likely different breakpoints in the v5/v5.1 loader path; not enough evidence of one underlying bug." }, { "left": "issue:43716", "right": "issue:45084", "accept": false, "reason": "One is a Mistral-3 image-preprocessor dtype mismatch, the other is a template-compilation TypeError; unrelated subsystems and error causes." 
}, { "left": "issue:43611", "right": "issue:43644", "accept": false, "reason": "Both are Transformers 5.0 regressions, but one is about base_model_prefix model loading and the other about non-persistent buffer initialization; separate concrete bugs." }, { "left": "issue:44206", "right": "issue:44220", "accept": false, "reason": "Both touch audio preprocessing, but one is an unsupported center argument in LasrFeatureExtractor and the other is a _torch_extract_fbank_features issue; different call sites and likely different fixes." }, { "left": "issue:42947", "right": "issue:44756", "accept": false, "reason": "PEFT/LoRA gradient checkpointing and Strix Halo mmap OOM are unrelated problems with no shared code path." }, { "left": "issue:34567", "right": "issue:40444", "accept": false, "reason": "TrainerState token counting and IterableDataset multi-image finetuning are distinct training issues; the overlap is only that they happen during training." }, { "left": "issue:43883", "right": "issue:44991", "accept": false, "reason": "One is a missing all_tied_weights_keys attribute in Molmo save/load behavior, the other is a tokenizer load regression for a different model; different failure modes." }, { "left": "issue:43452", "right": "issue:43482", "accept": false, "reason": "Both involve GGUF/model loading, but one is a generic gguf_file breakage in AutoTokenizer/AutoModelForCausalLM.from_pretrained and the other is a Qwen2.5-GGUF v5-specific loading failure; not clearly the same bug." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 23, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36032", "issue:39404", "issue:42915", "issue:43262", "issue:43317", "issue:43377", "issue:43482", "issue:43541", "issue:43643", "issue:43644", "issue:43646", "issue:43825", "issue:44451", "issue:44530", "issue:44849", "issue:44987", "issue:45084", "issue:45325" ], "soft_pairs": [ "issue:43644|issue:43646", "issue:43541|issue:45084", "issue:43643|issue:44530", "issue:44451|issue:44987", "issue:43317|issue:43482", "issue:36032|issue:42915", "issue:43262|issue:43377", "issue:44849|issue:45325", "issue:39404|issue:43825", "issue:42915|issue:44530" ], "prepared_review_unit_hash": "aa9693a1ebdde090b19693237010f9402693bf2bebe687c71725a87044b3998c", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13457, "estimated_input_tokens": 3365, "estimated_eval_tokens": 6986 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These are mostly separate Transformers bug reports spanning different models and code paths. 
A few are thematically close because they all mention v5 regressions, but none of the soft pairs look like the same underlying bug/change, so I would not merge any of them as duplicates.", "confidence": 0.88, "canonical_issue_reason": "issue:43644 is the strongest representative of the cluster: it is a concrete Transformers v5 regression report with a clear loading/state-management symptom, and it sits in the middle of the broader \u201cv5 broke model behavior\u201d theme seen across the set.", "canonical_pr_reason": null, "best_issue_reason": "issue:43644 is the best single issue to anchor the cluster because it is specific enough to describe a real code-path regression, yet general enough to represent the broader set of v5 breakage reports better than the more model-specific or UI/message-only issues.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43644", "right": "issue:43646", "accept": false, "reason": "Both are v5 regression reports, but one is about junk-filled non-persistent buffers and the other about custom model initialization. Those are related at a high level, not the same bug or a single fixable code path." }, { "left": "issue:43541", "right": "issue:45084", "accept": false, "reason": "Both mention runtime errors, but they concern different failures: Mixtral grouped_mm tracing vs. a template compilation error. No shared concrete bug or fix path." }, { "left": "issue:43643", "right": "issue:44530", "accept": false, "reason": "These are different subsystems and symptoms: AutoConfig with trust_remote_code loses fields, while Qwen3.5 cache crashes on linear_attention group type. Not a duplicate." }, { "left": "issue:44451", "right": "issue:44987", "accept": false, "reason": "Both are model-loading failures, but one is a specific model compatibility issue and the other is a transformers>=5.1.0 loading regression for a different repo/model. Too broad to treat as the same bug." 
}, { "left": "issue:43317", "right": "issue:43482", "accept": false, "reason": "These involve different loading paths and failure modes: device_map/offload dequantized model loading versus Qwen2.5-GGUF loading on v5. No evidence of the same underlying defect." }, { "left": "issue:36032", "right": "issue:42915", "accept": false, "reason": "The T5 tokenizer add_special_tokens conflict and the Qwen3Moe FineGrainedFP8Config failure are unrelated model/runtime issues." }, { "left": "issue:43262", "right": "issue:43377", "accept": false, "reason": "Audio chat-template sampling-rate defaulting and MIMI encoder batch-vs-single output mismatch are both audio-related, but they are different behaviors with different likely fixes." }, { "left": "issue:44849", "right": "issue:45325", "accept": false, "reason": "Both are Qwen3.5/Qwen2.5-VL model-specific bugs, but one concerns output_hidden_states and the other rope index scaling for still images. Different code paths, no duplicate evidence." }, { "left": "issue:39404", "right": "issue:43825", "accept": false, "reason": "Whisper pipeline return_language behavior and a pipeline translation-task error message are unrelated; one is a feature regression, the other is messaging/validation." }, { "left": "issue:42915", "right": "issue:44530", "accept": false, "reason": "These are distinct model-specific failures: Qwen3Moe FineGrainedFP8Config versus Qwen3.5 PagedAttentionCache linear_attention handling. Not the same bug." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 24, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36032", "issue:43322", "issue:43404", "issue:43540", "issue:43645", "issue:43646", "issue:43994", "issue:44403", "issue:44451", "issue:44466", "issue:44534", "issue:44610", "issue:44987", "issue:44991", "issue:45081", "issue:45216", "issue:45357" ], "soft_pairs": [ "issue:44987|issue:44991", "issue:36032|issue:43645", "issue:43404|issue:44466", "issue:43646|issue:44534", "issue:44403|issue:45081", "issue:43645|issue:44534", "issue:43322|issue:43540", "issue:43994|issue:44610", "issue:45216|issue:45357", "issue:44451|issue:45081" ], "prepared_review_unit_hash": "5ab1af964ac3f5d9c521f69a8fbbc92920d3a0164491e11a09b26046d6586edc", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 10, "serialized_chars": 12953, "estimated_input_tokens": 3239, "estimated_eval_tokens": 6734 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "Most pairs are clearly different bugs across unrelated models or code paths. 
The only likely duplicate is the Qwen3.5 save_pretrained regression pair, which describes the same incorrect checkpoint serialization issue across versions.", "confidence": 0.78, "canonical_issue_reason": "issue:45216 is the earlier, broader regression report for the Qwen3.5 save_pretrained checkpoint corruption, making it the best anchor for the duplicate cluster.", "canonical_pr_reason": null, "best_issue_reason": "issue:45216 is the most general and original report of the shared Qwen3.5 save_pretrained bug; issue:45357 reads like a later, narrower follow-up on the same failure mode.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44987", "right": "issue:44991", "accept": false, "reason": "Both are tokenizer-loading failures, but they affect different models and likely different loader paths; not the same underlying bug." }, { "left": "issue:36032", "right": "issue:43645", "accept": false, "reason": "T5 tokenizer attribute conflict and custom model initialization in notebooks are distinct failures with different triggers." }, { "left": "issue:43404", "right": "issue:44466", "accept": false, "reason": "Both mention lm_head weight/tied weights, but one is a model-specific tie bug and the other is a save_pretrained serialization inconsistency; different concrete issues." }, { "left": "issue:43646", "right": "issue:44534", "accept": false, "reason": "Custom model initialization breakage and non-persistent buffer junk are unrelated v5 regressions." }, { "left": "issue:44403", "right": "issue:45081", "accept": false, "reason": "One is generic loading noise, the other is a Mistral tokenizer crash caused by fix_mistral_regex; not the same bug." }, { "left": "issue:43645", "right": "issue:44534", "accept": false, "reason": "Notebook/custom-model initialization and buffer corruption are separate problems." 
}, { "left": "issue:43322", "right": "issue:43540", "accept": false, "reason": "Different multimodal model families and different runtime failures; no shared code-path evidence." }, { "left": "issue:43994", "right": "issue:44610", "accept": false, "reason": "Both concern processor/model input mismatch, but they involve different models and symptoms; too broad to be the same bug." }, { "left": "issue:45216", "right": "issue:45357", "accept": true, "reason": "Both report the same Qwen3.5 save_pretrained regression causing incorrect checkpoint serialization; 45357 looks like a later version-specific continuation of issue:45216." }, { "left": "issue:44451", "right": "issue:45081", "accept": false, "reason": "Loading failure for a specific model and a Mistral regex patch crash are different tokenizer-loading bugs." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 25, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:41553", "issue:42915", "issue:43296", "issue:43377", "issue:43404", "issue:43531", "issue:43582", "issue:43632", "issue:43819", "issue:44062", "issue:44220", "issue:44683", "issue:44756", "issue:44987", "issue:45081", "issue:45084", "issue:45127" ], "soft_pairs": [ "issue:44220|issue:44683", "issue:43377|issue:43819", "issue:41553|issue:43582", "issue:43632|issue:44756", "issue:42915|issue:45084", "issue:44987|issue:45081", "issue:43404|issue:45127", "issue:43296|issue:43531", "issue:44062|issue:44987" ], "prepared_review_unit_hash": "e3ab0dcb7a9023081415a26baf6b4041311f68a64d213efb11bffbb1aeca3c80", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, 
"estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12485, "estimated_input_tokens": 3122, "estimated_eval_tokens": 6500 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These issues are only superficially similar; they span unrelated bugs across tokenizer loading, model-specific regressions, quantization, attention compilation, allocator behavior, and LoRA/vocab interactions. No soft pair looks like the same underlying defect.", "confidence": 0.96, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": "issue:45081 is the most actionable standalone report: it has a concrete stack trace, clear repro conditions, and inbound references, but it is not a duplicate of the other issues.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44220", "right": "issue:44683", "accept": false, "reason": "Different failures: `_torch_extract_fbank_features()` is an audio feature-extraction bug, while 44683 is about compiled flex_attention on torch >= 2.9." }, { "left": "issue:43377", "right": "issue:43819", "accept": false, "reason": "Different models and mechanisms: MIMI batching/padding-mask mismatch vs DAC `from_latents` missing STE behavior." }, { "left": "issue:41553", "right": "issue:43582", "accept": false, "reason": "Unrelated subsystems: Voxtral AutoTokenizer error messaging vs Apple Silicon `caching_allocator_warmup` TypeError." }, { "left": "issue:43632", "right": "issue:44756", "accept": false, "reason": "Different regressions: `_is_hf_initialized` flag breakage vs disabling mmap on Strix Halo to avoid OOM." }, { "left": "issue:42915", "right": "issue:45084", "accept": false, "reason": "Qwen3MoE + FineGrainedFP8Config is a model/config issue; 45084 is a template compilation error. No shared code path." 
}, { "left": "issue:44987", "right": "issue:45081", "accept": false, "reason": "Both are loading-time failures, but one is `physical-intelligence/fast` under transformers>=5.1.0 and the other is a Mistral tokenizer regex patch AttributeError; different root causes." }, { "left": "issue:43404", "right": "issue:45127", "accept": false, "reason": "Both involve weight tying, but one is Mistral3 lm_head tying for image-text-to-text and the other is LoRA merge with extended vocabulary; not the same bug." }, { "left": "issue:43296", "right": "issue:43531", "accept": false, "reason": "Different model-specific issues: PaddleOCR-VL load failure in vLLM vs Qwen3-MoE sliding_window handling." }, { "left": "issue:44062", "right": "issue:44987", "accept": false, "reason": "`AddedToken(special=...)` constructor conflict is distinct from the `physical-intelligence/fast` loading failure." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 26, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36032", "issue:40990", "issue:42915", "issue:43317", "issue:43421", "issue:43611", "issue:43632", "issue:43742", "issue:43856", "issue:44265", "issue:44291", "issue:44479", "issue:44534", "issue:44568", "issue:44589", "issue:44811", "issue:45072", "issue:45325" ], "soft_pairs": [ "issue:44479|issue:45325", "issue:40990|issue:44811", "issue:43317|issue:43856", "issue:44265|issue:45072", "issue:42915|issue:44291", "issue:43632|issue:44534", "issue:43611|issue:44534", "issue:36032|issue:43742", "issue:42915|issue:44589", "issue:43421|issue:44568" ], "prepared_review_unit_hash": "b628c498f5584bd8b63cf9b6d504ae4661caea73911a15f64c00f48ebe5ce960", 
"component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13552, "estimated_input_tokens": 3388, "estimated_eval_tokens": 7032 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is heterogeneous: most items are distinct Transformers regressions across loading, tokenizers, quantization, multimodal/video, and export paths. The soft pairs share only broad surface similarity, not the same concrete bug or change.", "confidence": 0.93, "canonical_issue_reason": "No canonical issue stands out because the items do not form one underlying defect; they span unrelated failure modes in tokenization, model loading, precision/quantization, export, and multimodal handling.", "canonical_pr_reason": null, "best_issue_reason": "No single issue is a good global representative: none captures the cluster as a whole, and any choice would mischaracterize several unrelated regressions.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44479", "right": "issue:45325", "accept": false, "reason": "Both concern Qwen2.5-VL/Qwen video-position logic, but one is a broad video-input regression while the other is a specific still-image rope-index scaling bug; not the same concrete path." }, { "left": "issue:40990", "right": "issue:44811", "accept": false, "reason": "Unrelated tokenizer/output issues: perplexity on gpt-oss is not the same bug as Whisper batch_decode ignoring skip_special_tokens." }, { "left": "issue:43317", "right": "issue:43856", "accept": false, "reason": "Different subsystems and symptoms: device_map/offload loading failure vs Qwen3 MoE training memory inefficiency." 
}, { "left": "issue:44265", "right": "issue:45072", "accept": false, "reason": "torch.export failure with torch_compilable_check and bfloat16 dtype mismatches are different code paths and fixes." }, { "left": "issue:42915", "right": "issue:44291", "accept": false, "reason": "Both involve Transformers v5 loading/init, but one is a Qwen3Moe FineGrainedFP8Config failure and the other is an init_empty_weights/_is_hf_initialized argument error; not the same bug." }, { "left": "issue:43632", "right": "issue:44534", "accept": false, "reason": "The _is_hf_initialized flag regression and non-persistent buffer junk filling are unrelated model-state initialization problems." }, { "left": "issue:43611", "right": "issue:44534", "accept": false, "reason": "base_model_prefix loading breakage is a separate issue from junk-filled non-persistent buffers." }, { "left": "issue:36032", "right": "issue:43742", "accept": false, "reason": "Tokenizer method-name conflict and MobileLLM KeyError are different loading failures with no shared underlying cause." }, { "left": "issue:42915", "right": "issue:44589", "accept": false, "reason": "Both mention float8/quantization-adjacent behavior, but one is a Qwen3Moe FP8 config failure while the other is a missing Float8 storage type during load; distinct bugs." }, { "left": "issue:43421", "right": "issue:44568", "accept": false, "reason": "Both involve special tokens, but one is runtime post-processor synchronization and the other is add_special_tokens not inserting BOS/EOS for a specific tokenizer; different behaviors and fixes." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 27, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36032", "issue:40444", "issue:41720", "issue:43425", "issue:43531", "issue:43792", "issue:43818", "issue:44186", "issue:44265", "issue:44403", "issue:44589", "issue:44683", "issue:44821", "issue:44877", "issue:45020", "issue:45084", "issue:45325", "issue:45357" ], "soft_pairs": [ "issue:40444|issue:45325", "issue:43818|issue:45357", "issue:44683|issue:45020", "issue:43531|issue:44877", "issue:44821|issue:45084", "issue:43425|issue:44403", "issue:36032|issue:41720", "issue:43792|issue:44589", "issue:44186|issue:44265" ], "prepared_review_unit_hash": "e5cde5b83628801072d08172971f4fa102e47f25afc892ba7d939ecd7b652d3e", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 12989, "estimated_input_tokens": 3248, "estimated_eval_tokens": 6752 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "No true duplicate cluster emerges here: the items span unrelated bugs across tokenizers, vision-language models, export/compile, config loading, and version compatibility. 
All soft-similarity pairs should be rejected.", "confidence": 0.97, "canonical_issue_reason": "Issue 45020 is the broadest open report, so it is the least-bad anchor, but the set is not cohesive enough to justify a real canonical duplicate.", "canonical_pr_reason": null, "best_issue_reason": "Issue 45020 is the most umbrella-like and still open, but it does not represent the other reports well; there is no strong global best issue for this cluster.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:40444", "right": "issue:45325", "accept": false, "reason": "Both concern Qwen2.5-VL, but one is a multi-image IterableDataset finetuning failure while the other is a still-image rope-index regression; different code paths and fixes." }, { "left": "issue:43818", "right": "issue:45357", "accept": false, "reason": "Video-LLaVA weight/temporal-attention corruption is unrelated to Qwen3.5 save_pretrained writing wrong visual encoder keys." }, { "left": "issue:44683", "right": "issue:45020", "accept": false, "reason": "torch>=2.9 flex_attention compile failure is unrelated to recent transformers breaking remote_code models." }, { "left": "issue:43531", "right": "issue:44877", "accept": false, "reason": "Qwen3-MoE sliding_window behavior and granite_speech config strictness are different problems in different subsystems." }, { "left": "issue:44821", "right": "issue:45084", "accept": false, "reason": "AutoImageProcessor URL loading and non-template-node compilation errors are not the same bug." }, { "left": "issue:43425", "right": "issue:44403", "accept": false, "reason": "Torch 2.10 incompatibility is unrelated to the loading noise/verbosity report." }, { "left": "issue:36032", "right": "issue:41720", "accept": false, "reason": "Tokenizer method-name conflict and Qwen3 auto device-mapping cuda assert are different failure modes with no shared code path." 
}, { "left": "issue:43792", "right": "issue:44589", "accept": false, "reason": "Whisper-large-v2 runtime failure and missing Float8 storage type are distinct compatibility issues." }, { "left": "issue:44186", "right": "issue:44265", "accept": false, "reason": "LayoutLMv2Tokenizer crashing on NER/padding is unrelated to torch.export failures around torch_compilable_check." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 28, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:33666", "issue:35707", "issue:40444", "issue:43482", "issue:43525", "issue:43606", "issue:43644", "issue:43749", "issue:43761", "issue:43819", "issue:44079", "issue:44265", "issue:44291", "issue:44403", "issue:44534", "issue:44589", "issue:44991" ], "soft_pairs": [ "issue:44403|issue:44991", "issue:33666|issue:40444", "issue:35707|issue:40444", "issue:44079|issue:44534", "issue:43482|issue:43525", "issue:44291|issue:44589", "issue:43606|issue:44265", "issue:43606|issue:43819", "issue:43749|issue:44079", "issue:43761|issue:44079", "issue:43644|issue:44079" ], "prepared_review_unit_hash": "196122d22d77624ece36fe75ff16321e1d537fb715174d28a4078bc2a5a4817c", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 11, "serialized_chars": 13055, "estimated_input_tokens": 3264, "estimated_eval_tokens": 6784 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The soft pairs are only 
superficially similar (same broad model family or same Transformers v5 context) and do not describe the same underlying bug, so none should be merged as duplicates.", "confidence": 0.93, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": "No single true canonical issue stands out because the set is heterogeneous; if one representative is needed, issue 44534 is the broadest Transformers v5 regression in the group, but it does not duplicate the others.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44403", "right": "issue:44991", "accept": false, "reason": "Different failures: noisy loading/logging vs tokenizer loading error for a specific model." }, { "left": "issue:33666", "right": "issue:40444", "accept": false, "reason": "Both are Qwen-VL related, but one is multi-GPU training and the other is iterable-dataset/multi-image finetuning failure; different bug paths." }, { "left": "issue:35707", "right": "issue:40444", "accept": false, "reason": "Progressive generation with inputs_embeds/past_key_values is unrelated to multi-image iterable-dataset finetuning." }, { "left": "issue:44079", "right": "issue:44534", "accept": false, "reason": "ModelOutput key bookkeeping and non-persistent buffer initialization are distinct internals with different symptoms." }, { "left": "issue:43482", "right": "issue:43525", "accept": false, "reason": "Different models and errors: Qwen2.5-GGUF loading failure vs Llama4Config missing pad_token_id." }, { "left": "issue:44291", "right": "issue:44589", "accept": false, "reason": "init_empty_weights argument mismatch is a different incompatibility from the Float8 storage-object loading failure." }, { "left": "issue:43606", "right": "issue:44265", "accept": false, "reason": "CPU offload device mismatch in bark-small is not the same code path as torch.export failing on torch_compilable_check." 
}, { "left": "issue:43606", "right": "issue:43819", "accept": false, "reason": "Different model bugs: bark-small CPU offload mismatch vs DAC.from_latents not matching forward with missing STE." }, { "left": "issue:43749", "right": "issue:44079", "accept": false, "reason": "FSDP CPU RAM efficient loading and ModelOutput key assignment are unrelated regressions." }, { "left": "issue:43761", "right": "issue:44079", "accept": false, "reason": "CLIPVision hidden_states regression and ModelOutput key assignment are separate issues." }, { "left": "issue:43644", "right": "issue:44079", "accept": false, "reason": "Non-persistent buffer junk filling and ModelOutput key assignment are different bug classes." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 29, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:40444", "issue:41720", "issue:42175", "issue:43296", "issue:43531", "issue:43632", "issue:43828", "issue:43881", "issue:44291", "issue:44387", "issue:44488", "issue:44610", "issue:44617", "issue:44683", "issue:45072", "issue:45081", "issue:45325", "issue:45381" ], "soft_pairs": [ "issue:43881|issue:44488", "issue:40444|issue:45381", "issue:43531|issue:44291", "issue:42175|issue:43296", "issue:41720|issue:45081", "issue:44610|issue:45325", "issue:45325|issue:45381", "issue:43828|issue:44683", "issue:43632|issue:45072", "issue:44387|issue:44617" ], "prepared_review_unit_hash": "b55b67536bd13f6bb2925a132158654fe5f0773db6baeccef49ca7c89f6d45e2", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { 
"node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13431, "estimated_input_tokens": 3358, "estimated_eval_tokens": 6972 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "Most items are unrelated bugs across different models/subsystems. The only plausible duplicate pair is the Qwen2.5-VL position_id regression reports in transformers 5.3.0; everything else should stay separate.", "confidence": 0.79, "canonical_issue_reason": "issue:45325 is the clearest, most concrete report in the only near-duplicate pair: it names the exact Qwen2.5-VL get_rope_index / position_ids regression and the affected version.", "canonical_pr_reason": null, "best_issue_reason": "issue:45325 is the strongest representative issue overall because it is specific, actionable, and aligns with the only accepted soft duplicate edge.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43881", "right": "issue:44488", "accept": false, "reason": "Different models and symptoms; both are loading failures, but there is no clear shared underlying bug or fix path." }, { "left": "issue:40444", "right": "issue:45381", "accept": false, "reason": "One is about multi-image IterableDataset finetuning, the other about Qwen2.5-VL video position_ids; same broad model family is not enough." }, { "left": "issue:43531", "right": "issue:44291", "accept": false, "reason": "Qwen3-MoE sliding_window behavior and init_empty_weights/_is_hf_initialized are unrelated code paths." }, { "left": "issue:42175", "right": "issue:43296", "accept": false, "reason": "TensorFlow optional dependency packaging vs PaddleOCR-VL loading in vLLM are different problems." }, { "left": "issue:41720", "right": "issue:45081", "accept": false, "reason": "A800 cudaErrorAssert with auto device mapping is unrelated to a Mistral tokenizer backend_tokenizer AttributeError." 
}, { "left": "issue:44610", "right": "issue:45325", "accept": false, "reason": "OmDet-Turbo image size mismatch and Qwen2.5-VL position_ids scaling are distinct model-specific bugs." }, { "left": "issue:45325", "right": "issue:45381", "accept": true, "reason": "Both report Qwen2.5-VL position_id/rope-index regressions in transformers 5.3.0, likely the same underlying code-path affecting image/video handling." }, { "left": "issue:43828", "right": "issue:44683", "accept": false, "reason": "torch.autocast dtype mismatch in Phi-tiny-MoE is not the same as compiled flex_attention failing on torch >= 2.9." }, { "left": "issue:43632", "right": "issue:45072", "accept": false, "reason": "An initialization-flag regression in Transformers v5 is unrelated to bfloat16 dtype mismatches in SwitchTransformers/TimmWrapperModel." }, { "left": "issue:44387", "right": "issue:44617", "accept": false, "reason": "CUDA OOM under int4 quantization and SAM3Video OOM are too broad and have different likely causes/code paths." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 30, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:42915", "issue:43317", "issue:43454", "issue:43482", "issue:43493", "issue:43526", "issue:43611", "issue:43646", "issue:43792", "issue:43873", "issue:43881", "issue:43883", "issue:43957", "issue:43994", "issue:44186", "issue:44466", "issue:44991", "issue:45072" ], "soft_pairs": [ "issue:43611|issue:43646", "issue:43493|issue:43994", "issue:42915|issue:43482", "issue:43454|issue:44466", "issue:43526|issue:43883", "issue:43792|issue:43957", "issue:43881|issue:44991", "issue:44186|issue:45072", "issue:43317|issue:43873" ], "prepared_review_unit_hash": "e8ba5de6ad9a426688c3c002e43cd793e09b5f978abc47deb8a7d7b1a8aa0281", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13083, "estimated_input_tokens": 3271, "estimated_eval_tokens": 6798 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is mostly about Transformers 5.x load/regression issues, but it is heterogeneous; only the quantization/offload pair and the SigLIP2 pair look like true duplicates. 
The rest are related at a broad theme level but not the same underlying bug.", "confidence": 0.63, "canonical_issue_reason": "Issue 43611 is the broadest and most representative of the repeated Transformers 5.x model-loading regression theme in this set.", "canonical_pr_reason": null, "best_issue_reason": "Issue 43611 is the best overall anchor for the cluster because it captures the central loading-regression pattern more cleanly than the narrower, model-specific reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43611", "right": "issue:43646", "accept": false, "reason": "Both are Transformers 5.x loading problems, but one is about base_model_prefix handling and the other about custom model initialization; they read as related regressions, not the same bug." }, { "left": "issue:43493", "right": "issue:43994", "accept": true, "reason": "Both concern SigLIP2 producing wrong outputs and point to the same model-implementation discrepancy path; the second looks like a user-visible manifestation of the first." }, { "left": "issue:42915", "right": "issue:43482", "accept": false, "reason": "Different models and different failure modes: Qwen3Moe FP8 config vs Qwen2.5 GGUF loading under v5." }, { "left": "issue:43454", "right": "issue:44466", "accept": false, "reason": "Both mention tied weights/lm_head, but one is a generation bug in AyaVisionConfig and the other is serialization inconsistency across device placement; not the same concrete bug." }, { "left": "issue:43526", "right": "issue:43883", "accept": false, "reason": "Unrelated components and symptoms: BEiT fast processor label reduction vs Molmo tied-weights attribute error." }, { "left": "issue:43792", "right": "issue:43957", "accept": false, "reason": "Both are loading-related, but Whisper failing to run and meta-device loading regressions are not the same underlying issue." 
}, { "left": "issue:43881", "right": "issue:44991", "accept": false, "reason": "Different failure surface and subsystem: glm-4v-9b model loading vs tokenizer loading for est-roberta." }, { "left": "issue:44186", "right": "issue:45072", "accept": false, "reason": "Tokenizer NER/padding crash and bf16 dtype mismatches are distinct bugs in different code paths." }, { "left": "issue:43317", "right": "issue:43873", "accept": true, "reason": "These describe the same quantization/offload problem: device_map=auto fails when loading dequantized or quantized models with CPU/GPU offload." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 31, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:34634", "issue:35707", "issue:41720", "issue:43262", "issue:43296", "issue:43377", "issue:43550", "issue:43632", "issue:43720", "issue:43746", "issue:44164", "issue:44291", "issue:44610", "issue:44877", "issue:44898", "issue:44991", "issue:45325" ], "soft_pairs": [ "issue:43262|issue:43746", "issue:44898|issue:45325", "issue:34634|issue:35707", "issue:44291|issue:44877", "issue:43632|issue:44164", "issue:43550|issue:43720", "issue:35707|issue:41720", "issue:44610|issue:44898", "issue:43377|issue:44610", "issue:43377|issue:44898", "issue:43296|issue:44991", "issue:43296|issue:44877" ], "prepared_review_unit_hash": "98ac45fb1aab5e363edec10d71fab9951efed76660e3157c12d41628512ba0a7", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 12, "serialized_chars": 
13586, "estimated_input_tokens": 3397, "estimated_eval_tokens": 7050 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is not a duplicate set; it mixes many unrelated bugs across audio processors, model loading/init, vision, and model-specific runtime failures. All soft edges appear to be superficial text similarity only, so none should be merged.", "confidence": 0.98, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43262", "right": "issue:43746", "accept": false, "reason": "Audio processor chat-template sampling-rate bug vs GraniteSpeech PEFT/local checkpoint loading bug; different component and failure mode." }, { "left": "issue:44898", "right": "issue:45325", "accept": false, "reason": "Perceiver non-default-resolution image sizing issue vs Qwen2.5-VL rope index/temporal position bug; unrelated subsystems." }, { "left": "issue:34634", "right": "issue:35707", "accept": false, "reason": "BarkProcessor voice_preset not working vs progressive generation with inputs_embeds/past_key_values; no shared code path." }, { "left": "issue:44291", "right": "issue:44877", "accept": false, "reason": "init_empty_weights/_is_hf_initialized TypeError vs strict config blocking granite_speech config; one is model init API breakage, the other config validation." }, { "left": "issue:43632", "right": "issue:44164", "accept": false, "reason": "_is_hf_initialized flag breakage vs save/from_pretrained extra_state handling; both loading/saving related but distinct bugs." }, { "left": "issue:43550", "right": "issue:43720", "accept": false, "reason": "torch.compile with SDPA on Bamba-9B-v2 vs BitNet packed-weights loading in accelerate; different models and failure mechanisms." 
}, { "left": "issue:35707", "right": "issue:41720", "accept": false, "reason": "Generation with inputs_embeds/past_key_values vs Qwen3 auto device mapping cudaErrorAssert; unrelated generation vs device-placement bug." }, { "left": "issue:44610", "right": "issue:44898", "accept": false, "reason": "OmDet-Turbo processor output/input size mismatch vs Perceiver resolution interpolation failure; both vision-related but different models and causes." }, { "left": "issue:43377", "right": "issue:44610", "accept": false, "reason": "MIMI encoder batch padding mask discrepancy vs OmDet-Turbo image size mismatch; unrelated audio encoder vs vision processor bug." }, { "left": "issue:43377", "right": "issue:44898", "accept": false, "reason": "MIMI encoder padding-mask bug vs Perceiver positional interpolation issue; different models and code paths." }, { "left": "issue:43296", "right": "issue:44991", "accept": false, "reason": "PaddleOCR-VL model loading under transformers 4.53/vLLM vs tokenizer loading failure for EMBEDDIA/est-roberta in transformers >=5; separate loading failures." }, { "left": "issue:43296", "right": "issue:44877", "accept": false, "reason": "PaddleOCR-VL/vLLM loading failure vs strict granite_speech config validation; unrelated model families and error sources." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 32, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:39692", "issue:40444", "issue:41720", "issue:43322", "issue:43404", "issue:43454", "issue:43550", "issue:43606", "issue:43749", "issue:43792", "issue:44186", "issue:44466", "issue:44534", "issue:44610", "issue:44683", "issue:44977", "issue:45072", "issue:45357" ], "soft_pairs": [ "issue:43749|issue:44534", "issue:39692|issue:40444", "issue:44466|issue:45357", "issue:43550|issue:43606", "issue:43454|issue:45072", "issue:43792|issue:44683", "issue:43454|issue:43550", "issue:41720|issue:43322", "issue:44186|issue:44610", "issue:41720|issue:44977", "issue:43404|issue:45357" ], "prepared_review_unit_hash": "a374184f783086e8d719730a7cf1068152fec9e161398b4a7ff9270cb7d9700a", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 11, "serialized_chars": 13688, "estimated_input_tokens": 3422, "estimated_eval_tokens": 7100 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is a set of distinct bug reports across unrelated models/features. 
None of the soft-linked pairs look like the same underlying issue, so all soft edges should be rejected and there is no true canonical duplicate within this set.", "confidence": 0.98, "canonical_issue_reason": "No single issue is a valid canonical duplicate here: the reports cover different models, different failure modes, and different code paths (documentation, training, loading, serialization, attention, preprocessing).", "canonical_pr_reason": null, "best_issue_reason": "If one issue had to anchor the cluster, issue 43454 is the clearest self-contained bug report, but it is not a duplicate representative of the others.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43749", "right": "issue:44534", "accept": false, "reason": "Different problems: FSDP CPU RAM-efficient loading vs v5 non-persistent buffer serialization junk. Same broad theme of loading/state handling, but not the same bug or fix." }, { "left": "issue:39692", "right": "issue:40444", "accept": false, "reason": "Both are multimodal-related, but one is a SigLIP2 docs/model-processor mismatch and quantization example failure; the other is Qwen2.5-VL finetuning with IterableDataset and multiple images per prompt. Different code paths and symptoms." }, { "left": "issue:44466", "right": "issue:45357", "accept": false, "reason": "Both involve save_pretrained/serialization, but one is tied lm_head.weight serialization depending on device, while the other is incorrect visual encoder keys for Qwen3.5. Not the same underlying bug." }, { "left": "issue:43550", "right": "issue:43606", "accept": false, "reason": "Different runtime failures: torch.compile with SDPA on Bamba vs CPU offload device mismatch on bark-small. No shared concrete code-path bug." }, { "left": "issue:43454", "right": "issue:45072", "accept": false, "reason": "AyaVision lm_head tying bug causing bad generation is unrelated to SwitchTransformers/TimmWrapperModel bf16 dtype mismatch." 
}, { "left": "issue:43792", "right": "issue:44683", "accept": false, "reason": "Whisper model loading/running failure and compiled flex_attention failure on torch >= 2.9 are unrelated issues." }, { "left": "issue:43454", "right": "issue:43550", "accept": false, "reason": "One is a missing lm_head tie in AyaVision; the other is a torch.compile/SDPA failure in Bamba. Same area (model execution) but different bugs." }, { "left": "issue:41720", "right": "issue:43322", "accept": false, "reason": "Qwen3 auto device mapping cudaErrorAssert and Llava Next segmentation fault are different model/runtime failures with no evidence of the same root cause." }, { "left": "issue:44186", "right": "issue:44610", "accept": false, "reason": "Both concern input preprocessing, but one is a LayoutLMv2 tokenizer crash on NER/batching and the other is OmDet-Turbo processor producing the wrong image size. Different failures and fixes." }, { "left": "issue:41720", "right": "issue:44977", "accept": false, "reason": "Both mention Qwen, but the bugs differ: auto device mapping assertion on A800 vs flash-attention generation issues in Qwen3.5." }, { "left": "issue:43404", "right": "issue:45357", "accept": false, "reason": "Both touch model saving/weight handling, but one is untied lm_head weights in Mistral3ForConditionalGeneration and the other is incorrect visual encoder keys in Qwen3.5 save_pretrained. Not the same issue." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 33, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:34567", "issue:40990", "issue:42617", "issue:42915", "issue:43377", "issue:43454", "issue:43526", "issue:43577", "issue:43708", "issue:43720", "issue:43873", "issue:43881", "issue:44186", "issue:44361", "issue:44423", "issue:44756", "issue:44811", "issue:45406" ], "soft_pairs": [ "issue:43526|issue:43881", "issue:44423|issue:45406", "issue:43454|issue:43720", "issue:34567|issue:40990", "issue:43873|issue:44756", "issue:43577|issue:43708", "issue:43377|issue:44811", "issue:44186|issue:44361", "issue:42617|issue:42915" ], "prepared_review_unit_hash": "bbd81ecbb684e972f81a30204af20bf6527ed78382e84f642c3d45b999db3699", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13193, "estimated_input_tokens": 3299, "estimated_eval_tokens": 6854 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These items are a grab bag of unrelated bugs across trainer state, model loading, tokenizers, multimodal serving, quantization/offloading, and hardware-specific memory issues. None of the soft pairs look like the same underlying bug or change.", "confidence": 0.95, "canonical_issue_reason": "No single canonical duplicate stands out because the set is heterogeneous. 
If an anchor is needed, issue #43873 is the strongest standalone report due to higher discussion/inbound references and a clear reproducible bug, but it is not a duplicate of the others.", "canonical_pr_reason": null, "best_issue_reason": "Issue #43873 is the best standalone issue candidate: it has the most activity and references, a concrete failure mode, and an open status. It is still not a merge target for any of the other items.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43526", "right": "issue:43881", "accept": false, "reason": "Different components and symptoms: BeitImageProcessorFast label reduction vs glm-4v-9b model loading failure. No shared underlying bug." }, { "left": "issue:44423", "right": "issue:45406", "accept": false, "reason": "Both involve transformers serve, but the failures are different AttributeErrors in different processors/code paths, so they are not the same bug." }, { "left": "issue:43454", "right": "issue:43720", "accept": false, "reason": "AyaVision lm_head tying bug and BitNet packed-weight loading bug affect different models and different code paths." }, { "left": "issue:34567", "right": "issue:40990", "accept": false, "reason": "TrainerState token accounting not updating is unrelated to extreme perplexity on gpt-oss-20b/WikiText-2." }, { "left": "issue:43873", "right": "issue:44756", "accept": false, "reason": "Both mention memory-related behavior, but one is quantization offloading not working and the other is mmap causing OOM on Strix Halo; different root causes and fixes." }, { "left": "issue:43577", "right": "issue:43708", "accept": false, "reason": "Model dtype casting on Blip2 is unrelated to resume_from_checkpoint max_steps calculation with batch-size changes." }, { "left": "issue:43377", "right": "issue:44811", "accept": false, "reason": "MIMI encoder padding-mask batching bug and Whisper batch_decode skip_special_tokens bug are separate preprocessing/decoding issues." 
}, { "left": "issue:44186", "right": "issue:44361", "accept": false, "reason": "Different tokenizer implementations and different failure modes; these are not the same bug." }, { "left": "issue:42617", "right": "issue:42915", "accept": false, "reason": "3d_parallel.py execution failure and Qwen3Moe FineGrainedFP8Config failure are unrelated." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 34, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36331", "issue:42617", "issue:42915", "issue:42994", "issue:43316", "issue:43317", "issue:43482", "issue:43550", "issue:43606", "issue:43653", "issue:43720", "issue:44479", "issue:44671", "issue:44743", "issue:44877", "issue:44913", "issue:44918", "issue:44991" ], "soft_pairs": [ "issue:44479|issue:44918", "issue:44877|issue:44991", "issue:43606|issue:43720", "issue:42994|issue:43482", "issue:42915|issue:43317", "issue:36331|issue:42915", "issue:44671|issue:44743", "issue:43316|issue:44913", "issue:43550|issue:43653", "issue:36331|issue:42617" ], "prepared_review_unit_hash": "7de98d78e606262053607a138457e1dea9e7df968c5fb89c2ecb682c3f31d46a", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13276, "estimated_input_tokens": 3319, "estimated_eval_tokens": 6894 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is a loose set of unrelated bug reports across different 
models, configs, tokenizers, and training/loading paths. None of the soft-similarity pairs look like the same underlying defect, so no duplicates should be merged here.", "confidence": 0.91, "canonical_issue_reason": "There is no clear canonical duplicate in this set because the issues are heterogeneous. If one issue must serve as a stand-in, `#44918` is the best candidate because it has the most concrete, actionable regression description and context.", "canonical_pr_reason": "No pull requests are present in this cluster.", "best_issue_reason": "`#44918` is the strongest representative only in the sense of clarity and specificity, not because it duplicates the others; it has a narrow, reproducible trainer-related failure with a clearly named model family.", "best_pr_reason": "No pull requests are present in this cluster.", "soft_edge_verdicts": [ { "left": "issue:44479", "right": "issue:44918", "accept": false, "reason": "Both mention Qwen variants, but one is a video-input regression and the other is a TRL SFT embedding unpacking failure; different code paths and symptoms." }, { "left": "issue:44877", "right": "issue:44991", "accept": false, "reason": "A strict config-loading bug for `granite_speech` is unrelated to a tokenizer loading failure for `EMBEDDIA/est-roberta`." }, { "left": "issue:43606", "right": "issue:43720", "accept": false, "reason": "Both involve loading/offload scenarios, but one is a CPU-offload device mismatch and the other is unpacked packed weights during Accelerate loading; not the same bug." }, { "left": "issue:42994", "right": "issue:43482", "accept": false, "reason": "Quantized model saving failure and GGUF loading failure in v5 are different operations with different failure modes." }, { "left": "issue:42915", "right": "issue:43317", "accept": false, "reason": "These are separate model-loading issues: one with FineGrainedFP8Config on Qwen3Moe, the other with `device_map=auto` dequantized offload behavior." 
}, { "left": "issue:36331", "right": "issue:42915", "accept": false, "reason": "A trainer API kwarg mismatch is unrelated to a Qwen3Moe FP8 configuration failure." }, { "left": "issue:44671", "right": "issue:44743", "accept": false, "reason": "CamemBERT masked-LM prediction regressions and Qwen3.5 recurrent-state reset behavior are distinct model-specific bugs." }, { "left": "issue:43316", "right": "issue:44913", "accept": false, "reason": "Both are config/API consistency reports, but they concern different fields, models, and serialization behavior; not the same underlying defect." }, { "left": "issue:43550", "right": "issue:43653", "accept": false, "reason": "Torch.compile/SDPA failure for Bamba-9B-v2 is unrelated to a tokenizer special-token registration problem in BigBirdTokenizer." }, { "left": "issue:36331", "right": "issue:42617", "accept": false, "reason": "Unexpected `compute_loss()` kwarg and failure to run `3d_parallel.py` do not point to the same code-path or root cause." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 35, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:41720", "issue:42617", "issue:42915", "issue:43322", "issue:43388", "issue:43531", "issue:43540", "issue:43572", "issue:43606", "issue:43653", "issue:43782", "issue:43873", "issue:44292", "issue:44479", "issue:44898", "issue:45059", "issue:45072", "issue:45161" ], "soft_pairs": [ "issue:44898|issue:45072", "issue:43606|issue:43653", "issue:43531|issue:43572", "issue:44292|issue:44479", "issue:41720|issue:45161", "issue:41720|issue:42617", "issue:43388|issue:45059", "issue:43540|issue:43782", "issue:42915|issue:43322", "issue:41720|issue:43873" ], 
"prepared_review_unit_hash": "cc1cdce295a5f38b4ae6c407ee74a416cbee99ea6e009ffc67207d81367ef93d", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13260, "estimated_input_tokens": 3315, "estimated_eval_tokens": 6886 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These are mostly unrelated model- or subsystem-specific bug reports. The soft pairs share superficial themes (Qwen, offloading, dtype/device issues, multimodal inputs) but not the same concrete failure path, so none should be merged as duplicates.", "confidence": 0.89, "canonical_issue_reason": "Issue #43873 is the broadest and most active report here, centered on offloading/quantization behavior, but the cluster overall does not have a single true duplicate hub.", "canonical_pr_reason": null, "best_issue_reason": "Issue #43873 is the best representative issue among this set because it has the widest scope and the most discussion/inbound references, even though the rest of the cluster is heterogeneous.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44898", "right": "issue:45072", "accept": false, "reason": "Both involve inference-time mismatches, but one is Perceiver image classification with interpolate_pos_encoding and the other is SwitchTransformers/TimmWrapper bfloat16 dtype mismatch; different models and code paths." }, { "left": "issue:43606", "right": "issue:43653", "accept": false, "reason": "One is a CPU offload device-mismatch bug in bark-small; the other is a tokenizer special-token registration problem causing empty decode output. Different subsystems." 
}, { "left": "issue:43531", "right": "issue:43572", "accept": false, "reason": "Qwen3-MoE sliding_window behavior is unrelated to StableLmConfig missing pad_token_idx after a version update." }, { "left": "issue:44292", "right": "issue:44479", "accept": false, "reason": "Qwen-3-8B-NVFP4 runtime failure and a video-input regression across several Qwen VL/3.5 variants are different bugs affecting different features." }, { "left": "issue:41720", "right": "issue:45161", "accept": false, "reason": "Auto device mapping on Qwen3 with cudaErrorAssert is a specific placement/runtime crash; GPT-OSS MoE only-TP failure is a separate parallelism issue." }, { "left": "issue:41720", "right": "issue:42617", "accept": false, "reason": "Qwen3 auto device mapping crash and failure to run 3d_parallel.py do not appear to share the same underlying failure mode or model path." }, { "left": "issue:43388", "right": "issue:45059", "accept": false, "reason": "gather_for_metrics dropping tuple labels is a training/evaluation batching bug, while SAM3 PCS text-and-box weirdness is a multimodal prompting/interaction issue." }, { "left": "issue:43540", "right": "issue:43782", "accept": false, "reason": "Both mention Qwen3 multimodal models, but one is video-input processing in Qwen3OmniMoe and the other is from_pretrained weight_only=True loading in Qwen3VL; different code paths." }, { "left": "issue:42915", "right": "issue:43322", "accept": false, "reason": "Qwen3Moe FineGrainedFP8Config failure and Llava Next segmentation fault are unrelated model/runtime failures." }, { "left": "issue:41720", "right": "issue:43873", "accept": false, "reason": "Auto device mapping causing cudaErrorAssert on Qwen3 is not the same as quantization/offloading not working as expected; overlap is only in broad device-placement themes." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 36, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:30064", "issue:33357", "issue:34634", "issue:35707", "issue:36010", "issue:41720", "issue:42617", "issue:43653", "issue:43720", "issue:43746", "issue:44368", "issue:44492", "issue:44509", "issue:44514", "issue:44545", "issue:45357", "issue:45406", "issue:45464" ], "soft_pairs": [ "issue:45406|issue:45464", "issue:36010|issue:42617", "issue:44514|issue:44545", "issue:33357|issue:35707", "issue:30064|issue:34634", "issue:41720|issue:43746", "issue:44492|issue:44509", "issue:43653|issue:43720", "issue:44368|issue:45357" ], "prepared_review_unit_hash": "3c9dc6e6865dc172e69a40c10c7e539b56c04bef52806222b9a8491e63b14bbc", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13219, "estimated_input_tokens": 3305, "estimated_eval_tokens": 6866 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "There is one clear duplicate pair: the two Qwen2_5_VLProcessor.apply_chat_template padding=False crash reports. 
The other soft-similarity pairs describe different models, different code paths, or different bug classes and should stay separate.", "confidence": 0.97, "canonical_issue_reason": "Issue 44514 is the earlier, more detailed report of the same Qwen2_5_VLProcessor.apply_chat_template crash on batched input with padding=False, so it is the best canonical issue over 44545.", "canonical_pr_reason": null, "best_issue_reason": "Issue 44514 is the strongest representative in this set because it is the earliest report of the only confirmed duplicate bug and has the fuller original description.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:45406", "right": "issue:45464", "accept": false, "reason": "Different surfaces and failure modes: Gemma4Processor _tokenizer AttributeError in serve vs Qwen3.5 streaming inference failure in chat/completions." }, { "left": "issue:36010", "right": "issue:42617", "accept": false, "reason": "Unrelated problems: GenerationMixin import error vs inability to run 3d_parallel.py; no shared concrete bug path." }, { "left": "issue:44514", "right": "issue:44545", "accept": true, "reason": "Same exact bug report: Qwen2_5_VLProcessor.apply_chat_template crashes on batched input when padding=False, with essentially identical title and symptom." }, { "left": "issue:33357", "right": "issue:35707", "accept": false, "reason": "Different issues: MacOS bus error with a CLIP model vs progressive generation using inputs_embeds/past_key_values." }, { "left": "issue:30064", "right": "issue:34634", "accept": false, "reason": "Different components and symptoms: image processor void segmentation maps vs BarkProcessor voice_preset not working." }, { "left": "issue:41720", "right": "issue:43746", "accept": false, "reason": "Different model/runtime failures: Qwen3 auto device mapping cudaErrorAssert vs GraniteSpeech PEFT/local checkpoint loading." 
}, { "left": "issue:44492", "right": "issue:44509", "accept": false, "reason": "Both are docs/content-related, but one is a typo in cache strategies and the other is stale pipeline-task docs; not the same underlying bug or change." }, { "left": "issue:43653", "right": "issue:43720", "accept": false, "reason": "Different tokenizer vs loading issues: BigBirdTokenizer special-token registration vs BitNet packed-weight unpacking during accelerate loading." }, { "left": "issue:44368", "right": "issue:45357", "accept": false, "reason": "Different regressions affecting different model behaviors: tie_word_embeddings warning vs incorrect visual encoder keys in save_pretrained." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 37, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:34634", "issue:41720", "issue:42886", "issue:43381", "issue:43550", "issue:43606", "issue:43782", "issue:43844", "issue:44062", "issue:44361", "issue:44451", "issue:44843", "issue:44918", "issue:44936", "issue:44991", "issue:45072", "issue:45381" ], "soft_pairs": [ "issue:43381|issue:43844", "issue:43606|issue:45072", "issue:43782|issue:45381", "issue:44918|issue:44936", "issue:44451|issue:44991", "issue:43550|issue:45072", "issue:34634|issue:41720", "issue:43550|issue:44361", "issue:44062|issue:44451", "issue:42886|issue:44843" ], "prepared_review_unit_hash": "6606ecff1a1267ae1616cac21d0069da29025162a6cb936b512522c7181a2ed1", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, 
"soft_pair_count": 10, "serialized_chars": 12758, "estimated_input_tokens": 3190, "estimated_eval_tokens": 6636 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "Mostly heterogeneous bug reports with no strong global duplicate cluster; only the offline-tokenizer pair looks like the same underlying issue.", "confidence": 0.78, "canonical_issue_reason": "issue:44843 is the clearest representative of the one likely duplicate pair because it names the concrete root cause: `_patch_mistral_regex` unconditionally calling `model_info()` and breaking `HF_HUB_OFFLINE` mode.", "canonical_pr_reason": null, "best_issue_reason": "issue:44843 is the strongest issue anchor in the set: specific failure mode, concrete code path, and it best explains the related offline-cache report.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43381", "right": "issue:43844", "accept": false, "reason": "Both mention training/gradient behavior, but one is eval-mode checkpointing and the other is ZeRO-3 gradient explosion; different bugs." }, { "left": "issue:43606", "right": "issue:45072", "accept": false, "reason": "Both are device/dtype mismatch reports, but for different models and different runtime paths (CPU offload vs bfloat16 inference)." }, { "left": "issue:43782", "right": "issue:45381", "accept": false, "reason": "Both are Qwen VLM issues, but one is a weight-only loading error and the other is wrong video `vision_position_ids`; not the same bug." }, { "left": "issue:44918", "right": "issue:44936", "accept": false, "reason": "Different failure points: TRL SFT embedding unpacking vs `trainer.evaluate()` after `trainer.train()`." }, { "left": "issue:44451", "right": "issue:44991", "accept": false, "reason": "Both are tokenizer/model loading regressions, but they affect different models and the failure details are different enough to not treat as one bug." 
}, { "left": "issue:43550", "right": "issue:45072", "accept": false, "reason": "`torch.compile` with SDPA on Bamba is unrelated to TimmWrapper/SwitchTransformers bfloat16 dtype mismatches." }, { "left": "issue:34634", "right": "issue:41720", "accept": false, "reason": "Voice preset handling in BarkProcessor is unrelated to Qwen3 auto device mapping cuda assert failures." }, { "left": "issue:43550", "right": "issue:44361", "accept": false, "reason": "A compile/SDPA model bug is unrelated to an MLukeTokenizer AttributeError on tasks." }, { "left": "issue:44062", "right": "issue:44451", "accept": false, "reason": "AddedToken keyword-argument duplication is a tokenizer-construction bug, not the same as a model-specific tokenizer loading failure." }, { "left": "issue:42886", "right": "issue:44843", "accept": true, "reason": "These describe the same offline-tokenizer failure: loading from cache with `HF_HUB_OFFLINE=1` and the unconditional `model_info()` call that breaks offline mode." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 38, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:34567", "issue:35707", "issue:43421", "issue:43531", "issue:43653", "issue:43720", "issue:43819", "issue:43927", "issue:43994", "issue:44077", "issue:44186", "issue:44589", "issue:44625", "issue:44671", "issue:44898", "issue:44987", "issue:45042" ], "soft_pairs": [ "issue:43421|issue:43927", "issue:43720|issue:43819", "issue:34567|issue:35707", "issue:43531|issue:44589", "issue:44987|issue:45042", "issue:43994|issue:44671", "issue:44077|issue:44625", "issue:43421|issue:43653", "issue:43421|issue:44186", "issue:44186|issue:44898" ], 
"prepared_review_unit_hash": "26a35f87b00419aafc911406e47ab236f140716c9d1225d45826978c27b3aa13", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 10, "serialized_chars": 12866, "estimated_input_tokens": 3217, "estimated_eval_tokens": 6690 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These issues are largely unrelated and do not form a duplicate cluster; the soft pairs are only superficially similar at the subsystem level and should be rejected.", "confidence": 0.94, "canonical_issue_reason": "No single canonical issue: the set spans unrelated bugs in tokenizers, configs, generation, image processing, and model-specific behaviors.", "canonical_pr_reason": null, "best_issue_reason": "No global best issue; none of the items is a good representative for the others because the pairwise overlaps are thematic rather than the same underlying defect.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43421", "right": "issue:43927", "accept": false, "reason": "Both involve tokenizer/special-token handling, but one is about runtime post-processor refresh and the other about config losing custom token IDs after save/load; different failure modes and code paths." }, { "left": "issue:43720", "right": "issue:43819", "accept": false, "reason": "Different models and bugs: BitNet loading/unpacking weights versus DAC latent conversion/STE behavior." }, { "left": "issue:34567", "right": "issue:35707", "accept": false, "reason": "TrainerState token counting and progressive generation with inputs_embeds/past_key_values are unrelated issues." 
}, { "left": "issue:43531", "right": "issue:44589", "accept": false, "reason": "Sliding-window behavior in Qwen3-MoE is unrelated to the float8 storage lookup error during serialization/loading." }, { "left": "issue:44987", "right": "issue:45042", "accept": false, "reason": "Both are load-time regressions in transformers, but one concerns a specific model load failure and the other an image-processor dependency issue; not the same bug." }, { "left": "issue:43994", "right": "issue:44671", "accept": false, "reason": "Different model-specific prediction regressions with different symptoms and likely distinct code paths." }, { "left": "issue:44077", "right": "issue:44625", "accept": false, "reason": "One is about disallowing optional post_init in PatchTSMixer; the other is about num_labels propagation in Qwen3.5 config." }, { "left": "issue:43421", "right": "issue:43653", "accept": false, "reason": "Both touch tokenizer special-token handling, but one is a post-processor refresh bug while the other is a BigBird mask-token registration/decode issue." }, { "left": "issue:43421", "right": "issue:44186", "accept": false, "reason": "Tokenizer-related, but the reported problems are different: special-token post-processor updates versus LayoutLMv2 NER padding/truncation crashes." }, { "left": "issue:44186", "right": "issue:44898", "accept": false, "reason": "Completely different components and failures: tokenizer batching errors versus Perceiver image classification with positional interpolation." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 39, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:39692", "issue:43525", "issue:43531", "issue:43540", "issue:43550", "issue:43596", "issue:43653", "issue:43746", "issue:43819", "issue:43873", "issue:44008", "issue:44220", "issue:44610", "issue:44661", "issue:45072", "issue:45081", "issue:45356", "issue:45381" ], "soft_pairs": [ "issue:44610|issue:45072", "issue:45081|issue:45356", "issue:39692|issue:44610", "issue:43596|issue:44220", "issue:43525|issue:43531", "issue:43746|issue:45381", "issue:43540|issue:44008", "issue:43653|issue:43819", "issue:44220|issue:44661", "issue:39692|issue:43873", "issue:43550|issue:43819" ], "prepared_review_unit_hash": "3e42bc49cf1e6b1b58b1d36e955f1d73fe11edcb72baa20a243e5d8764acb3df", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 11, "serialized_chars": 13933, "estimated_input_tokens": 3484, "estimated_eval_tokens": 7224 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This is a heterogeneous set of unrelated bug reports across tokenizers, multimodal inputs, quantization/offloading, and model-specific config/runtime failures. 
None of the soft pairs look like the same underlying bug or a mergeable PR-equivalent change.", "confidence": 0.96, "canonical_issue_reason": "No single issue cleanly represents this cluster because the reports span different models and failure modes; if forced, 43873 is the broadest open issue, but it is not a true canonical duplicate target.", "canonical_pr_reason": null, "best_issue_reason": "Issue 43873 is the broadest and still open, so it is the closest thing to a cluster anchor, but it should not be treated as a duplicate umbrella for the rest.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44610", "right": "issue:45072", "accept": false, "reason": "Different models and bugs: OmDet-Turbo input size mismatch vs bfloat16 dtype mismatch in SwitchTransformers/TimmWrapperModel." }, { "left": "issue:45081", "right": "issue:45356", "accept": false, "reason": "Both involve tokenizer internals, but the concrete failures differ: backend_tokenizer AttributeError vs Kimi-K2.5 codec/regression behavior and warning mismatch." }, { "left": "issue:39692", "right": "issue:44610", "accept": false, "reason": "Both mention processor/model mismatch, but they are different models and distinct failure modes, so not the same underlying bug." }, { "left": "issue:43596", "right": "issue:44220", "accept": false, "reason": "Unrelated bugs: DeepSpeed ZeRO3/BertModel index error vs _torch_extract_fbank_features() failure." }, { "left": "issue:43525", "right": "issue:43531", "accept": false, "reason": "Different code paths and symptoms: missing pad_token_id on Llama4Config vs sliding_window behavior in Qwen3-MoE." }, { "left": "issue:43746", "right": "issue:45381", "accept": false, "reason": "Different subsystems and concrete problems: PEFT/local checkpoint loading for GraniteSpeech vs Qwen2.5-VL video vision_position_ids." 
}, { "left": "issue:43540", "right": "issue:44008", "accept": false, "reason": "Both are multimodal model bugs, but one is a video ValueError in Qwen3OmniMoe and the other is a variable-name collision causing audio_mel_mask AttributeError in Gemma3nModel." }, { "left": "issue:43653", "right": "issue:43819", "accept": false, "reason": "Completely different issues: tokenizer special-token registration bug vs DAC.from_latents forward/STE mismatch." }, { "left": "issue:44220", "right": "issue:44661", "accept": false, "reason": "Different functionality: fbank feature extraction failure vs add-new-model-like breaking when the model appears in TOKENIZER_MAPPING_NAMES." }, { "left": "issue:39692", "right": "issue:43873", "accept": false, "reason": "One is a SigLIP2 documentation example bug; the other is a broad quantization/offloading runtime issue." }, { "left": "issue:43550", "right": "issue:43819", "accept": false, "reason": "Different model/runtime bugs: torch.compile+SDPA failure in Bamba-9B-v2 vs DAC.from_latents mismatch with missing STE." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 40, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:40444", "issue:43335", "issue:43618", "issue:43697", "issue:43716", "issue:43720", "issue:43746", "issue:43756", "issue:43873", "issue:44008", "issue:44062", "issue:44186", "issue:44442", "issue:44488", "issue:44843", "issue:45072", "issue:45081" ], "soft_pairs": [ "issue:44843|issue:45081", "issue:43716|issue:44008", "issue:40444|issue:43746", "issue:43746|issue:43873", "issue:43618|issue:43697", "issue:44186|issue:44442", "issue:43335|issue:43756", "issue:43720|issue:45072", "issue:44062|issue:44488" ], "prepared_review_unit_hash": "f39996fc1a937c5037d81b6c7cfc0eef279332579f4aacaef862793d39604699", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12804, "estimated_input_tokens": 3201, "estimated_eval_tokens": 6658 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These items are mostly unrelated bug reports across different models and tokenizer paths. The only near-match is the pair of Mistral regex issues, but they describe different failure modes, so no soft pair should be merged.", "confidence": 0.91, "canonical_issue_reason": "No single canonical issue: this set is not a duplicate cluster, but a collection of distinct bugs in different components. 
If forced to pick a representative, issue 44843 is the most actionable and well-scoped of the group.", "canonical_pr_reason": null, "best_issue_reason": "Issue 44843 is the strongest representative candidate because it has a clear repro, references, and a narrowly defined regression in one helper path. It is still not a duplicate target for the other issues.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44843", "right": "issue:45081", "accept": false, "reason": "Both involve _patch_mistral_regex, but 44843 is an offline-mode model_info() call bug while 45081 is an AttributeError on missing backend_tokenizer; same area, different underlying failures." }, { "left": "issue:43716", "right": "issue:44008", "accept": false, "reason": "Different models and bugs: Mistral-3 image-preprocessor dtype mismatch versus Gemma3n forward() variable name collision." }, { "left": "issue:40444", "right": "issue:43746", "accept": false, "reason": "Completely different subsystems and failure modes: Qwen2.5-VL iterable multi-image finetuning versus GraniteSpeech PEFT local checkpoint loading." }, { "left": "issue:43746", "right": "issue:43873", "accept": false, "reason": "Both mention loading/offloading concerns, but one is PEFT checkpoint loading and the other is quantization/offloading behavior; not the same concrete bug." }, { "left": "issue:43618", "right": "issue:43697", "accept": false, "reason": "CLIP attentions assignment regression and RTDetrV2 output differences are unrelated model-specific issues." }, { "left": "issue:44186", "right": "issue:44442", "accept": false, "reason": "Both are tokenizer-related, but one is a LayoutLMv2 NER/padding crash and the other is AutoTokenizer failing to load a specific tokenizer class." }, { "left": "issue:43335", "right": "issue:43756", "accept": false, "reason": "Different model configuration bugs with different mechanisms: sparse-layer creation in SwitchTransformers versus RoPE-layer dropping in Smollm3." 
}, { "left": "issue:43720", "right": "issue:45072", "accept": false, "reason": "BitNet accelerate-loading unpacking bug and bfloat16 dtype mismatch in SwitchTransformers/TimmWrapperModel are unrelated." }, { "left": "issue:44062", "right": "issue:44488", "accept": false, "reason": "AddedToken keyword duplication and a model-specific AutoTokenizer load failure are not the same bug or code path." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 41, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:30064", "issue:36010", "issue:43421", "issue:43531", "issue:43540", "issue:43577", "issue:43582", "issue:43643", "issue:43782", "issue:43818", "issue:44448", "issue:44561", "issue:44671", "issue:44811", "issue:44877", "issue:44977", "issue:45020", "issue:45381" ], "soft_pairs": [ "issue:43421|issue:44811", "issue:30064|issue:36010", "issue:43540|issue:45381", "issue:43577|issue:43818", "issue:43643|issue:44877", "issue:43782|issue:44977", "issue:44448|issue:44671", "issue:43577|issue:43582", "issue:44561|issue:45020", "issue:43531|issue:43643" ], "prepared_review_unit_hash": "7be36fdb5f74f029d43f4c0f999acca8f0aa9f58e2049a2d5eaa4efaaaf5a0c5", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13427, "estimated_input_tokens": 3357, "estimated_eval_tokens": 6970 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These look like 
loosely related Transformers regression reports rather than a single duplicate cluster; most soft pairs share only a broad subsystem/theme and do not appear to describe the same concrete bug. No PRs are present.", "confidence": 0.74, "canonical_issue_reason": "If a representative issue is needed, issue 45020 is the broadest umbrella for the remote_code regression family, but the set is too heterogeneous to treat as one true duplicate cluster.", "canonical_pr_reason": null, "best_issue_reason": "Issue 45020 is the most central/open and widest-scoped report among the items, making it the best general anchor for related remote_code breakages even though it does not subsume the other issues exactly.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43421", "right": "issue:44811", "accept": false, "reason": "Both involve token/processor behavior, but one is runtime special-token/post-processor syncing and the other is Whisper batch_decode skip_special_tokens handling; different code paths and bugs." }, { "left": "issue:30064", "right": "issue:36010", "accept": false, "reason": "Completely unrelated: image segmentation-map processing vs an import error for GenerationMixin." }, { "left": "issue:43540", "right": "issue:45381", "accept": false, "reason": "Both are video-related Qwen issues, but they affect different models and different failure modes (ValueError in Qwen3OmniMoe vs incorrect vision_position_ids in Qwen2.5-VL)." }, { "left": "issue:43577", "right": "issue:43818", "accept": false, "reason": "Different models and problems: BLIP2 dtype propagation vs Video-LLaVA tower architecture/weight sharing." }, { "left": "issue:43643", "right": "issue:44877", "accept": false, "reason": "Both concern config loading, but one is missing fields with trust_remote_code and the other is strict config validation for granite_speech; not the same concrete bug." 
}, { "left": "issue:43782", "right": "issue:44977", "accept": false, "reason": "Same broad Qwen family, but the reported failures are distinct: weight_only loading error vs flash-attention generation behavior." }, { "left": "issue:44448", "right": "issue:44671", "accept": false, "reason": "Both are v5 output regressions, but they affect different models and likely different implementation paths; too broad to merge as duplicates." }, { "left": "issue:43577", "right": "issue:43582", "accept": false, "reason": "No meaningful overlap: BLIP2 dtype issue versus Apple Silicon allocator warmup TypeError." }, { "left": "issue:44561", "right": "issue:45020", "accept": false, "reason": "45020 is a broad umbrella for remote_code breakages, but 44561 is a specific regression tied to is_torch_fx_available removal; not enough evidence of the same underlying bug." }, { "left": "issue:43531", "right": "issue:43643", "accept": false, "reason": "Unrelated: Qwen3-MoE sliding_window behavior versus AutoConfig trust_remote_code field loss." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 42, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:41720", "issue:42175", "issue:43644", "issue:43650", "issue:43720", "issue:43749", "issue:43819", "issue:43825", "issue:43827", "issue:43906", "issue:43994", "issue:44361", "issue:44610", "issue:44805", "issue:44936", "issue:44991", "issue:45081", "issue:45406" ], "soft_pairs": [ "issue:43644|issue:43749", "issue:43825|issue:43994", "issue:43819|issue:44361", "issue:44805|issue:44936", "issue:43720|issue:44610", "issue:41720|issue:44991", "issue:43825|issue:43827", "issue:42175|issue:45081", "issue:43650|issue:43906", "issue:45081|issue:45406" ], "prepared_review_unit_hash": "2ce1cf39c13a1282f8644a819e3035609c8519b8a2a23f55362a94ec8b3f51b9", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13461, "estimated_input_tokens": 3366, "estimated_eval_tokens": 6988 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is heterogeneous: the soft-paired issues cover unrelated bugs in loading, tokenizers, pipelines, docs, and model-specific behavior. 
None of the candidate edges look like the same underlying issue, so all soft matches should be rejected.", "confidence": 0.96, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43644", "right": "issue:43749", "accept": false, "reason": "Both involve loading/runtime behavior, but one is about non-persistent buffers being initialized with junk in Transformers 5.0 and the other is a specific FSDP CPU RAM efficient loading regression. Different failure modes and likely different fixes." }, { "left": "issue:43825", "right": "issue:43994", "accept": false, "reason": "The first is about an incorrect pipeline() error message for translation support; the second is a model-specific bad output issue with AutoModel/pipeline. They do not describe the same bug or code path." }, { "left": "issue:43819", "right": "issue:44361", "accept": false, "reason": "DAC.from_latents missing STE behavior and MLukeTokenizer AttributeError on tasks are unrelated components with different symptoms and fixes." }, { "left": "issue:44805", "right": "issue:44936", "accept": false, "reason": "A tensor mask shape mismatch during indexing is a different bug from trainer.evaluate() failing after trainer.train(). The symptoms and likely code paths do not align." }, { "left": "issue:43720", "right": "issue:44610", "accept": false, "reason": "Packed-weight unpacking during accelerate loading for BitNet is unrelated to OmDet-Turbo producing the wrong input size. Different models, components, and root causes." }, { "left": "issue:41720", "right": "issue:44991", "accept": false, "reason": "Qwen3 auto device mapping causing cudaErrorAssert is unrelated to tokenizer loading failure for EMBEDDIA/est-roberta. No shared underlying bug." 
}, { "left": "issue:43825", "right": "issue:43827", "accept": false, "reason": "Both mention pipeline removals, but one is a runtime error message bug and the other is stale documentation references. That is the same theme, not the same concrete issue." }, { "left": "issue:42175", "right": "issue:45081", "accept": false, "reason": "Missing TensorFlow from the torch extra and a Mistral regex patch crash are unrelated. They touch different installation/loading paths and different failure mechanisms." }, { "left": "issue:43650", "right": "issue:43906", "accept": false, "reason": "'ADD THE DATA' is not a technical bug report and does not match the isolated reproduction of issue 38071." }, { "left": "issue:45081", "right": "issue:45406", "accept": false, "reason": "Both are tokenizer/processor attribute errors, but they involve different classes, different attributes (_backend_tokenizer vs _tokenizer), and different entrypoints. Not the same concrete bug." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 43, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:30064", "issue:34634", "issue:36010", "issue:36331", "issue:43526", "issue:43582", "issue:43653", "issue:43697", "issue:43749", "issue:43782", "issue:44060", "issue:44291", "issue:44368", "issue:44485", "issue:44509", "issue:44671", "issue:44857", "issue:45325" ], "soft_pairs": [ "issue:30064|issue:36331", "issue:43782|issue:45325", "issue:44485|issue:44509", "issue:34634|issue:36010", "issue:44060|issue:44368", "issue:43582|issue:44857", "issue:43749|issue:44291", "issue:43526|issue:43697", "issue:43653|issue:44671" ], "prepared_review_unit_hash": 
"1d0c60d2dfd11245196b32f5ee298720267f623b8f68a5727c38c5fe2b239c34", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13269, "estimated_input_tokens": 3318, "estimated_eval_tokens": 6892 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is heterogeneous: the issues cover unrelated bugs across image processing, tokenizers, trainers, model-specific configs, docs, and runtime/environment failures. None of the soft-similarity pairs look like the same underlying defect, so all soft edges should be rejected.", "confidence": 0.97, "canonical_issue_reason": "No single canonical issue: the items do not describe one shared bug or change, but many unrelated reports.", "canonical_pr_reason": null, "best_issue_reason": "No global best issue for deduping: any one item would misrepresent the rest of the cluster.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:30064", "right": "issue:36331", "accept": false, "reason": "Different failures in different areas: image processor void segmentation maps vs a custom trainer compute_loss signature error." }, { "left": "issue:43782", "right": "issue:45325", "accept": false, "reason": "Both involve Qwen/VL, but one is a weight_only load error and the other is a rope index scaling bug in get_rope_index; different code paths." }, { "left": "issue:44485", "right": "issue:44509", "accept": false, "reason": "GLM-5 RoPE implementation discussion is unrelated to docs still referencing removed pipeline tasks." }, { "left": "issue:34634", "right": "issue:36010", "accept": false, "reason": "BarkProcessor voice_preset failure is unrelated to an ImportError for GenerationMixin from transformers.generation." 
}, { "left": "issue:44060", "right": "issue:44368", "accept": false, "reason": "Both mention Qwen3/Qwen3.5 tied-weights warnings, but one is a specific incorrect tying bug and the other is a user-facing warning during LoRA fine-tuning; not the same defect." }, { "left": "issue:43582", "right": "issue:44857", "accept": false, "reason": "Apple Silicon allocator warmup TypeError is unrelated to LwDetrImageLoss crashing under float16 CUDA AMP." }, { "left": "issue:43749", "right": "issue:44291", "accept": false, "reason": "FSDP CPU RAM efficient loading breakage is a different problem from init_empty_weights rejecting unexpected _is_hf_initialized." }, { "left": "issue:43526", "right": "issue:43697", "accept": false, "reason": "BeitImageProcessorFast reduce_labels bug is unrelated to RTDetrV2 output drift in Transformers v5." }, { "left": "issue:43653", "right": "issue:44671", "accept": false, "reason": "BigBirdTokenizer special-token registration/empty decode issue is unrelated to CamemBERT masked LM prediction changes in v5." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 44, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:34634", "issue:36331", "issue:43262", "issue:43452", "issue:43454", "issue:43526", "issue:43577", "issue:43638", "issue:43653", "issue:43746", "issue:43819", "issue:43854", "issue:44554", "issue:44811", "issue:44912", "issue:44936", "issue:44977", "issue:44991" ], "soft_pairs": [ "issue:43526|issue:44554", "issue:43262|issue:43819", "issue:43454|issue:43577", "issue:43638|issue:44936", "issue:43452|issue:44991", "issue:43746|issue:44811", "issue:43653|issue:44912", "issue:34634|issue:36331", "issue:43854|issue:44977" ], "prepared_review_unit_hash": "7023d2f903696b6cb4e4c3de5b9e6399d2b3a4f82074d208622a253a14bd378b", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13298, "estimated_input_tokens": 3325, "estimated_eval_tokens": 6906 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster does not look like a duplicate set. 
The items span unrelated bugs across audio processors, tokenizers, training loops, quantization, and model loading, so none of the soft similarity pairs are safe to merge.", "confidence": 0.97, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": "issue:43452 is the strongest standalone report in the set: it names a specific API failure mode (`gguf_file` affecting `AutoTokenizer.from_pretrained` and `AutoModelForCausalLM.from_pretrained`) and reads like a concrete, actionable bug report. It is not a duplicate anchor for the rest of the cluster.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43526", "right": "issue:44554", "accept": false, "reason": "Different bugs: BEiT `reduce_labels` preprocessing vs an MPS attention correctness issue. No shared codepath or concrete fix overlap." }, { "left": "issue:43262", "right": "issue:43819", "accept": false, "reason": "Different subsystems: audio processor chat-template sampling-rate handling vs DAC latent decoding/STE behavior." }, { "left": "issue:43454", "right": "issue:43577", "accept": false, "reason": "Unrelated: one is a `gguf_file` loading problem, the other is dtype propagation for BLIP-2. Same broad area of model loading, but not the same bug." }, { "left": "issue:43638", "right": "issue:44936", "accept": false, "reason": "Both are trainer/runtime failures, but the concrete failures differ: ZeRO-3 with a non-pretrained Bert model vs `evaluate()` after `train()`." }, { "left": "issue:43452", "right": "issue:44991", "accept": false, "reason": "Both involve loading/tokenizers, but the failure modes are distinct: `gguf_file` support vs a tokenizer loading regression for a specific repo." }, { "left": "issue:43746", "right": "issue:44811", "accept": false, "reason": "Different behavior entirely: local checkpoint loading with PEFT adapters vs Whisper `batch_decode()` ignoring `skip_special_tokens`." 
}, { "left": "issue:43653", "right": "issue:44912", "accept": false, "reason": "No shared bug: BigBird tokenizer special-token registration vs MXFP4 quantization fallback to bf16." }, { "left": "issue:34634", "right": "issue:36331", "accept": false, "reason": "Completely unrelated: BarkProcessor voice preset configuration vs a `CustomTrainer.compute_loss()` signature mismatch." }, { "left": "issue:43854", "right": "issue:44977", "accept": false, "reason": "Both are model-generation failures, but they concern different models and different underlying issues: GLM-4.7-Flash loading in tests vs Qwen3.5 flash-attention generation problems." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 45, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36010", "issue:38175", "issue:39692", "issue:42175", "issue:43257", "issue:43329", "issue:43645", "issue:43696", "issue:43749", "issue:44568", "issue:44589", "issue:44610", "issue:44756", "issue:44912", "issue:44938", "issue:45042", "issue:45356" ], "soft_pairs": [ "issue:44589|issue:45042", "issue:43257|issue:43329", "issue:44568|issue:45356", "issue:43257|issue:43696", "issue:38175|issue:44610", "issue:36010|issue:43645", "issue:39692|issue:44912", "issue:42175|issue:44938", "issue:43749|issue:44756" ], "prepared_review_unit_hash": "65a7d0f0d9d963599fed609e7adf6ecb56481b215c226a60f717b1da4f94db46", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12596, 
"estimated_input_tokens": 3149, "estimated_eval_tokens": 6554 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These are mostly unrelated Transformer issues, with the soft pairs only sharing broad themes (loading, tokenization, quantization, vision processing) rather than the same underlying defect. I rejected all soft-edge matches.", "confidence": 0.82, "canonical_issue_reason": "Issue 43749 has the strongest triage signal in the set: high discussion activity, inbound references, and a concrete runtime regression affecting FSDP CPU RAM efficient loading.", "canonical_pr_reason": null, "best_issue_reason": "Issue 43749 is the best single representative of the cluster because it is active, well-discussed, and describes a clearly actionable broken code path.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44589", "right": "issue:45042", "accept": false, "reason": "Different bugs: a missing storage object error versus an image backend dependency regression. Same library, but not the same failure path." }, { "left": "issue:43257", "right": "issue:43329", "accept": false, "reason": "Qwen3 MoE weight conversion under accelerate/deepspeed is unrelated to a multimodal token counting bug in the video branch." }, { "left": "issue:44568", "right": "issue:45356", "accept": false, "reason": "Both are tokenizer regressions, but they affect different models and different mechanics: special-token insertion versus codec handling/warning behavior." }, { "left": "issue:43257", "right": "issue:43696", "accept": false, "reason": "One is a loading/conversion bug; the other is an out-of-memory failure. No shared underlying code-path problem." 
}, { "left": "issue:38175", "right": "issue:44610", "accept": false, "reason": "Both involve vision model inputs, but the concrete problems differ: zero probabilities for SigLIP2 versus a 640px/224px processor-model mismatch for OmDet-Turbo." }, { "left": "issue:36010", "right": "issue:43645", "accept": false, "reason": "Both relate to Transformers 5.0 breakage, but one is an import error for GenerationMixin and the other is a Jupyter custom-model initialization regression." }, { "left": "issue:39692", "right": "issue:44912", "accept": false, "reason": "Both mention quantization, but they are different models and different failures: a docs example mismatch/quantization failure versus MXFP4 fallback on git-oss-20b." }, { "left": "issue:42175", "right": "issue:44938", "accept": false, "reason": "Package/backend installation mismatch is unrelated to Python 3.14 load failures; they do not point to the same bug." }, { "left": "issue:43749", "right": "issue:44756", "accept": false, "reason": "Both are loading/memory-related, but one is FSDP CPU RAM efficient loading and the other is mmap-induced OOM on Strix Halo. Different code paths and fixes." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 46, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36010", "issue:36331", "issue:43257", "issue:43262", "issue:43329", "issue:43452", "issue:43531", "issue:43550", "issue:43582", "issue:43697", "issue:43756", "issue:43931", "issue:44479", "issue:44811", "issue:44912", "issue:45042", "issue:45357" ], "soft_pairs": [ "issue:43262|issue:44811", "issue:43531|issue:45042", "issue:43329|issue:44479", "issue:43257|issue:43756", "issue:43550|issue:44912", "issue:36010|issue:36331", "issue:43697|issue:45357", "issue:43257|issue:43931", "issue:43452|issue:43582" ], "prepared_review_unit_hash": "6500d5a91c449d4c2260f466ce270e54d002ae35e0cef2cd006c5daa37957cf4", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12735, "estimated_input_tokens": 3184, "estimated_eval_tokens": 6624 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The cluster is heterogeneous: the soft-paired items share broad language or subsystem overlap, but each pair describes a different bug with a different concrete failure mode. 
I would not merge any of the soft pairs as duplicates.", "confidence": 0.96, "canonical_issue_reason": "No clear canonical issue: these are separate bugs across unrelated areas (audio, video/VLM, quantization, loading, training, and model-specific regressions), so no single issue subsumes the rest.", "canonical_pr_reason": null, "best_issue_reason": "No single best issue for the cluster; if forced to pick a representative, issue:43257 has the most discussion and a concrete reproducible loading failure, but it still does not represent the other items.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43262", "right": "issue:44811", "accept": false, "reason": "Both concern processor behavior, but one is about `apply_chat_template()` default sampling rate and the other about `batch_decode()` ignoring `skip_special_tokens`; different API paths and different bugs." }, { "left": "issue:43531", "right": "issue:45042", "accept": false, "reason": "These are unrelated: Qwen3-MoE `sliding_window` behavior versus PIL image processors requiring torchvision." }, { "left": "issue:43329", "right": "issue:44479", "accept": false, "reason": "Both touch video/VLM code, but one is an undefined-name bug in multimodal token counting while the other is a broader video-input regression across models; not the same concrete failure." }, { "left": "issue:43257", "right": "issue:43756", "accept": false, "reason": "Different model families and different problems: deepspeed/accelerate weight conversion for Qwen3 MoE versus RoPE layer dropping in Smollm3." }, { "left": "issue:43550", "right": "issue:44912", "accept": false, "reason": "One is a torch.compile + SDPA failure for Bamba, the other is MXFP4 quantization fallback for git-oss-20b; different code paths and symptoms." }, { "left": "issue:36010", "right": "issue:36331", "accept": false, "reason": "Importing `GenerationMixin` and a custom trainer `compute_loss()` signature mismatch are unrelated failures." 
}, { "left": "issue:43697", "right": "issue:45357", "accept": false, "reason": "RTDetrV2 output differences in v5 and incorrect visual encoder keys during `save_pretrained` are distinct model-specific regressions." }, { "left": "issue:43257", "right": "issue:43931", "accept": false, "reason": "Both involve Qwen3-family models, but one is a Qwen3 MoE loading/conversion issue and the other is a Qwen3-VL weight-shape mismatch; different underlying bugs." }, { "left": "issue:43452", "right": "issue:43582", "accept": false, "reason": "`gguf_file` loading failures and an Apple Silicon allocator warmup `TypeError` are unrelated subsystems and failure modes." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 47, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:41720", "issue:42175", "issue:43329", "issue:43450", "issue:43540", "issue:43577", "issue:43824", "issue:44062", "issue:44291", "issue:44361", "issue:44442", "issue:44479", "issue:44560", "issue:44991", "issue:45042", "issue:45072", "issue:45381" ], "soft_pairs": [ "issue:41720|issue:43540", "issue:44361|issue:44442", "issue:43329|issue:43450", "issue:44062|issue:44991", "issue:44479|issue:45381", "issue:43577|issue:45072", "issue:43540|issue:44560", "issue:44291|issue:45042", "issue:42175|issue:43824" ], "prepared_review_unit_hash": "c5ed77f3d6980dbcf0fda3af2f36ef6212997401ee6bbed8e731fcfd3cd5ab1c", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12708, 
"estimated_input_tokens": 3177, "estimated_eval_tokens": 6610 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These are mostly unrelated closed issues spanning tokenizer loading, video-processing regressions, dtype mismatches, and backend packaging. No pair is clearly the same underlying bug/change; the cluster\u2019s best anchor is the broad Qwen video regression issue.", "confidence": 0.86, "canonical_issue_reason": "issue:44479 is the broadest and most representative bug report in the set: it describes a v5.3.0 video-input regression affecting multiple Qwen VL/Omni variants, making it the most likely umbrella issue among the candidates.", "canonical_pr_reason": null, "best_issue_reason": "issue:44479 is the strongest global representative because it names a concrete regression, has the widest affected surface, and is more general than the more model-specific or symptom-specific reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:41720", "right": "issue:43540", "accept": false, "reason": "Both mention Qwen3 and failures, but one is a device-mapping cuda assert on A800 while the other is a video-input ValueError in Qwen3OmniMoe; different code paths and symptoms." }, { "left": "issue:44361", "right": "issue:44442", "accept": false, "reason": "Both are tokenizer-related, but they fail in different components: MLukeTokenizer task handling vs AutoTokenizer loading FastSpeech2ConformerTokenizer." }, { "left": "issue:43329", "right": "issue:43450", "accept": false, "reason": "Both involve video processing, but one is an internal token-counting bug with undefined variables and the other is a batched-shape output issue in video processors; not the same bug." 
}, { "left": "issue:44062", "right": "issue:44991", "accept": false, "reason": "Both are tokenizer loading errors, but the first is an AddedToken keyword conflict and the second is a model-specific failure for EMBEDDIA/est-roberta on transformers>=5.0.0." }, { "left": "issue:44479", "right": "issue:45381", "accept": false, "reason": "These are related in theme, but 44479 is a broad multi-model video regression while 45381 is a specific vision_position_ids problem for qwen2.5-vl; not clearly the same concrete code-path bug." }, { "left": "issue:43577", "right": "issue:45072", "accept": false, "reason": "Both concern dtype mismatches, but they affect different model families and different inference paths (BLIP2/QFormer vs SwitchTransformers/TimmWrapperModel)." }, { "left": "issue:43540", "right": "issue:44560", "accept": false, "reason": "Both are Qwen video issues, but one is a ValueError in Qwen3OmniMoe video input processing and the other is a StopIteration in Qwen3-vl-embedding; too different to merge as one bug." }, { "left": "issue:44291", "right": "issue:45042", "accept": false, "reason": "These are unrelated: one is an init_empty_weights/_is_hf_initialized argument error, the other is a PIL backend image-processor dependency regression." }, { "left": "issue:42175", "right": "issue:43824", "accept": false, "reason": "No meaningful overlap: one is about TensorFlow not being included in a torch extra install, the other is an ImportError for a missing Qwen2_5_VL class." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 48, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:33357", "issue:34567", "issue:42175", "issue:42915", "issue:43540", "issue:43582", "issue:43720", "issue:43742", "issue:43782", "issue:43906", "issue:44479", "issue:44484", "issue:44488", "issue:44704", "issue:44821", "issue:44898", "issue:45381" ], "soft_pairs": [ "issue:42915|issue:44821", "issue:42175|issue:44488", "issue:43540|issue:44479", "issue:42915|issue:43742", "issue:43906|issue:45381", "issue:43720|issue:44898", "issue:33357|issue:34567", "issue:44484|issue:44704", "issue:43582|issue:43782" ], "prepared_review_unit_hash": "5f0ea9457db6c8cb78be2a5c5ff6c910beb13ce4f704042b3fceb4f16b2eff21", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12437, "estimated_input_tokens": 3110, "estimated_eval_tokens": 6476 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is heterogeneous: the soft pairs span unrelated bugs in model loading, video processing, packaging, training state, and Apple Silicon/runtime issues. 
I do not see a single duplicate target for the cluster.", "confidence": 0.97, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:42915", "right": "issue:44821", "accept": false, "reason": "Different failures: Qwen3Moe FP8 config handling vs AutoImageProcessor loading from URL. No shared code path or concrete duplicate symptom." }, { "left": "issue:42175", "right": "issue:44488", "accept": false, "reason": "Packaging/backend extra omission vs a model load failure. They are unrelated issues with different root causes." }, { "left": "issue:43540", "right": "issue:44479", "accept": false, "reason": "Both involve video inputs, but the affected models and reported behavior differ enough that this does not clearly indicate the same underlying bug." }, { "left": "issue:42915", "right": "issue:43742", "accept": false, "reason": "FP8 config failure for Qwen3Moe is unrelated to a key error when loading MobileLLM-125M." }, { "left": "issue:43906", "right": "issue:45381", "accept": false, "reason": "An isolated reproduction of an older issue is not enough to match this specific Qwen2.5-VL video position_ids regression." }, { "left": "issue:43720", "right": "issue:44898", "accept": false, "reason": "BitNet packed-weight loading and Perceiver positional-encoding handling are distinct code paths and bugs." }, { "left": "issue:33357", "right": "issue:34567", "accept": false, "reason": "Completely different domains: MacOS bus error with CLIP vs TrainerState token counting." }, { "left": "issue:44484", "right": "issue:44704", "accept": false, "reason": "Save-pretrained sharding behavior and AutoProcessor kwargs forwarding are separate concerns, not the same bug." }, { "left": "issue:43582", "right": "issue:43782", "accept": false, "reason": "Apple Silicon allocator warmup TypeError and Qwen3VL weight_only load error are unrelated runtime/loading problems." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 49, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:28282", "issue:34689", "issue:36010", "issue:38175", "issue:42175", "issue:43643", "issue:43646", "issue:43742", "issue:43749", "issue:43756", "issue:44062", "issue:44291", "issue:44898", "issue:45081", "issue:45137", "issue:45325", "issue:45375", "issue:45405" ], "soft_pairs": [ "issue:43756|issue:45325", "issue:45137|issue:45405", "issue:36010|issue:42175", "issue:38175|issue:44898", "issue:43643|issue:45375", "issue:43646|issue:43749", "issue:43742|issue:44291", "issue:44062|issue:45081", "issue:28282|issue:34689" ], "prepared_review_unit_hash": "2449fbc77d3893867fa8d4335d6c97edd4be881074bc0544c84d3a5bd364891f", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13298, "estimated_input_tokens": 3325, "estimated_eval_tokens": 6906 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "No duplicate cluster emerges here: the items span unrelated regressions in model loading, tokenizers, configs, training, versioning, and backend packaging. 
All soft-similarity pairs are broad-topic matches only and should be rejected.", "confidence": 0.97, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43756", "right": "issue:45325", "accept": false, "reason": "Both involve RoPE/position handling in vision models, but they are different model families and different failures (Smollm3 layer usage vs Qwen2.5-VL temporal position scaling)." }, { "left": "issue:45137", "right": "issue:45405", "accept": false, "reason": "DeepSpeed ZeRO3 deque errors and a PEFT version bump are unrelated; one is a runtime training bug, the other is a packaging/version requirement issue." }, { "left": "issue:36010", "right": "issue:42175", "accept": false, "reason": "These are both import/backend-related, but one is a missing PyTorch import dependency and the other is TensorFlow not being included in a torch extra install." }, { "left": "issue:38175", "right": "issue:44898", "accept": false, "reason": "Both are vision-model inference problems, but they affect different architectures and different code paths; no evidence of the same underlying bug." }, { "left": "issue:43643", "right": "issue:45375", "accept": false, "reason": "Both mention config fields being lost, but one is a generic trust_remote_code/AutoConfig behavior and the other is a specific model config field omitted by strict serialization." }, { "left": "issue:43646", "right": "issue:43749", "accept": false, "reason": "These are separate 5.0-era breakages: custom model initialization vs FSDP CPU RAM efficient loading. Same version window is not enough to merge them." }, { "left": "issue:43742", "right": "issue:44291", "accept": false, "reason": "Both fail while loading models, but one is a KeyError for a specific model and the other is a TypeError from init_empty_weights/_is_hf_initialized handling." 
}, { "left": "issue:44062", "right": "issue:45081", "accept": false, "reason": "Tokenizer-related crashes, but the concrete failures differ: AddedToken special-arg duplication versus Mistral regex patching on a Tokenizer without backend_tokenizer." }, { "left": "issue:28282", "right": "issue:34689", "accept": false, "reason": "Both are loading/import problems, but one is missing PyTorch in the environment and the other is a model-loading regression for Llama 3.2 Vision." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 50, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:28282", "issue:29127", "issue:34567", "issue:36331", "issue:43452", "issue:43643", "issue:43653", "issue:43688", "issue:43749", "issue:43927", "issue:44291", "issue:44361", "issue:44488", "issue:44561", "issue:44792", "issue:44821", "issue:44964" ], "soft_pairs": [ "issue:44291|issue:44821", "issue:43452|issue:44488", "issue:43653|issue:44361", "issue:28282|issue:29127", "issue:34567|issue:36331", "issue:43688|issue:43749", "issue:44792|issue:44964", "issue:43643|issue:44561", "issue:43653|issue:43927" ], "prepared_review_unit_hash": "bf6224bfbd1b26de0be7685822c3b61755293ccef1c7db79d15e57a894ebbebb", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12606, "estimated_input_tokens": 3152, "estimated_eval_tokens": 6560 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": 
{ "summary": "This cluster is mostly a set of unrelated Hugging Face Transformers bugs spanning tokenizers, model loading, trainer internals, multimodal models, and config serialization. The soft pairs share broad themes but not the same concrete bug or fix path, so none should be merged as duplicates.", "confidence": 0.94, "canonical_issue_reason": "No single issue cleanly represents the whole cluster because the items are heterogeneous and do not describe the same underlying defect. If one must be chosen as the most representative standalone report, issue 44821 is the clearest actionable model-loading bug.", "canonical_pr_reason": null, "best_issue_reason": "Issue 44821 is the best standalone representative because it is specific, reproducible, and clearly scoped to a single failure mode in model loading from URL.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44291", "right": "issue:44821", "accept": false, "reason": "Both involve loading/runtime errors, but one is about `init_empty_weights` and an unexpected argument, while the other is about `AutoImageProcessor` loading from a URL. Different code paths and fixes." }, { "left": "issue:43452", "right": "issue:44488", "accept": false, "reason": "Both are model-loading complaints, but one is specifically about `gguf_file` handling in AutoTokenizer/AutoModelForCausalLM and the other is a failure to load a particular BERT model. No clear shared root cause." }, { "left": "issue:43653", "right": "issue:44361", "accept": false, "reason": "Different tokenizer/model families and different symptoms: special-token decode behavior vs an AttributeError in tasks. Not the same bug." }, { "left": "issue:28282", "right": "issue:29127", "accept": false, "reason": "PyTorch import availability is unrelated to LayoutLMv3 box validation/error messaging." 
}, { "left": "issue:34567", "right": "issue:36331", "accept": false, "reason": "Both touch trainer code, but `num_input_tokens_seen` not updating and `compute_loss` receiving an unexpected kwarg are distinct bugs with different fixes." }, { "left": "issue:43688", "right": "issue:43749", "accept": false, "reason": "Different subsystems and failure modes: auxiliary-loss normalization in MoE models vs broken FSDP CPU RAM-efficient loading." }, { "left": "issue:44792", "right": "issue:44964", "accept": false, "reason": "Both are multimodal-model loading/generation issues, but they concern different models and different failing paths; not enough evidence they share one fix." }, { "left": "issue:43643", "right": "issue:44561", "accept": false, "reason": "Both mention `trust_remote_code`, but one is missing fields in `AutoConfig` output and the other is a v5 regression from removing `is_torch_fx_available`. Different underlying problems." }, { "left": "issue:43653", "right": "issue:43927", "accept": false, "reason": "BigBird tokenizer special-token registration and DiaConfig custom token-ID persistence are unrelated bugs." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 51, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:43452", "issue:43504", "issue:43531", "issue:43550", "issue:43577", "issue:43697", "issue:43720", "issue:43742", "issue:43927", "issue:44514", "issue:44589", "issue:44821", "issue:45042", "issue:45081", "issue:45216", "issue:45290", "issue:45381" ], "soft_pairs": [ "issue:43742|issue:44589", "issue:43550|issue:43927", "issue:43531|issue:44821", "issue:44514|issue:45290", "issue:43452|issue:43577", "issue:43504|issue:43720", "issue:44514|issue:45381", "issue:45042|issue:45081", "issue:43697|issue:45216" ], "prepared_review_unit_hash": "ef94999bbd46061cb195a4fd3322520838174b7df3baa46765883a448987c2fb", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12718, "estimated_input_tokens": 3180, "estimated_eval_tokens": 6616 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The cluster is heterogeneous: these issues span unrelated models, processors, tokenizer loading, save/load, dtype, and chat-template bugs. I do not see any soft-edge pair that is the same underlying defect, so no merges are justified.", "confidence": 0.95, "canonical_issue_reason": "Issue 45081 is the strongest representative candidate: it is a detailed, concrete bug report with multiple inbound references and an active repro path. 
Even so, the cluster does not form a true duplicate set.", "canonical_pr_reason": null, "best_issue_reason": "Issue 45081 is the most actionable overall because it has a specific failure mode, clear trigger (`fix_mistral_regex=True`), and ongoing discussion, making it the best single issue to keep as the representative record.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43742", "right": "issue:44589", "accept": false, "reason": "Both are model-loading failures, but one is a key-mismatch on MobileLLM while the other is a storage deserialization error for float8; different root causes and code paths." }, { "left": "issue:43550", "right": "issue:43927", "accept": false, "reason": "These concern different models and different bugs: torch.compile/SDPA behavior in Bamba versus save/load of custom token IDs in DiaConfig." }, { "left": "issue:43531", "right": "issue:44821", "accept": false, "reason": "One is a Qwen3-MoE sliding_window behavior bug; the other is URL loading for AutoImageProcessor. No shared underlying defect." }, { "left": "issue:44514", "right": "issue:45290", "accept": false, "reason": "Both involve `apply_chat_template`, but the triggers differ materially: batched padding=False crash versus assistant tool-call messages with no content." }, { "left": "issue:43452", "right": "issue:43577", "accept": false, "reason": "These are unrelated loading/dtype issues: gguf tokenizer/model loading versus Blip2 dtype propagation for model and qformer." }, { "left": "issue:43504", "right": "issue:43720", "accept": false, "reason": "Different models, different failure mechanisms: legacy field handling in BEiT versus packed-weight unpacking during accelerate loading for BitNet." }, { "left": "issue:44514", "right": "issue:45381", "accept": false, "reason": "Same model family, but different subsystems: chat-template batching crash versus incorrect video vision_position_ids." 
}, { "left": "issue:45042", "right": "issue:45081", "accept": false, "reason": "These are separate regressions in different areas: PIL image processor torchvision dependency versus Mistral tokenizer regex patching." }, { "left": "issue:43697", "right": "issue:45216", "accept": false, "reason": "Both are regression-style issues, but they affect different models and different operations: RTDetrV2 inference outputs versus Qwen3.5 save_pretrained checkpoint correctness." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 52, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:41762", "issue:43295", "issue:43452", "issue:43504", "issue:43550", "issue:43606", "issue:43653", "issue:43688", "issue:44568", "issue:44610", "issue:44625", "issue:44843", "issue:44877", "issue:44898", "issue:44936", "issue:45356", "issue:45440" ], "soft_pairs": [ "issue:43688|issue:45440", "issue:43452|issue:44843", "issue:43504|issue:43550", "issue:41762|issue:44936", "issue:43606|issue:44898", "issue:43295|issue:45356", "issue:43504|issue:44610", "issue:44625|issue:44877", "issue:43653|issue:44568" ], "prepared_review_unit_hash": "3dd17bcec92131698257626f1520d2f10182d002330304960b9dfd346967b980", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12768, "estimated_input_tokens": 3192, "estimated_eval_tokens": 6640 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { 
"summary": "These issues are mostly unrelated model- or tokenizer-specific bugs. The soft pairs share broad themes (tokenizers, config loading, model divergence, runtime failures) but not the same concrete underlying code path, so none should be merged as duplicates.", "confidence": 0.87, "canonical_issue_reason": "Issue 44843 is the strongest representative because it has the most discussion and an inbound reference, and it describes a concrete regression in a common tokenizer loading path. The other issues are more isolated model-specific failures or different bug classes.", "canonical_pr_reason": null, "best_issue_reason": "Issue 44843 is the best single issue to anchor the cluster: it is actionable, regression-focused, and tied to a broadly used API path. The rest do not form a true duplicate set around it.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43688", "right": "issue:45440", "accept": false, "reason": "Both concern MoE-related model behavior, but one is about auxiliary loss normalization and the other about a native-vs-remote implementation divergence. Different bug mechanisms and not mergeable as one fix." }, { "left": "issue:43452", "right": "issue:44843", "accept": false, "reason": "Both involve tokenizer/model loading, but 43452 is about gguf_file breaking from_pretrained while 44843 is an unconditional model_info call in _patch_mistral_regex and offline mode. Different code paths." }, { "left": "issue:43504", "right": "issue:43550", "accept": false, "reason": "These are unrelated: one is a legacy-field load failure in BEiT semantic segmentation, the other is a torch.compile + SDPA failure in Bamba-9B-v2." }, { "left": "issue:41762", "right": "issue:44936", "accept": false, "reason": "Both are runtime failures, but Gemma3 ZeRO-3 loading IndexError and trainer.evaluate() failing after train() are distinct failures in different subsystems." 
}, { "left": "issue:43606", "right": "issue:44898", "accept": false, "reason": "A CPU offload device mismatch in Bark-small is not the same bug as Perceiver failing on non-default resolution despite interpolate_pos_encoding=True." }, { "left": "issue:43295", "right": "issue:45356", "accept": false, "reason": "Both are tokenizer regressions, but 43295 is about processor.tokenizer/image handling in custom code, while 45356 is codec handling and a misleading warning for Kimi-K2.5. Different fixes." }, { "left": "issue:43504", "right": "issue:44610", "accept": false, "reason": "BEiT legacy-field loading and OmDet-Turbo processor/model resolution mismatch are separate model-specific issues." }, { "left": "issue:44625", "right": "issue:44877", "accept": false, "reason": "Qwen3.5 num_labels propagation and granite_speech strict-config loading are different config-validation bugs with different symptoms and fixes." }, { "left": "issue:43653", "right": "issue:44568", "accept": false, "reason": "Both concern tokenizer special tokens, but BigBird mask-token registration and mDeBERTa add_special_tokens BOS/EOS handling are different model-specific defects." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 53, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:40444", "issue:41720", "issue:43525", "issue:43577", "issue:43606", "issue:43643", "issue:43697", "issue:43720", "issue:43819", "issue:43906", "issue:43927", "issue:44464", "issue:44466", "issue:44743", "issue:44811", "issue:44843", "issue:44991", "issue:45005" ], "soft_pairs": [ "issue:44464|issue:44743", "issue:41720|issue:43906", "issue:43720|issue:43927", "issue:43606|issue:43927", "issue:44466|issue:45005", "issue:43525|issue:43643", "issue:43819|issue:44811", "issue:44843|issue:44991", "issue:40444|issue:44811", "issue:43577|issue:43697" ], "prepared_review_unit_hash": "b03d4d6c9e2fb00362c7488c0be381721a69db4607b6120c31a56f016e3de911", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13647, "estimated_input_tokens": 3412, "estimated_eval_tokens": 7080 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is mostly a grab bag of unrelated issues. 
A few pairs are thematically close (tied weights, tokenizer loading, config fields), but none look like the same concrete bug/change strongly enough to merge.", "confidence": 0.92, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": "No single issue cleanly represents the cluster because the items span several unrelated bugs; if forced, the most discussion-heavy issue is the broadest candidate, but it is not a true cluster canonical.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44464", "right": "issue:44743", "accept": false, "reason": "Both concern generation behavior, but the failure modes differ: chunked generation with compiled forward vs recurrent state reset with cache/seq_len>1." }, { "left": "issue:41720", "right": "issue:43906", "accept": false, "reason": "The second item is only an isolated reproduction reference; there is no clear evidence it is the same Qwen3 auto-device-map cudaErrorAssert bug." }, { "left": "issue:43720", "right": "issue:43927", "accept": false, "reason": "Different code paths and symptoms: BitNet packed-weight loading vs DiaConfig losing custom token IDs after save/load." }, { "left": "issue:43606", "right": "issue:43927", "accept": false, "reason": "CPU-offload device mismatch in bark-small is unrelated to DiaConfig token ID loss and generation IndexError." }, { "left": "issue:44466", "right": "issue:45005", "accept": false, "reason": "Both mention tied weights in v5, but one is a device-dependent serialization bug and the other is a broader translation-model tied-weights issue; not the same concrete bug." }, { "left": "issue:43525", "right": "issue:43643", "accept": false, "reason": "Both involve config fields, but one is a missing pad_token_id on Llama4Config while the other is AutoConfig with trust_remote_code returning incomplete fields." 
}, { "left": "issue:43819", "right": "issue:44811", "accept": false, "reason": "Unrelated failures: DAC latent reconstruction mismatch vs Whisper batch_decode ignoring skip_special_tokens." }, { "left": "issue:44843", "right": "issue:44991", "accept": false, "reason": "Both are tokenizer-loading related, but one is an offline/model_info regression in _patch_mistral_regex and the other is a model-specific tokenizer load failure; not clearly the same bug." }, { "left": "issue:40444", "right": "issue:44811", "accept": false, "reason": "Multimodal finetuning with multiple images per prompt is unrelated to Whisper processor batch_decode behavior." }, { "left": "issue:43577", "right": "issue:43697", "accept": false, "reason": "Different subsystems and symptoms: BLIP2 dtype loading remains float32 vs RTDetrV2 output differences in v5." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 54, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:24643", "issue:28282", "issue:29127", "issue:30064", "issue:32090", "issue:33357", "issue:33666", "issue:38175", "issue:43122", "issue:43295", "issue:43720", "issue:43906", "issue:44361", "issue:44479", "issue:44936", "issue:44977", "issue:45356" ], "soft_pairs": [ "issue:43720|issue:44361", "issue:24643|issue:29127", "issue:38175|issue:43295", "issue:44977|issue:45356", "issue:44479|issue:44936", "issue:43122|issue:43906", "issue:30064|issue:33666", "issue:28282|issue:33357", "issue:28282|issue:32090" ], "prepared_review_unit_hash": "34c8ef6df6450c6888799b3ba9c124db01185419baae5dac2d3b63a05970a06d", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, 
"serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12582, "estimated_input_tokens": 3146, "estimated_eval_tokens": 6548 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These are mostly unrelated issues across different subsystems (training, tokenization, vision processing, environment/import, and model-specific regressions). None of the soft pairs look like the same underlying bug or change, so all soft edges should be rejected.", "confidence": 0.95, "canonical_issue_reason": "No single canonical issue stands out: the items cover distinct failure modes and models, with no strong duplicate cluster among them.", "canonical_pr_reason": null, "best_issue_reason": "There is no global best issue for deduplication because the set is not a coherent duplicate group; each item appears to represent a separate problem.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43720", "right": "issue:44361", "accept": false, "reason": "Different bugs: BitNet accelerate-loading/unpacked weights vs MLukeTokenizer task-time AttributeError. No shared concrete code path." }, { "left": "issue:24643", "right": "issue:29127", "accept": false, "reason": "DeepSpeed training weight-shape error vs LayoutLMv3 box validation/error-message clarity. Related only by being training/model issues, not the same bug." }, { "left": "issue:38175", "right": "issue:43295", "accept": false, "reason": "SigLIP2 zero probabilities vs processor.tokenizer regression with images/custom model code. Different symptoms and likely different code paths." }, { "left": "issue:44977", "right": "issue:45356", "accept": false, "reason": "Qwen3.5 flash-attention generation bug vs Kimi-K2.5 tokenizer codec/regression warning. Different models and failure mechanisms." 
}, { "left": "issue:44479", "right": "issue:44936", "accept": false, "reason": "Video-input regression for Qwen VL models vs trainer.evaluate after train. Unrelated areas and no shared underlying defect." }, { "left": "issue:43122", "right": "issue:43906", "accept": false, "reason": "Tokenization regression across versions vs isolated reproduction of another issue; the titles are too generic to \u092a\u0941\u0937\u094d\u091f\u093f same bug, and no concrete overlap is shown." }, { "left": "issue:30064", "right": "issue:33666", "accept": false, "reason": "Void segmentation map processing vs Qwen2-VL multi-GPU training. Different functionality and likely different components." }, { "left": "issue:28282", "right": "issue:33357", "accept": false, "reason": "Missing PyTorch import error vs MacOS bus error with community CLIP model. Environment/import failure is not the same as runtime bus error." }, { "left": "issue:28282", "right": "issue:32090", "accept": false, "reason": "Missing PyTorch import error vs NoneType broadcast error in Trainer. Separate setup/runtime issues with no shared code path." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 55, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:28282", "issue:35141", "issue:43329", "issue:43452", "issue:43504", "issue:43531", "issue:43577", "issue:43606", "issue:43653", "issue:43742", "issue:44451", "issue:44464", "issue:44554", "issue:44849", "issue:44977", "issue:45325", "issue:45357" ], "soft_pairs": [ "issue:43504|issue:43606", "issue:28282|issue:35141", "issue:44464|issue:44977", "issue:43577|issue:44554", "issue:43531|issue:43742", "issue:43329|issue:45325", "issue:43504|issue:43653", "issue:43452|issue:44451", "issue:44849|issue:45357" ], "prepared_review_unit_hash": "d3ebdeefd14cae4b21fa78a988c41449c2e9c0216fa1e3db491ec82913aff91e", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12734, "estimated_input_tokens": 3184, "estimated_eval_tokens": 6624 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is mostly a grab bag of unrelated Transformers bugs across different model families and code paths. None of the soft pairs look like true duplicates; they only share broad themes like loading, generation, or attention.", "confidence": 0.92, "canonical_issue_reason": "Issue 43504 is the most concrete, self-contained model-loading bug in the set and has a clear failure mode tied to a specific class. 
The rest are separate bugs in different subsystems or model families.", "canonical_pr_reason": null, "best_issue_reason": "Issue 43504 is a strong representative because it describes a specific reproducible load failure and is easier to unify than the broader or more model-specific reports. However, the cluster does not appear to contain a true duplicate group overall.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43504", "right": "issue:43606", "accept": false, "reason": "Different problems: Beit pretrained loading with a legacy field vs. Bark CPU offload device mismatch. Same general area of model loading, but not the same bug." }, { "left": "issue:28282", "right": "issue:35141", "accept": false, "reason": "One is an ImportError for missing PyTorch; the other is a weight reinitialization bug after resizing embeddings. No shared code-path problem." }, { "left": "issue:44464", "right": "issue:44977", "accept": false, "reason": "Both involve generation correctness, but one is about compiled forward chunked generation and the other about Qwen3.5 flash-attention behavior. Too different to be duplicates." }, { "left": "issue:43577", "right": "issue:44554", "accept": false, "reason": "Blip2 dtype propagation on load is unrelated to an MPS attention correctness issue when value head dim differs from query." }, { "left": "issue:43531", "right": "issue:43742", "accept": false, "reason": "Qwen3-MoE sliding_window behavior and MobileLLM key errors are different model-specific loading/runtime issues." }, { "left": "issue:43329", "right": "issue:45325", "accept": false, "reason": "Video multimodal token counting undefined names vs. Qwen2.5-VL rope index scaling for still images. Different code paths and symptoms." }, { "left": "issue:43504", "right": "issue:43653", "accept": false, "reason": "Beit legacy-field loading failure is unrelated to BigBirdTokenizer mask token special-token registration/empty decode output." 
}, { "left": "issue:43452", "right": "issue:44451", "accept": false, "reason": "Both are from_pretrained-related, but one is gguf_file breaking tokenizer/model loading and the other is a specific ScandiBERT load regression. Not the same underlying bug." }, { "left": "issue:44849", "right": "issue:45357", "accept": false, "reason": "Qwen3.5 output_hidden_states bug and incorrect visual encoder keys on save_pretrained are distinct regressions affecting different parts of the model lifecycle." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 56, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:32090", "issue:33666", "issue:41720", "issue:43012", "issue:43334", "issue:43540", "issue:43572", "issue:43577", "issue:43716", "issue:43742", "issue:43927", "issue:44361", "issue:44464", "issue:44479", "issue:44625", "issue:44821", "issue:45200", "issue:45229" ], "soft_pairs": [ "issue:43927|issue:44361", "issue:43012|issue:43577", "issue:45200|issue:45229", "issue:44479|issue:44625", "issue:41720|issue:44464", "issue:43540|issue:43716", "issue:43334|issue:43572", "issue:32090|issue:33666", "issue:43742|issue:44821" ], "prepared_review_unit_hash": "5093722a773347929b40d09f53953c4a45b27d251a2f1448e2aee57aef972655", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13043, "estimated_input_tokens": 3261, "estimated_eval_tokens": 6778 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": 
false, "split": true, "analyst_result": { "summary": "This cluster is heterogeneous: the soft pairs share only broad surface themes (token/config errors, dtype issues, video/image/model-loading bugs) but do not appear to be the same underlying defect. I would reject all soft edges and avoid collapsing these issues into one canonical duplicate.", "confidence": 0.95, "canonical_issue_reason": "No safe canonical issue: the cluster mixes unrelated bugs across different models and code paths, so none of the issues can represent the whole set as a duplicate target.", "canonical_pr_reason": null, "best_issue_reason": "No issue is a strong global representative; the items are too diverse. If forced to anchor one, pick a concrete self-contained bug rather than a broad symptom, but it would not subsume the others.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43927", "right": "issue:44361", "accept": false, "reason": "Both are error reports around missing config/tokenizer state, but they affect different models and different failure points; not the same bug." }, { "left": "issue:43012", "right": "issue:43577", "accept": false, "reason": "Both mention dtype/precision, but one is a PyTorch warning during compile while the other is a load-time dtype propagation bug in Blip2; different code paths and fixes." }, { "left": "issue:45200", "right": "issue:45229", "accept": false, "reason": "One is about missing mm_token_type_ids defaults for text-only fine-tuning; the other is multi-GPU inference OOM. Same model family, different problems." }, { "left": "issue:44479", "right": "issue:44625", "accept": false, "reason": "Video-input regression across Qwen VL variants is unrelated to num_labels propagation into text config." }, { "left": "issue:41720", "right": "issue:44464", "accept": false, "reason": "CUDA assert with auto device mapping is a different failure mode from inconsistent chunked generation under compiled forward." 
}, { "left": "issue:43540", "right": "issue:43716", "accept": false, "reason": "Video-input ValueError in Qwen3OmniMoe and Mistral-3 image preprocessor dtype mismatch are distinct model-specific issues." }, { "left": "issue:43334", "right": "issue:43572", "accept": false, "reason": "Both are missing pad token field regressions, but they concern different config classes/models and are not clearly the same underlying defect." }, { "left": "issue:32090", "right": "issue:33666", "accept": false, "reason": "Trainer _gpu_broadcast_one NoneType error and Qwen2-VL multi-GPU training are too broad to treat as the same concrete bug." }, { "left": "issue:43742", "right": "issue:44821", "accept": false, "reason": "MobileLLM key error and AutoImageProcessor URL loading failure are different loading problems with different surfaces and likely different fixes." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 57, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:32090", "issue:33666", "issue:34567", "issue:36010", "issue:43262", "issue:43278", "issue:43493", "issue:43525", "issue:43572", "issue:43688", "issue:43756", "issue:43825", "issue:43844", "issue:43873", "issue:44488", "issue:44843", "issue:44913", "issue:45084" ], "soft_pairs": [ "issue:32090|issue:34567", "issue:43525|issue:43572", "issue:43756|issue:44913", "issue:43278|issue:43688", "issue:43493|issue:43825", "issue:43262|issue:45084", "issue:43688|issue:43873", "issue:44488|issue:44843", "issue:33666|issue:36010", "issue:43278|issue:43844" ], "prepared_review_unit_hash": "6bab5ebc1ef30fb6c17c409fd7c5bb2be821dc748984900605e60975b0daf84d", "component_budget": { "node_count": 353, 
"item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13404, "estimated_input_tokens": 3351, "estimated_eval_tokens": 6958 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These issues are mostly unrelated and only share superficial wording around training, configs, or model-loading. None of the soft pairs look like true duplicates of the same concrete bug or change.", "confidence": 0.95, "canonical_issue_reason": "No clear canonical issue: the set spans distinct bugs across trainer internals, model config fields, tokenizer/offline behavior, audio chat templates, quantization/offloading, and model-specific architecture regressions.", "canonical_pr_reason": null, "best_issue_reason": "No single issue is a good global representative here; the strongest theme is broad Hugging Face model/runtime regressions, but the items are too diverse to collapse into one issue.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:32090", "right": "issue:34567", "accept": false, "reason": "Different bugs: one is a Trainer GPU broadcast TypeError, the other is TrainerState token-count tracking not updating." }, { "left": "issue:43525", "right": "issue:43572", "accept": false, "reason": "Both involve missing config fields, but they affect different models and different attributes (`pad_token_id` vs `pad_token_idx`), so this is not the same concrete bug." }, { "left": "issue:43756", "right": "issue:44913", "accept": false, "reason": "Both touch rotary settings, but one is a Smollm3 RoPE-layer mismatch and the other is GPTNeoX config reload behavior; distinct failure modes and code paths." }, { "left": "issue:43278", "right": "issue:43688", "accept": false, "reason": "Unrelated: dtype drift in evaluation vs. 
auxiliary-loss normalization in MoE models." }, { "left": "issue:43493", "right": "issue:43825", "accept": false, "reason": "Completely different areas: SigLIP2 implementation discrepancy vs. an incorrect pipeline error message about translation support." }, { "left": "issue:43262", "right": "issue:45084", "accept": false, "reason": "Different subsystems and symptoms: audio chat-template sampling-rate defaulting vs. a template compilation TypeError." }, { "left": "issue:43688", "right": "issue:43873", "accept": false, "reason": "Different problems: MoE auxiliary-loss normalization vs. offloading behavior with quantization." }, { "left": "issue:44488", "right": "issue:44843", "accept": false, "reason": "Both relate to model loading, but one is a specific model failing to load and the other is an offline-mode break in `_patch_mistral_regex`; not the same bug." }, { "left": "issue:33666", "right": "issue:36010", "accept": false, "reason": "Different issues: Qwen2-VL multi-GPU training vs. an ImportError for `GenerationMixin`." }, { "left": "issue:43278", "right": "issue:43844", "accept": false, "reason": "Both concern training behavior, but one is BF16/FP32 dtype drift and the other is gradient growth under DeepSpeed ZeRO-3; no concrete overlap." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 58, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:33666", "issue:36331", "issue:39692", "issue:43257", "issue:43299", "issue:43493", "issue:43540", "issue:43756", "issue:43844", "issue:44060", "issue:44423", "issue:44734", "issue:44964", "issue:44977", "issue:45072", "issue:45198", "issue:45405" ], "soft_pairs": [ "issue:43756|issue:44060", "issue:43257|issue:43299", "issue:43540|issue:44977", "issue:45072|issue:45198", "issue:39692|issue:43493", "issue:43493|issue:43844", "issue:33666|issue:36331", "issue:44964|issue:45405", "issue:44423|issue:44734" ], "prepared_review_unit_hash": "9afcfe82faa95b2a4a3ad52288b2a9112ac562b6d1cd32211165d3d1422bcc97", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12680, "estimated_input_tokens": 3170, "estimated_eval_tokens": 6596 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is a grab-bag of unrelated Transformers issues across different models and subsystems. 
None of the soft pairs look like the same underlying bug or change, so no duplicates should be merged here.", "confidence": 0.96, "canonical_issue_reason": "No single issue is canonical: the set spans distinct model families, subsystems, and failure modes rather than one shared regression.", "canonical_pr_reason": "No PRs are present in the cluster.", "best_issue_reason": "issue:43299 is the strongest standalone issue because it is a concrete, version-specific loading regression with a clear model scope and actionable symptom.", "best_pr_reason": "No PRs are present in the cluster.", "soft_edge_verdicts": [ { "left": "issue:43756", "right": "issue:44060", "accept": false, "reason": "Different models and failures: Smollm3 RoPE-layer selection vs Qwen3-Next tied-weights warning. Not the same bug." }, { "left": "issue:43257", "right": "issue:43299", "accept": false, "reason": "Both mention Qwen3/MoE loading, but one is an accelerate+deepspeed conversion issue and the other is a transformers version regression for Qwen3VL. Different concrete code paths." }, { "left": "issue:43540", "right": "issue:44977", "accept": false, "reason": "Qwen3OmniMoe video processing error vs Qwen3.5 flash-attention generation issue. Same broad family only, not the same defect." }, { "left": "issue:45072", "right": "issue:45198", "accept": false, "reason": "Unrelated model families and symptoms: dtype mismatch in inference vs Wav2Vec2 save/tokenization failure." }, { "left": "issue:39692", "right": "issue:43493", "accept": false, "reason": "Both are SigLIP2-related, but one is a broken documentation example and the other is an implementation discrepancy with JAX. Separate issues." }, { "left": "issue:43493", "right": "issue:43844", "accept": false, "reason": "SigLIP2 implementation mismatch vs DeepSpeed ZeRO-3 gradient growth during random-init training. Different subsystems and failure modes." 
}, { "left": "issue:33666", "right": "issue:36331", "accept": false, "reason": "Multi-GPU training request/issue for Qwen2-VL is not the same as the Trainer API TypeError about num_items_in_batch." }, { "left": "issue:44964", "right": "issue:45405", "accept": false, "reason": "Phi-4 multimodal loading regression vs PEFT minimum-version bump. No shared underlying bug." }, { "left": "issue:44423", "right": "issue:44734", "accept": false, "reason": "Both are serving-related, but one crashes on multimodal continuous batching and the other on KV-cache continuation indexing. Different code paths." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 59, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:33357", "issue:33666", "issue:39692", "issue:43295", "issue:43329", "issue:43572", "issue:43606", "issue:43646", "issue:43688", "issue:43746", "issue:43824", "issue:43994", "issue:44291", "issue:44466", "issue:44493", "issue:44521", "issue:44589", "issue:45290" ], "soft_pairs": [ "issue:43824|issue:44589", "issue:43646|issue:44466", "issue:43572|issue:44291", "issue:43688|issue:43746", "issue:43994|issue:44493", "issue:44521|issue:45290", "issue:43329|issue:43606", "issue:33357|issue:33666", "issue:39692|issue:43295" ], "prepared_review_unit_hash": "57ad5eaab264002492fad135fe9c6d35a1938e2e3ab1bc31e49b5975468a8edc", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13350, "estimated_input_tokens": 3338, 
"estimated_eval_tokens": 6932 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These issues are mostly unrelated regression/bug reports spanning multimodal chat templating, v5 initialization, serialization, and model-specific bugs. No soft pair is close enough to merge as the same underlying fix.", "confidence": 0.87, "canonical_issue_reason": "issue:43824 is the most central/high-signal item here, with the most discussion and inbound references, but it should be treated as an anchor issue rather than a true duplicate cluster representative.", "canonical_pr_reason": null, "best_issue_reason": "issue:43824 is the best standalone issue in this set because it is clearly described, has broad impact, and has the strongest evidence of community traction.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43824", "right": "issue:44589", "accept": false, "reason": "Both mention model/runtime failures, but one is an import/export problem for Qwen2.5-VL while the other is a storage-type error for Float8; different code paths and fixes." }, { "left": "issue:43646", "right": "issue:44466", "accept": false, "reason": "Both are v5-related regressions, but one breaks custom model initialization and the other changes lm_head.weight serialization behavior; not the same bug." }, { "left": "issue:43572", "right": "issue:44291", "accept": false, "reason": "Both involve init/config compatibility in v5, but one is a missing config field and the other is an unexpected constructor argument from init_empty_weights; distinct failures." }, { "left": "issue:43688", "right": "issue:43746", "accept": false, "reason": "One is an auxiliary-loss normalization bug in MoE models, the other is a PEFT/local-checkpoint loading issue for GraniteSpeech; unrelated." 
}, { "left": "issue:43994", "right": "issue:44493", "accept": false, "reason": "Both affect model outputs, but the first is a SigLIP2 AutoModel/pipeline misbehavior and the second is a position-id key warning across many models; too different to unify." }, { "left": "issue:44521", "right": "issue:45290", "accept": false, "reason": "Both are about apply_chat_template, but one is all-zero assistant masks for multimodal inputs while the other is a crash on tool-call messages with no content; different trigger and symptom." }, { "left": "issue:43329", "right": "issue:43606", "accept": false, "reason": "Different subsystems and failure modes: multimodal token counting/video branch bug versus CPU-offload device mismatch in Bark." }, { "left": "issue:33357", "right": "issue:33666", "accept": false, "reason": "One is a MacOS bus error with a community CLIP model; the other is a Qwen2-VL multi-GPU training request. No shared bug." }, { "left": "issue:39692", "right": "issue:43295", "accept": false, "reason": "Both mention multimodal processor/model API friction, but one is a documentation-example mismatch plus quantization failure and the other is a regression around processor.tokenizer access; not the same concrete issue." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 60, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:15354", "issue:38175", "issue:39401", "issue:43232", "issue:43257", "issue:43278", "issue:43540", "issue:43575", "issue:43701", "issue:44315", "issue:44554", "issue:44568", "issue:44792", "issue:44849", "issue:45200", "issue:45405", "issue:45446" ], "soft_pairs": [ "issue:43232|issue:44849", "issue:43575|issue:45405", "issue:39401|issue:44568", "issue:43257|issue:45405", "issue:44792|issue:45405", "issue:43540|issue:45200", "issue:15354|issue:38175", "issue:43278|issue:43701", "issue:44554|issue:45446", "issue:44315|issue:44849" ], "prepared_review_unit_hash": "63a8cfb326b2784333358f61c73c5485b4c6a977a172e3bf24e93add7d7d65e0", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 10, "serialized_chars": 12757, "estimated_input_tokens": 3190, "estimated_eval_tokens": 6636 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is not a duplicate set; it mixes unrelated bugs across different models, tokenizers, generation paths, and environment/version checks. No soft pair looks like the same underlying issue, and there are no PRs to consider.", "confidence": 0.98, "canonical_issue_reason": "No single issue is a clear canonical representative because the items are largely unrelated and span different products/subsystems with distinct failure modes.", "canonical_pr_reason": null, "best_issue_reason": "No best issue candidate: the cluster does not form a coherent duplicate group, so choosing any one issue would be arbitrary.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43232", "right": "issue:44849", "accept": false, "reason": "Different bugs: one is generation kwargs handling after sync_gpus, the other is a Qwen3.5 output_hidden_states issue." }, { "left": "issue:43575", "right": "issue:45405", "accept": false, "reason": "Qwen2 tensor-parallel OOM and a PEFT version bump are unrelated." }, { "left": "issue:39401", "right": "issue:44568", "accept": false, "reason": "Tokenizer offset_mapping mismatch is a different problem from missing BOS/EOS with add_special_tokens." }, { "left": "issue:43257", "right": "issue:45405", "accept": false, "reason": "MoE weight conversion under accelerate+deepspeed is unrelated to an unreleased PEFT minimum version." }, { "left": "issue:44792", "right": "issue:45405", "accept": false, "reason": "A janus image-generation test failure does not match a dependency version bump issue." }, { "left": "issue:43540", "right": "issue:45200", "accept": false, "reason": "Video-input validation in Qwen3OmniMoe and Gemma 4 mm_token_type_ids defaults are different code paths and symptoms." }, { "left": "issue:15354", "right": "issue:38175", "accept": false, "reason": "TorchScript GeneratorExp export failure and zero probabilities in siglip2 are unrelated model behaviors." }, { "left": "issue:43278", "right": "issue:43701", "accept": false, "reason": "Embedding dtype drift in evaluate is distinct from a resume_from_checkpoint key mismatch." 
}, { "left": "issue:44554", "right": "issue:45446", "accept": false, "reason": "MPS attention correctness with mismatched head dims is unrelated to a PyTorch version guard for AuxRequest import." }, { "left": "issue:44315", "right": "issue:44849", "accept": false, "reason": "Liger Kernel not applied with model_init and Qwen3.5 hidden_states handling are separate bugs." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 61, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:37428", "issue:43334", "issue:43525", "issue:43582", "issue:43606", "issue:43844", "issue:43957", "issue:44112", "issue:44442", "issue:44466", "issue:44554", "issue:44964", "issue:44977", "issue:45127", "issue:45198", "issue:45200", "issue:45325", "issue:45356" ], "soft_pairs": [ "issue:43582|issue:44977", "issue:43844|issue:44964", "issue:37428|issue:43957", "issue:43606|issue:44112", "issue:45200|issue:45325", "issue:44554|issue:45356", "issue:43334|issue:43525", "issue:44442|issue:45198", "issue:44466|issue:45127" ], "prepared_review_unit_hash": "432c26896363e823d2845c029ce74dddb01e25fb818e3cc43bbb091f184b4143", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13448, "estimated_input_tokens": 3362, "estimated_eval_tokens": 6980 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This set is mostly a grab-bag of unrelated Transformers bugs across model loading, tokenizer/config regressions, device/offload issues, and attention/serialization problems. I don\u2019t see any pair that is confidently the same underlying bug or change from the available titles alone.", "confidence": 0.22, "canonical_issue_reason": "`issue:43957` is the broadest and most representative report: a concrete Transformers v5 model-loading regression with multiple affected models and a clear shared code path (`torch.device(\"meta\")` loading).", "canonical_pr_reason": null, "best_issue_reason": "`issue:43957` has the best combination of scope and specificity among this set: it names the failure mode, version range, and multiple impacted models, making it the strongest standalone bug report here.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43582", "right": "issue:44977", "accept": false, "reason": "Different failures: Apple Silicon allocator warmup `TypeError` vs Qwen3.5 flash-attention generation bug." }, { "left": "issue:43844", "right": "issue:44964", "accept": false, "reason": "Unrelated problems: ZeRO-3 gradient instability during training vs a model-loading failure for Phi-4 multimodal." }, { "left": "issue:37428", "right": "issue:43957", "accept": false, "reason": "Both involve loading-related regressions, but one is an import error for a flash-attention helper while the other is a meta-device loading bug; no clear same root cause." }, { "left": "issue:43606", "right": "issue:44112", "accept": false, "reason": "Both mention device mismatches, but the affected models and concrete failure modes differ: CPU offload on bark-small vs a stale GraniteSpeech CI test." }, { "left": "issue:45200", "right": "issue:45325", "accept": false, "reason": "Both are multimodal-related, but they describe different bugs in different model families and different mechanisms (`mm_token_type_ids` defaulting vs rope index scaling)." 
}, { "left": "issue:44554", "right": "issue:45356", "accept": false, "reason": "Completely different areas: MPS attention correctness vs tokenizer codec/regex regression." }, { "left": "issue:43334", "right": "issue:43525", "accept": false, "reason": "Same symptom (`pad_token_id` missing), but different model configs and no evidence they share the exact same underlying change or fix." }, { "left": "issue:44442", "right": "issue:45198", "accept": false, "reason": "Different tokenizer-related failures in different model families; not enough to treat as one bug." }, { "left": "issue:44466", "right": "issue:45127", "accept": false, "reason": "Serialization of tied weights depending on device is unrelated to LoRA merging with extended vocabulary and model collapse." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 62, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:37428", "issue:40990", "issue:43295", "issue:43388", "issue:43493", "issue:43526", "issue:43572", "issue:43701", "issue:43824", "issue:43844", "issue:43873", "issue:44464", "issue:44589", "issue:44610", "issue:44625", "issue:44898", "issue:44936", "issue:45412" ], "soft_pairs": [ "issue:44898|issue:45412", "issue:43388|issue:43526", "issue:37428|issue:43824", "issue:43295|issue:44610", "issue:40990|issue:44625", "issue:43493|issue:43701", "issue:43873|issue:44464", "issue:43844|issue:44936", "issue:43572|issue:44589" ], "prepared_review_unit_hash": "72413086539acc69bcc9314454f3dd8151a515a575e43b6c5f136c5fef7776cc", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, 
"estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13205, "estimated_input_tokens": 3302, "estimated_eval_tokens": 6860 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is a loose collection of unrelated issue reports across different models and subsystems. The soft pairs are only superficially similar by wording; none look like the same underlying bug or change.", "confidence": 0.96, "canonical_issue_reason": "No canonical issue: the items describe distinct failures, regressions, and model-specific behaviors rather than one shared bug.", "canonical_pr_reason": null, "best_issue_reason": "No single issue is a good global representative because the cluster is heterogeneous and not a duplicate set.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44898", "right": "issue:45412", "accept": false, "reason": "Different problems: Perceiver non-default image size interpolation vs RT-DETR memory not being freed. Different model families and failure modes." }, { "left": "issue:43388", "right": "issue:43526", "accept": false, "reason": "Both involve labels, but one is Trainer metric-gathering trimming the last batch and the other is BeitImageProcessorFast reduce_labels returning only one label. Different code paths and bugs." }, { "left": "issue:37428", "right": "issue:43824", "accept": false, "reason": "Both are import errors, but for different symbols and modules: flash attention helper vs Qwen2.5-VL class export. Not the same missing export." }, { "left": "issue:43295", "right": "issue:44610", "accept": false, "reason": "Regression in custom processor/tokenizer image passing vs OmDet-Turbo processor/model image-size mismatch. Different subsystems and fixes." 
}, { "left": "issue:40990", "right": "issue:44625", "accept": false, "reason": "OpenAI GPT-OSS perplexity issue vs Qwen3.5 num_labels propagation. Unrelated model behavior and configuration bugs." }, { "left": "issue:43493", "right": "issue:43701", "accept": false, "reason": "SigLIP2 implementation discrepancy vs resume_from_checkpoint key mismatch. Different functionality and no shared code-path problem." }, { "left": "issue:43873", "right": "issue:44464", "accept": false, "reason": "Quantization/offloading behavior vs chunked generation with compiled forward. Both mention runtime behavior, but they are distinct mechanisms and bugs." }, { "left": "issue:43844", "right": "issue:44936", "accept": false, "reason": "ZeRO-3 gradient growth during training vs trainer.evaluate() failing after trainer.train(). Different training issues and root causes." }, { "left": "issue:43572", "right": "issue:44589", "accept": false, "reason": "StableLmConfig missing pad_token_idx after update vs storage type lookup error for Float8_e4m3fnStorage. Different versioning/config and serialization problems." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 63, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:38175", "issue:43572", "issue:43575", "issue:43819", "issue:43824", "issue:43975", "issue:44360", "issue:44464", "issue:44514", "issue:44521", "issue:44561", "issue:44625", "issue:44849", "issue:44871", "issue:44987", "issue:45003", "issue:45083", "issue:45412" ], "soft_pairs": [ "issue:43572|issue:44987", "issue:44625|issue:44871", "issue:44514|issue:44521", "issue:44360|issue:44561", "issue:43824|issue:43975", "issue:38175|issue:45412", "issue:44849|issue:45003", "issue:43819|issue:45083", "issue:43575|issue:44464" ], "prepared_review_unit_hash": "a2e62d57337148b45e35d892fca80c4ed776c9af6905f28bf6e3028293554002", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13024, "estimated_input_tokens": 3256, "estimated_eval_tokens": 6768 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These are mostly unrelated issue reports that only share broad Transformers themes. I do not see any pair that clearly describes the same underlying bug or change, so all soft edges should be rejected.", "confidence": 0.94, "canonical_issue_reason": "Issue 44521 is the most representative cluster member: it is open, has multiple references/discussion, and describes a concrete, reproducible multimodal `apply_chat_template` bug.", "canonical_pr_reason": null, "best_issue_reason": "Issue 44521 is the strongest standalone issue because it is active, detailed, and tied to a specific user-facing regression with clear reproduction context.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43572", "right": "issue:44987", "accept": false, "reason": "Different failures: a missing `pad_token_idx` config field versus a model load failure for a specific repo. No shared code path is evident." }, { "left": "issue:44625", "right": "issue:44871", "accept": false, "reason": "One is about Qwen3.5 label propagation, the other about Gemma EOS token configuration. Different models and different config bugs." }, { "left": "issue:44514", "right": "issue:44521", "accept": false, "reason": "Both involve `apply_chat_template`, but one is a batching crash with `padding=False` while the other is zeroed assistant masks. Related area, not the same concrete bug." }, { "left": "issue:44360", "right": "issue:44561", "accept": false, "reason": "Unrelated topics: DSA indexer activation logic versus removal of `is_torch_fx_available` breaking trust-remote-code models." }, { "left": "issue:43824", "right": "issue:43975", "accept": false, "reason": "ImportError for a Qwen2.5-VL class versus DeepSeek detokenization behavior. Different models and different failure modes." }, { "left": "issue:38175", "right": "issue:45412", "accept": false, "reason": "SigLIP probability output issue and RT-DETR memory cleanup issue are unrelated model/runtime bugs." 
}, { "left": "issue:44849", "right": "issue:45003", "accept": false, "reason": "Qwen3.5 hidden-state output bug versus unsafe `sys.modules` access in `modeling_utils`. No clear shared underlying defect." }, { "left": "issue:43819", "right": "issue:45083", "accept": false, "reason": "DAC latent/forward mismatch is a codec bug; `_get_feat_extract_output_lengths` in qwen3_omni_moe is a separate feature-extraction helper issue." }, { "left": "issue:43575", "right": "issue:44464", "accept": false, "reason": "Tensor-parallel OOM when loading a model versus inconsistent chunked generation with compiled forward. Different symptoms, different likely causes." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 64, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:38175", "issue:42915", "issue:43278", "issue:43295", "issue:43450", "issue:43493", "issue:43606", "issue:43824", "issue:43827", "issue:43856", "issue:43873", "issue:43901", "issue:43994", "issue:44568", "issue:44610", "issue:44857", "issue:44991", "issue:45083" ], "soft_pairs": [ "issue:42915|issue:43824", "issue:43295|issue:43873", "issue:43278|issue:43856", "issue:43295|issue:43450", "issue:43295|issue:43994", "issue:44568|issue:44991", "issue:43606|issue:45083", "issue:43827|issue:43901", "issue:44610|issue:44857", "issue:38175|issue:43493" ], "prepared_review_unit_hash": "3f064bd4395ba9a0d8b25cc4c05660bcb274b750f920769572b82bb676317847", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, 
"soft_pair_count": 10, "serialized_chars": 13439, "estimated_input_tokens": 3360, "estimated_eval_tokens": 6976 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "Mostly a mixed bag of unrelated issues; the only plausible duplicate cluster is the SigLIP2 incorrect-output reports, while the rest are separate bugs or docs regressions.", "confidence": 0.67, "canonical_issue_reason": "issue:43994 is the clearest and broadest SigLIP2 user-facing report, covering incorrect outputs across AutoModel and pipeline usage and likely subsuming the narrower symptom reports.", "canonical_pr_reason": null, "best_issue_reason": "issue:43994 is the strongest representative issue overall because it captures the concrete broken behavior most directly and broadly.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:42915", "right": "issue:43824", "accept": false, "reason": "Different models and failures: FineGrainedFP8Config on Qwen3Moe vs a missing Qwen2.5-VL import; not the same bug." }, { "left": "issue:43295", "right": "issue:43873", "accept": false, "reason": "One is a processor.tokenizer regression, the other is quantization/offload behavior; same general area but not one concrete defect." }, { "left": "issue:43278", "right": "issue:43856", "accept": false, "reason": "Both mention dtype/memory, but they describe different Qwen3 MoE problems with no clear shared code-path." }, { "left": "issue:43295", "right": "issue:43450", "accept": false, "reason": "Processor.tokenizer regression and batched video shape bug are distinct processor issues." }, { "left": "issue:43295", "right": "issue:43994", "accept": false, "reason": "Custom model processor API breakage is unrelated to SigLIP2 incorrect outputs." }, { "left": "issue:44568", "right": "issue:44991", "accept": false, "reason": "Both are tokenizer-related, but for different models and different failure modes." 
}, { "left": "issue:43606", "right": "issue:45083", "accept": false, "reason": "CPU-offload device mismatch and a Qwen3 Omni MoE helper-function issue are unrelated." }, { "left": "issue:43827", "right": "issue:43901", "accept": false, "reason": "Both are docs issues, but they concern different API references and should not be merged as the same defect." }, { "left": "issue:44610", "right": "issue:44857", "accept": false, "reason": "OmDet-Turbo input-size mismatch and LwDetrImageLoss AMP crash are separate model/runtime bugs." }, { "left": "issue:38175", "right": "issue:43493", "accept": true, "reason": "Both point to the same SigLIP2 wrong-output problem; one reports zero probabilities, the other describes a HF-vs-JAX implementation discrepancy." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 65, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:37428", "issue:43425", "issue:43450", "issue:43493", "issue:43653", "issue:44112", "issue:44351", "issue:44448", "issue:44462", "issue:44488", "issue:44561", "issue:44568", "issue:44743", "issue:44779", "issue:45127", "issue:45245", "issue:45290", "issue:45381" ], "soft_pairs": [ "issue:45127|issue:45245", "issue:44462|issue:44561", "issue:44488|issue:44568", "issue:43450|issue:43493", "issue:44561|issue:44779", "issue:37428|issue:43425", "issue:45290|issue:45381", "issue:43653|issue:44112", "issue:37428|issue:44351", "issue:44448|issue:44743" ], "prepared_review_unit_hash": "c5153429bd99c6c30eeb45d503aa2736fee7d626d0ff8850c19634509ac0bf99", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 
169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13458, "estimated_input_tokens": 3365, "estimated_eval_tokens": 6986 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is mostly a set of soft-similarity false positives: the issues span unrelated tokenizer, video, cache, import, CI, and model-specific regressions. I do not see any pair that should be merged as the same underlying bug.", "confidence": 0.94, "canonical_issue_reason": "Issue 44462 is the cleanest concrete regression in the set (AutoTokenizer/tokenizer.json loading) and the most plausible anchor, but the overall cluster is still heterogeneous.", "canonical_pr_reason": null, "best_issue_reason": "Issue 44462 is the most broadly user-facing and reusable bug report here, but it is not a good duplicate target for the other items.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:45127", "right": "issue:45245", "accept": false, "reason": "Extended-vocabulary LoRA collapse is a model-merging/output-quality bug; the 2^24 categories error is a categorical feature-limit RuntimeError. Different failure modes and code paths." }, { "left": "issue:44462", "right": "issue:44561", "accept": false, "reason": "AutoTokenizer ignoring tokenizer.json is a loading-resolution bug; removal of is_torch_fx_available breaks trust_remote_code imports. Different subsystems and symptoms." }, { "left": "issue:44488", "right": "issue:44568", "accept": false, "reason": "Loading cjvt/sleng-bert failing and mdeberta-v3-base not adding BOS/EOS with add_special_tokens=True are both tokenizer issues, but not the same concrete bug." 
}, { "left": "issue:43450", "right": "issue:43493", "accept": false, "reason": "Batched video processor shape mismatch and SigLIP2 HF/JAX discrepancy concern different components and likely different root causes." }, { "left": "issue:44561", "right": "issue:44779", "accept": false, "reason": "A trust_remote_code import break from a removed helper is unrelated to DeepSeek tokenizer producing incorrect results in v5." }, { "left": "issue:37428", "right": "issue:43425", "accept": false, "reason": "Both mention compatibility/import problems, but one is a missing flash-attention helper import and the other is a Torch 2.10 incompatibility report; not the same bug." }, { "left": "issue:45290", "right": "issue:45381", "accept": false, "reason": "Chat template crashes on assistant tool-call messages, while the Qwen2.5-VL report is about incorrect vision_position_ids for video input. Different paths." }, { "left": "issue:43653", "right": "issue:44112", "accept": false, "reason": "BigBirdTokenizer special-token registration causing empty decode is unrelated to a GraniteSpeech CI device-override test failure." }, { "left": "issue:37428", "right": "issue:44351", "accept": false, "reason": "These are both import errors, but they involve different missing symbols (_flash_supports_window_size vs HybridCache) and different affected code paths." }, { "left": "issue:44448", "right": "issue:44743", "accept": false, "reason": "Pegasus v4/v5 output drift and Qwen3_5 recurrent-state reset with cache are distinct model-specific regressions." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 66, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:29942", "issue:32090", "issue:39692", "issue:43262", "issue:43278", "issue:43450", "issue:43526", "issue:43575", "issue:43611", "issue:43819", "issue:43825", "issue:43931", "issue:44112", "issue:44186", "issue:44265", "issue:45198", "issue:45200", "issue:45381" ], "soft_pairs": [ "issue:44186|issue:45198", "issue:43450|issue:43575", "issue:39692|issue:43526", "issue:29942|issue:32090", "issue:43278|issue:43825", "issue:44265|issue:45198", "issue:43819|issue:44112", "issue:43611|issue:43931", "issue:43262|issue:43450", "issue:45200|issue:45381" ], "prepared_review_unit_hash": "6d8c0f28f4c2ce06c468eda6e8041416fd3c725f954e61ed48cdacfe2e8f4332", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13488, "estimated_input_tokens": 3372, "estimated_eval_tokens": 7000 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "No duplicate cluster emerges here: the issues span unrelated subsystems and failure modes (tokenization, loading, export, CI, docs, audio/video processors). All soft edges should be rejected.", "confidence": 0.95, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44186", "right": "issue:45198", "accept": false, "reason": "Different models and bugs: LayoutLMv2 NER/padding crash vs Wav2Vec2 save_pretrained/tokenization failure." }, { "left": "issue:43450", "right": "issue:43575", "accept": false, "reason": "Unrelated problems: batched video shape bug vs tensor-parallel OOM when loading a Qwen2 model." }, { "left": "issue:39692", "right": "issue:43526", "accept": false, "reason": "Docs/example mismatch and quantization failure are unrelated to BeitImageProcessorFast reduce_labels behavior." }, { "left": "issue:29942", "right": "issue:32090", "accept": false, "reason": "Flash Attention 2 test failures and Trainer _gpu_broadcast_one NoneType errors are different code paths." }, { "left": "issue:43278", "right": "issue:43825", "accept": false, "reason": "Embedding dtype regression in eval is unrelated to pipeline translation support/error-message behavior." }, { "left": "issue:44265", "right": "issue:45198", "accept": false, "reason": "torch.export/torch_compilable_check failures are a different issue from Wav2Vec2 save/tokenization problems." }, { "left": "issue:43819", "right": "issue:44112", "accept": false, "reason": "DAC from_latents/forward mismatch is unrelated to the GraniteSpeech stale device override CI failure." }, { "left": "issue:43611", "right": "issue:43931", "accept": false, "reason": "Base_model_prefix loading regression and Qwen3-VL weight-shape mismatch are distinct loading bugs." }, { "left": "issue:43262", "right": "issue:43450", "accept": false, "reason": "Audio chat-template sampling-rate default bug is not the same as batched video output-shape bug." 
}, { "left": "issue:45200", "right": "issue:45381", "accept": false, "reason": "Gemma 4 multimodal token-type-id defaults and Qwen2.5-VL video position-id issues are separate problems." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 67, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:29127", "issue:32090", "issue:38175", "issue:39692", "issue:43335", "issue:43404", "issue:43450", "issue:43526", "issue:43550", "issue:43819", "issue:44448", "issue:44493", "issue:44779", "issue:44855", "issue:44977", "issue:45083", "issue:45200", "issue:45229" ], "soft_pairs": [ "issue:43404|issue:45200", "issue:44855|issue:45083", "issue:43819|issue:44493", "issue:39692|issue:44493", "issue:44448|issue:44779", "issue:38175|issue:44493", "issue:44977|issue:45229", "issue:43450|issue:43526", "issue:43550|issue:45083", "issue:43335|issue:45083", "issue:43335|issue:43550", "issue:29127|issue:32090" ], "prepared_review_unit_hash": "2af89e0499d195ac955af2f3b50d101da436088a633b5c945a18dcd838e6618b", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 12, "serialized_chars": 14160, "estimated_input_tokens": 3540, "estimated_eval_tokens": 7336 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "No convincing duplicate cluster here: the items span unrelated models, processors, configs, and runtime failures. All soft edges should be rejected.", "confidence": 0.96, "canonical_issue_reason": "Issue 44493 is the broadest-sounding report, but the set is too heterogeneous to support a real canonical duplicate issue.", "canonical_pr_reason": null, "best_issue_reason": "Issue 44493 is the closest to a cross-model regression umbrella, but it still does not subsume the other reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43404", "right": "issue:45200", "accept": false, "reason": "Different model families and different failure modes: weight tying in Mistral3 vs required multimodal token type ids in Gemma 4." }, { "left": "issue:44855", "right": "issue:45083", "accept": false, "reason": "One is a Python 3.13 import/torch.jit parsing error in DebertaV2; the other is a helper-length calculation bug in qwen3_omni_moe." }, { "left": "issue:43819", "right": "issue:44493", "accept": false, "reason": "DAC forward/latent mismatch is a codec bug, while 44493 is a position-id key regression across many models." }, { "left": "issue:39692", "right": "issue:44493", "accept": false, "reason": "SigLIP2 docs/quantization example issues are documentation and example failures, not the same bug as a position-id mismatch." }, { "left": "issue:44448", "right": "issue:44779", "accept": false, "reason": "Both are version regressions, but one is Pegasus generation output drift and the other is Deepseek tokenizer correctness; different code paths." }, { "left": "issue:38175", "right": "issue:44493", "accept": false, "reason": "Unexpected zero probabilities in SigLIP2 is a model output issue, not the same underlying problem as unexpected position-id keys." }, { "left": "issue:44977", "right": "issue:45229", "accept": false, "reason": "Flash-attention generation failure in Qwen3.5 is unrelated to Gemma 4 multi-GPU CUDA OOM." 
}, { "left": "issue:43450", "right": "issue:43526", "accept": false, "reason": "Both touch processors, but batched video shape handling and Beit reduce_labels behavior are distinct bugs in different components." }, { "left": "issue:43550", "right": "issue:45083", "accept": false, "reason": "torch.compile SDPA failure in Bamba is unrelated to qwen3_omni_moe feature-length helper behavior." }, { "left": "issue:43335", "right": "issue:45083", "accept": false, "reason": "SwitchTransformers sparse-layer creation on zero layers is a config construction bug, not the same as the qwen3_omni_moe helper issue." }, { "left": "issue:43335", "right": "issue:43550", "accept": false, "reason": "Different subsystems and symptoms: SwitchTransformers config logic vs Bamba torch.compile/SDPA failure." }, { "left": "issue:29127", "right": "issue:32090", "accept": false, "reason": "LayoutLMv3 error-message clarity and Trainer/_gpu_broadcast_one NoneType failure are unrelated issues." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 68, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:40444", "issue:43066", "issue:43335", "issue:43504", "issue:43526", "issue:43606", "issue:43653", "issue:43723", "issue:44448", "issue:44462", "issue:44554", "issue:44857", "issue:44898", "issue:44977", "issue:45072", "issue:45198", "issue:45372", "issue:45406" ], "soft_pairs": [ "issue:44857|issue:45072", "issue:43066|issue:44462", "issue:40444|issue:43526", "issue:44448|issue:44977", "issue:43606|issue:44857", "issue:43723|issue:44554", "issue:45372|issue:45406", "issue:43335|issue:43653", "issue:44898|issue:45198", "issue:43335|issue:43504" ], 
"prepared_review_unit_hash": "95875dde5e0d1cd1607e57653af88cf98e9143a73d988747f32a19d11daa27d3", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13586, "estimated_input_tokens": 3397, "estimated_eval_tokens": 7050 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is a heterogeneous mix of unrelated Transformers bugs (tokenizers, vision models, model config, runtime/device issues, and processor loading). None of the soft pairs look like the same underlying defect, so no duplicate merge is warranted.", "confidence": 0.96, "canonical_issue_reason": "No canonical issue: the items cover distinct bugs across different models and subsystems, and the soft pairs do not share a single concrete failure mode.", "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44857", "right": "issue:45072", "accept": false, "reason": "Both involve dtype/device-related failures, but they affect different models and different paths (LwDetr loss under AMP/CUDA vs SwitchTransformers/TimmWrapperModel bfloat16 inference)." }, { "left": "issue:43066", "right": "issue:44462", "accept": false, "reason": "Both are tokenizer-loading regressions in v5, but one is about decoder type selection and the other about AutoTokenizer ignoring tokenizer.json; different bugs and fixes." }, { "left": "issue:40444", "right": "issue:43526", "accept": false, "reason": "These are unrelated vision/data-processing issues: Qwen2.5-VL multi-image IterableDataset finetuning vs BeitImageProcessorFast reduce_labels behavior." 
}, { "left": "issue:44448", "right": "issue:44977", "accept": false, "reason": "Different models and symptoms: Pegasus output regression across v4/v5 vs Qwen3.5 generation breaking with flash-attention." }, { "left": "issue:43606", "right": "issue:44857", "accept": false, "reason": "Both are runtime failures, but one is a CPU offload device mismatch for bark-small and the other is a mixed-precision loss crash for LwDetr; not the same code-path bug." }, { "left": "issue:43723", "right": "issue:44554", "accept": false, "reason": "Tokenizer AutoTokenizer loading in v5 is unrelated to the MPS attention correctness issue when value head dim differs from query." }, { "left": "issue:45372", "right": "issue:45406", "accept": false, "reason": "Both concern Gemma 4 processor handling, but one is a mistral_common import failure and the other is a missing _tokenizer attribute in serve; different concrete failures." }, { "left": "issue:43335", "right": "issue:43653", "accept": false, "reason": "Unrelated bugs: SwitchTransformers sparse layer construction vs BigBirdTokenizer mask token registration/empty decode output." }, { "left": "issue:44898", "right": "issue:45198", "accept": false, "reason": "Different models and failure modes: Perceiver image classification at non-default resolution vs Wav2Vec2 save_pretrained/tokenization failure." }, { "left": "issue:43335", "right": "issue:43504", "accept": false, "reason": "These affect different model families and different defects: SwitchTransformers layer creation vs Beit semantic-segmentation preset loading with a legacy field." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 69, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:34689", "issue:35141", "issue:37428", "issue:41720", "issue:43278", "issue:43329", "issue:43335", "issue:43550", "issue:43653", "issue:43873", "issue:44112", "issue:44493", "issue:44987", "issue:45072", "issue:45083", "issue:45229", "issue:45372", "issue:45459" ], "soft_pairs": [ "issue:43335|issue:44112", "issue:43653|issue:45083", "issue:43335|issue:45072", "issue:37428|issue:44987", "issue:43873|issue:44493", "issue:43873|issue:45229", "issue:43550|issue:44112", "issue:34689|issue:35141", "issue:45372|issue:45459", "issue:41720|issue:45229", "issue:43278|issue:43329" ], "prepared_review_unit_hash": "248b5abaed01824c55b59a608f2fd29f6fcfa43aef9cf970bf0626dc9426d20e", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 11, "serialized_chars": 13916, "estimated_input_tokens": 3479, "estimated_eval_tokens": 7214 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "No convincing duplicate pairs: the listed issues span unrelated subsystems and failure modes, so the soft-similarity matches look like false positives.", "confidence": 0.97, "canonical_issue_reason": "No clear canonical issue exists here; the items are heterogeneous (config bugs, import errors, tokenizer issues, device-mapping, quantization, and multimodal processor problems).", 
"canonical_pr_reason": null, "best_issue_reason": "No issue is a better representative duplicate target than the others, because none of the soft-linked items describe the same underlying bug.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43335", "right": "issue:44112", "accept": false, "reason": "Different bugs: SwitchTransformers sparse-layer config vs a GraniteSpeech CI device-override failure." }, { "left": "issue:43653", "right": "issue:45083", "accept": false, "reason": "Tokenizer special-token registration bug vs qwen3_omni_moe feature-length helper behavior; unrelated code paths." }, { "left": "issue:43335", "right": "issue:45072", "accept": false, "reason": "Config-layer creation bug and bfloat16 dtype mismatch are separate failure modes." }, { "left": "issue:37428", "right": "issue:44987", "accept": false, "reason": "Flash-attention import error vs model loading failure for a specific repo; no shared underlying defect." }, { "left": "issue:43873", "right": "issue:44493", "accept": false, "reason": "Quantization/offloading behavior vs unexpected position-id keys; different symptoms and likely different causes." }, { "left": "issue:43873", "right": "issue:45229", "accept": false, "reason": "Both involve inference/memory, but one is quantization offloading and the other is multi-GPU OOM; not the same concrete bug." }, { "left": "issue:43550", "right": "issue:44112", "accept": false, "reason": "torch.compile + SDPA issue in Bamba is unrelated to GraniteSpeech CI stale device override behavior." }, { "left": "issue:34689", "right": "issue:35141", "accept": false, "reason": "Llama 3.2 vision model loading regression is not the same as embedding resize/reinitialization with untied embeddings." }, { "left": "issue:45372", "right": "issue:45459", "accept": false, "reason": "Gemma 4 processor import failure from mistral_common is unrelated to protobuf error masking in tokenizers." 
}, { "left": "issue:41720", "right": "issue:45229", "accept": false, "reason": "Qwen3 auto device-mapping cuda assert and Gemma4 multi-GPU OOM are distinct runtime problems." }, { "left": "issue:43278", "right": "issue:43329", "accept": false, "reason": "Embedding dtype drift in evaluate is unrelated to an undefined video-token counting branch." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 70, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:29942", "issue:32090", "issue:33357", "issue:35141", "issue:42915", "issue:42994", "issue:43065", "issue:43720", "issue:43976", "issue:43994", "issue:44112", "issue:44442", "issue:44448", "issue:44466", "issue:45072", "issue:45412", "issue:45440" ], "soft_pairs": [ "issue:43720|issue:44112", "issue:43976|issue:45440", "issue:29942|issue:35141", "issue:43065|issue:43994", "issue:43065|issue:45412", "issue:44442|issue:45072", "issue:44448|issue:44466", "issue:32090|issue:33357", "issue:43994|issue:45412", "issue:42915|issue:42994" ], "prepared_review_unit_hash": "2f977fb1502feef38350e6a2537876f69859dd86ccd1f1cf378c216361a83ae8", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 10, "serialized_chars": 12823, "estimated_input_tokens": 3206, "estimated_eval_tokens": 6668 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "No soft pair looks like the same underlying bug or change. 
The cluster is a set of unrelated issue reports connected only by broad superficial similarity (CI failures, model-specific bugs, quantization, serialization, etc.).", "confidence": 0.98, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43720", "right": "issue:44112", "accept": false, "reason": "Both are CI-related failures, but they concern different models and different failure modes: BitNet accelerated loading vs GraniteSpeech stale device override tests." }, { "left": "issue:43976", "right": "issue:45440", "accept": false, "reason": "Unrelated bugs: a Python compatibility issue in Transformers 5.1.0 versus a divergence between native DeepseekV3 and a remote implementation." }, { "left": "issue:29942", "right": "issue:35141", "accept": false, "reason": "Different code paths and symptoms: Flash Attention 2 test failures versus token embedding resizing/reinitialization in post_init." }, { "left": "issue:43065", "right": "issue:43994", "accept": false, "reason": "Different models and failures: a dummy Conv2d in Sam3PixelDecoder versus incorrect outputs for Siglip2 via AutoModel/pipeline." }, { "left": "issue:43065", "right": "issue:45412", "accept": false, "reason": "No shared bug: Sam3PixelDecoder dummy layer versus RT-DETR memory not being released on deletion." }, { "left": "issue:44442", "right": "issue:45072", "accept": false, "reason": "Different subsystems and problems: tokenizer loading failure versus bfloat16 dtype mismatches in inference." }, { "left": "issue:44448", "right": "issue:44466", "accept": false, "reason": "Both involve v5 behavior changes, but one is generation/output discrepancy and the other is serialization/tied-weight inconsistency; not the same concrete bug." 
}, { "left": "issue:32090", "right": "issue:33357", "accept": false, "reason": "Different failure modes and platforms: Trainer _gpu_broadcast_one TypeError versus MacOS bus error with a community CLIP model." }, { "left": "issue:43994", "right": "issue:45412", "accept": false, "reason": "No concrete overlap: Siglip2 nonsensical outputs versus RT-DETR memory cleanup issues." }, { "left": "issue:42915", "right": "issue:42994", "accept": false, "reason": "Both are quantization-related, but one is a model/config failure with FineGrainedFP8Config and the other is a generic quantized model saving bug." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 71, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:25251", "issue:32090", "issue:43257", "issue:43278", "issue:43381", "issue:43653", "issue:43756", "issue:43824", "issue:44206", "issue:44279", "issue:44442", "issue:44479", "issue:44743", "issue:44871", "issue:44977", "issue:45103", "issue:45372", "issue:45440" ], "soft_pairs": [ "issue:43257|issue:43278", "issue:43278|issue:43381", "issue:25251|issue:32090", "issue:44871|issue:44977", "issue:43653|issue:44442", "issue:44206|issue:44479", "issue:43756|issue:45440", "issue:43653|issue:44743", "issue:45103|issue:45372", "issue:43824|issue:44279" ], "prepared_review_unit_hash": "1fed6875f2c5534139a03005990cb06720f9904824e7bd524c1d01c5e18f4a36", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13564, 
"estimated_input_tokens": 3391, "estimated_eval_tokens": 7038 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The candidate pairs are only superficially similar (shared subsystem words like tokenizer, generation, eval, or model-loading), but they describe different concrete bugs and code paths. I would reject all soft edges and treat this cluster as unrelated issues rather than duplicates.", "confidence": 0.84, "canonical_issue_reason": "issue:43824 is the strongest representative by signal: it has a clear runtime failure, multiple inbound references, and a concrete import regression in the transformers namespace.", "canonical_pr_reason": null, "best_issue_reason": "issue:43824 is the best standalone issue in the set because it is specific, reproducible, and appears to have broader impact than the others.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43257", "right": "issue:43278", "accept": false, "reason": "Different bugs: Qwen3 MOE conversion under accelerate/deepspeed vs embedding dtype drift in evaluation." }, { "left": "issue:43278", "right": "issue:43381", "accept": false, "reason": "Both involve eval-time behavior, but one is a dtype change and the other is gradient-checkpointing incompatibility; not the same failure." }, { "left": "issue:25251", "right": "issue:32090", "accept": false, "reason": "Completely unrelated: pipeline top_k output shape change vs Trainer GPU broadcast TypeError." }, { "left": "issue:44871", "right": "issue:44977", "accept": false, "reason": "Both are generation/config issues, but one is eos_token_id mismatch in Gemma-3 and the other is a flash-attention regression in Qwen3.5." }, { "left": "issue:43653", "right": "issue:44442", "accept": false, "reason": "Both are tokenizer-related, but they affect different tokenizers and different concrete failures." 
}, { "left": "issue:44206", "right": "issue:44479", "accept": false, "reason": "Different regressions: unsupported feature-extractor arg vs video-input regression across Qwen VL variants." }, { "left": "issue:43756", "right": "issue:45440", "accept": false, "reason": "Both mention model implementation parity, but they concern different architectures and different divergence points." }, { "left": "issue:43653", "right": "issue:44743", "accept": false, "reason": "Tokenizer special-token registration bug is unrelated to recurrent-state reset when using cache." }, { "left": "issue:45103", "right": "issue:45372", "accept": false, "reason": "Different subsystems and failures: auto_docstring annotations handling vs Gemma 4 processor import breakage." }, { "left": "issue:43824", "right": "issue:44279", "accept": false, "reason": "Specific Qwen2_5_VL import failure is not the same as a generic transformers dependency issue." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 72, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:29127", "issue:29942", "issue:30064", "issue:32090", "issue:33290", "issue:33357", "issue:34567", "issue:39692", "issue:43334", "issue:43504", "issue:43531", "issue:44387", "issue:44610", "issue:44857", "issue:44977", "issue:45229", "issue:45245", "issue:45412" ], "soft_pairs": [ "issue:44610|issue:45412", "issue:29942|issue:30064", "issue:33357|issue:39692", "issue:29127|issue:34567", "issue:43504|issue:44857", "issue:32090|issue:33290", "issue:43334|issue:43531", "issue:44977|issue:45245", "issue:44387|issue:45229" ], "prepared_review_unit_hash": 
"7f76889840eedab9f3a316b80164abb5e6b6cd1000f5b82632abcf30c4b3513a", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13024, "estimated_input_tokens": 3256, "estimated_eval_tokens": 6768 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "All soft pairs look like false positives: they span different models, error surfaces, and code paths, so there is no single duplicate cluster representative here.", "confidence": 0.97, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44610", "right": "issue:45412", "accept": false, "reason": "Different problems: OmDet-Turbo processor/model input-size mismatch vs RT-DETR memory not being released after deletion. No shared underlying bug or fix." }, { "left": "issue:29942", "right": "issue:30064", "accept": false, "reason": "Flash Attention 2 test failures are unrelated to image processor handling of void segmentation maps; different subsystem and failure mode." }, { "left": "issue:33357", "right": "issue:39692", "accept": false, "reason": "MacOS bus error on a CLIP model is not the same as SigLIP2 documentation example errors/quantization failure." }, { "left": "issue:29127", "right": "issue:34567", "accept": false, "reason": "LayoutLMv3 box/info error message clarity is unrelated to TrainerState num_input_tokens_seen not updating." }, { "left": "issue:43504", "right": "issue:44857", "accept": false, "reason": "BEiT pretrained preset legacy-field loading bug is different from LwDetrImageLoss crashing under float16 AMP on CUDA." 
}, { "left": "issue:32090", "right": "issue:33290", "accept": false, "reason": "Trainer NoneType broadcast error and Adafactor+DeepSpeed OOM are distinct issues with different root causes." }, { "left": "issue:43334", "right": "issue:43531", "accept": false, "reason": "Qwen3-VL missing pad_token_id when loading is separate from Qwen3-MoE sliding_window behavior." }, { "left": "issue:44977", "right": "issue:45245", "accept": false, "reason": "Qwen3.5 flash-attention generation failure is unrelated to the 2^24 category-count runtime limit." }, { "left": "issue:44387", "right": "issue:45229", "accept": false, "reason": "Both mention OOM, but one is int4 quantization memory regression in Transformers 5.x and the other is Gemma4 multi-GPU inference memory exhaustion; not the same code-path problem." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 73, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:29127", "issue:29942", "issue:33357", "issue:36010", "issue:42915", "issue:43232", "issue:43262", "issue:43381", "issue:43388", "issue:43526", "issue:43550", "issue:43701", "issue:43931", "issue:44186", "issue:44206", "issue:44442", "issue:44857", "issue:44898" ], "soft_pairs": [ "issue:43526|issue:44186", "issue:43262|issue:44206", "issue:43381|issue:43701", "issue:44442|issue:44898", "issue:29942|issue:36010", "issue:43232|issue:43388", "issue:29127|issue:33357", "issue:42915|issue:43931", "issue:43526|issue:44857", "issue:43550|issue:43701" ], "prepared_review_unit_hash": "8dbc9e4d118c5ecdd0f9c25aefbf1bdc95a4e3521c191ac037dbcb1184a1d736", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, 
"serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13407, "estimated_input_tokens": 3352, "estimated_eval_tokens": 6960 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These issues are mostly unrelated, spanning different models, processors, and failure modes. I do not see any soft pair that looks like the same underlying bug or change.", "confidence": 0.97, "canonical_issue_reason": "Issue 44186 is the most concrete and narrowly scoped report in the set, with a specific crash path in LayoutLMv2Tokenizer and clear reproduction context.", "canonical_pr_reason": null, "best_issue_reason": "Issue 44186 is a strong representative bug report because it is actionable, well-scoped, and clearly tied to one component; the rest are similarly isolated but not better representatives of the overall set.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43526", "right": "issue:44186", "accept": false, "reason": "Different components and failures: BeitImageProcessorFast label reduction vs LayoutLMv2Tokenizer crashes on NER/batched padding. Not the same bug." }, { "left": "issue:43262", "right": "issue:44206", "accept": false, "reason": "Both involve audio, but one is about chat template sampling-rate defaults and the other is a feature extractor passing an unsupported center argument. Different code paths and symptoms." }, { "left": "issue:43381", "right": "issue:43701", "accept": false, "reason": "Gradient checkpointing in eval mode is unrelated to resume_from_checkpoint key mismatches." }, { "left": "issue:44442", "right": "issue:44898", "accept": false, "reason": "Tokenizer loading failure and Perceiver image resolution/interpolation failure are distinct issues in different subsystems." 
}, { "left": "issue:29942", "right": "issue:36010", "accept": false, "reason": "Failing Flash Attention 2 tests is unrelated to an ImportError for GenerationMixin." }, { "left": "issue:43232", "right": "issue:43388", "accept": false, "reason": "Different training/generation paths: generation kwargs update after sync_gpus vs gather_for_metrics dropping tuple labels." }, { "left": "issue:29127", "right": "issue:33357", "accept": false, "reason": "LayoutLMv3 error messaging for insufficient boxes is unrelated to a MacOS bus error with a CLIP model." }, { "left": "issue:42915", "right": "issue:43931", "accept": false, "reason": "Both mention Qwen, but one is a FineGrainedFP8Config failure and the other is a weight-shape mismatch during model loading. Not the same concrete defect." }, { "left": "issue:43526", "right": "issue:44857", "accept": false, "reason": "Both touch label-related code, but one is BeitImageProcessorFast label reduction and the other is LwDetrImageLoss crashing under AMP/CUDA. Different bugs." }, { "left": "issue:43550", "right": "issue:43701", "accept": false, "reason": "torch.compile with SDPA in Bamba-9B-v2 is unrelated to checkpoint resumption key mismatches." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 74, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:29127", "issue:35141", "issue:43316", "issue:43525", "issue:43526", "issue:43606", "issue:43701", "issue:43761", "issue:43994", "issue:44188", "issue:44206", "issue:44479", "issue:44743", "issue:44811", "issue:44964", "issue:45003", "issue:45290", "issue:45440" ], "soft_pairs": [ "issue:43606|issue:43701", "issue:29127|issue:35141", "issue:43526|issue:43994", "issue:45003|issue:45440", "issue:43761|issue:44188", "issue:44743|issue:44811", "issue:43316|issue:43525", "issue:44479|issue:45290", "issue:43994|issue:44206", "issue:44964|issue:45440" ], "prepared_review_unit_hash": "97106f796005a158b8599d1bbb7d80a1723029fc86b251352805aa4a713a6df1", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13556, "estimated_input_tokens": 3389, "estimated_eval_tokens": 7034 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "No soft pair looks like the same underlying bug or change; they mostly share only broad topic overlap. I would not merge any pair. 
Among the issues, `modeling_utils unsafely accesses sys.modules[]` is the most broadly applicable code-path bug and the best representative issue here.", "confidence": 0.92, "canonical_issue_reason": "`issue:45003` is the most cross-cutting and code-centric report: a utility-level unsafe `sys.modules` access can affect many call paths, making it a strong representative issue.", "canonical_pr_reason": null, "best_issue_reason": "`issue:45003` is the strongest global issue candidate because it describes a general runtime-safety bug rather than a model-specific symptom.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43606", "right": "issue:43701", "accept": false, "reason": "Different bugs: one is CPU-offload device mismatch in a model, the other is a checkpoint key mismatch during resume." }, { "left": "issue:29127", "right": "issue:35141", "accept": false, "reason": "Unrelated subsystems: LayoutLMv3 error messaging vs token-embedding resizing/post-init reinitialization." }, { "left": "issue:43526", "right": "issue:43994", "accept": false, "reason": "Different model/processor bugs: BEiT label reduction vs SIGLIP2 AutoModel/pipeline output corruption." }, { "left": "issue:45003", "right": "issue:45440", "accept": false, "reason": "One is a utility safety bug in `modeling_utils`; the other is a model parity regression for DeepSeekV3MoE." }, { "left": "issue:43761", "right": "issue:44188", "accept": false, "reason": "Both involve model execution behavior, but the concrete failures differ: CLIP hidden states vs attention-kernel branching under `torch.compile`." }, { "left": "issue:44743", "right": "issue:44811", "accept": false, "reason": "Different code paths and symptoms: Qwen recurrent-state reset vs Whisper `batch_decode` token skipping." }, { "left": "issue:43316", "right": "issue:43525", "accept": false, "reason": "Both are config/API discrepancies, but they concern different models and different missing/uneven attributes." 
}, { "left": "issue:44479", "right": "issue:45290", "accept": false, "reason": "Distinct regressions: video-input handling for Qwen VL variants vs chat-template tokenization with tool-call assistant messages." }, { "left": "issue:43994", "right": "issue:44206", "accept": false, "reason": "Different failure modes: SIGLIP2 inference produces bad outputs, while LASR feature extraction crashes on an unsupported argument." }, { "left": "issue:44964", "right": "issue:45440", "accept": false, "reason": "Separate model-specific problems: Phi-4 multimodal loading failure vs DeepSeekV3MoE implementation divergence." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 75, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:29942", "issue:33290", "issue:33666", "issue:34567", "issue:34689", "issue:41628", "issue:43575", "issue:43611", "issue:43646", "issue:43653", "issue:43673", "issue:43701", "issue:43931", "issue:44077", "issue:44387", "issue:44743", "issue:45216", "issue:45372" ], "soft_pairs": [ "issue:43931|issue:45216", "issue:43646|issue:44077", "issue:43673|issue:44743", "issue:43575|issue:43701", "issue:29942|issue:33666", "issue:43575|issue:44387", "issue:41628|issue:45372", "issue:43653|issue:43701", "issue:33290|issue:34567", "issue:34689|issue:43611" ], "prepared_review_unit_hash": "ae1d081d3ee7e66292682a58a25dd094d0984714dd313d979d4453f83a2d97ee", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13307, 
"estimated_input_tokens": 3327, "estimated_eval_tokens": 6910 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These items are a heterogeneous set of unrelated issues. The soft-similarity pairs share broad themes like loading, OOM, or checkpointing, but they describe different failure modes, code paths, or regressions and should not be merged as duplicates.", "confidence": 0.96, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43931", "right": "issue:45216", "accept": false, "reason": "Both concern model loading regressions, but one is a weight-shape mismatch for Qwen3-VL-30B and the other is a save_pretrained checkpoint regression for Qwen3.5; different symptoms and paths." }, { "left": "issue:43646", "right": "issue:44077", "accept": false, "reason": "Both mention Transformers 5.0 behavior changes, but one is about custom model initialization and the other about patchtsmixer post_init being disallowed; not the same bug." }, { "left": "issue:43673", "right": "issue:44743", "accept": false, "reason": "One is a missing GenerationMixin cache in chunked_prefill, the other is recurrent state resetting in modular_qwen3_5; different generation/state-management defects." }, { "left": "issue:43575", "right": "issue:43701", "accept": false, "reason": "Both involve loading/training failures, but one is tensor-parallel OOM for Qwen2-57B and the other is a resume_from_checkpoint key mismatch; unrelated root causes." }, { "left": "issue:29942", "right": "issue:33666", "accept": false, "reason": "Flash Attention 2 test failures are unrelated to Qwen2-VL multi-GPU training; no shared concrete bug or fix path." 
}, { "left": "issue:43575", "right": "issue:44387", "accept": false, "reason": "Both can manifest as OOM, but one is TP load OOM for a specific Qwen model while the other is increased CUDA reserved memory under int4 quantization; different mechanisms and fixes." }, { "left": "issue:41628", "right": "issue:45372", "accept": false, "reason": "Both are import-related, but they involve different missing symbols from different dependencies and different processor-loading failures." }, { "left": "issue:43653", "right": "issue:43701", "accept": false, "reason": "BigBirdTokenizer special-token decode bug is unrelated to resume_from_checkpoint key mismatch; one is tokenizer registration, the other checkpoint key handling." }, { "left": "issue:33290", "right": "issue:34567", "accept": false, "reason": "AdaFactor+DeepSpeed OOM and TrainerState num_input_tokens_seen not updating are different training issues with no shared code-path bug." }, { "left": "issue:34689", "right": "issue:43611", "accept": false, "reason": "Both are model-loading regressions, but one is a specific Llama 3.2 90B Vision breakage in 4.46.2 and the other is a 5.0 base_model_prefix regression; different eras and failure modes." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 76, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:33290", "issue:34689", "issue:35141", "issue:39692", "issue:43381", "issue:43493", "issue:43749", "issue:43827", "issue:43873", "issue:43931", "issue:44077", "issue:44162", "issue:44190", "issue:44206", "issue:44479", "issue:44496", "issue:44792", "issue:44871" ], "soft_pairs": [ "issue:43493|issue:43827", "issue:44077|issue:44479", "issue:39692|issue:44206", "issue:43381|issue:43873", "issue:33290|issue:35141", "issue:44162|issue:44792", "issue:44190|issue:44496", "issue:43873|issue:44871", "issue:33290|issue:34689", "issue:43749|issue:43931" ], "prepared_review_unit_hash": "6fdd79af3342e0610da2778283dcef17ae28e80f097bd8e7754edd574571faac", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13445, "estimated_input_tokens": 3362, "estimated_eval_tokens": 6980 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is heterogeneous: it mixes unrelated bugs across training, loading, quantization, docs, model-specific regressions, and test failures. 
None of the proposed soft pairs look like the same underlying issue or change, so there is no canonical duplicate target.", "confidence": 0.96, "canonical_issue_reason": "No single canonical issue: the items describe distinct defects in different code paths and models (for example, optimizer OOM, embedding reinit, loading errors, docs mistakes, and video/regression bugs).", "canonical_pr_reason": null, "best_issue_reason": "No best issue for deduplication. The cluster does not converge on one underlying bug; choosing any item would misrepresent the rest as duplicates.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43493", "right": "issue:43827", "accept": false, "reason": "Different problems: SigLIP2 implementation discrepancy versus a docs reference to removed `pipeline()`. One is model behavior, the other documentation cleanup." }, { "left": "issue:44077", "right": "issue:44479", "accept": false, "reason": "Unrelated bugs: a `patchtsmixer` post-init API restriction versus a video-input regression affecting Qwen VL models." }, { "left": "issue:39692", "right": "issue:44206", "accept": false, "reason": "Both touch docs/model utilities, but they are different failures: SigLIP2 example mistakes versus `LasrFeatureExtractor` passing an unsupported `center` arg." }, { "left": "issue:43381", "right": "issue:43873", "accept": false, "reason": "Distinct runtime paths: gradient checkpointing in eval mode versus quantization/offloading behavior. Not the same defect." }, { "left": "issue:33290", "right": "issue:35141", "accept": false, "reason": "Different root causes and subsystems: DeepSpeed Adafactor OOM versus token-embedding resize causing output embedding reinitialization." }, { "left": "issue:44162", "right": "issue:44792", "accept": false, "reason": "Separate model-specific reports: ESM2 broken versus a Janus image-generation test failure. No shared underlying bug." 
}, { "left": "issue:44190", "right": "issue:44496", "accept": false, "reason": "Dataset-loading failure in a training script is unrelated to an unrecognized model/config loading error for Olmo-Hybrid." }, { "left": "issue:43873", "right": "issue:44871", "accept": false, "reason": "Quantization/offloading behavior and Gemma `eos_token_id` config mismatch are different issues with different symptoms and fixes." }, { "left": "issue:33290", "right": "issue:34689", "accept": false, "reason": "One is an Adafactor/DeepSpeed memory issue; the other is a model-loading breakage for Llama 3.2 Vision. Not the same code-path problem." }, { "left": "issue:43749", "right": "issue:43931", "accept": false, "reason": "FSDP CPU-RAM-efficient loading and Qwen3-VL weight-shape mismatch are separate loading/regression bugs, not mergeable as one fix." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 77, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:33290", "issue:36331", "issue:42175", "issue:43316", "issue:43381", "issue:43388", "issue:43404", "issue:43441", "issue:43540", "issue:43643", "issue:43688", "issue:43716", "issue:43856", "issue:44514", "issue:44855", "issue:44877", "issue:44913", "issue:45137" ], "soft_pairs": [ "issue:43643|issue:44913", "issue:43540|issue:44514", "issue:44855|issue:45137", "issue:43316|issue:44877", "issue:42175|issue:43316", "issue:43381|issue:43856", "issue:43388|issue:43688", "issue:33290|issue:36331", "issue:43404|issue:43441", "issue:43316|issue:43716" ], "prepared_review_unit_hash": "2223d7af11b4b7bd8cb7857075391e1dcb152bc1758af582b738da23894937a5", "component_budget": { "node_count": 353, "item_count": 353, 
"soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13420, "estimated_input_tokens": 3355, "estimated_eval_tokens": 6966 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "No soft pair looks like a true duplicate; the list is a mix of unrelated bugs, with only a loose config/API theme appearing in a few titles.", "confidence": 0.95, "canonical_issue_reason": "Issue 43316 is the broadest and most reusable config/API inconsistency in the set, but it is not a duplicate representative for the others.", "canonical_pr_reason": null, "best_issue_reason": "43316 is the best single triage anchor because it is concrete, user-facing, and closest to the recurring config-loading/API theme among the items.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43643", "right": "issue:44913", "accept": false, "reason": "Both are config loading/reload inconsistencies, but one loses fields with trust_remote_code and the other resets rotary_pct on reload; different bugs and fixes." }, { "left": "issue:43540", "right": "issue:44514", "accept": false, "reason": "Both involve multimodal preprocessing, but one is a Qwen3OmniMoe video ValueError and the other is a Qwen2_5_VLProcessor batching/padding crash; not the same code path." }, { "left": "issue:44855", "right": "issue:45137", "accept": false, "reason": "Completely different failures: TorchScript parsing on Python 3.13 vs a DeepSpeed ZeRO3 deque underflow." }, { "left": "issue:43316", "right": "issue:44877", "accept": false, "reason": "Both touch config handling, but Gemma3TextConfig API mismatch and granite_speech strict loading are distinct model-specific problems." 
}, { "left": "issue:42175", "right": "issue:43316", "accept": false, "reason": "Dependency/extra-install packaging issue versus a config API discrepancy; no shared underlying bug." }, { "left": "issue:43381", "right": "issue:43856", "accept": false, "reason": "Gradient checkpointing in eval mode and Qwen3 MoE memory usage are different training/runtime issues." }, { "left": "issue:43388", "right": "issue:43688", "accept": false, "reason": "Label truncation in gather_for_metrics and auxiliary-loss normalization are unrelated evaluation vs training bugs." }, { "left": "issue:33290", "right": "issue:36331", "accept": false, "reason": "DeepSpeed OOM with Adafactor and a CustomTrainer compute_loss signature break are unrelated." }, { "left": "issue:43404", "right": "issue:43441", "accept": false, "reason": "Both are Mistral-family issues, but one is tied weights in generation and the other is a FlashAttention failure; different concrete code paths." }, { "left": "issue:43316", "right": "issue:43716", "accept": false, "reason": "Both are model/config mismatch reports, but one is an API discrepancy in config objects and the other is an image preprocessor dtype mismatch; not the same bug." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 78, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:16998", "issue:22355", "issue:25251", "issue:28282", "issue:34689", "issue:36010", "issue:41950", "issue:43065", "issue:43295", "issue:43475", "issue:43761", "issue:43825", "issue:44479", "issue:44617", "issue:44734", "issue:45127", "issue:45200", "issue:45406" ], "soft_pairs": [ "issue:43065|issue:44617", "issue:45127|issue:45200", "issue:34689|issue:36010", "issue:41950|issue:43475", "issue:16998|issue:22355", "issue:43761|issue:44479", "issue:25251|issue:28282", "issue:43295|issue:43825", "issue:44734|issue:45406" ], "prepared_review_unit_hash": "e9befd85b2e4b7e8fe51714d31d9b8ee04816dea63f77170c3b29070a65cae22", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13173, "estimated_input_tokens": 3294, "estimated_eval_tokens": 6844 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "All soft pairs are false positives: they share broad transformer/video/processor language, but each pair points to a different subsystem, model family, or failure mode. 
No duplicate cluster emerges here.", "confidence": 0.97, "canonical_issue_reason": "None of the issues form a single duplicate bug cluster; the pairwise similarities are superficial and span unrelated regressions, questions, and API breakages.", "canonical_pr_reason": "No PRs were provided, and there is no clear shared code change to anchor a canonical PR.", "best_issue_reason": "No single issue is a good canonical representative for this set because the items are heterogeneous rather than duplicates.", "best_pr_reason": "No PR candidate is available, and no merged code change is implied by these issues.", "soft_edge_verdicts": [ { "left": "issue:43065", "right": "issue:44617", "accept": false, "reason": "Different problems: a dummy Conv2d in Sam3PixelDecoder vs CUDA OOM in Sam3Video; not the same bug or fix." }, { "left": "issue:45127", "right": "issue:45200", "accept": false, "reason": "LoRA merge/model collapse on tied embeddings is unrelated to Gemma 4 mm_token_type_ids being required for text-only fine-tuning." }, { "left": "issue:34689", "right": "issue:36010", "accept": false, "reason": "One is a Llama 3.2 Vision model-loading regression; the other is a missing GenerationMixin import. Same area, different failure." }, { "left": "issue:41950", "right": "issue:43475", "accept": false, "reason": "Pipeline selecting image processors for video classification is unrelated to SAM3VisionEncoderOutput missing fpn_position_embeddings." }, { "left": "issue:16998", "right": "issue:22355", "accept": false, "reason": "A model_max_length question for DeBERTa-V3 is not the same as a missing transformers.onnx module import error." }, { "left": "issue:43761", "right": "issue:44479", "accept": false, "reason": "CLIPVisionModel hidden_states regression and Qwen video-input regression affect different models and code paths." 
}, { "left": "issue:25251", "right": "issue:28282", "accept": false, "reason": "top_k changing pipeline output nesting is unrelated to AutoModel requiring PyTorch; different symptoms and fixes." }, { "left": "issue:43295", "right": "issue:43825", "accept": false, "reason": "processor.tokenizer/image-to-tokenizer regression is separate from an incorrect translation-task error message in pipeline()." }, { "left": "issue:44734", "right": "issue:45406", "accept": false, "reason": "KV-cache continuation indexing crash in transformers serve is unrelated to Gemma4Processor missing _tokenizer." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 79, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:43295", "issue:43452", "issue:43646", "issue:43761", "issue:43906", "issue:43931", "issue:44060", "issue:44077", "issue:44242", "issue:44479", "issue:44496", "issue:44521", "issue:44534", "issue:44734", "issue:44871", "issue:44913", "issue:45290", "issue:45381" ], "soft_pairs": [ "issue:43295|issue:44871", "issue:44077|issue:44534", "issue:43452|issue:44496", "issue:43761|issue:44242", "issue:43646|issue:44913", "issue:43906|issue:45290", "issue:43931|issue:44060", "issue:44521|issue:45381", "issue:44479|issue:44734" ], "prepared_review_unit_hash": "078e1684d713e8cac1ac9f63a9a6b369c6eed8090c78365a35a9f511346aa668", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13319, "estimated_input_tokens": 3330, 
"estimated_eval_tokens": 6916 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These issues are not duplicates of one another; they span unrelated regressions in tokenization/model loading, config serialization, multimodal chat templating, model initialization, and serving paths.", "confidence": 0.96, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43295", "right": "issue:44871", "accept": false, "reason": "Both mention tokenizer/model config regressions, but one is about processor.tokenizer/image passthrough and the other is an eos_token_id mismatch in Gemma-3; different bugs." }, { "left": "issue:44077", "right": "issue:44534", "accept": false, "reason": "One concerns patchtsmixer post_init being disallowed; the other is about non-persistent buffers being filled with junk in v5. Separate failure modes." }, { "left": "issue:43452", "right": "issue:44496", "accept": false, "reason": "Both are model-loading errors, but gguf_file/AutoTokenizer loading and an unrecognized model_type in config.json are distinct root causes." }, { "left": "issue:43761", "right": "issue:44242", "accept": false, "reason": "CLIPVisionModel hidden_states output and router load-balancing loss are unrelated code paths and symptoms." }, { "left": "issue:43646", "right": "issue:44913", "accept": false, "reason": "Custom model initialization breakage is broader and different from GPTNeoX rotary_pct not persisting on reload." }, { "left": "issue:43906", "right": "issue:45290", "accept": false, "reason": "The first is an isolated repro of another issue; the second is a specific apply_chat_template crash with tool calls and empty content. Not the same bug." 
}, { "left": "issue:43931", "right": "issue:44060", "accept": false, "reason": "Qwen3-VL weight-shape mismatch and Qwen3-Next tied-weights warning are separate model/config issues." }, { "left": "issue:44521", "right": "issue:45381", "accept": false, "reason": "Both touch multimodal chat/video behavior, but one is all-zero assistant masks from apply_chat_template and the other is incorrect vision_position_ids for qwen2.5-vl video input." }, { "left": "issue:44479", "right": "issue:44734", "accept": false, "reason": "Video input regression in several Qwen VL models and a serving KV-cache indexing crash are different concrete problems." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 80, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:32090", "issue:35141", "issue:41720", "issue:42617", "issue:43065", "issue:43575", "issue:43742", "issue:43827", "issue:43994", "issue:44464", "issue:44610", "issue:44792", "issue:44871", "issue:44933", "issue:44977", "issue:45245", "issue:45362" ], "soft_pairs": [ "issue:42617|issue:43575", "issue:43575|issue:43742", "issue:44933|issue:45362", "issue:44792|issue:44977", "issue:43065|issue:44610", "issue:41720|issue:45245", "issue:44464|issue:44871", "issue:41720|issue:43575", "issue:32090|issue:35141", "issue:43827|issue:43994" ], "prepared_review_unit_hash": "fa2899739729f71c70a927b99ac75c0b92bda506dd0de77a85a8c938ba1cec6d", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 10, "serialized_chars": 
12706, "estimated_input_tokens": 3177, "estimated_eval_tokens": 6610 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The cluster is heterogeneous and spans unrelated issues in training, model loading, multimodal processing, generation, docs, and device placement. None of the soft-edge pairs look like true duplicates or the same underlying fix.", "confidence": 0.93, "canonical_issue_reason": "No single canonical issue fits this cluster: the items describe different bugs and regressions across unrelated code paths, so forcing one canonical issue would be misleading.", "canonical_pr_reason": null, "best_issue_reason": "No global best issue: the cluster is too mixed to use any one issue as a representative duplicate target.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:42617 \u2014 Not able to run 3d_parallel.py", "right": "issue:43575 \u2014 Load `Qwen2-57B-A14B-Instruct` with tp lead to OOM", "accept": false, "reason": "Different failure modes and scopes: a script execution problem versus tensor-parallel load OOM for a specific model." }, { "left": "issue:43575 \u2014 Load `Qwen2-57B-A14B-Instruct` with tp lead to OOM", "right": "issue:43742 \u2014 Key error when loading facebook/MobileLLM-125M", "accept": false, "reason": "Both are loading-time issues, but the concrete errors and models differ completely (OOM vs KeyError)." }, { "left": "issue:44933 \u2014 Nonexistant import from image_utils", "right": "issue:45362 \u2014 Qwen3.5-35B crashes with transformers chat", "accept": false, "reason": "An import/export problem is not the same underlying bug as a chat/runtime crash in a model integration." 
}, { "left": "issue:44792 \u2014 Failed test case `test_model_generate_images` for janus model", "right": "issue:44977 \u2014 Qwen3.5 cannot generate normally with flash-attention", "accept": false, "reason": "Different models, different code paths, and different symptoms; one is a test failure for image generation, the other a flash-attention generation bug." }, { "left": "issue:43065 \u2014 Dummy `nn.Conv2d` in `Sam3PixelDecoder`", "right": "issue:44610 \u2014 [BUG] OmDet-Turbo processor produces 640px inputs but the model expects 224px", "accept": false, "reason": "These concern different multimodal components and different mismatches; they do not appear to be the same fixable defect." }, { "left": "issue:41720 \u2014 Qwen3 with auto device mapping fails due to cudaErrorAssert on A800", "right": "issue:45245 \u2014 RuntimeError: number of categories cannot exceed 2^24", "accept": false, "reason": "A CUDA assert during device mapping is unrelated to a category-count limit error." }, { "left": "issue:44464 \u2014 Chunked generation produces inconsistent outputs when using compiled forward", "right": "issue:44871 \u2014 [Gemma-3] Inconsistent eos_token_id configuration: tokenizer has single value (1) but model.config has list [1, 106]", "accept": false, "reason": "One is a compiled-forward/chunked-generation correctness issue; the other is a configuration mismatch in eos token IDs." }, { "left": "issue:41720 \u2014 Qwen3 with auto device mapping fails due to cudaErrorAssert on A800", "right": "issue:43575 \u2014 Load `Qwen2-57B-A14B-Instruct` with tp lead to OOM", "accept": false, "reason": "Both involve large-model loading, but the concrete problem is different: device-mapping CUDA assert versus tensor-parallel OOM." 
}, { "left": "issue:32090 \u2014 [Error] with Trainer: TypeError: Unsupported types () passed to `_gpu_broadcast_one`.", "right": "issue:35141 \u2014 resizing token embeddings causes output embedding to be reinitialized in `post_init` when `tie_word_embedding` is False", "accept": false, "reason": "A distributed training broadcast TypeError is unrelated to embedding reinitialization after resize." }, { "left": "issue:43827 \u2014 Summarization/Translation docs still reference pipeline() after v5 pipeline removals", "right": "issue:43994 \u2014 google/siglip2-base-patch16-224 produces nonsensical results with AutoModel and pipeline", "accept": false, "reason": "The first is stale documentation; the second is a runtime/model-behavior bug. They do not share the same underlying change or fix." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 81, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:43335", "issue:43388", "issue:43540", "issue:43582", "issue:43650", "issue:43761", "issue:43976", "issue:44190", "issue:44220", "issue:44355", "issue:44534", "issue:44734", "issue:44743", "issue:44855", "issue:44913", "issue:44964", "issue:45127", "issue:45357" ], "soft_pairs": [ "issue:44534|issue:44913", "issue:43388|issue:43582", "issue:43650|issue:44220", "issue:45127|issue:45357", "issue:44190|issue:44964", "issue:43761|issue:44743", "issue:44355|issue:44855", "issue:43335|issue:44855", "issue:43540|issue:44734", "issue:43976|issue:44855" ], "prepared_review_unit_hash": "f21482d50ceb61c2f9e378f4a54d6c1477c3f5a4faa96f525ada2789b61bcf5f", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, 
"serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13524, "estimated_input_tokens": 3381, "estimated_eval_tokens": 7018 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is heterogeneous: the issues span unrelated bugs in configs, data loading, serving, model serialization, and Python-version-specific failures. None of the soft pairs look like the same underlying problem, so no duplicate merge is warranted.", "confidence": 0.97, "canonical_issue_reason": "No single canonical issue fits the cluster; the items describe distinct bugs with different code paths, models, and failure modes.", "canonical_pr_reason": null, "best_issue_reason": "No global best issue for deduplication purposes; none of the issues subsumes the others or serves as a representative duplicate target.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44534", "right": "issue:44913", "accept": false, "reason": "Different problems: one is about non-persistent buffers becoming junk, the other about GPTNeoXConfig losing rotary_pct on reload." }, { "left": "issue:43388", "right": "issue:43582", "accept": false, "reason": "Unrelated subsystems and failures: metric gathering vs Apple Silicon allocator warmup TypeError." }, { "left": "issue:43650", "right": "issue:44220", "accept": false, "reason": "Both are audio/data-related at a broad level, but the reported bugs are different: a generic data placeholder issue vs _torch_extract_fbank_features() behavior." }, { "left": "issue:45127", "right": "issue:45357", "accept": false, "reason": "Both concern model persistence/changes, but one is about LoRA merging with extended vocab and tied embeddings; the other is about incorrect visual encoder keys in save_pretrained." 
}, { "left": "issue:44190", "right": "issue:44964", "accept": false, "reason": "Both are loading failures, but for different artifacts and causes: a local dataset in an example script vs a specific multimodal model with latest transformers." }, { "left": "issue:43761", "right": "issue:44743", "accept": false, "reason": "Different concrete bugs: CLIPVisionModel not returning hidden states vs recurrent states resetting with cache and seq_len>1." }, { "left": "issue:44355", "right": "issue:44855", "accept": false, "reason": "Both are import/runtime errors, but one concerns compiled Python files generally while the other is a Python 3.13 torch.jit parsing/IndentationError regression." }, { "left": "issue:43335", "right": "issue:44855", "accept": false, "reason": "Completely different failures: SwitchTransformers sparse layer creation vs DebertaV2Model import parsing on Python 3.13." }, { "left": "issue:43540", "right": "issue:44734", "accept": false, "reason": "Different model/runtime paths: Qwen3OmniMoe video input ValueError vs transformers serve KV-cache continuation indexing crash." }, { "left": "issue:43976", "right": "issue:44855", "accept": false, "reason": "Both are Python-version compatibility issues, but the specific breakages differ: one claims 5.1.0 fails on Python 3.9+, the other is a Python 3.13 torch.jit parsing problem." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 82, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36246", "issue:39692", "issue:41628", "issue:41950", "issue:43116", "issue:43295", "issue:43335", "issue:43404", "issue:43644", "issue:44016", "issue:44077", "issue:44079", "issue:44190", "issue:44496", "issue:44913", "issue:45216", "issue:45276", "issue:45372" ], "soft_pairs": [ "issue:44496|issue:45372", "issue:43644|issue:44913", "issue:43116|issue:44190", "issue:44077|issue:44079", "issue:41950|issue:44016", "issue:43295|issue:45276", "issue:36246|issue:41628", "issue:43404|issue:45216", "issue:43335|issue:44190", "issue:39692|issue:44190" ], "prepared_review_unit_hash": "2829e52108d65a1195e0fa348a8e6e6949ea96a659175f50727c167a225051f6", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13735, "estimated_input_tokens": 3434, "estimated_eval_tokens": 7124 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is heterogeneous: it mixes unrelated import failures, model/config regressions, docs/script problems, and pipeline bugs. None of the soft pairs looks like the same underlying issue, so no duplicate merge is justified.", "confidence": 0.31, "canonical_issue_reason": "There is no strong canonical duplicate here because the items span different code paths and failure modes. 
If one issue must be used as a representative anchor, 45372 is the most actionable recent regression with concrete impact and reproduction details.", "canonical_pr_reason": null, "best_issue_reason": "45372 is the best single representative issue in this set because it is recent, clearly user-facing, and describes a concrete regression with a specific import failure. That said, it is not a duplicate of the others.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44496", "right": "issue:45372", "accept": false, "reason": "Both are loading-related, but one is an unrecognized model/config problem and the other is a Gemma 4 processor import dependency breakage. Different root causes and fixes." }, { "left": "issue:43644", "right": "issue:44913", "accept": false, "reason": "Both concern persistence/reload behavior, but one is about junk-filled non-persistent buffers and the other is about rotary_pct not round-tripping in config. Distinct bugs." }, { "left": "issue:43116", "right": "issue:44190", "accept": false, "reason": "Different example scripts and different failures: multi-label classification returns empty results vs local dataset loading in image classification. Not the same underlying bug." }, { "left": "issue:44077", "right": "issue:44079", "accept": false, "reason": "PatchTSMixer post_init validation and ModelOutput key assignment are unrelated implementation bugs." }, { "left": "issue:41950", "right": "issue:44016", "accept": false, "reason": "A pipeline processor-selection bug and a notebook syntax error are unrelated." }, { "left": "issue:43295", "right": "issue:45276", "accept": false, "reason": "One is a regression around processor.tokenizer/image passing, the other is resize_token_embeddings not updating Gemma 4 embedding/output weights. Different code paths." 
}, { "left": "issue:36246", "right": "issue:41628", "accept": false, "reason": "Both are import errors, but one is for a specific Qwen2.5-VL processor class and the other is for the top-level AutoImageProcessor API. Not the same bug." }, { "left": "issue:43404", "right": "issue:45216", "accept": false, "reason": "Mistral3 lm_head tying and Qwen3.5 save_pretrained checkpoint serialization are separate model-specific regressions." }, { "left": "issue:43335", "right": "issue:44190", "accept": false, "reason": "SwitchTransformers sparse-layer construction and local dataset loading in an example script are unrelated." }, { "left": "issue:39692", "right": "issue:44190", "accept": false, "reason": "SigLIP2 docs example errors and image-classification dataset loading are different issues, with different repros and fixes." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 83, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36246", "issue:38175", "issue:38617", "issue:42175", "issue:43065", "issue:43479", "issue:44016", "issue:44186", "issue:44220", "issue:44246", "issue:44279", "issue:44683", "issue:44734", "issue:44871", "issue:45003", "issue:45245", "issue:45375", "issue:45446" ], "soft_pairs": [ "issue:36246|issue:38617", "issue:44871|issue:45245", "issue:38175|issue:43065", "issue:43479|issue:45375", "issue:44186|issue:44734", "issue:42175|issue:44279", "issue:44016|issue:44220", "issue:44246|issue:45003", "issue:44683|issue:45446" ], "prepared_review_unit_hash": "fc374c088bbb287fd9d747f3eda15b8374172a223fb3d67355a869e7ce6178ba", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, 
"serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13107, "estimated_input_tokens": 3277, "estimated_eval_tokens": 6810 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "No soft pair appears to be the same underlying bug or change; the set is a mix of unrelated import errors, model/config issues, tokenizer/server crashes, and performance/compatibility reports. I would not merge any of the candidate pairs.", "confidence": 0.93, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:36246", "right": "issue:38617", "accept": false, "reason": "Both are ImportError reports, but they reference different missing symbols in different modules and likely separate regressions." }, { "left": "issue:44871", "right": "issue:45245", "accept": false, "reason": "One is an eos_token_id config inconsistency; the other is a categorical cardinality runtime limit. Different failure modes and code paths." }, { "left": "issue:38175", "right": "issue:43065", "accept": false, "reason": "Different models and different bugs: zero probabilities in SigLIP2 vs a dummy Conv2d in Sam3 pixel decoder." }, { "left": "issue:43479", "right": "issue:45375", "accept": false, "reason": "Both concern config handling, but one is default-init behavior for None while the other is a missing field being dropped by strict serialization; not the same concrete bug." }, { "left": "issue:44186", "right": "issue:44734", "accept": false, "reason": "Tokenizer crashes on NER/padding versus server KV-cache continuation tensor indexing; unrelated code paths." 
}, { "left": "issue:42175", "right": "issue:44279", "accept": false, "reason": "The first is specifically about TensorFlow not being included with the torch extra; the second is a vague dependency issue without the same concrete symptom or fix." }, { "left": "issue:44016", "right": "issue:44220", "accept": false, "reason": "A notebook syntax error and an audio feature-extraction bug are unrelated." }, { "left": "issue:44246", "right": "issue:45003", "accept": false, "reason": "Both touch import behavior, but one reports slow imports while the other is about unsafe sys.modules access; not enough to treat as the same bug." }, { "left": "issue:44683", "right": "issue:45446", "accept": false, "reason": "Both mention flex_attention/PyTorch versioning, but they target different problems: compiled flex_attention failing on torch>=2.9 vs a version check for AuxRequest import." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 84, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:42907", "issue:43295", "issue:43299", "issue:43441", "issue:43643", "issue:43673", "issue:43854", "issue:44016", "issue:44062", "issue:44112", "issue:44336", "issue:44466", "issue:44496", "issue:44792", "issue:44913", "issue:44964", "issue:45216", "issue:45335" ], "soft_pairs": [ "issue:44466|issue:44913", "issue:44112|issue:44792", "issue:42907|issue:43441", "issue:43854|issue:44792", "issue:43299|issue:45216", "issue:43295|issue:45335", "issue:44016|issue:44062", "issue:43643|issue:44496", "issue:43673|issue:44336", "issue:44112|issue:44964" ], "prepared_review_unit_hash": "18f941e82f7126c87094ff37842efb6494066ca29c48b3838e4459ff006af737", "component_budget": 
{ "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13536, "estimated_input_tokens": 3384, "estimated_eval_tokens": 7024 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is not a duplicate set: the issues span unrelated model families and distinct failure modes (loading, saving, config, tests, and notebook/doc problems). No soft edge pair looks like the same underlying bug or change.", "confidence": 0.94, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44466", "right": "issue:44913", "accept": false, "reason": "Both mention serialization/reload behavior, but one is about tied lm_head.weight saving and the other about GPTNeoX rotary_pct being dropped on reload; different code paths and symptoms." }, { "left": "issue:44112", "right": "issue:44792", "accept": false, "reason": "Both are test failures, but they target different models and failures (GraniteSpeech device override CI vs janus image generation). Not the same bug." }, { "left": "issue:42907", "right": "issue:43441", "accept": false, "reason": "Same model family, but one is about saving dequantized Ministral/Devstral models and the other about FlashAttention inference failure in Ministral-3; distinct problems." }, { "left": "issue:43854", "right": "issue:44792", "accept": false, "reason": "Different models and scenarios: GLM-4.7-Flash loading in unit tests vs janus image-generation test failure." 
}, { "left": "issue:43299", "right": "issue:45216", "accept": false, "reason": "Both are version regressions in Qwen models, but one is Qwen3VL MoE loading and the other is Qwen3.5 save_pretrained checkpoint corruption; not the same concrete bug." }, { "left": "issue:43295", "right": "issue:45335", "accept": false, "reason": "Different behaviors and models: processor.tokenizer/image-handling regression vs t5gemma resize_token_embeddings not updating decoder.embed_tokens." }, { "left": "issue:44016", "right": "issue:44062", "accept": false, "reason": "A notebook syntax error and an AddedToken keyword-argument TypeError are unrelated." }, { "left": "issue:43643", "right": "issue:44496", "accept": false, "reason": "Both involve config loading, but one is missing fields with trust_remote_code and the other is an unrecognized model lacking model_type; different root causes." }, { "left": "issue:43673", "right": "issue:44336", "accept": false, "reason": "Generation cache/chunked_prefill regression is unrelated to ANSI codes emitted by loading_report outside a terminal." }, { "left": "issue:44112", "right": "issue:44964", "accept": false, "reason": "GraniteSpeech CI test flake and Phi-4-multimodal loading failure are different models and failure modes." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 85, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:43299", "issue:43526", "issue:43824", "issue:43901", "issue:43976", "issue:44075", "issue:44242", "issue:44279", "issue:44368", "issue:44462", "issue:44496", "issue:44933", "issue:44987", "issue:45042", "issue:45081", "issue:45200", "issue:45357" ], "soft_pairs": [ "issue:43976|issue:44987", "issue:43824|issue:44933", "issue:44075|issue:44368", "issue:43299|issue:45357", "issue:43976|issue:44279", "issue:44496|issue:45042", "issue:44242|issue:45200", "issue:43526|issue:43901", "issue:44462|issue:45081" ], "prepared_review_unit_hash": "c47968d6014fb6a86d87bc88f59c2efe4f0de8a6389e9c7fa7345ab5fe371270", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12652, "estimated_input_tokens": 3163, "estimated_eval_tokens": 6582 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "All soft pairs look like false positives: they share broad themes like loading, tokenizers, or Qwen, but each points to a different bug, model family, or code path. 
No pull requests are present, and there is no single canonical issue among these unrelated reports.", "confidence": 0.93, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43976", "right": "issue:44987", "accept": false, "reason": "One is a Python/version-compatibility failure for Transformers 5.1.0; the other is a model-loading regression for physical-intelligence/fast. Different failure modes and subsystems." }, { "left": "issue:43824", "right": "issue:44933", "accept": false, "reason": "Both involve import errors, but for different symbols and modules: Qwen2.5-VL export vs a non-existent image_utils import. Not the same underlying bug." }, { "left": "issue:44075", "right": "issue:44368", "accept": false, "reason": "SGD optimizer arguments not being applied is unrelated to the Qwen3.5 tie_word_embeddings warning during LoRA fine-tuning." }, { "left": "issue:43299", "right": "issue:45357", "accept": false, "reason": "Both mention Qwen models, but one is a load-time MoE model breakage and the other is a save_pretrained key regression for visual encoder state. Different code paths." }, { "left": "issue:43976", "right": "issue:44279", "accept": false, "reason": "Generic dependency trouble is too broad; these describe different issues, one about Python support and one about a transformers dependency failure." }, { "left": "issue:44496", "right": "issue:45042", "accept": false, "reason": "Unrecognized model/config.json handling is unrelated to PIL image processors incorrectly requiring torchvision." }, { "left": "issue:44242", "right": "issue:45200", "accept": false, "reason": "Load-balancing loss gating with router logits is a model-loss computation bug, while mm_token_type_ids defaults for text-only fine-tuning is a separate input-default issue." 
}, { "left": "issue:43526", "right": "issue:43901", "accept": false, "reason": "A BeitImageProcessorFast label-reduction bug is unrelated to a documentation mismatch about return_all_scores in TextClassificationPipeline." }, { "left": "issue:44462", "right": "issue:45081", "accept": false, "reason": "Both are tokenizer-related, but one is AutoTokenizer ignoring tokenizer.json and the other is a crash in fix_mistral_regex during Mistral tokenizer patching. Different failures." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 86, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:41950", "issue:43577", "issue:43650", "issue:43854", "issue:43901", "issue:44279", "issue:44297", "issue:44351", "issue:44462", "issue:44479", "issue:44496", "issue:44521", "issue:44964", "issue:45216", "issue:45245", "issue:45278", "issue:45325", "issue:45405" ], "soft_pairs": [ "issue:44521|issue:45325", "issue:43577|issue:45216", "issue:43901|issue:44496", "issue:44351|issue:45278", "issue:43854|issue:44964", "issue:44297|issue:44462", "issue:44479|issue:45245", "issue:44279|issue:45405", "issue:41950|issue:43650" ], "prepared_review_unit_hash": "a94bf2d165a4f56e6f2bfb20dcd6c7f526003a70c9a5466d6e5112014086bbc8", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13080, "estimated_input_tokens": 3270, "estimated_eval_tokens": 6796 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": 
true, "analyst_result": { "summary": "The cluster is heterogeneous: it mixes unrelated bug reports about multimodal chat templates, tokenizer serialization, model loading/import errors, and versioning regressions. None of the soft pairs looks like a safe duplicate merge.", "confidence": 0.86, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": "issue:44521 is the strongest standalone issue in the set because it has the most inbound references and discussion activity, and it describes a concrete, reproducible multimodal bug; however, it does not represent the rest of the cluster well enough to serve as a true duplicate canonical.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44521", "right": "issue:45325", "accept": false, "reason": "Both involve multimodal/video behavior, but they target different failures in different code paths: assistant mask generation vs rope/position-id scaling. Not the same underlying bug." }, { "left": "issue:43577", "right": "issue:45216", "accept": false, "reason": "Different models and different defects: Blip2 dtype loading vs Qwen3.5 checkpoint saving. No shared concrete fix." }, { "left": "issue:43901", "right": "issue:44496", "accept": false, "reason": "Docs mismatch for TextClassificationPipeline is unrelated to an unrecognized-model/config loading error for Olmo-Hybrid-Instruct. Different subsystems and symptoms." }, { "left": "issue:44351", "right": "issue:45278", "accept": false, "reason": "Both mention import errors, but one is a specific missing symbol and the other is a broad post-upgrade breakage report. Too generic to treat as the same bug." }, { "left": "issue:43854", "right": "issue:44964", "accept": false, "reason": "Both are model-load failures, but for different models and likely different compatibility issues. Not enough evidence of one shared root cause." 
}, { "left": "issue:44297", "right": "issue:44462", "accept": false, "reason": "Tokenizer save_pretrained writing the wrong tokenizer_class is not the same as AutoTokenizer ignoring tokenizer.json from a repo. Related area, but different concrete bugs." }, { "left": "issue:44479", "right": "issue:45245", "accept": false, "reason": "A video-input regression for specific Qwen VL models is unrelated to a categories-limit runtime error. Different failure mode and code path." }, { "left": "issue:44279", "right": "issue:45405", "accept": false, "reason": "A vague dependency issue is not the same as a specific MIN_PEFT_VERSION bump to an unreleased PyPI version. No clear duplicate relationship." }, { "left": "issue:41950", "right": "issue:43650", "accept": false, "reason": "Video-classification pipeline processor lookup and a vague 'ADD THE DATA' report do not describe the same bug or change." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 87, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:38175", "issue:39692", "issue:41950", "issue:43295", "issue:43299", "issue:43479", "issue:43525", "issue:43854", "issue:43901", "issue:43976", "issue:44462", "issue:44496", "issue:44617", "issue:44779", "issue:44877", "issue:45020", "issue:45405" ], "soft_pairs": [ "issue:41950|issue:45020", "issue:44462|issue:44779", "issue:43976|issue:45405", "issue:43479|issue:43525", "issue:38175|issue:44617", "issue:44496|issue:44877", "issue:43295|issue:44779", "issue:43299|issue:43854", "issue:39692|issue:43901" ], "prepared_review_unit_hash": "00d290caf864b237cbae5eab55898b4a16fd9a264f612ffe96d94a31b199a570", "component_budget": { "node_count": 353, 
"item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12486, "estimated_input_tokens": 3122, "estimated_eval_tokens": 6500 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This set is not a true duplicate cluster; it contains several unrelated Transformers issues that only share broad thematic similarity (tokenizers, model loading, configs, docs, regressions). None of the soft edges look like the same underlying bug/change.", "confidence": 0.91, "canonical_issue_reason": "No single issue cleanly represents the set because the items span distinct root causes and subsystems; treating any one as canonical would incorrectly merge unrelated reports.", "canonical_pr_reason": null, "best_issue_reason": "No issue is a good global canonical candidate. If forced, the broadest/highest-level report is the remote_code regression, but it still does not subsume the other bugs.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:41950", "right": "issue:45020", "accept": false, "reason": "Different failures: one is a video-classification processor lookup bug, the other is a broad remote_code regression. Same general area, not the same concrete defect." }, { "left": "issue:44462", "right": "issue:44779", "accept": false, "reason": "Both concern tokenizers, but one is about AutoTokenizer ignoring repository tokenizer.json and the other is Deepseek tokenization correctness in v5; different code paths and likely different fixes." }, { "left": "issue:43976", "right": "issue:45405", "accept": false, "reason": "Unrelated problems: Python version compatibility vs an unreleased PEFT version bump. No shared underlying bug." 
}, { "left": "issue:43479", "right": "issue:43525", "accept": false, "reason": "Both are config-related, but one is multimodal default sub-config initialization and the other is a missing pad_token_id attribute on Llama4Config; different root causes." }, { "left": "issue:38175", "right": "issue:44617", "accept": false, "reason": "Completely different symptoms and subsystems: SigLIP2 zero probabilities vs SAM3Video CUDA OOM." }, { "left": "issue:44496", "right": "issue:44877", "accept": false, "reason": "Both involve loading models from config, but one is missing model_type and the other is strict config handling for granite_speech; not the same bug." }, { "left": "issue:43295", "right": "issue:44779", "accept": false, "reason": "Processor/tokenizer regression vs Deepseek tokenizer output bug. They touch tokenizers but not the same concrete failure." }, { "left": "issue:43299", "right": "issue:43854", "accept": false, "reason": "Different models and load failures (Qwen3VL MoE vs GLM-4.7-Flash). Similar class of issue, not the same underlying change." }, { "left": "issue:39692", "right": "issue:43901", "accept": false, "reason": "Both mention docs, but one is a SigLIP2 example/model mismatch with quantization failure and the other is TextClassificationPipeline return_all_scores docs drift. Different topics and fixes." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 88, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:42994", "issue:43065", "issue:43317", "issue:43441", "issue:43450", "issue:43550", "issue:43650", "issue:43723", "issue:44016", "issue:44162", "issue:44261", "issue:44263", "issue:44279", "issue:44291", "issue:44683", "issue:44898", "issue:44964", "issue:45290" ], "soft_pairs": [ "issue:43441|issue:44683", "issue:43723|issue:44162", "issue:43450|issue:44279", "issue:44016|issue:44291", "issue:42994|issue:43317", "issue:43550|issue:44964", "issue:43650|issue:45290", "issue:43065|issue:44898", "issue:44261|issue:44263" ], "prepared_review_unit_hash": "96de49bec7aa3d69a8f6bee8e42e9e7fb1a2c80d106048284dcf1450a4d3a9ab", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 12939, "estimated_input_tokens": 3235, "estimated_eval_tokens": 6726 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This set does not look like a single duplicate cluster. 
The soft-similarity pairs mostly share only broad transformer/model-loading themes, but they involve different models, code paths, and failure modes, so none should be merged as duplicates.", "confidence": 0.68, "canonical_issue_reason": "Issue 44291 is the most concrete and triage-ready report: it has a clear exception, a version boundary (transformers >= 5.0.0rc0), and a specific loading path (`init_empty_weights`) that makes it a good anchor if one issue must represent this loose set.", "canonical_pr_reason": null, "best_issue_reason": "Issue 44291 is the strongest representative overall because it is narrowly scoped, reproducible, and clearly tied to a specific regression rather than a vague or model-specific failure.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43441", "right": "issue:44683", "accept": false, "reason": "Both involve attention backends, but one is a Ministral-3 FlashAttention failure in Transformers v5 RC and the other is a compiled flex_attention failure on torch >= 2.9. Different backend, version trigger, and code path." }, { "left": "issue:43723", "right": "issue:44162", "accept": false, "reason": "Tokenizer loading in `AutoTokenizer.from_pretrained` is unrelated to the ESM2 workflow breakage; these point to different components and symptoms." }, { "left": "issue:43450", "right": "issue:44279", "accept": false, "reason": "A video processor shape bug and a vague dependency issue are not the same underlying problem; there is no shared concrete code path." }, { "left": "issue:44016", "right": "issue:44291", "accept": false, "reason": "Notebook syntax error is unrelated to the `init_empty_weights` TypeError when loading a model in v5 RC." }, { "left": "issue:42994", "right": "issue:43317", "accept": false, "reason": "Both are quantization-adjacent, but one is about saving a quantized model and the other about loading a dequantized model with `device_map=auto` and CPU/GPU offload; different operations and failure modes." 
}, { "left": "issue:43550", "right": "issue:44964", "accept": false, "reason": "These are different model-specific failures: Bamba-9B-v2 with torch.compile/SDPA versus Phi-4 multimodal loading on latest transformers." }, { "left": "issue:43650", "right": "issue:45290", "accept": false, "reason": "`ADD THE DATA` is not a technical bug report and does not match the chat-template tool-call crash." }, { "left": "issue:43065", "right": "issue:44898", "accept": false, "reason": "Dummy Conv2d in Sam3PixelDecoder and Perceiver image classification interpolation are different model implementations and different bug classes." }, { "left": "issue:44261", "right": "issue:44263", "accept": false, "reason": "A missing `rms_norm_eps` precision/config issue is unrelated to the `torch.split()` return-value bug in GlmMoeDsaIndexer; only superficial ML terminology overlaps." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 89, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:42175", "issue:43122", "issue:43262", "issue:43352", "issue:43650", "issue:43673", "issue:43761", "issue:43976", "issue:44466", "issue:44521", "issue:44623", "issue:44671", "issue:44749", "issue:44779", "issue:45250", "issue:45276", "issue:45397" ], "soft_pairs": [ "issue:43262|issue:44521", "issue:44466|issue:45276", "issue:44671|issue:44779", "issue:43122|issue:43650", "issue:43673|issue:43761", "issue:43352|issue:45250", "issue:44623|issue:44749", "issue:44623|issue:45397", "issue:42175|issue:43976" ], "prepared_review_unit_hash": "b2850710b49cbe846b3d59db549dccbc8a0bf3308e4517ce6d1136fa3ceb5e36", "component_budget": { "node_count": 353, "item_count": 353, 
"soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12491, "estimated_input_tokens": 3123, "estimated_eval_tokens": 6502 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is mostly heterogeneous; the soft pairs share surface terms but do not look like the same underlying bug/change. No PRs are present. The strongest single issue candidate is the open `apply_chat_template` multimodal bug, but it is not a duplicate of the other items.", "confidence": 0.88, "canonical_issue_reason": "`issue:44521` is the most central and actionable issue in the set: it is concrete, open, well-discussed, and has the most inbound references.", "canonical_pr_reason": null, "best_issue_reason": "`issue:44521` is the best representative because it is a specific regression with clear reproduction and the strongest evidence/activity among the issues.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43262", "right": "issue:44521", "accept": false, "reason": "Both involve `apply_chat_template`, but one is about audio sampling-rate defaults and the other about all-zero assistant masks for multimodal inputs; different bugs and code paths." }, { "left": "issue:44466", "right": "issue:45276", "accept": false, "reason": "Both touch weights/embeddings, but one is inconsistent `lm_head.weight` serialization and the other is `resize_token_embeddings` not updating Gemma-4 embedding/output layers; not the same defect." }, { "left": "issue:44671", "right": "issue:44779", "accept": false, "reason": "Both are v5 tokenization/model-output regressions, but CamemBERT masked-LM predictions and DeepSeek tokenizer output are separate model-specific issues." 
}, { "left": "issue:43122", "right": "issue:43650", "accept": false, "reason": "`Different tokenization...` and `ADD THE DATA` do not describe the same bug or change; no meaningful overlap." }, { "left": "issue:43673", "right": "issue:43761", "accept": false, "reason": "`GenerationMixin` cache missing during `chunked_prefill` and CLIPVision hidden-states being `None` are unrelated code paths and symptoms." }, { "left": "issue:43352", "right": "issue:45250", "accept": false, "reason": "One is a model-specific Flash Attention 2 support error, the other is a generic Flash Attention 2 issue; too broad and not the same concrete fix." }, { "left": "issue:44623", "right": "issue:44749", "accept": false, "reason": "`processor.save_pretrained(...) missing files` and a slowdown when filtering data after upgrade are unrelated problems." }, { "left": "issue:44623", "right": "issue:45397", "accept": false, "reason": "Missing files in `processor.save_pretrained` is a packaging/persistence bug; `gemma-4 zero3 from_pretrained` is a different loading/configuration issue." }, { "left": "issue:42175", "right": "issue:43976", "accept": false, "reason": "TensorFlow omitted from the `torch` extra and Python 3.9/3.10 compatibility are different installation/runtime problems." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 90, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:41628", "issue:41950", "issue:43295", "issue:43475", "issue:43525", "issue:43824", "issue:43881", "issue:43901", "issue:43976", "issue:43994", "issue:44016", "issue:44188", "issue:44220", "issue:44246", "issue:44336", "issue:44589", "issue:44617", "issue:45362" ], "soft_pairs": [ "issue:43901|issue:43994", "issue:43295|issue:44617", "issue:43824|issue:43976", "issue:44246|issue:44617", "issue:43475|issue:44016", "issue:43525|issue:43881", "issue:44589|issue:45362", "issue:44188|issue:44336", "issue:41950|issue:44220", "issue:41628|issue:43976", "issue:43901|issue:44246" ], "prepared_review_unit_hash": "2b4222f450fa96735905ac6461c3265826bf6b7f5ffc7773454a3d1d88685bc3", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 11, "serialized_chars": 13535, "estimated_input_tokens": 3384, "estimated_eval_tokens": 7024 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is heterogeneous: import errors, model-loading issues, pipeline regressions, docs bugs, kernel divergence, and notebook/runtime errors. 
None of the soft pairs look like the same underlying bug or a plausible single PR fix.", "confidence": 0.94, "canonical_issue_reason": null, "canonical_pr_reason": "No pull requests are present in this cluster.", "best_issue_reason": "No single issue cleanly represents the cluster because the items span unrelated failure modes and subsystems.", "best_pr_reason": "No pull requests are present in this cluster.", "soft_edge_verdicts": [ { "left": "issue:43901", "right": "issue:43994", "accept": false, "reason": "Docs about return_all_scores behavior do not match a SigLIP2 model/pipeline correctness issue." }, { "left": "issue:43295", "right": "issue:44617", "accept": false, "reason": "A processor.tokenizer regression is unrelated to a Sam3Video CUDA OOM report." }, { "left": "issue:43824", "right": "issue:43976", "accept": false, "reason": "One is a missing model import; the other is a Python version compatibility problem." }, { "left": "issue:44246", "right": "issue:44617", "accept": false, "reason": "Import latency is not the same as a video model out-of-memory failure." }, { "left": "issue:43475", "right": "issue:44016", "accept": false, "reason": "A missing SAM3 attribute and a notebook syntax error are unrelated issues." }, { "left": "issue:43525", "right": "issue:43881", "accept": false, "reason": "Missing Llama4Config.pad_token_id is a different code-path than glm-4v-9b loading failure." }, { "left": "issue:44589", "right": "issue:45362", "accept": false, "reason": "A storage-object lookup error is not the same as Qwen3.5 chat crashing." }, { "left": "issue:44188", "right": "issue:44336", "accept": false, "reason": "torch.compile attention-kernel divergence is unrelated to ANSI code emission in loading_report." }, { "left": "issue:41950", "right": "issue:44220", "accept": false, "reason": "Video-classification image-processor lookup and _torch_extract_fbank_features() involve different pipeline paths." 
}, { "left": "issue:41628", "right": "issue:43976", "accept": false, "reason": "An AutoImageProcessor import error is not the same as a package Python-version support issue." }, { "left": "issue:43901", "right": "issue:44246", "accept": false, "reason": "Pipeline documentation mismatch and slow import behavior are distinct problems." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 91, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36246", "issue:42491", "issue:43299", "issue:43531", "issue:44246", "issue:44295", "issue:44297", "issue:44336", "issue:44351", "issue:44361", "issue:44373", "issue:44462", "issue:44496", "issue:44749", "issue:45356", "issue:45362", "issue:45397" ], "soft_pairs": [ "issue:44297|issue:45397", "issue:44462|issue:45356", "issue:44361|issue:44749", "issue:44336|issue:44373", "issue:42491|issue:43299", "issue:44246|issue:44496", "issue:36246|issue:44351", "issue:44295|issue:45397", "issue:43531|issue:45362" ], "prepared_review_unit_hash": "b321dcaddb5bf4aa99fb56c9296140fe2c797431160416f62d00894199c6a555", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12585, "estimated_input_tokens": 3147, "estimated_eval_tokens": 6550 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The set is heterogeneous; none of the proposed soft edges look like true duplicates. 
Most pairs share only a broad model/tokenizer/import theme, but the concrete bugs and code paths differ. No PRs are present.", "confidence": 0.93, "canonical_issue_reason": "Issue 45356 is the strongest self-contained bug report in the group: it has a clear regression window, a specific affected model family, and a concrete tokenizer failure mode.", "canonical_pr_reason": null, "best_issue_reason": "45356 is the best representative issue because it is narrowly scoped, reproducible, and describes a specific regression rather than a vague symptom or unrelated maintenance issue.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44297", "right": "issue:45397", "accept": false, "reason": "Tokenizer_config class mismatch and Gemma-4 zero3 from_pretrained are different failures affecting different code paths." }, { "left": "issue:44462", "right": "issue:45356", "accept": false, "reason": "AutoTokenizer ignoring tokenizer.json is not the same bug as the Kimi-K2.5 codec/regex regression." }, { "left": "issue:44361", "right": "issue:44749", "accept": false, "reason": "MLukeTokenizer AttributeError and slow data filtering after upgrade are unrelated problems." }, { "left": "issue:44336", "right": "issue:44373", "accept": false, "reason": "ANSI codes in loading_report and a wrong docstring for position_ids are unrelated documentation/logging issues." }, { "left": "issue:42491", "right": "issue:43299", "accept": false, "reason": "Both mention Qwen MoE, but one is LoRA compatibility across versions and the other is Qwen3VL MoE loading; not the same concrete bug." }, { "left": "issue:44246", "right": "issue:44496", "accept": false, "reason": "Import latency and missing model_type handling are different issues with different causes." }, { "left": "issue:36246", "right": "issue:44351", "accept": false, "reason": "Missing Qwen2_5_VLImageProcessor import and missing HybridCache import are distinct symbol/export problems." 
}, { "left": "issue:44295", "right": "issue:45397", "accept": false, "reason": "A position_ids buffer read error and a Gemma-4 zero3 from_pretrained crash do not share the same underlying failure." }, { "left": "issue:43531", "right": "issue:45362", "accept": false, "reason": "Sliding_window behavior in Qwen3-MoE is a different issue from Qwen3.5-35B chat crashes." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 92, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36246", "issue:43066", "issue:43526", "issue:43596", "issue:43673", "issue:43994", "issue:44162", "issue:44242", "issue:44336", "issue:44485", "issue:44561", "issue:44779", "issue:44857", "issue:44987", "issue:45003", "issue:45200", "issue:45276", "issue:45459" ], "soft_pairs": [ "issue:43526|issue:45003", "issue:43994|issue:45276", "issue:44336|issue:44485", "issue:43673|issue:44561", "issue:44242|issue:44857", "issue:44162|issue:45200", "issue:43066|issue:44779", "issue:44987|issue:45459", "issue:36246|issue:43596" ], "prepared_review_unit_hash": "eb9bd38da2afb356e804e283eacdaf1aa177d36ee9ae9ec9b09fe1b6bb2b527f", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13180, "estimated_input_tokens": 3295, "estimated_eval_tokens": 6846 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The cluster is heterogeneous: the soft pairs share only broad Transformer/v5 or 
model-loading vocabulary, but they describe distinct bugs, models, and code paths. I would not merge any of the soft pairs as duplicates.", "confidence": 0.93, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": "issue:44987 is the clearest standalone user-facing regression in this set: it has a concrete version boundary (>=5.1.0), a specific failure mode, and a reproducible loading path. It is the best representative of the broader \"v5 loading breakage\" theme, but not a duplicate of the other issues.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43526", "right": "issue:45003", "accept": false, "reason": "Different bugs: one is BeitImageProcessorFast returning the wrong label reduction result, the other is unsafe sys.modules access in modeling_utils. No shared code path or symptom." }, { "left": "issue:43994", "right": "issue:45276", "accept": false, "reason": "Different model families and failures: SigLIP produces bad outputs via AutoModel/pipeline, while Gemma 4 resize_token_embeddings does not propagate to per-layer/output embeddings." }, { "left": "issue:44336", "right": "issue:44485", "accept": false, "reason": "ANSI escape codes in loading_report when not attached to a terminal are unrelated to GLM-5 RoPE implementation discussion." }, { "left": "issue:43673", "right": "issue:44561", "accept": false, "reason": "Both are v5 regressions, but the concrete problems differ: missing GenerationMixin cache during chunked_prefill versus removal of is_torch_fx_available breaking trust_remote_code models." }, { "left": "issue:44242", "right": "issue:44857", "accept": false, "reason": "Different subsystems and symptoms: MoE load-balancing loss gating on output_router_logits versus a LwDetrImageLoss AMP/CUDA crash."
}, { "left": "issue:44162", "right": "issue:45200", "accept": false, "reason": "Unrelated model-specific issues: ESM2 is broken broadly, while Gemma 4 requires mm_token_type_ids defaulting for text-only fine-tuning." }, { "left": "issue:43066", "right": "issue:44779", "accept": false, "reason": "Both mention tokenizers and v5, but one is about decoder type selection in Transformers v5 and the other is Deepseek tokenizer output corruption; different model-specific bugs." }, { "left": "issue:44987", "right": "issue:45459", "accept": false, "reason": "One is a loading failure for physical-intelligence/fast on transformers>=5.1.0; the other is tokenizer error handling that hides protobuf-related decode errors. Different failure points and remedies." }, { "left": "issue:36246", "right": "issue:43596", "accept": false, "reason": "Completely unrelated: Qwen2_5_VLImageProcessor import error versus a deepspeed zero3/BertModel index error." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 93, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:41628", "issue:41950", "issue:42886", "issue:43232", "issue:43441", "issue:43540", "issue:44246", "issue:44295", "issue:44336", "issue:44749", "issue:44779", "issue:44871", "issue:44877", "issue:44995", "issue:45200", "issue:45229", "issue:45292", "issue:45375" ], "soft_pairs": [ "issue:44877|issue:45375", "issue:43441|issue:43540", "issue:42886|issue:45229", "issue:41628|issue:41950", "issue:44246|issue:44871", "issue:44995|issue:45200", "issue:44779|issue:45292", "issue:44295|issue:44749", "issue:43232|issue:44336" ], "prepared_review_unit_hash": 
"4530a47770da07c9b3520eae6e6c34dccc697c2ef410c1559e9e496ed0c43bbf", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13022, "estimated_input_tokens": 3256, "estimated_eval_tokens": 6768 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The soft pairs are thematically similar but each points to a different failure mode, model, or subsystem. None look like true duplicate bugs or mergeable PR-equivalents.", "confidence": 0.82, "canonical_issue_reason": "If one issue must represent the cluster, #44779 is the clearest standalone regression report: it names a specific model family, a version boundary (v4 to v5), and a concrete correctness failure in tokenizer behavior.", "canonical_pr_reason": null, "best_issue_reason": "#44779 is the most concrete and reproducible issue in the set, with a narrowly scoped behavior regression and clear expected-vs-actual semantics.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44877", "right": "issue:45375", "accept": false, "reason": "Both concern config strictness, but they hit different models and different missing-field symptoms; this looks like related theme, not the same underlying bug." }, { "left": "issue:43441", "right": "issue:43540", "accept": false, "reason": "Different code paths and model families: one is FlashAttention failure for Ministral-3, the other is video input handling in Qwen3OmniMoe." }, { "left": "issue:42886", "right": "issue:45229", "accept": false, "reason": "Tokenizer offline cache loading and Gemma4 multi-GPU CUDA OOM are unrelated failures with no shared concrete bug." 
}, { "left": "issue:41628", "right": "issue:41950", "accept": false, "reason": "One is an import/API exposure problem, the other is a video-classification pipeline processor-selection bug; similar terminology, different underlying issue." }, { "left": "issue:44246", "right": "issue:44871", "accept": false, "reason": "Import-time slowness and Gemma eos_token_id inconsistency are separate concerns with no common code-path bug." }, { "left": "issue:44995", "right": "issue:45200", "accept": false, "reason": "A stale cache crash on second forward pass is not the same as missing default mm_token_type_ids for text-only fine-tuning." }, { "left": "issue:44779", "right": "issue:45292", "accept": false, "reason": "Tokenizer correctness regression and resize_token_embeddings not updating output embeddings are different tokenizer/model-state bugs." }, { "left": "issue:44295", "right": "issue:44749", "accept": false, "reason": "Position_ids buffer access error and a performance regression in data filtering are unrelated symptoms and fixes." }, { "left": "issue:43232", "right": "issue:44336", "accept": false, "reason": "Generation kwargs handling after sync_gpus and ANSI leakage in loading_report are entirely different subsystems." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 94, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:43065", "issue:43257", "issue:43262", "issue:43295", "issue:43408", "issue:43475", "issue:43540", "issue:43901", "issue:44060", "issue:44077", "issue:44246", "issue:44496", "issue:44933", "issue:45003", "issue:45137", "issue:45292", "issue:45459" ], "soft_pairs": [ "issue:43901|issue:45003", "issue:43065|issue:43408", "issue:43262|issue:44246", "issue:43257|issue:45137", "issue:44496|issue:45003", "issue:43475|issue:43540", "issue:43295|issue:45292", "issue:44933|issue:45459", "issue:44060|issue:44077" ], "prepared_review_unit_hash": "610163cd1f96564331420b744525b108ca34a1a12173ec471281113244bf702f", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12533, "estimated_input_tokens": 3134, "estimated_eval_tokens": 6524 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The cluster appears heterogeneous: the soft-linked pairs share broad jargon or subsystem names, but each describes a different bug, warning, or docs issue rather than the same underlying change. 
No canonical duplicate stands out.", "confidence": 0.94, "canonical_issue_reason": "No canonical issue: the items span unrelated problems across SAM3, Qwen, audio processors, import behavior, and docs, and the soft pairs do not describe the same concrete bug.", "canonical_pr_reason": null, "best_issue_reason": "No single issue is a good global representative because the cluster is not a true duplicate set; the soft links are only superficially similar.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43901", "right": "issue:45003", "accept": false, "reason": "One is a pipeline docs mismatch about `return_all_scores`; the other is unsafe `sys.modules` access in `modeling_utils`. Different symptoms and code paths." }, { "left": "issue:43065", "right": "issue:43408", "accept": false, "reason": "Both mention SAM3, but one is about a dummy `nn.Conv2d` in the pixel decoder and the other is a model-type/config mismatch warning. Not the same bug." }, { "left": "issue:43262", "right": "issue:44246", "accept": false, "reason": "Audio processor sampling-rate defaults and slow `import transformers` are unrelated issues." }, { "left": "issue:43257", "right": "issue:45137", "accept": false, "reason": "Both involve DeepSpeed, but one is Qwen3 MOE weight conversion during loading and the other is a ZeRO3 deque underflow. Different failures and fix paths." }, { "left": "issue:44496", "right": "issue:45003", "accept": false, "reason": "An unrecognized model/config error is unrelated to unsafe `sys.modules` access." }, { "left": "issue:43475", "right": "issue:43540", "accept": false, "reason": "Different models and different errors: missing `fpn_position_embeddings` in SAM3 Video vs video-processing `ValueError` in Qwen3OmniMoe." }, { "left": "issue:43295", "right": "issue:45292", "accept": false, "reason": "Custom processor/tokenizer regression is unrelated to `resize_token_embeddings` not updating output embeddings." 
}, { "left": "issue:44933", "right": "issue:45459", "accept": false, "reason": "Both concern import-related behavior, but one is a nonexistent image_utils import and the other is protobuf-decoder error masking in tokenization; not the same bug." }, { "left": "issue:44060", "right": "issue:44077", "accept": false, "reason": "One is a tied-weights warning in Qwen3-Next, the other is a `patchtsmixer` `post_init` policy issue. No shared code-path problem." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 95, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:43232", "issue:43441", "issue:43606", "issue:43646", "issue:43653", "issue:43673", "issue:43931", "issue:44162", "issue:44368", "issue:44442", "issue:44466", "issue:44734", "issue:44749", "issue:44869", "issue:44977", "issue:45278", "issue:45335", "issue:45356" ], "soft_pairs": [ "issue:44368|issue:44977", "issue:44466|issue:45335", "issue:44869|issue:45356", "issue:43673|issue:44734", "issue:43653|issue:44749", "issue:44442|issue:44749", "issue:43232|issue:43673", "issue:43931|issue:44368", "issue:43441|issue:43606", "issue:43673|issue:45278", "issue:43646|issue:44162" ], "prepared_review_unit_hash": "5421f5fc13eefea1d1ee4317795f1b13c84bbd0b71217cbc7aed137ef9940712", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 11, "serialized_chars": 13889, "estimated_input_tokens": 3473, "estimated_eval_tokens": 7202 }, "overflow_policy": "truncate_then_skip", "trimmed": false, 
"aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The cluster is highly heterogeneous: most items are distinct regression reports across different models, tokenizers, generation paths, and upgrade symptoms. No soft pair looks like the same underlying bug or change, so I would not merge any of the candidate edges.", "confidence": 0.88, "canonical_issue_reason": "If a representative issue must be chosen, #45278 is the broadest post-upgrade regression report and the most plausible umbrella item. That said, the cluster does not form a true duplicate set.", "canonical_pr_reason": null, "best_issue_reason": "#45278 is the best single issue to anchor the cluster because it is the most general and closest to a cross-cutting upgrade regression. It is still not a true duplicate hub for the other reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44368", "right": "issue:44977", "accept": false, "reason": "Different models and failure modes: a tied-embeddings warning during LoRA fine-tuning vs flash-attention generation failure. Same general area, not the same bug." }, { "left": "issue:44466", "right": "issue:45335", "accept": false, "reason": "Both involve embeddings, but one is about inconsistent lm_head serialization/tied weights and the other about resize_token_embeddings not updating decoder.embed_tokens. Different concrete code paths." }, { "left": "issue:44869", "right": "issue:45356", "accept": false, "reason": "Tokenizer-related issues, but one crashes Whisper word-timestamp decoding on a trailing replacement character while the other is a Kimi-K2.5 codec/warning regression. Not the same bug." }, { "left": "issue:43673", "right": "issue:44734", "accept": false, "reason": "Both touch generation/cache behavior, but one is missing cache in chunked_prefill and the other is a KV-cache continuation indexing crash in serve responses. Different failure points and fixes." 
}, { "left": "issue:43653", "right": "issue:44749", "accept": false, "reason": "Unrelated topics: BigBirdTokenizer special-token decode behavior vs a Chinese performance regression after upgrade." }, { "left": "issue:44442", "right": "issue:44749", "accept": false, "reason": "AutoTokenizer loading failure for a specific tokenizer is unrelated to a broad slowdown regression." }, { "left": "issue:43232", "right": "issue:43673", "accept": false, "reason": "Both concern generation internals, but one is about _update_model_kwargs_for_generation after sync_gpus and the other about cache missing in chunked_prefill. Not the same underlying defect." }, { "left": "issue:43931", "right": "issue:44368", "accept": false, "reason": "A Qwen3-VL weight-shape mismatch is unrelated to a tied-embeddings warning in Qwen3.5 LoRA fine-tuning." }, { "left": "issue:43441", "right": "issue:43606", "accept": false, "reason": "Different models and different symptoms: FlashAttention failure for Ministral-3 vs device mismatch under CPU offload for bark-small." }, { "left": "issue:43673", "right": "issue:45278", "accept": false, "reason": "Cache-generation bug vs generic import errors after upgrade; too broad and not the same code-path problem." }, { "left": "issue:43646", "right": "issue:44162", "accept": false, "reason": "Custom model initialization breakage in Transformers v5 is not the same as the ESM2-specific broken workflow report." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 96, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:28282", "issue:30990", "issue:36246", "issue:42371", "issue:42673", "issue:42898", "issue:43381", "issue:43475", "issue:43704", "issue:43824", "issue:44162", "issue:44297", "issue:44623", "issue:45092", "issue:45292", "issue:45335", "issue:45381" ], "soft_pairs": [ "issue:42673|issue:43704", "issue:44297|issue:44623", "issue:43381|issue:45292", "issue:28282|issue:30990", "issue:42898|issue:44162", "issue:43381|issue:45335", "issue:36246|issue:43824", "issue:43475|issue:45381", "issue:42371|issue:45092" ], "prepared_review_unit_hash": "f0bf3d06fc3a3c45e0cfc5af4eaad0aab7704b1a6c57f09e83d0a0a58f2e7c0e", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12573, "estimated_input_tokens": 3144, "estimated_eval_tokens": 6544 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is largely heterogeneous, with most pairs clearly unrelated. 
The only strong duplicate is the repeated Qwen3ForCausalLM VRAM leak report, and there is also a plausible duplicate pair in the Qwen2.5-VL import-error reports.", "confidence": 0.74, "canonical_issue_reason": "issue:42673 is the best canonical issue because issue:43704 is an exact title duplicate of the same Qwen3ForCausalLM VRAM leak bug, and 42673 has much more discussion/context.", "canonical_pr_reason": null, "best_issue_reason": "issue:42673 is the strongest representative of the only clear duplicate thread in this set; the rest of the cluster is too mixed to provide a better global anchor.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:42673", "right": "issue:43704", "accept": true, "reason": "Exact same bug and same title: Qwen3ForCausalLM leaking VRAM across multiple dataloader threads." }, { "left": "issue:44297", "right": "issue:44623", "accept": false, "reason": "Both involve save_pretrained, but one is a tokenizer_config mismatch and the other is missing processor files; different failures and likely different fixes." }, { "left": "issue:43381", "right": "issue:45292", "accept": false, "reason": "Gradient checkpointing in eval mode is unrelated to resize_token_embeddings not updating output_embeddings." }, { "left": "issue:28282", "right": "issue:30990", "accept": false, "reason": "One is a missing PyTorch ImportError; the other is a loading hang in sentence transformers. Different underlying problems." }, { "left": "issue:42898", "right": "issue:44162", "accept": false, "reason": "clean_up_tokenization_spaces v5 behavior change is unrelated to the ESM2 breakage report." }, { "left": "issue:43381", "right": "issue:45335", "accept": false, "reason": "Different model behaviors: eval-mode gradient checkpointing versus decoder.embed_tokens not updating after resize_token_embeddings." 
}, { "left": "issue:36246", "right": "issue:43824", "accept": true, "reason": "Both are Qwen2.5-VL import errors from the transformers package/module export path, and could plausibly be fixed together as the same missing-export issue." }, { "left": "issue:43475", "right": "issue:45381", "accept": false, "reason": "SAM 3 video missing attribute and Qwen2.5-VL video position-id bug are different model-specific issues." }, { "left": "issue:42371", "right": "issue:45092", "accept": false, "reason": "TF32 API settings guidance and InternVL2 meta-initialization incompatibility are not the same bug or change." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 97, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:41628", "issue:42222", "issue:42994", "issue:43452", "issue:43475", "issue:43525", "issue:43643", "issue:43873", "issue:44038", "issue:44623", "issue:44933", "issue:44945", "issue:45003", "issue:45081", "issue:45103", "issue:45200", "issue:45310", "issue:45375" ], "soft_pairs": [ "issue:45081|issue:45103", "issue:43452|issue:45003", "issue:41628|issue:44933", "issue:43475|issue:43643", "issue:42222|issue:45200", "issue:42994|issue:43525", "issue:45310|issue:45375", "issue:44038|issue:44623", "issue:43873|issue:44945" ], "prepared_review_unit_hash": "c831c3755d51cfe87f996ee9722baa46d36dc044d777ff2a36b0caef0e05c123", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13039, "estimated_input_tokens": 3260, 
"estimated_eval_tokens": 6776 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is heterogeneous: the soft pairs share superficial error patterns or model-family overlap, but they do not look like the same underlying bug/change. No PRs are present. If a single issue must represent the cluster, the broadest and most connected report is the quantization/offloading issue.", "confidence": 0.87, "canonical_issue_reason": "issue:43873 is the most broadly scoped and most referenced item in the set, with active discussion and inbound references; it reads like a central runtime behavior bug rather than a narrow model-specific symptom.", "canonical_pr_reason": null, "best_issue_reason": "issue:43873 is the best cluster representative because it is general, open, and discussion-heavy, but it should not be treated as a true duplicate anchor for the other issues.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:45081", "right": "issue:45103", "accept": false, "reason": "Both are AttributeError crashes, but they affect different code paths: Mistral tokenizer regex patching vs auto-docstring kwargs processing." }, { "left": "issue:43452", "right": "issue:45003", "accept": false, "reason": "One is a GGUF loading regression in from_pretrained; the other is unsafe sys.modules access in modeling_utils. Related import/load surface, but not the same bug." }, { "left": "issue:41628", "right": "issue:44933", "accept": false, "reason": "Both mention missing imports, but one is about AutoImageProcessor export from transformers and the other is a different image_utils import problem." }, { "left": "issue:43475", "right": "issue:43643", "accept": false, "reason": "Both involve missing attributes/fields, but SAM 3 video output and trust_remote_code AutoConfig object truncation are distinct failures." 
}, { "left": "issue:42222", "right": "issue:45200", "accept": false, "reason": "Different models and symptoms: vitpose breakage vs Gemma 4 token-type defaults for text-only fine-tuning." }, { "left": "issue:42994", "right": "issue:43525", "accept": false, "reason": "Quantized model saving failure and Llama4Config missing pad_token_id are separate configuration/runtime issues." }, { "left": "issue:45310", "right": "issue:45375", "accept": false, "reason": "Both are Qwen3.5-related, but one is a generic from_pretrained error and the other is a specific vision-config field being dropped by @strict." }, { "left": "issue:44038", "right": "issue:44623", "accept": false, "reason": "Both touch Qwen3-VL-Moe/processor flows, but one is a model compatibility bug and the other is missing files on processor.save_pretrained." }, { "left": "issue:43873", "right": "issue:44945", "accept": false, "reason": "Quantization/offloading behavior and incorrect pipeline-parallel LLM output are unrelated code-path problems." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 98, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36010", "issue:41628", "issue:42831", "issue:42994", "issue:43232", "issue:43475", "issue:43525", "issue:44164", "issue:44188", "issue:44222", "issue:44351", "issue:44661", "issue:45230", "issue:45310", "issue:45362", "issue:45375", "issue:45397" ], "soft_pairs": [ "issue:45230|issue:45397", "issue:43475|issue:44661", "issue:45362|issue:45375", "issue:43232|issue:44188", "issue:41628|issue:44351", "issue:44222|issue:45310", "issue:42831|issue:44222", "issue:42994|issue:44164", "issue:43525|issue:45375", "issue:36010|issue:41628", "issue:45310|issue:45397" ], "prepared_review_unit_hash": "27cb39da0cc7de17e7978e9c54fe85b219d997650d7e4ec0ae5a75fb5ed6d9e9", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 11, "serialized_chars": 12835, "estimated_input_tokens": 3209, "estimated_eval_tokens": 6674 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is not a true duplicate set: it mixes unrelated import/export regressions, generation internals, model-specific config bugs, and serialization/save-load failures. No soft edge pair is the same underlying bug/change closely enough to merge.", "confidence": 0.92, "canonical_issue_reason": "No single canonical issue fits the whole cluster; the items are semantically heterogeneous. 
If forced to pick a representative anchor, it would only be for the save/from_pretrained subset, not the full cluster.", "canonical_pr_reason": null, "best_issue_reason": "issue:44164 is the most concrete, reusable bug report in the serialization/save-load subset, with a clear failure mode and broader applicability than the model-specific reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:45230", "right": "issue:45397", "accept": false, "reason": "Both are bugs, but one is an unspecific report and the other is a ZeRO3/from_pretrained failure for Gemma-4; no shared code-path evidence." }, { "left": "issue:43475", "right": "issue:44661", "accept": false, "reason": "SAM 3 Video missing attribute and add-new-model-like tokenizer mapping failure are different model/tooling bugs, not one underlying change." }, { "left": "issue:45362", "right": "issue:45375", "accept": false, "reason": "Both mention Qwen3.5, but one is a chat crash and the other is a config field dropped by @strict; different failure modes." }, { "left": "issue:43232", "right": "issue:44188", "accept": false, "reason": "Generation kwargs update after sync_gpus and attention-kernel divergence under torch.compile are separate generation/compile issues." }, { "left": "issue:41628", "right": "issue:44351", "accept": false, "reason": "Import errors for different symbols; they may share a packaging theme, but not the same missing export or fix." }, { "left": "issue:44222", "right": "issue:45310", "accept": false, "reason": "FP8 save_pretrained for MoE vs Qwen3.5 MoE from_pretrained error are related only at a very high level; not one concrete bug." }, { "left": "issue:42831", "right": "issue:44222", "accept": false, "reason": "Accuracy degradation in FineGrainedFP8 is not the same as a save_pretrained failure for FP8 MoE models." 
}, { "left": "issue:42994", "right": "issue:44164", "accept": false, "reason": "Both involve serialization, but quantized model saving failure and extra_state handling are distinct defects." }, { "left": "issue:43525", "right": "issue:45375", "accept": false, "reason": "Missing pad_token_id in Llama4Config and missing deepstack_visual_indexes in Qwen3_5MoeVisionConfig are separate model-specific schema bugs." }, { "left": "issue:36010", "right": "issue:41628", "accept": false, "reason": "Both are import errors, but for different APIs and module paths; no evidence of the same broken export." }, { "left": "issue:45310", "right": "issue:45397", "accept": false, "reason": "Both are from_pretrained bugs, but for different models and likely different root causes, so they should not be merged." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 99, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:30990", "issue:35141", "issue:42175", "issue:42898", "issue:43232", "issue:43377", "issue:43479", "issue:43673", "issue:44038", "issue:44079", "issue:44188", "issue:44242", "issue:44568", "issue:44792", "issue:44869", "issue:44933", "issue:45310" ], "soft_pairs": [ "issue:43377|issue:44242", "issue:43479|issue:44079", "issue:43232|issue:44792", "issue:30990|issue:35141", "issue:43673|issue:44188", "issue:44038|issue:45310", "issue:43479|issue:44869", "issue:42898|issue:44568", "issue:42175|issue:44933" ], "prepared_review_unit_hash": "afc3729d89683861c70ac9f1291cdd51aca770c345f703d3861acffca1540ff9", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 
169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12499, "estimated_input_tokens": 3125, "estimated_eval_tokens": 6506 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The cluster is very noisy: these issues span unrelated bugs across generation, tokenization, multimodal configs, model outputs, and backend packaging. None of the soft pairs look like the same underlying change or defect, so I would not merge any of them.", "confidence": 0.88, "canonical_issue_reason": "Issue 44038 is the least specific and most cluster-like of the set, so it is the best available anchor, but it is still not a true duplicate center because the surrounding issues are mostly unrelated.", "canonical_pr_reason": null, "best_issue_reason": "44038 is the most plausible representative only because it is a broad v5/model-regression report; however, the cluster is too heterogeneous for a strong canonical issue.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43377", "right": "issue:44242", "accept": false, "reason": "Different bugs: MIMI encoder padding-mask inconsistency vs MoE load-balancing loss gating on router logits." }, { "left": "issue:43479", "right": "issue:44079", "accept": false, "reason": "Config initialization from None is unrelated to ModelOutput key assignment when a prior value was None." }, { "left": "issue:43232", "right": "issue:44792", "accept": false, "reason": "Generation kwargs after sync_gpus is a generation-state bug; the Janus test failure is model-specific and not the same code path." }, { "left": "issue:30990", "right": "issue:35141", "accept": false, "reason": "Stuck loading for Sentence Transformers is unrelated to output embedding reinitialization after resizing token embeddings." 
}, { "left": "issue:43673", "right": "issue:44188", "accept": false, "reason": "Missing cache in chunked_prefill and attention-kernel branching under torch.compile are different generation/runtime problems." }, { "left": "issue:44038", "right": "issue:45310", "accept": false, "reason": "Both mention Qwen/MoE, but one is a vague Qwen3-VL-Moe issue and the other is a from_pretrained regression in Qwen3.5-Moe; not the same defect." }, { "left": "issue:43479", "right": "issue:44869", "accept": false, "reason": "Multimodal config defaults being reinitialized has nothing to do with Whisper timestamp decoding crashing on a trailing replacement character." }, { "left": "issue:42898", "right": "issue:44568", "accept": false, "reason": "Both are v5 tokenizer regressions, but clean_up_tokenization_spaces and add_special_tokens/BOS-EOS handling are separate behaviors and code paths." }, { "left": "issue:42175", "right": "issue:44933", "accept": false, "reason": "Torch extra dependency/backend selection is unrelated to a missing image_utils import." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 100, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:42898", "issue:43278", "issue:43723", "issue:43784", "issue:43824", "issue:43976", "issue:44060", "issue:44188", "issue:44373", "issue:44568", "issue:44734", "issue:44933", "issue:44964", "issue:45103", "issue:45125", "issue:45137", "issue:45356" ], "soft_pairs": [ "issue:43784|issue:43824", "issue:43976|issue:44933", "issue:44188|issue:44373", "issue:44964|issue:45137", "issue:44060|issue:45125", "issue:44734|issue:45103", "issue:43723|issue:44568", "issue:42898|issue:45356", "issue:43278|issue:45137" ], "prepared_review_unit_hash": "fbee94b37e8834f0f6f435a9c3acfc9609a90c1ec1ecdba07be64cb0a65fb56b", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12630, "estimated_input_tokens": 3158, "estimated_eval_tokens": 6572 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The items are a loose similarity cluster, but they cover many unrelated problems: tokenizer regressions, model-loading/import errors, distributed-training bugs, a docstring issue, and a serve/runtime crash. 
None of the soft-edge pairs look like true duplicates of the same underlying bug or change.", "confidence": 0.95, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43784", "right": "issue:43824", "accept": false, "reason": "Different failures in different import paths: sentence-transformers hits a NameError for `nn`, while Qwen2.5-VL is a missing top-level model export/import error." }, { "left": "issue:43976", "right": "issue:44933", "accept": false, "reason": "Python version compatibility regression is unrelated to a nonexistent `image_utils` import; different subsystems and symptoms." }, { "left": "issue:44188", "right": "issue:44373", "accept": false, "reason": "`torch.compile` attention-kernel divergence is a runtime kernel branching bug, while this is only a wrong docstring for `position_ids`." }, { "left": "issue:44964", "right": "issue:45137", "accept": false, "reason": "Model loading failure for Phi-4-multimodal is unrelated to a DeepSpeed ZeRO3 deque pop error; different code paths and layers." }, { "left": "issue:44060", "right": "issue:45125", "accept": false, "reason": "Both are Qwen-related, but one is a tied-weights warning bug and the other is a missing `_tp_plan` for tensor parallelism; not the same defect." }, { "left": "issue:44734", "right": "issue:45103", "accept": false, "reason": "Serve KV-cache continuation indexing crash and auto_docstring crashing under `from __future__ import annotations` are unrelated bugs in different components." }, { "left": "issue:43723", "right": "issue:44568", "accept": false, "reason": "Both involve tokenizers, but one is AutoTokenizer loading in v5 and the other is `add_special_tokens=True` failing to add BOS/EOS for a specific tokenizer; distinct behaviors." 
}, { "left": "issue:42898", "right": "issue:45356", "accept": false, "reason": "General `clean_up_tokenization_spaces` behavior change and Kimi-K2.5 codec-handling regression are separate tokenizer regressions with different triggers." }, { "left": "issue:43278", "right": "issue:45137", "accept": false, "reason": "Embedding dtype mismatch between train/eval has no overlap with a ZeRO3 deque underflow; entirely different training/runtime issues." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 101, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:41628", "issue:42898", "issue:43295", "issue:43479", "issue:43644", "issue:43874", "issue:44077", "issue:44206", "issue:44242", "issue:44448", "issue:44623", "issue:44779", "issue:45092", "issue:45103", "issue:45137", "issue:45276", "issue:45310", "issue:45459" ], "soft_pairs": [ "issue:44623|issue:45310", "issue:43874|issue:45092", "issue:42898|issue:44779", "issue:44077|issue:44242", "issue:42898|issue:44448", "issue:45137|issue:45276", "issue:41628|issue:45459", "issue:42898|issue:43295", "issue:43479|issue:43644", "issue:44206|issue:45103" ], "prepared_review_unit_hash": "cf63226c95d1d0d8cbb3829cc497cdddd2f6a35e0a8e54eafff000e9f778897a", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13498, "estimated_input_tokens": 3375, "estimated_eval_tokens": 7006 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, 
"analyst_result": { "summary": "These are mostly unrelated bug reports sharing broad Transformer/v5/tokenizer themes. None of the soft pairs look like the same underlying issue or a mergeable duplicate PR case, so I would reject all soft edges.", "confidence": 0.92, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": "issue:43874 is the most concrete and self-contained bug report: it names a specific AttributeError, a specific class, and a specific failing code path, so it is the best representative issue among this set.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44623", "right": "issue:45310", "accept": false, "reason": "Both involve from_pretrained/save_pretrained style regressions, but they describe different failures on different models and code paths." }, { "left": "issue:43874", "right": "issue:45092", "accept": false, "reason": "Both are multimodal/vision-related compatibility bugs, but one is a missing method on an image processor and the other is remote-code/meta-init incompatibility; not the same bug." }, { "left": "issue:42898", "right": "issue:44779", "accept": false, "reason": "Both mention tokenizer behavior changes in v5, but one is about clean_up_tokenization_spaces while the other is a DeepSeek-specific incorrect-tokenization regression." }, { "left": "issue:44077", "right": "issue:44242", "accept": false, "reason": "Completely different subsystems and failures: PatchTSMixer post_init validation versus MoE router load-balancing loss." }, { "left": "issue:42898", "right": "issue:44448", "accept": false, "reason": "Both are v4/v5 output-behavior regressions, but the concrete bugs differ: cleanup-space handling versus Pegasus generation output drift." }, { "left": "issue:45137", "right": "issue:45276", "accept": false, "reason": "Unrelated failures: DeepSpeed ZeRO3 deque crash versus Gemma4 embedding resize not propagating to tied/output embeddings." 
}, { "left": "issue:41628", "right": "issue:45459", "accept": false, "reason": "Different import-time/error-handling problems; one is missing AutoImageProcessor import, the other is protobuf-related tokenizer exception masking." }, { "left": "issue:42898", "right": "issue:43295", "accept": false, "reason": "Both are v5 tokenizer/processor regressions, but the actual breakage is different: cleanup-space defaults versus processor.tokenizer/image-passing behavior." }, { "left": "issue:43479", "right": "issue:43644", "accept": false, "reason": "Distinct bugs in different areas: Phi4MultimodalConfig default initialization versus non-persistent buffer initialization in v5.0.0." }, { "left": "issue:44206", "right": "issue:45103", "accept": false, "reason": "No shared underlying code path: one is an unsupported feature-arg in a feature extractor, the other is an auto-docstring crash under future annotations." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 102, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:22355", "issue:28282", "issue:42831", "issue:43723", "issue:44222", "issue:44263", "issue:44297", "issue:44360", "issue:44442", "issue:44483", "issue:44484", "issue:44514", "issue:44623", "issue:44977", "issue:45092", "issue:45103", "issue:45406", "issue:45459" ], "soft_pairs": [ "issue:44442|issue:45459", "issue:44222|issue:44297", "issue:44222|issue:44484", "issue:44222|issue:44623", "issue:42831|issue:44623", "issue:44263|issue:44360", "issue:43723|issue:44977", "issue:44483|issue:45406", "issue:22355|issue:28282", "issue:44263|issue:45092", "issue:44514|issue:45103" ], "prepared_review_unit_hash": 
"699c10f871016df01a95b710622c13ed6fb2119e28a938753d249b8729a3ca0d", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 11, "serialized_chars": 13615, "estimated_input_tokens": 3404, "estimated_eval_tokens": 7064 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The cluster is heterogeneous: the soft-linked pairs mostly share broad themes like tokenizer loading, save_pretrained, or indexer behavior, but they do not look like the same underlying bug or fix. I would not merge any of the proposed soft pairs.", "confidence": 0.93, "canonical_issue_reason": "Issue 44222 is the closest cluster anchor because it sits in the densest soft-similarity neighborhood and represents the save_pretrained/serialization theme, even though the set is not a true duplicate group.", "canonical_pr_reason": null, "best_issue_reason": "Issue 44222 is the best representative issue overall: it is concrete, centrally connected to several nearby serialization-related candidates, and is more specific than the broader, unrelated issues in the list.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44442", "right": "issue:45459", "accept": false, "reason": "Both are tokenizer-related, but one is a specific AutoTokenizer failure while the other is about protobuf-related error handling during tokenizer decode. Different code paths and fixes." }, { "left": "issue:44222", "right": "issue:44297", "accept": false, "reason": "Both mention save_pretrained, but one is FP8 model/moe saving and the other is tokenizer_config metadata mismatch. Not the same bug." 
}, { "left": "issue:44222", "right": "issue:44484", "accept": false, "reason": "These are different save_pretrained concerns: FP8/moe save behavior versus a question about the default max_shard_size. Not duplicate bugs." }, { "left": "issue:44222", "right": "issue:44623", "accept": false, "reason": "Processor.save_pretrained missing files and FP8 save_pretrained behavior are separate serialization problems affecting different components." }, { "left": "issue:42831", "right": "issue:44623", "accept": false, "reason": "An accuracy issue in FineGrainedFP8 is unrelated to missing files from processor.save_pretrained." }, { "left": "issue:44263", "right": "issue:44360", "accept": false, "reason": "Same broad DSA/indexer area, but one is about torch.split return values and the other about a missing ReLU. Distinct bugs and fixes." }, { "left": "issue:43723", "right": "issue:44977", "accept": false, "reason": "Tokenizer loading in v5 and Qwen3.5 flash-attention generation are unrelated failures." }, { "left": "issue:44483", "right": "issue:45406", "accept": false, "reason": "One is a /v1/chat/completions request handling problem; the other is a Gemma4Processor missing _tokenizer crash. Different underlying issues." }, { "left": "issue:22355", "right": "issue:28282", "accept": false, "reason": "Both are import errors, but one is missing transformers.onnx and the other is missing PyTorch. Different dependency/entrypoint problems." }, { "left": "issue:44263", "right": "issue:45092", "accept": false, "reason": "GlmMoeDsaIndexer torch.split return semantics and InternVL2 v5 meta-initialization incompatibility are not the same bug." }, { "left": "issue:44514", "right": "issue:45103", "accept": false, "reason": "apply_chat_template batching with padding=False and auto_docstring future-annotations AttributeError are unrelated failures." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 103, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:30990", "issue:38175", "issue:43479", "issue:43606", "issue:43723", "issue:44261", "issue:44297", "issue:44373", "issue:44442", "issue:44485", "issue:44623", "issue:44871", "issue:44913", "issue:44991", "issue:45092", "issue:45310", "issue:45357" ], "soft_pairs": [ "issue:44297|issue:45310", "issue:43479|issue:44913", "issue:43606|issue:44623", "issue:43723|issue:44871", "issue:44261|issue:45092", "issue:43723|issue:44991", "issue:44373|issue:44485", "issue:43723|issue:44442", "issue:45092|issue:45357", "issue:30990|issue:38175" ], "prepared_review_unit_hash": "d37f62ba3e7ba8f3d2ea7ca9486c180c4ab18dd05731dc497fb3317ba017bb9f", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 10, "serialized_chars": 12798, "estimated_input_tokens": 3200, "estimated_eval_tokens": 6656 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is mostly a loose collection of unrelated Transformers bugs. 
The closest theme is tokenizer/config serialization and loading regressions in v5, but the soft pairs are not concrete duplicates.", "confidence": 0.39, "canonical_issue_reason": "issue #43723 is the broadest and most central report in the tokenizer-loading subset: it describes a generic AutoTokenizer.from_pretrained failure in v5, which makes it the best umbrella representative for the cluster's closest shared theme.", "canonical_pr_reason": null, "best_issue_reason": "#43723 is the strongest single issue to anchor triage because it is general, model-agnostic, and aligned with the repeated loading/serialization regressions seen in nearby reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44297", "right": "issue:45310", "accept": false, "reason": "Both involve model/config loading behavior, but one is a tokenizer save metadata mismatch and the other is a Qwen3.5 from_pretrained error; they do not look like the same bug or patch." }, { "left": "issue:43479", "right": "issue:44913", "accept": false, "reason": "Both are config-default/reload problems, but they affect different models, different fields, and different code paths." }, { "left": "issue:43606", "right": "issue:44623", "accept": false, "reason": "CPU offload device mismatch for bark-small is unrelated to processor.save_pretrained missing files." }, { "left": "issue:43723", "right": "issue:44871", "accept": false, "reason": "Both are tokenizer/model loading complaints, but one is a generic AutoTokenizer v5 failure and the other is a Gemma-3 eos_token_id inconsistency; no clear shared root cause." }, { "left": "issue:44261", "right": "issue:45092", "accept": false, "reason": "These concern different subsystems: MLA q_a_layernorm precision/config handling versus InternVL2 meta-initialization compatibility." 
}, { "left": "issue:43723", "right": "issue:44991", "accept": false, "reason": "Both mention tokenizer loading in v5, but the reports are model-specific and do not establish the same underlying failure mode." }, { "left": "issue:44373", "right": "issue:44485", "accept": false, "reason": "A docstring typo is not the same as a GLM-5 RoPE implementation discussion." }, { "left": "issue:43723", "right": "issue:44442", "accept": false, "reason": "FastSpeech2ConformerTokenizer loading failure may be adjacent to v5 tokenizer regressions, but there is not enough evidence that it is the same concrete bug as the generic AutoTokenizer failure." }, { "left": "issue:45092", "right": "issue:45357", "accept": false, "reason": "Both concern Qwen3.5/vision-multimodal behavior, but one is a from_pretrained/meta-init incompatibility and the other is an incorrect save_pretrained key regression; they are not the same issue." }, { "left": "issue:30990", "right": "issue:38175", "accept": false, "reason": "Sentence Transformers loading hangs and zero probabilities in siglip2 are unrelated symptoms affecting different models and code paths." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 104, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:41628", "issue:42831", "issue:42886", "issue:42994", "issue:43066", "issue:43122", "issue:43576", "issue:43784", "issue:43824", "issue:44038", "issue:44117", "issue:44263", "issue:44295", "issue:44373", "issue:44521", "issue:44623", "issue:44704", "issue:45310" ], "soft_pairs": [ "issue:42886|issue:42994", "issue:44295|issue:44623", "issue:41628|issue:43824", "issue:42831|issue:45310", "issue:43576|issue:43784", "issue:44263|issue:44373", "issue:43122|issue:44521", "issue:44117|issue:44704", "issue:42886|issue:43066", "issue:42831|issue:44038" ], "prepared_review_unit_hash": "b5b054e90ee2e7fa5e2b974d60b9d739d868c6a1f95a091c5006799ace905469", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13140, "estimated_input_tokens": 3285, "estimated_eval_tokens": 6826 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These issues are mostly unrelated across different bugs in transformers (imports, tokenizers, quantization, processor save/load, and multimodal behavior). 
None of the soft pairs look like the same underlying problem, so no duplicate merges should be accepted.", "confidence": 0.91, "canonical_issue_reason": "No single canonical issue fits the full set: the titles and problem statements span multiple unrelated code paths rather than one duplicated bug.", "canonical_pr_reason": "No pull requests are present in the cluster.", "best_issue_reason": "If one issue must be used as the anchor, issue:44704 is the clearest actionable report with a specific failure mode and likely fix path; the rest are too heterogeneous to serve as a shared canonical.", "best_pr_reason": "No pull requests are present in the cluster.", "soft_edge_verdicts": [ { "left": "issue:42886", "right": "issue:42994", "accept": false, "reason": "Both are bug reports, but one is tokenizer cache/offline loading and the other is quantized model saving; different code paths and symptoms." }, { "left": "issue:44295", "right": "issue:44623", "accept": false, "reason": "Both mention saving/serialization, but one is about reading position_ids after registering a buffer and the other is missing files on processor.save_pretrained; not the same bug." }, { "left": "issue:41628", "right": "issue:43824", "accept": false, "reason": "Both are import errors, but for different symbols and likely different model/processor registrations; not the same missing-export issue." }, { "left": "issue:42831", "right": "issue:45310", "accept": false, "reason": "Both relate to model behavior, but FineGrainedFP8 accuracy and Qwen3.5-Moe from_pretrained failure are distinct issues with different failure modes." }, { "left": "issue:43576", "right": "issue:43784", "accept": false, "reason": "One is the v5 env command breaking, the other is a NameError when importing sentence-transformers; separate commands/code paths." 
}, { "left": "issue:44263", "right": "issue:44373", "accept": false, "reason": "The first is a runtime bug in torch.split usage, while the second is a docstring problem for position_ids; documentation vs code bug." }, { "left": "issue:43122", "right": "issue:44521", "accept": false, "reason": "One reports tokenization changes across versions, the other all-zero assistant masks for multimodal chat templates; different behavior and subsystems." }, { "left": "issue:44117", "right": "issue:44704", "accept": false, "reason": "Both involve from_pretrained/metadata handling, but one is a mapping-name None edge case and the other is kwargs not forwarded to cached_file; related area, not same defect." }, { "left": "issue:42886", "right": "issue:43066", "accept": false, "reason": "Offline tokenizer cache loading and wrong tokenizer decoder type are separate tokenizer regressions with different triggers and fixes." }, { "left": "issue:42831", "right": "issue:44038", "accept": false, "reason": "FineGrainedFP8 accuracy regression and Qwen3-VL-Moe bug are different model/precision issues; too broad to be duplicates." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 105, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:30990", "issue:33357", "issue:39401", "issue:42371", "issue:42907", "issue:43122", "issue:43232", "issue:43937", "issue:43976", "issue:44263", "issue:44488", "issue:44514", "issue:44704", "issue:44743", "issue:44945", "issue:45245", "issue:45290" ], "soft_pairs": [ "issue:30990|issue:33357", "issue:42907|issue:44945", "issue:42371|issue:44263", "issue:44704|issue:45290", "issue:43122|issue:44514", "issue:43232|issue:45245", "issue:43976|issue:44945", "issue:43232|issue:44743", "issue:39401|issue:44488", "issue:42907|issue:43937" ], "prepared_review_unit_hash": "cc723882dd3522f817e95c1adc7be493254c6f46f2c2611f2ef4cf25c7737003", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 10, "serialized_chars": 12592, "estimated_input_tokens": 3148, "estimated_eval_tokens": 6552 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is mostly a set of unrelated Transformers bug reports that only share broad subsystem vocabulary (tokenizers, generation, processors, loading, GLM/Qwen models). \nNone of the soft pairs look like true duplicates or mergeable PR-equivalents from the titles alone.", "confidence": 0.26, "canonical_issue_reason": "Issue 44704 is the most concrete, narrowly scoped bug report in the set and best serves as a representative issue, but the cluster is too heterogeneous to justify a strong canonical duplicate choice.", "canonical_pr_reason": null, "best_issue_reason": "Issue 44704 has a specific API path (`AutoProcessor.from_pretrained` \u2192 `cached_file`) and a clear failure mode, making it the cleanest representative among otherwise unrelated reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:30990", "right": "issue:33357", "accept": false, "reason": "Both involve loading failures, but one is Sentence Transformers loading and the other is a MacOS bus error with a CLIP model; different symptom and likely different root cause." }, { "left": "issue:42907", "right": "issue:44945", "accept": false, "reason": "Dequantized model saving and pipeline-parallel generation output errors are unrelated code paths and not the same underlying bug." }, { "left": "issue:42371", "right": "issue:44263", "accept": false, "reason": "TF32 configuration guidance and a `torch.split()` return issue in `GlmMoeDsaIndexer` are distinct concerns with no clear shared defect." }, { "left": "issue:44704", "right": "issue:45290", "accept": false, "reason": "Both touch processor/chat-template behavior, but one is missing kwargs in `from_pretrained` while the other crashes on assistant tool-call messages; different failure modes." }, { "left": "issue:43122", "right": "issue:44514", "accept": false, "reason": "Tokenizer offset changes across versions and a batched `apply_chat_template` crash are different bugs in different layers." }, { "left": "issue:43232", "right": "issue:45245", "accept": false, "reason": "Generation kwargs handling after `sync_gpus` is unrelated to a categorical cardinality runtime error." 
}, { "left": "issue:43976", "right": "issue:44945", "accept": false, "reason": "Python version compatibility and incorrect pipeline-parallel LLM output are not the same underlying issue." }, { "left": "issue:43232", "right": "issue:44743", "accept": false, "reason": "Both involve generation/cache mechanics, but one is about post-`sync_gpus` kwargs propagation and the other about recurrent state resets in a specific model file; too different to merge." }, { "left": "issue:39401", "right": "issue:44488", "accept": false, "reason": "Wrong `offset_mapping` for Qwen3 tokenizer and failure to load `cjvt/sleng-bert` are separate problems." }, { "left": "issue:42907", "right": "issue:43937", "accept": false, "reason": "Saving dequantized models and invalid `GenerationConfig` for GLM-5 are unrelated bugs, even if both mention GLM-family models." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 106, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:29127", "issue:30990", "issue:41628", "issue:43116", "issue:43122", "issue:43408", "issue:43475", "issue:43992", "issue:44295", "issue:44297", "issue:44360", "issue:44485", "issue:44625", "issue:44704", "issue:44779", "issue:44829", "issue:45200", "issue:45278" ], "soft_pairs": [ "issue:44625|issue:44829", "issue:43116|issue:45200", "issue:41628|issue:45278", "issue:43992|issue:44704", "issue:43408|issue:43475", "issue:44360|issue:44485", "issue:43122|issue:44779", "issue:29127|issue:30990", "issue:44295|issue:44297" ], "prepared_review_unit_hash": "e9744b5d4ffe84d62c57cfaa7073c1ac76c11a44eb719de96c78f493c2a24748", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 
1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13103, "estimated_input_tokens": 3276, "estimated_eval_tokens": 6808 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These issues are mostly unrelated single-bug reports across different models and utilities; no duplicate cluster emerges from the titles/descriptions alone.", "confidence": 0.95, "canonical_issue_reason": "No clear canonical issue: the set is heterogeneous, spanning distinct model-specific bugs, tokenizer regressions, and API/import problems.", "canonical_pr_reason": null, "best_issue_reason": "No single issue is a good global representative because the reports do not share one underlying failure mode.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44625", "right": "issue:44829", "accept": false, "reason": "Different problems: one is label propagation into text config, the other is training collapse with flash_attention_3." }, { "left": "issue:43116", "right": "issue:45200", "accept": false, "reason": "Different code paths and symptoms: example script multi-label output vs Gemma 4 multimodal token type defaults." }, { "left": "issue:41628", "right": "issue:45278", "accept": false, "reason": "Both are import-related, but one is a specific AutoImageProcessor import failure and the other is a broader upgrade-related import breakage." }, { "left": "issue:43992", "right": "issue:44704", "accept": false, "reason": "Unrelated: missing embed_tokens.weight during UMT5 loading vs AutoProcessor not forwarding kwargs to cached_file." }, { "left": "issue:43408", "right": "issue:43475", "accept": false, "reason": "Same SAM3 area, but distinct bugs: wrong model class warning vs missing fpn_position_embeddings attribute." 
}, { "left": "issue:44360", "right": "issue:44485", "accept": false, "reason": "Different topics entirely: DSA indexer activation issue vs GLM-5 RoPE implementation discussion." }, { "left": "issue:43122", "right": "issue:44779", "accept": false, "reason": "Both mention tokenizer regressions, but one is a general version-to-version tokenization change and the other is Deepseek-specific incorrect output." }, { "left": "issue:29127", "right": "issue:30990", "accept": false, "reason": "No shared bug: LayoutLMv3 box validation messaging vs Sentence Transformers loading hang." }, { "left": "issue:44295", "right": "issue:44297", "accept": false, "reason": "Different failure modes: position_ids buffer access error vs tokenizer_class mismatch in saved config." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 107, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36296", "issue:39401", "issue:41628", "issue:42371", "issue:42831", "issue:42907", "issue:43116", "issue:43452", "issue:43550", "issue:43927", "issue:44038", "issue:44222", "issue:44261", "issue:44295", "issue:44297", "issue:44373", "issue:44521", "issue:44987" ], "soft_pairs": [ "issue:39401|issue:44521", "issue:44261|issue:44373", "issue:41628|issue:44987", "issue:43550|issue:44295", "issue:42371|issue:42831", "issue:42907|issue:43452", "issue:36296|issue:43116", "issue:43927|issue:44295", "issue:44038|issue:44297", "issue:44038|issue:44222" ], "prepared_review_unit_hash": "55a630882951f158de313028d4a662c6cc10975a6288ebb486b776f83de88254", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, 
"estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13177, "estimated_input_tokens": 3295, "estimated_eval_tokens": 6846 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is heterogeneous: the items are mostly unrelated issue reports across tokenization, multimodal chat templating, save/load, precision/config, and training/runtime bugs. None of the soft pairs look like true duplicates, so no edge is accepted.", "confidence": 0.91, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:39401", "right": "issue:44521", "accept": false, "reason": "Both touch tokenizer/chat-template surfaces, but one is Qwen3 offset_mapping and the other is multimodal assistant_masks; different failure modes and likely different fixes." }, { "left": "issue:44261", "right": "issue:44373", "accept": false, "reason": "Missing config.rms_norm_eps causing precision drift is unrelated to a wrong position_ids docstring; one is a runtime bug, the other is documentation." }, { "left": "issue:41628", "right": "issue:44987", "accept": false, "reason": "AutoImageProcessor import failure and a transformers>=5.1.0 model-loading failure are different APIs and different code paths." }, { "left": "issue:43550", "right": "issue:44295", "accept": false, "reason": "torch.compile/SDPA failure for Bamba-9B-v2 is unrelated to reading position_ids after buffer registration; different subsystems." }, { "left": "issue:42371", "right": "issue:42831", "accept": false, "reason": "TF32 API settings guidance and FineGrainedFP8 accuracy problems are about different features and distinct bugs." 
}, { "left": "issue:42907", "right": "issue:43452", "accept": false, "reason": "Saving dequantized Ministrals/Devstrals and gguf_file breaking from_pretrained are both save/load related, but they involve different models and different failure points." }, { "left": "issue:36296", "right": "issue:43116", "accept": false, "reason": "Tensor parallel training bug and empty multi-label classification example output are not the same underlying issue." }, { "left": "issue:43927", "right": "issue:44295", "accept": false, "reason": "Custom token IDs being lost on save/load is unrelated to position_ids buffer access errors." }, { "left": "issue:44038", "right": "issue:44297", "accept": false, "reason": "Qwen3-VL-Moe / transformers 5.0 compatibility and tokenizer_class mismatch on save_pretrained are distinct bugs; shared serialization context is too broad." }, { "left": "issue:44038", "right": "issue:44222", "accept": false, "reason": "Both mention moe-related models, but one is a transformers 5.0 compatibility issue and the other is an FP8 save_pretrained bug; not the same code-path problem." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 108, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:38617", "issue:42222", "issue:42907", "issue:43064", "issue:43122", "issue:43278", "issue:43441", "issue:43673", "issue:43784", "issue:43824", "issue:44222", "issue:44351", "issue:44912", "issue:44933", "issue:44945", "issue:44977", "issue:45278", "issue:45446" ], "soft_pairs": [ "issue:43824|issue:44351", "issue:43278|issue:44945", "issue:43122|issue:44977", "issue:42907|issue:44222", "issue:42222|issue:43441", "issue:42907|issue:44912", "issue:43784|issue:45446", "issue:43784|issue:44933", "issue:43064|issue:43673", "issue:38617|issue:45278" ], "prepared_review_unit_hash": "bd120654d29150ec064cbb7b2a75d7a672973091c64a44c22bff4e81c40ef007", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13251, "estimated_input_tokens": 3313, "estimated_eval_tokens": 6882 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These issues are mostly unrelated regressions spanning imports, model loading, quantization, training, and generation behavior. \nSeveral share broad themes like \"import errors\" or \"save/load problems,\" but none look like the same concrete bug or fix target.", "confidence": 0.93, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43824", "right": "issue:44351", "accept": false, "reason": "Both are import failures, but they involve different missing symbols in different code paths: Qwen2.5-VL class export vs HybridCache export. Not the same underlying bug." }, { "left": "issue:43278", "right": "issue:44945", "accept": false, "reason": "One is a dtype mismatch on embeddings between train/eval, the other is incorrect LLM output under pipeline parallelism. Related to model execution, but not the same defect." }, { "left": "issue:43122", "right": "issue:44977", "accept": false, "reason": "Tokenization differences across versions and flash-attention generation failure are distinct problems with different subsystems and failure modes." }, { "left": "issue:42907", "right": "issue:44222", "accept": false, "reason": "Both mention quantization/save_pretrained, but one is dequantized Ministral/Devstral saving and the other is FP8 MOE save_pretrained. Different model/setup and likely different code paths." }, { "left": "issue:42222", "right": "issue:43441", "accept": false, "reason": "VitPose import breakage and Ministral flash-attention failure are unrelated model-specific bugs." }, { "left": "issue:42907", "right": "issue:44912", "accept": false, "reason": "Saving dequantized weights and MXFP4 loading falling back to bf16 are both quantization-related, but they are different behaviors and not the same bug." }, { "left": "issue:43784", "right": "issue:45446", "accept": false, "reason": "Sentence-transformers import breaking with latest transformers and an AuxRequest version-check bug in flex_attention are different import-time issues in different modules." 
}, { "left": "issue:43784", "right": "issue:44933", "accept": false, "reason": "Both are import-related, but one is a missing nn symbol during sentence-transformers import and the other is a non-existent image_utils import. Not the same issue." }, { "left": "issue:43064", "right": "issue:43673", "accept": false, "reason": "FSDP2/PEFT optimizer-state corruption and missing GenerationMixin cache during chunked prefill affect different subsystems and code paths." }, { "left": "issue:38617", "right": "issue:45278", "accept": false, "reason": "Both are version-related import errors, but one is a specific missing layer_type_validation symbol and the other is a broad upgrade report with many import failures. Too generic to treat as the same bug." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 109, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:22355", "issue:29127", "issue:33453", "issue:39401", "issue:42371", "issue:42673", "issue:43116", "issue:43493", "issue:43825", "issue:43874", "issue:43937", "issue:43976", "issue:44075", "issue:44485", "issue:44779", "issue:44936", "issue:45072", "issue:45446" ], "soft_pairs": [ "issue:42673|issue:45072", "issue:43937|issue:44485", "issue:44075|issue:44936", "issue:43116|issue:43493", "issue:22355|issue:29127", "issue:42371|issue:43874", "issue:43116|issue:43825", "issue:39401|issue:44779", "issue:43976|issue:45446", "issue:33453|issue:39401" ], "prepared_review_unit_hash": "8118bbc86fa405310dea02fe2dc9373a9c8c09d72ada2c402203bcb360cb6e6c", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, 
"estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13230, "estimated_input_tokens": 3308, "estimated_eval_tokens": 6872 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "No soft-edge pair looks like the same underlying bug or change; the cluster is a mix of unrelated issues across tokenizers, model implementations, training, version checks, and error messaging.", "confidence": 0.94, "canonical_issue_reason": "No single issue is canonical here because the items are not duplicates of one another; they span distinct bug reports and subsystems.", "canonical_pr_reason": null, "best_issue_reason": "No global best issue stands out as a representative duplicate target; the set is too heterogeneous to safely collapse around one issue.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:42673", "right": "issue:45072", "accept": false, "reason": "Different failures: VRAM leakage in multi-dataloader use vs bfloat16 dtype mismatch in inference." }, { "left": "issue:43937", "right": "issue:44485", "accept": false, "reason": "Both mention GLM-5, but one is GenerationConfig validation and the other is RoPE implementation; different concrete bugs." }, { "left": "issue:44075", "right": "issue:44936", "accept": false, "reason": "Optimizer SGD arguments being ignored is unrelated to trainer.evaluate() failing after training." }, { "left": "issue:43116", "right": "issue:43493", "accept": false, "reason": "Example-script multi-label empty outputs and SigLIP2 HF/JAX discrepancy are different models and different code paths." }, { "left": "issue:22355", "right": "issue:29127", "accept": false, "reason": "Import failure for transformers.onnx is unrelated to a LayoutLMv3 error-message clarity issue." 
}, { "left": "issue:42371", "right": "issue:43874", "accept": false, "reason": "TF32 API behavior and a missing image-processor method are unrelated problems." }, { "left": "issue:43116", "right": "issue:43825", "accept": false, "reason": "Multi-label example output bug does not match a pipeline translation-task error message regression." }, { "left": "issue:39401", "right": "issue:44779", "accept": false, "reason": "Both are tokenizer bugs, but they affect different models and symptoms: offset_mapping vs incorrect tokenization results." }, { "left": "issue:43976", "right": "issue:45446", "accept": false, "reason": "These are separate version-check bugs in different areas: Python support vs PyTorch import gating." }, { "left": "issue:33453", "right": "issue:39401", "accept": false, "reason": "Tokenizer loading regression and Qwen3 offset_mapping corruption are not the same underlying defect." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 110, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:39401", "issue:42673", "issue:43421", "issue:43450", "issue:43827", "issue:43906", "issue:43931", "issue:43937", "issue:43986", "issue:44291", "issue:44484", "issue:44945", "issue:44977", "issue:45071", "issue:45083", "issue:45310", "issue:45405", "issue:45446" ], "soft_pairs": [ "issue:43931|issue:44945", "issue:43937|issue:44977", "issue:45083|issue:45446", "issue:44484|issue:45310", "issue:42673|issue:43906", "issue:43827|issue:45446", "issue:39401|issue:43421", "issue:43450|issue:43986", "issue:44291|issue:45071", "issue:45405|issue:45446" ], "prepared_review_unit_hash": 
"e756f03be271f26d429bf10327189f4a72d7feedb6c365192d0c4ff8e78c5269", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13336, "estimated_input_tokens": 3334, "estimated_eval_tokens": 6924 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is heterogeneous and does not look like a true duplicate set. The soft pairs mostly share broad subsystem keywords (tokenizers, Qwen3, video, version checks) but describe different bugs, models, or product areas.", "confidence": 0.95, "canonical_issue_reason": "No clear canonical issue: the items span unrelated bugs, regressions, docs, and version-check problems, so collapsing them into one issue would be incorrect.", "canonical_pr_reason": "No PRs are present in this cluster.", "best_issue_reason": "If one issue had to be kept as the most concrete standalone report, issue 43931 is the strongest bug-style writeup, but it is not representative of the whole cluster as a duplicate target.", "best_pr_reason": "No PRs are present in this cluster.", "soft_edge_verdicts": [ { "left": "issue:43931", "right": "issue:44945", "accept": false, "reason": "Both mention Qwen3/model loading, but one is a weight-shape mismatch in Qwen3-VL and the other is incorrect output under pipeline parallelism; different code paths and symptoms." }, { "left": "issue:43937", "right": "issue:44977", "accept": false, "reason": "Different models and failures: GLM-5 invalid GenerationConfig versus Qwen3.5 flash-attention generation issues." }, { "left": "issue:45083", "right": "issue:45446", "accept": false, "reason": "Unrelated areas: qwen3_omni_moe feature-extraction helper behavior versus flex_attention PyTorch version checking." 
}, { "left": "issue:44484", "right": "issue:45310", "accept": false, "reason": "One is a question about the 50GB save_pretrained shard limit; the other is a Qwen3.5 MoE from_pretrained regression." }, { "left": "issue:42673", "right": "issue:43906", "accept": false, "reason": "Both are model-loading/runtime complaints, but one is a VRAM leak in multithreaded dataloaders and the other is a separate reproduction of issue 38071." }, { "left": "issue:43827", "right": "issue:45446", "accept": false, "reason": "Docs still referencing pipeline() and an AuxRequest/flex_attention version check bug are unrelated." }, { "left": "issue:39401", "right": "issue:43421", "accept": false, "reason": "Both are tokenizer-related, but one is wrong offset_mapping for Qwen3 and the other is runtime post-processor updates when special tokens change." }, { "left": "issue:43450", "right": "issue:43986", "accept": false, "reason": "Different video-processing failures: batched shape handling versus AutoProcessor crashing without torchvision." }, { "left": "issue:44291", "right": "issue:45071", "accept": false, "reason": "Both involve compatibility/type-checking regressions, but they are distinct failures: init_empty_weights argument handling versus PretrainedConfig type checking." }, { "left": "issue:45405", "right": "issue:45446", "accept": false, "reason": "Package version pinning for PEFT is unrelated to the PyTorch version check in flex_attention." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 111, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:38617", "issue:41628", "issue:42371", "issue:42673", "issue:43408", "issue:43577", "issue:43582", "issue:43742", "issue:43784", "issue:44117", "issue:44220", "issue:44261", "issue:44410", "issue:44829", "issue:45081", "issue:45216", "issue:45310" ], "soft_pairs": [ "issue:38617|issue:41628", "issue:42673|issue:43577", "issue:44117|issue:45081", "issue:44829|issue:45216", "issue:42673|issue:43582", "issue:44410|issue:45310", "issue:38617|issue:44220", "issue:42371|issue:44261", "issue:41628|issue:43784", "issue:43408|issue:43742" ], "prepared_review_unit_hash": "bad3fccda0ecd9ba68c9b22b2c84b2455298f4d3067594f9339e9c2b67c4fc33", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 10, "serialized_chars": 13029, "estimated_input_tokens": 3258, "estimated_eval_tokens": 6772 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is highly heterogeneous: the issues span unrelated import errors, model-loading regressions, dtype propagation, backend-specific crashes, and tokenizer/model architecture bugs. \nNo soft-edge pair looks like the same underlying defect or change, so none should be merged as duplicates.", "confidence": 0.91, "canonical_issue_reason": "No single canonical issue fits well because the cluster does not center on one bug; the items are mostly unrelated transformer regressions across different subsystems.", "canonical_pr_reason": null, "best_issue_reason": "If one issue must be chosen, 45216 is the most concrete and self-contained regression report, and it has a nearby follow-up in 45310, but it only represents a narrow slice of this mixed cluster.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:38617", "right": "issue:41628", "accept": false, "reason": "Both are import failures, but they concern different missing symbols and likely different breakages." }, { "left": "issue:42673", "right": "issue:43577", "accept": false, "reason": "VRAM leakage in multi-threaded Qwen3ForCausalLM use is unrelated to Blip2 dtype loading behavior." }, { "left": "issue:44117", "right": "issue:45081", "accept": false, "reason": "Tokenizer mapping None handling and a Mistral regex patch crash are different code paths and failure modes." }, { "left": "issue:44829", "right": "issue:45216", "accept": false, "reason": "Flash-attention training degeneration is unrelated to a Qwen3.5 save_pretrained checkpoint regression." }, { "left": "issue:42673", "right": "issue:43582", "accept": false, "reason": "A Qwen3 VRAM leak and an Apple Silicon caching_allocator_warmup TypeError are distinct bugs." }, { "left": "issue:44410", "right": "issue:45310", "accept": false, "reason": "Missing qwen3next projections and Qwen3.5 from_pretrained errors are different model/loader issues." }, { "left": "issue:38617", "right": "issue:44220", "accept": false, "reason": "An import error for configuration_utils and a feature-extraction helper issue do not match the same underlying defect." 
}, { "left": "issue:42371", "right": "issue:44261", "accept": false, "reason": "TF32 API guidance and MLA layernorm epsilon precision are unrelated concerns." }, { "left": "issue:41628", "right": "issue:43784", "accept": false, "reason": "AutoImageProcessor import failure and sentence-transformers nn NameError are separate import-time regressions." }, { "left": "issue:43408", "right": "issue:43742", "accept": false, "reason": "A sam3_video vs sam3_tracker config warning and a MobileLLM key error are different model-loading problems." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 112, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:42175", "issue:42222", "issue:42617", "issue:43066", "issue:43097", "issue:43122", "issue:43673", "issue:43937", "issue:44483", "issue:44493", "issue:44496", "issue:44933", "issue:44964", "issue:45278", "issue:45341", "issue:45459", "issue:45464" ], "soft_pairs": [ "issue:42175|issue:42222", "issue:43937|issue:44496", "issue:44483|issue:45464", "issue:44933|issue:45278", "issue:42617|issue:45341", "issue:43066|issue:43122", "issue:44964|issue:45278", "issue:43673|issue:45341", "issue:43097|issue:44493", "issue:42222|issue:45459" ], "prepared_review_unit_hash": "9ee817d2c8b2d6e4d21a8dfd81cf1c8cccd7634f4d003b7b3206766706bf1ca9", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 10, "serialized_chars": 12600, "estimated_input_tokens": 3150, "estimated_eval_tokens": 6556 }, "overflow_policy": 
"truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is noisy and mostly contains distinct Transformers v5 regressions across unrelated areas (packaging, model loading, tokenization, generation, API handling, and tests). I did not find a convincing duplicate pair; the best anchor is the broad import-regression report.", "confidence": 0.57, "canonical_issue_reason": "Issue 45278 is the broadest and most representative open report here: it describes a version-upgrade-related wave of import failures after moving from 4.57.0 to 5.5.0.", "canonical_pr_reason": null, "best_issue_reason": "45278 is the best single issue to anchor the cluster because it has the widest symptom surface and is the clearest umbrella-style regression report among mostly unrelated issues.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:42175", "right": "issue:42222", "accept": false, "reason": "Different problems: packaging/backend dependency omission vs broken vitpose models; no shared concrete code path." }, { "left": "issue:43937", "right": "issue:44496", "accept": false, "reason": "Both are model-load failures, but one is a GenerationConfig validation error for GLM-5 and the other is an unrecognized-model/config issue for OLMo; distinct root causes." }, { "left": "issue:44483", "right": "issue:45464", "accept": false, "reason": "Both mention chat/completions, but one is a general request-acceptance regression and the other is a Qwen3.5 streaming-inference failure; not enough evidence they are the same bug." }, { "left": "issue:44933", "right": "issue:45278", "accept": false, "reason": "A specific missing import from image_utils is not the same as a broad post-upgrade import-error report." }, { "left": "issue:42617", "right": "issue:45341", "accept": false, "reason": "3d_parallel.py runtime failure and a testing_utils bug are unrelated." 
}, { "left": "issue:43066", "right": "issue:43122", "accept": false, "reason": "Tokenizer decoder-type mismatch and tokenization-output differences are related to tokenizers but do not look like the same concrete defect." }, { "left": "issue:44964", "right": "issue:45278", "accept": false, "reason": "Phi-4 multimodal model loading failure is a model-specific issue, not the same as generic import errors after upgrade." }, { "left": "issue:43673", "right": "issue:45341", "accept": false, "reason": "GenerationMixin chunked_prefill cache behavior and testing_utils are different subsystems and symptoms." }, { "left": "issue:43097", "right": "issue:44493", "accept": false, "reason": "Removal of tie_embeddings_and_encoder_decoder and unexpected position-id keys are different regressions with different failure modes." }, { "left": "issue:42222", "right": "issue:45459", "accept": false, "reason": "Broken vitpose models and protobuf-masked tokenizer errors are unrelated." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 113, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:41628", "issue:42222", "issue:42994", "issue:43012", "issue:43097", "issue:43408", "issue:43441", "issue:43452", "issue:43874", "issue:43986", "issue:44261", "issue:44263", "issue:44484", "issue:44485", "issue:44829", "issue:45003", "issue:45071", "issue:45341" ], "soft_pairs": [ "issue:43986|issue:44484", "issue:43874|issue:44261", "issue:43441|issue:44829", "issue:43012|issue:43408", "issue:42222|issue:42994", "issue:44263|issue:44485", "issue:41628|issue:45071", "issue:45003|issue:45341", "issue:43097|issue:43452" ], "prepared_review_unit_hash": 
"b67b24270b4f37f949b00dda0fed47ed3cbc937a44553de01c498c8aa6991f75", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 9, "serialized_chars": 13032, "estimated_input_tokens": 3258, "estimated_eval_tokens": 6772 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The set is heterogeneous: the soft-similarity pairs are only topically related and do not look like the same underlying bug or change. I would not merge any of the proposed duplicates.", "confidence": 0.95, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": "issue:43452 is the clearest standalone bug report here, with a concrete failing from_pretrained code path and broad user impact.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43986", "right": "issue:44484", "accept": false, "reason": "Different bugs: one is a crash loading video models via AutoProcessor without torchvision; the other is about max_shard_size in save_pretrained." }, { "left": "issue:43874", "right": "issue:44261", "accept": false, "reason": "Both mention GLM, but the failures are unrelated: missing image-patch method vs a config.rms_norm_eps precision issue in MLA q_a_layernorm." }, { "left": "issue:43441", "right": "issue:44829", "accept": false, "reason": "Both involve flash attention, but they affect different code paths and symptoms: Ministral-3 inference failure vs degenerate training in sequence classification." }, { "left": "issue:43012", "right": "issue:43408", "accept": false, "reason": "These are different warnings with different causes: bf16 compilation precision warning vs sam3_video/sam3_tracker model-type mismatch." 
}, { "left": "issue:42222", "right": "issue:42994", "accept": false, "reason": "vitpose model breakage and quantized model saving failure are unrelated issues in different areas." }, { "left": "issue:44263", "right": "issue:44485", "accept": false, "reason": "Different GLM bugs: torch.split return values in a MoE indexer versus the RoPE implementation." }, { "left": "issue:41628", "right": "issue:45071", "accept": false, "reason": "Importing AutoImageProcessor and breaking PretrainedConfig type checking are separate regressions with different failure modes." }, { "left": "issue:45003", "right": "issue:45341", "accept": false, "reason": "modeling_utils sys.modules access and a testing_utils bug are different files, different behavior, and not the same defect." }, { "left": "issue:43097", "right": "issue:43452", "accept": false, "reason": "One is removal of tie_embeddings_and_encoder_decoder; the other is gguf_file breaking tokenizer/model loading. Different underlying problems." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 114, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:43064", "issue:43335", "issue:43408", "issue:43874", "issue:43931", "issue:43986", "issue:44038", "issue:44117", "issue:44263", "issue:44315", "issue:44368", "issue:44410", "issue:44829", "issue:44987", "issue:45071", "issue:45245", "issue:45341", "issue:45372" ], "soft_pairs": [ "issue:44117|issue:44987", "issue:43931|issue:44410", "issue:43986|issue:45341", "issue:43874|issue:44263", "issue:43408|issue:44368", "issue:43335|issue:45071", "issue:43931|issue:44038", "issue:43986|issue:45372", "issue:44829|issue:45245", "issue:43064|issue:44315" ], 
"prepared_review_unit_hash": "d12842344caa09825c2d43ca2e3b504fef1d5943b247f2a214062496755ef004", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13611, "estimated_input_tokens": 3403, "estimated_eval_tokens": 7062 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The soft-edge candidates span different subsystems and root causes, so none look like true duplicates or mergeable PR-equivalents. This cluster is better treated as a set of unrelated issues rather than one underlying bug/change.", "confidence": 0.96, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44117", "right": "issue:44987", "accept": false, "reason": "Both mention loading failures, but one is a tokenizer-mapping None bug and the other is a transformers>=5.1.0 model load failure for a specific repo; different code paths and failure modes." }, { "left": "issue:43931", "right": "issue:44410", "accept": false, "reason": "Both involve Qwen/VL-style model loading, but one is a weight-shape mismatch while the other is missing projections in qwen3next layer 0; not the same bug." }, { "left": "issue:43986", "right": "issue:45341", "accept": false, "reason": "43986 is a video/AutoProcessor torchvision dependency crash; 45341 is a small testing_utils bug. No substantive overlap in bug source or fix." }, { "left": "issue:43874", "right": "issue:44263", "accept": false, "reason": "GLM46V image-processor AttributeError and GlmMoeDsaIndexer torch.split return handling are unrelated components and defects." 
}, { "left": "issue:43408", "right": "issue:44368", "accept": false, "reason": "These are different warnings in different models: sam3 tracker/type mismatch versus Qwen3.5 tie_word_embeddings warning." }, { "left": "issue:43335", "right": "issue:45071", "accept": false, "reason": "SwitchTransformers sparse-layer creation and PretrainedConfig type checking are unrelated issues with no shared code path." }, { "left": "issue:43931", "right": "issue:44038", "accept": false, "reason": "Both are Qwen3-VL related, but one is a loading shape mismatch and the other is a transformers 5.0 bug with Qwen3-VL-Moe; distinct failures." }, { "left": "issue:43986", "right": "issue:45372", "accept": false, "reason": "Video-model AutoProcessor/torchvision crash and Gemma 4 processor import error come from different dependency paths and model families." }, { "left": "issue:44829", "right": "issue:45245", "accept": false, "reason": "Flash-attention-3 degenerate training and category-cardinality RuntimeError are completely different problems." }, { "left": "issue:43064", "right": "issue:44315", "accept": false, "reason": "Trainer/FSDP2/PEFT optimizer-state corruption and Liger Kernel not applied with model_init are separate training-stack issues, not one concrete bug." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 115, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:38617", "issue:43502", "issue:43531", "issue:43976", "issue:43986", "issue:43992", "issue:44062", "issue:44117", "issue:44261", "issue:44351", "issue:44410", "issue:44464", "issue:44485", "issue:44704", "issue:45071", "issue:45103", "issue:45341", "issue:45375" ], "soft_pairs": [ "issue:43976|issue:45071", "issue:44464|issue:45071", "issue:43531|issue:45341", "issue:44261|issue:44485", "issue:44704|issue:45103", "issue:44062|issue:44117", "issue:38617|issue:44351", "issue:43986|issue:44117", "issue:43502|issue:43992", "issue:44410|issue:45375" ], "prepared_review_unit_hash": "e8ce688b78164f09c7b4699f9cae4e985ace9778a36419937f8d8b85dd2f410c", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13330, "estimated_input_tokens": 3333, "estimated_eval_tokens": 6922 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These issues are mostly unrelated, with only superficial overlap in a few Transformers import/configuration topics. 
None of the soft pairs looks like the same underlying bug or a mergeable duplicate PR-equivalent change, so all soft edges should be rejected.", "confidence": 0.97, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43976", "right": "issue:45071", "accept": false, "reason": "Both mention release/version compatibility, but one is a Python support regression and the other is a PretrainedConfig type-checking break; different failure modes and code paths." }, { "left": "issue:44464", "right": "issue:45071", "accept": false, "reason": "Compiled-forward generation inconsistency is unrelated to config type checking; no shared bug mechanism or fix scope." }, { "left": "issue:43531", "right": "issue:45341", "accept": false, "reason": "Qwen3-MoE sliding-window behavior and a testing_utils bug are unrelated subsystems and symptoms." }, { "left": "issue:44261", "right": "issue:44485", "accept": false, "reason": "Both concern model internals, but one is a missing RMS norm epsilon config field and the other is a RoPE implementation issue; not the same concrete bug." }, { "left": "issue:44704", "right": "issue:45103", "accept": false, "reason": "AutoProcessor kwargs forwarding and auto_docstring annotation handling are separate utility-layer problems with different code paths." }, { "left": "issue:44062", "right": "issue:44117", "accept": false, "reason": "Both involve tokenizer/pretrained loading, but one is an AddedToken construction error and the other is a mapping-name None handling bug; not the same defect." }, { "left": "issue:38617", "right": "issue:44351", "accept": false, "reason": "Both are import errors, but for different missing symbols from different modules; no evidence they stem from one shared export regression." 
}, { "left": "issue:43986", "right": "issue:44117", "accept": false, "reason": "AutoProcessor/video-model crash without torchvision is unrelated to TOKENIZER_MAPPING_NAMES returning None." }, { "left": "issue:43502", "right": "issue:43992", "accept": false, "reason": "local_files_only network leakage and missing embed_tokens.weight on UMT5Encoder.from_pretrained are distinct loading bugs." }, { "left": "issue:44410", "right": "issue:45375", "accept": false, "reason": "Missing qwen3next projections and a dropped config field in Qwen3_5MoeVisionConfig are different model/config issues." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 116, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:38617", "issue:43012", "issue:43066", "issue:43097", "issue:43299", "issue:43582", "issue:43824", "issue:43873", "issue:43906", "issue:43957", "issue:43986", "issue:43992", "issue:44038", "issue:44351", "issue:44410", "issue:44484", "issue:44829", "issue:45341" ], "soft_pairs": [ "issue:38617|issue:43097", "issue:44038|issue:44410", "issue:43582|issue:45341", "issue:43992|issue:44484", "issue:43066|issue:43906", "issue:43097|issue:43873", "issue:43824|issue:43986", "issue:43957|issue:44351", "issue:43012|issue:44829", "issue:43299|issue:44410" ], "prepared_review_unit_hash": "b7a4dea39b28b819f0954d8a9e1d5f34bd95b279b15c884ba9d438ad4664d1bf", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13340, "estimated_input_tokens": 
3335, "estimated_eval_tokens": 6926 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The cluster is a loose collection of unrelated Transformers issues, mostly v5 breakages, import errors, and model-specific loading/training bugs. None of the soft pairs look like the same underlying defect, so I would not merge any of them as duplicates.", "confidence": 0.91, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": "issue:43097 is the broadest and most central v5 regression report in the set, but it should be treated as a representative issue only, not a duplicate target for the rest of the cluster.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:38617", "right": "issue:43097", "accept": false, "reason": "Both mention v5-related breakage, but one is an ImportError for a missing symbol while the other is about a removed API; different failure modes and code paths." }, { "left": "issue:44038", "right": "issue:44410", "accept": false, "reason": "These concern different Qwen3/VL problems: one is a generic Transformers 5.0 bug report, the other is a specific missing projection layer in qwen3next." }, { "left": "issue:43582", "right": "issue:45341", "accept": false, "reason": "Apple Silicon TypeError in caching allocator warmup is unrelated to a testing_utils bug." }, { "left": "issue:43992", "right": "issue:44484", "accept": false, "reason": "UMT5 weight-loading omission is a model-specific loading bug; max_shard_size default behavior in save_pretrained() is a separate API/design issue." }, { "left": "issue:43066", "right": "issue:43906", "accept": false, "reason": "Wrong tokenizer decoder type and an isolated reproduction of another issue are not the same defect; no shared code-path evidence." 
}, { "left": "issue:43097", "right": "issue:43873", "accept": false, "reason": "Removed tie_embeddings API and quantization offloading misbehavior are different regressions affecting different subsystems." }, { "left": "issue:43824", "right": "issue:43986", "accept": false, "reason": "Missing Qwen2_5_VL export and AutoProcessor crash without torchvision are distinct import/dependency issues." }, { "left": "issue:43957", "right": "issue:44351", "accept": false, "reason": "Meta-device model loading failures and missing HybridCache import are separate v5 regressions with different symptoms." }, { "left": "issue:43012", "right": "issue:44829", "accept": false, "reason": "A bfloat16 compilation warning is not the same as degenerate training under flash_attention_3; they involve different execution paths." }, { "left": "issue:43299", "right": "issue:44410", "accept": false, "reason": "Qwen3VL-Moe loading breakage is not the same as qwen3next missing attn projections; both are model-family-specific but unrelated." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 117, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:42886", "issue:42907", "issue:43012", "issue:43097", "issue:43352", "issue:43381", "issue:43525", "issue:43550", "issue:43644", "issue:43825", "issue:43901", "issue:43937", "issue:44117", "issue:44368", "issue:44380", "issue:44484", "issue:44655", "issue:44792" ], "soft_pairs": [ "issue:43352|issue:44380", "issue:43644|issue:44484", "issue:43825|issue:44655", "issue:43097|issue:43901", "issue:43012|issue:44368", "issue:42886|issue:43381", "issue:43937|issue:44792", "issue:42907|issue:44655", "issue:43525|issue:44117", "issue:43012|issue:43550" ], "prepared_review_unit_hash": "de1d83fcc8f22bb684dff4fd45a6d4265504da8d655ed3c667f94aa3202bfaa8", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13351, "estimated_input_tokens": 3338, "estimated_eval_tokens": 6932 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "All proposed soft pairs are only loosely similar at a topic level; they describe different bugs, configs, or subsystems and should not be merged as duplicates.", "confidence": 0.95, "canonical_issue_reason": "No single canonical issue stands out: the items span unrelated regressions and feature gaps (offline cache loading, saving dequantized models, bfloat16 warnings, config-field removal, Flash Attention support, 
pipeline docs, etc.).", "canonical_pr_reason": null, "best_issue_reason": "No issue is a strong global representative because the cluster is heterogeneous and none of the soft pairs are true duplicates.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43352", "right": "issue:44380", "accept": false, "reason": "Different problems: NemotronHForCausalLM lacking Flash Attention 2.0 support vs GPT2 attention scaling being ignored under SDPA/FlashAttention. Same broad area, but not the same bug or fix." }, { "left": "issue:43644", "right": "issue:44484", "accept": false, "reason": "One is about junk-filled non-persistent buffers in v5; the other asks why save_pretrained() uses 50GB max_shard_size. Both touch saving, but they are unrelated issues." }, { "left": "issue:43825", "right": "issue:44655", "accept": false, "reason": "Pipeline task-support error messaging is unrelated to saving Pipeline objects with save_pretrained(). Different code paths and different user-facing failures." }, { "left": "issue:43097", "right": "issue:43901", "accept": false, "reason": "Removed tie_embeddings_and_encoder_decoder in v5 is a model/config API change; return_all_scores documentation mismatch is a pipeline doc/behavior issue. Not the same underlying bug." }, { "left": "issue:43012", "right": "issue:44368", "accept": false, "reason": "A bfloat16 compilation warning from PyTorch is unrelated to a LoRA fine-tuning warning about tie_word_embeddings=False. Similar warning shape, different cause and fix." }, { "left": "issue:42886", "right": "issue:43381", "accept": false, "reason": "Tokenizer offline cache loading and gradient checkpointing in eval mode are unrelated behaviors in different subsystems." }, { "left": "issue:43937", "right": "issue:44792", "accept": false, "reason": "Invalid GenerationConfig for GLM-5 is not the same as a failing janus image-generation test; these are different models and failure modes." 
}, { "left": "issue:42907", "right": "issue:44655", "accept": false, "reason": "Saving dequantized Ministrals/Devstrals and saving Pipeline objects are both about serialization, but the concrete failure and code path differ." }, { "left": "issue:43525", "right": "issue:44117", "accept": false, "reason": "Llama4Config missing pad_token_id is a model config attribute bug; TOKENIZER_MAPPING_NAMES returning None is a tokenizer mapping assumption bug. Not duplicates." }, { "left": "issue:43012", "right": "issue:43550", "accept": false, "reason": "A bfloat16 precision warning is not the same as Bamba-9B-v2 failing under torch.compile with SDPA. Same general compute stack, different concrete issue." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 118, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36683", "issue:38617", "issue:43012", "issue:43064", "issue:43208", "issue:43352", "issue:43502", "issue:43526", "issue:43582", "issue:43618", "issue:43756", "issue:43761", "issue:43976", "issue:44380", "issue:44483", "issue:44485", "issue:44514", "issue:44683" ], "soft_pairs": [ "issue:43352|issue:43976", "issue:43208|issue:43526", "issue:43064|issue:44380", "issue:43618|issue:44380", "issue:44483|issue:44514", "issue:43618|issue:43761", "issue:36683|issue:43582", "issue:43012|issue:44683", "issue:43756|issue:44485", "issue:38617|issue:43502" ], "prepared_review_unit_hash": "d35359d46746e6845ca68e2e2b42008b624545a4a3f93f06b1a2fdf5fb34d625", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { 
"node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13411, "estimated_input_tokens": 3353, "estimated_eval_tokens": 6962 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The items are mostly unrelated standalone bugs; none of the suggested soft pairs looks like the same underlying issue, and there are no PRs in the set.", "confidence": 0.93, "canonical_issue_reason": "Issue 36683 is the clearest concrete bug report and has the highest discussion activity, making it the strongest single representative among these unrelated issues.", "canonical_pr_reason": null, "best_issue_reason": "Issue 36683 is the best global issue candidate because it is a specific, reproducible runtime error with strong community engagement.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43352", "right": "issue:43976", "accept": false, "reason": "Different problems: one is a model-specific Flash Attention support error, the other is a Python version compatibility issue." }, { "left": "issue:43208", "right": "issue:43526", "accept": false, "reason": "Different subsystems and symptoms: xLSTM training bugs vs a BeitImageProcessorFast label reduction bug." }, { "left": "issue:43064", "right": "issue:44380", "accept": false, "reason": "One concerns distributed optimizer state handling under FSDP2/PEFT, the other is attention scaling being ignored in GPT2 backends." }, { "left": "issue:43618", "right": "issue:44380", "accept": false, "reason": "CLIP attentions assignment and GPT2 attention scaling are distinct code paths and failure modes." }, { "left": "issue:44483", "right": "issue:44514", "accept": false, "reason": "Both touch chat/completions-related flows, but one is an API request validation problem and the other is a batched processor crash." 
}, { "left": "issue:43618", "right": "issue:43761", "accept": false, "reason": "Both are CLIP regressions, but one is missing attentions and the other is missing hidden states; they are separate bugs." }, { "left": "issue:36683", "right": "issue:43582", "accept": false, "reason": "Gemma3Config missing vocab_size is unrelated to an Apple Silicon TypeError in caching_allocator_warmup." }, { "left": "issue:43012", "right": "issue:44683", "accept": false, "reason": "One is a warning about float32 precision during bfloat16 compilation; the other is a hard failure compiling flex_attention on newer torch." }, { "left": "issue:43756", "right": "issue:44485", "accept": false, "reason": "Both mention RoPE, but they concern different models and different implementation defects." }, { "left": "issue:38617", "right": "issue:43502", "accept": false, "reason": "An import failure for layer_type_validation is unrelated to unwanted network requests under local_files_only=True." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 119, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36683", "issue:42994", "issue:43299", "issue:43352", "issue:43475", "issue:43867", "issue:43901", "issue:43937", "issue:44038", "issue:44220", "issue:44230", "issue:44263", "issue:44483", "issue:44655", "issue:45216", "issue:45290", "issue:45381", "issue:45440" ], "soft_pairs": [ "issue:44483|issue:45290", "issue:43299|issue:43867", "issue:44483|issue:45381", "issue:44038|issue:44230", "issue:43901|issue:44655", "issue:42994|issue:44655", "issue:43867|issue:45216", "issue:36683|issue:43475", "issue:43352|issue:43937", "issue:44220|issue:44655", 
"issue:44263|issue:45440" ], "prepared_review_unit_hash": "62455df12edd22877e4612ee396ed98a3d513755f32ad6df8550c233384b0f39", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 11, "serialized_chars": 13491, "estimated_input_tokens": 3373, "estimated_eval_tokens": 7002 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "No duplicate cluster here: the items span separate bugs, regressions, docs mismatches, and model-specific loading/saving issues. None of the soft pairs looks like the same underlying defect.", "confidence": 0.95, "canonical_issue_reason": "No single canonical issue stands out because the items are not duplicates; they cover unrelated subsystems and failure modes (loading, saving, chat templates, vision/video, fp8, docs).", "canonical_pr_reason": null, "best_issue_reason": "issue:45216 is the strongest standalone bug report: it\u2019s a concrete regression with a clear `save_pretrained` symptom, a specific model family, and a reproducible checkpoint corruption concern.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44483", "right": "issue:45290", "accept": false, "reason": "Different surfaces and bugs: `/v1/chat/completions` request acceptance in v5.3 vs `apply_chat_template(tokenize=True)` crashing on tool-call assistant messages." }, { "left": "issue:43299", "right": "issue:43867", "accept": false, "reason": "Both are loading-related, but one is Qwen3VL-Moe model loading regression and the other is a generic state_dict-sorting load error; no clear same code path." 
}, { "left": "issue:44483", "right": "issue:45381", "accept": false, "reason": "Unrelated failures: chat/completions request validation versus wrong `vision_position_ids` for Qwen2.5-VL video input." }, { "left": "issue:44038", "right": "issue:44230", "accept": false, "reason": "Same broad model family, but different topics: a Qwen3-VL-Moe bug report versus fp8 inference support request." }, { "left": "issue:43901", "right": "issue:44655", "accept": false, "reason": "Docs mismatch for `return_all_scores` is not the same as failing to save `Pipeline` objects with `save_pretrained`." }, { "left": "issue:42994", "right": "issue:44655", "accept": false, "reason": "Quantized model saving failure and pipeline-object saving failure are different artifacts and likely different code paths." }, { "left": "issue:43867", "right": "issue:45216", "accept": false, "reason": "Both involve model persistence, but one is a load error from sorted state_dict handling and the other is a Qwen3.5 `save_pretrained` regression; not the same bug." }, { "left": "issue:36683", "right": "issue:43475", "accept": false, "reason": "Different model families and missing attributes: `Gemma3Config.vocab_size` versus `Sam3VisionEncoderOutput.fpn_position_embeddings`." }, { "left": "issue:43352", "right": "issue:43937", "accept": false, "reason": "Separate model-specific issues: Flash Attention 2.0 unsupported for NemotronH vs invalid `GenerationConfig` for GLM-5." }, { "left": "issue:44220", "right": "issue:44655", "accept": false, "reason": "Audio feature extraction bug and pipeline save failure are unrelated code paths." }, { "left": "issue:44263", "right": "issue:45440", "accept": false, "reason": "`torch.split()` return handling in `GlmMoeDsaIndexer` is a different issue from `DeepseekV3MoE` diverging from the remote implementation." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 120, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36683", "issue:43010", "issue:43064", "issue:43066", "issue:43525", "issue:43618", "issue:43976", "issue:43992", "issue:44077", "issue:44188", "issue:44242", "issue:44380", "issue:44492", "issue:44938", "issue:45216", "issue:45278", "issue:45310", "issue:45381" ], "soft_pairs": [ "issue:43066|issue:45381", "issue:43064|issue:44492", "issue:43992|issue:45310", "issue:44077|issue:44380", "issue:36683|issue:44077", "issue:43976|issue:44938", "issue:36683|issue:43525", "issue:44188|issue:44380", "issue:43618|issue:45216", "issue:36683|issue:44242", "issue:43010|issue:45278" ], "prepared_review_unit_hash": "14515bba002171ed2a00e24be88e46d43933010a0a7268c83109a9b95c2da5dc", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 11, "serialized_chars": 13580, "estimated_input_tokens": 3395, "estimated_eval_tokens": 7046 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is heterogeneous: the soft pairs mostly share only broad Transformers vocabulary (configs, loading, Python, attention, from_pretrained) but point to different models, code paths, and failure modes. 
I do not see any pair that should be merged as the same underlying bug/change.", "confidence": 0.91, "canonical_issue_reason": "No strong canonical duplicate emerges; the issues span unrelated regressions across model configs, loading, training, attention, and Python compatibility. If one issue must serve as the cluster representative, #45278 is the broadest regression-style report, but it does not subsume the others.", "canonical_pr_reason": null, "best_issue_reason": "#45278 is the most general and cluster-like item: it describes a broad upgrade regression with multiple import failures, so it is the best representative issue only in a loose sense. It still should not be treated as a true duplicate umbrella for the other issues.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43066", "right": "issue:45381", "accept": false, "reason": "Different failures: tokenizer decoder type in v5 vs Qwen2.5-VL video position IDs. Same library, but not the same bug or fix." }, { "left": "issue:43064", "right": "issue:44492", "accept": false, "reason": "Optimizer/FSDP state corruption is unrelated to a cache-strategy typo. No shared code-path problem." }, { "left": "issue:43992", "right": "issue:45310", "accept": false, "reason": "Both involve from_pretrained loading, but they concern different models and different missing weights/error paths." }, { "left": "issue:44077", "right": "issue:44380", "accept": false, "reason": "PatchTSMixer post_init validation and GPT2 attention scaling under SDPA/FlashAttention are separate issues in different subsystems." }, { "left": "issue:36683", "right": "issue:44077", "accept": false, "reason": "Gemma3Config missing vocab_size is a config-attribute bug; PatchTSMixer optional post_init is a model-construction rule change. Not the same underlying problem." 
}, { "left": "issue:43976", "right": "issue:44938", "accept": false, "reason": "Both mention Python-version compatibility, but one is a Transformers 5.1.0 support issue and the other is a Python 3.14 load failure; too broad and likely different causes." }, { "left": "issue:36683", "right": "issue:43525", "accept": false, "reason": "Missing vocab_size in Gemma3Config and missing pad_token_id in Llama4Config are analogous but distinct model-config omissions, not one concrete bug." }, { "left": "issue:44188", "right": "issue:44380", "accept": false, "reason": "torch.compile branching divergence in attention kernels is a different failure mode from GPT2 attention scaling being ignored in backend paths." }, { "left": "issue:43618", "right": "issue:45216", "accept": false, "reason": "CLIPOutput attentions assignment and Qwen3.5 save_pretrained checkpoint corruption are unrelated changes/failures." }, { "left": "issue:36683", "right": "issue:44242", "accept": false, "reason": "Gemma3 config attribute access and MoE load-balancing loss gating are unrelated code paths." }, { "left": "issue:43010", "right": "issue:45278", "accept": false, "reason": "A no_grad decoration request for cache updates is unrelated to broad import errors after upgrading from 4.57.0 to 5.5.0." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 121, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36683", "issue:41669", "issue:43010", "issue:43064", "issue:43352", "issue:43441", "issue:43867", "issue:43931", "issue:43976", "issue:44246", "issue:44360", "issue:44380", "issue:44938", "issue:45071", "issue:45357", "issue:45375", "issue:45381", "issue:45440" ], "soft_pairs": [ "issue:41669|issue:44246", "issue:43441|issue:44380", "issue:36683|issue:43352", "issue:43352|issue:43931", "issue:43867|issue:45375", "issue:36683|issue:43064", "issue:44938|issue:45071", "issue:43867|issue:45357", "issue:44360|issue:45440", "issue:43010|issue:43976", "issue:44938|issue:45381" ], "prepared_review_unit_hash": "3a6d0a5086d58c5d53b12c9c6e1ce1c6e5b715c3c792f198569c2e0289c50ea0", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 11, "serialized_chars": 13612, "estimated_input_tokens": 3403, "estimated_eval_tokens": 7062 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The set is thematically broad but not duplicate-tight; most pairs share only a subsystem or symptom class. 
I found no pair that clearly looks like the same underlying bug/change, so all soft edges are rejected.", "confidence": 0.36, "canonical_issue_reason": "issue:41669 is the most concrete and actionable item in the set: it identifies a specific import-time regression cause and proposed fix, whereas related import complaints are much more generic.", "canonical_pr_reason": null, "best_issue_reason": "issue:41669 best represents a dedup/triage anchor because it names a clear root cause and measurable impact, making it the strongest candidate to subsume nearby import-performance reports.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:41669", "right": "issue:44246", "accept": false, "reason": "Both mention slow imports, but one is a specific import-* cause while the other is a vague intermittent import complaint; not clearly the same bug." }, { "left": "issue:43441", "right": "issue:44380", "accept": false, "reason": "Both involve FlashAttention/backends, but they describe different models and different failure modes." }, { "left": "issue:36683", "right": "issue:43352", "accept": false, "reason": "Gemma3 config missing vocab_size and Nemotron FlashAttention support are unrelated failures." }, { "left": "issue:43352", "right": "issue:43931", "accept": false, "reason": "FlashAttention support error vs. Qwen3-VL weight-shape mismatch are different code paths and symptoms." }, { "left": "issue:43867", "right": "issue:45375", "accept": false, "reason": "Model loading error with sorted state_dict is not the same as a missing config field being dropped by strict serialization." }, { "left": "issue:36683", "right": "issue:43064", "accept": false, "reason": "Config attribute error and FSDP2/PEFT optimizer-state corruption are unrelated." }, { "left": "issue:44938", "right": "issue:45071", "accept": false, "reason": "Python 3.14 import/load failure and PretrainedConfig type-checking regression are distinct versioning/type issues." 
}, { "left": "issue:43867", "right": "issue:45357", "accept": false, "reason": "Both touch model save/load behavior, but one is about sorted state_dict loading and the other about incorrect save_pretrained key mapping." }, { "left": "issue:44360", "right": "issue:45440", "accept": false, "reason": "DSA ReLU discussion and DeepseekV3MoE divergence are different model/implementation problems." }, { "left": "issue:43010", "right": "issue:43976", "accept": false, "reason": "torch.no_grad decoration for update methods is unrelated to the Python version compatibility issue." }, { "left": "issue:44938", "right": "issue:45381", "accept": false, "reason": "Python 3.14 loading failure and Qwen2.5-VL video position-id bug are unrelated." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 122, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:28282", "issue:33453", "issue:36296", "issue:38617", "issue:42890", "issue:43575", "issue:43618", "issue:43644", "issue:43867", "issue:43992", "issue:44077", "issue:44112", "issue:44484", "issue:44857", "issue:44938", "issue:44964", "issue:44977" ], "soft_pairs": [ "issue:43618|issue:44077", "issue:43644|issue:44938", "issue:38617|issue:43992", "issue:43867|issue:44484", "issue:36296|issue:43575", "issue:42890|issue:44964", "issue:42890|issue:44112", "issue:43618|issue:44857", "issue:28282|issue:33453", "issue:43618|issue:44977" ], "prepared_review_unit_hash": "48f507129843ced7b78353a68725225ce657bebdf67a64a6a048c67cd6733389", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 
}, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 10, "serialized_chars": 12614, "estimated_input_tokens": 3154, "estimated_eval_tokens": 6564 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The cluster is heterogeneous: the soft pairs share only broad themes like loading, CI, or tensor parallelism, but not the same concrete bug or change. I would reject all soft edges and treat this as a non-duplicate mix of unrelated issues.", "confidence": 0.94, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43618", "right": "issue:44077", "accept": false, "reason": "Different model-specific bugs: CLIPOutput attention handling vs PatchTSMixer post_init policy. No shared code-path failure." }, { "left": "issue:43644", "right": "issue:44938", "accept": false, "reason": "One is about corrupt non-persistent buffers in Transformers 5.0.0; the other is a Python 3.14 import/load failure. Too different to be the same bug." }, { "left": "issue:38617", "right": "issue:43992", "accept": false, "reason": "An ImportError for a missing symbol in configuration_utils is unrelated to UMT5Encoder omitting embed_tokens.weight during from_pretrained." }, { "left": "issue:43867", "right": "issue:44484", "accept": false, "reason": "Model loading failure due to sorted state_dict is not the same as a question about max_shard_size in save_pretrained." }, { "left": "issue:36296", "right": "issue:43575", "accept": false, "reason": "Both mention tensor parallelism, but one is a training bug and the other is OOM while loading a specific model. Not the same underlying defect." }, { "left": "issue:42890", "right": "issue:44964", "accept": false, "reason": "A flaky SamHQ integration test missing set_seed is unrelated to a latest-transformers model loading failure for Phi-4 multimodal." 
}, { "left": "issue:42890", "right": "issue:44112", "accept": false, "reason": "Both are CI/test issues, but they affect different tests and failure modes; no evidence of one shared bug." }, { "left": "issue:43618", "right": "issue:44857", "accept": false, "reason": "CLIPOutput attentions assignment and LwDetrImageLoss AMP/CUDA crashes are different model/runtime problems." }, { "left": "issue:28282", "right": "issue:33453", "accept": false, "reason": "PyTorch-not-found ImportError is unrelated to a tokenizer-loading regression." }, { "left": "issue:43618", "right": "issue:44977", "accept": false, "reason": "CLIP attention-output behavior and Qwen3.5 flash-attention generation issues are distinct model-specific bugs." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 123, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:30990", "issue:31795", "issue:42548", "issue:42890", "issue:43010", "issue:43065", "issue:43232", "issue:43502", "issue:43723", "issue:44062", "issue:44075", "issue:44222", "issue:44351", "issue:44387", "issue:44556", "issue:45125", "issue:45341", "issue:45375" ], "soft_pairs": [ "issue:43502|issue:44062", "issue:43010|issue:44075", "issue:42890|issue:45341", "issue:43723|issue:44556", "issue:44387|issue:44556", "issue:44222|issue:45125", "issue:42548|issue:44351", "issue:43010|issue:43232", "issue:43010|issue:43065", "issue:45125|issue:45375", "issue:30990|issue:31795" ], "prepared_review_unit_hash": "464d5438e083004b5aee06994973fde99b35a0f756a1d956658105fd5e9a0491", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 
169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 11, "serialized_chars": 13524, "estimated_input_tokens": 3381, "estimated_eval_tokens": 7018 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "No soft-edge pair looks like the same underlying bug/change; all should remain separate. The clearest standalone issue in this set is the `local_files_only=True` network-leak report.", "confidence": 0.91, "canonical_issue_reason": "issue:43502 is the most concrete, reproducible runtime bug here and has clear user impact; it is the best single issue representative among otherwise unrelated reports.", "canonical_pr_reason": null, "best_issue_reason": "issue:43502 stands out as a precise, actionable bug report with a direct behavioral failure (`local_files_only=True` still causing API calls).", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43502", "right": "issue:44062", "accept": false, "reason": "Both involve loading/import problems, but one is a network/offline leak and the other is an `AddedToken` constructor error; different code paths and fixes." }, { "left": "issue:43010", "right": "issue:44075", "accept": false, "reason": "`@torch.no_grad` on cache/layer updates is unrelated to SGD optimizer arguments; different subsystems and failure modes." }, { "left": "issue:42890", "right": "issue:45341", "accept": false, "reason": "Both mention testing, but one is about missing `set_seed()` in a specific integration test and the other is a bug in `testing_utils.py`; not the same bug." }, { "left": "issue:43723", "right": "issue:44556", "accept": false, "reason": "Both are loading-related, but one is tokenizer loading in v5 and the other is checkpoint reload compatibility after version upgrade; too broad and not the same concrete issue." 
}, { "left": "issue:44387", "right": "issue:44556", "accept": false, "reason": "Int4 quantization OOM from reserved memory is a distinct performance/memory bug from checkpoint reload failure after upgrade." }, { "left": "issue:44222", "right": "issue:45125", "accept": false, "reason": "FP8 `save_pretrained` for MoE and missing `_tp_plan` for tensor parallelism are separate model/config issues." }, { "left": "issue:42548", "right": "issue:44351", "accept": false, "reason": "Both are import-name errors, but they concern different symbols (`PreTrainedModel` vs `HybridCache`) and likely different missing re-exports." }, { "left": "issue:43010", "right": "issue:43232", "accept": false, "reason": "One is about `update(...)` needing `no_grad`; the other is a generation kwargs update bug after `sync_gpus`\u2014different code paths." }, { "left": "issue:43010", "right": "issue:43065", "accept": false, "reason": "A `no_grad` decoration request is unrelated to a dummy `nn.Conv2d` in `Sam3PixelDecoder`." }, { "left": "issue:45125", "right": "issue:45375", "accept": false, "reason": "Missing `_tp_plan` for tensor parallelism and missing `deepstack_visual_indexes` in a vision config are different config-schema bugs." }, { "left": "issue:30990", "right": "issue:31795", "accept": false, "reason": "Sentence Transformers loading hang and documentation confusion about `input_ids`/`past_key_values` are unrelated issues." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 124, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:36296", "issue:38175", "issue:42890", "issue:43010", "issue:43299", "issue:43502", "issue:44075", "issue:44315", "issue:44355", "issue:44360", "issue:44368", "issue:44373", "issue:44485", "issue:44556", "issue:44655", "issue:44749", "issue:44961", "issue:45071" ], "soft_pairs": [ "issue:44355|issue:44749", "issue:43502|issue:44485", "issue:44315|issue:44360", "issue:43010|issue:44368", "issue:36296|issue:44075", "issue:42890|issue:44373", "issue:38175|issue:43502", "issue:43502|issue:44961", "issue:43299|issue:44556", "issue:44355|issue:44655", "issue:44355|issue:45071" ], "prepared_review_unit_hash": "5309623403a2c4b9442e51c1a8396b2a4e2769c953e601c50589a83968883013", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 11, "serialized_chars": 13524, "estimated_input_tokens": 3381, "estimated_eval_tokens": 7018 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The cluster is highly heterogeneous: model-loading bugs, config/type-checking issues, flaky tests, docs, and unrelated warnings. 
None of the soft pairs appear to be the same underlying bug or change, so no duplicate merges are warranted.", "confidence": 0.93, "canonical_issue_reason": "No strong canonical duplicate exists in this set; if one issue must be chosen, #43502 is the most concrete and broadly user-impacting bug report.", "canonical_pr_reason": null, "best_issue_reason": "#43502 is the best representative issue because it describes a clear, reproducible runtime regression with direct user impact, while the others are narrower or more incidental.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44355", "right": "issue:44749", "accept": false, "reason": "Different failures: compiled Python file errors vs slowdown after upgrading and filtering data. Not the same code-path bug." }, { "left": "issue:43502", "right": "issue:44485", "accept": false, "reason": "local_files_only API requests is unrelated to GLM-5 RoPE implementation." }, { "left": "issue:44315", "right": "issue:44360", "accept": false, "reason": "Model_init/Liger kernel application bug and DSA indexer ReLU discussion are unrelated." }, { "left": "issue:43010", "right": "issue:44368", "accept": false, "reason": "torch.no_grad on cache/layer update methods is unrelated to tie_word_embeddings warnings in LoRA fine-tuning." }, { "left": "issue:36296", "right": "issue:44075", "accept": false, "reason": "Tensor parallel training bug and SGD argument handling are different problems in different code paths." }, { "left": "issue:42890", "right": "issue:44373", "accept": false, "reason": "Flaky integration tests due to missing set_seed and wrong position_ids docstring are unrelated." }, { "left": "issue:38175", "right": "issue:43502", "accept": false, "reason": "Unexpected zero probabilities in SigLIP2 and unwanted API requests under local_files_only are distinct bugs." 
}, { "left": "issue:43502", "right": "issue:44961", "accept": false, "reason": "The racoon issue has no clear overlap with local_files_only API requests." }, { "left": "issue:43299", "right": "issue:44556", "accept": false, "reason": "Both concern version-related loading, but they affect different models and failure modes; not the same concrete bug." }, { "left": "issue:44355", "right": "issue:44655", "accept": false, "reason": "Compiled Python file errors and save_pretrained pipeline object failures are unrelated." }, { "left": "issue:44355", "right": "issue:45071", "accept": false, "reason": "Running compiled Python files and PretrainedConfig type checking are separate issues with different symptoms and fixes." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 125, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:33357", "issue:33453", "issue:34689", "issue:36296", "issue:38617", "issue:42831", "issue:42890", "issue:43232", "issue:43576", "issue:43673", "issue:43824", "issue:43827", "issue:43874", "issue:44222", "issue:44230", "issue:44492", "issue:44961", "issue:45071" ], "soft_pairs": [ "issue:42831|issue:43874", "issue:42890|issue:43232", "issue:44222|issue:44230", "issue:33453|issue:34689", "issue:33453|issue:36296", "issue:43827|issue:44961", "issue:43576|issue:43824", "issue:43576|issue:45071", "issue:43673|issue:44492", "issue:38617|issue:43576", "issue:33357|issue:33453" ], "prepared_review_unit_hash": "c64065710c53ef32aae0f322482e62e211d823df84fc42fb5e254537be700120", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, 
"estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 11, "serialized_chars": 13445, "estimated_input_tokens": 3362, "estimated_eval_tokens": 6980 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is heterogeneous: it mixes unrelated regressions across model loading, generation, FP8, docs, tests, and CLI/env tooling. The soft-similarity links are too broad to treat as duplicates.", "confidence": 0.97, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:42831", "right": "issue:43874", "accept": false, "reason": "Different bugs: FP8 accuracy issue vs missing image-processor method causing multimodal token counting failure." }, { "left": "issue:42890", "right": "issue:43232", "accept": false, "reason": "One is a flaky integration test missing set_seed; the other is a generation kwargs bug after sync_gpus. Different code paths and symptoms." }, { "left": "issue:44222", "right": "issue:44230", "accept": false, "reason": "Both mention FP8/MoE, but one is save_pretrained behavior and the other is inference support for specific Qwen models; not the same concrete defect." }, { "left": "issue:33453", "right": "issue:34689", "accept": false, "reason": "Both are loading regressions, but one is tokenizer loading and the other is full model loading for a specific vision instruct model. Too broad to merge." }, { "left": "issue:33453", "right": "issue:36296", "accept": false, "reason": "Tokenizer loading regression and tensor-parallel training bug are unrelated." }, { "left": "issue:43827", "right": "issue:44961", "accept": false, "reason": "Docs mentioning pipeline() after v5 removals is unrelated to an issue titled 'racoon'." 
}, { "left": "issue:43576", "right": "issue:43824", "accept": false, "reason": "Both are v5-era breakages, but one is the env command and the other is an import error for a specific model class; different fixes." }, { "left": "issue:43576", "right": "issue:45071", "accept": false, "reason": "v5 env command failure and PretrainedConfig type-checking breakage are separate regressions with different surfaces." }, { "left": "issue:43673", "right": "issue:44492", "accept": false, "reason": "Generation cache missing during chunked_prefill is unrelated to a typo in cache strategy text/naming." }, { "left": "issue:38617", "right": "issue:43576", "accept": false, "reason": "ImportError for layer_type_validation is a specific API break, not the same as the broken v5 env command." }, { "left": "issue:33357", "right": "issue:33453", "accept": false, "reason": "MacOS bus error loading a community CLIP model is unrelated to a tokenizer-loading regression." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 126, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:31795", "issue:36296", "issue:38617", "issue:41628", "issue:42548", "issue:43012", "issue:43526", "issue:43576", "issue:43901", "issue:44261", "issue:44273", "issue:44351", "issue:44355", "issue:44485", "issue:44908", "issue:44938", "issue:44961", "issue:45003" ], "soft_pairs": [ "issue:41628|issue:42548", "issue:31795|issue:38617", "issue:43576|issue:44351", "issue:44908|issue:44961", "issue:43576|issue:44938", "issue:44485|issue:44961", "issue:44273|issue:45003", "issue:36296|issue:43901", "issue:36296|issue:43012", "issue:36296|issue:43526", "issue:41628|issue:44355", 
"issue:44261|issue:44961" ], "prepared_review_unit_hash": "fc0473b9c0c80b0157fda6b16209731ba478c6b0bca98155c7daa59d714289bf", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 12, "serialized_chars": 13671, "estimated_input_tokens": 3418, "estimated_eval_tokens": 7092 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is not a duplicate set; it contains many unrelated issues across docs, imports, training/runtime behavior, and different model/component bugs. All soft pairs should be rejected.", "confidence": 0.98, "canonical_issue_reason": "No single canonical issue stands out because the issues cover unrelated problems and components rather than one underlying bug.", "canonical_pr_reason": null, "best_issue_reason": "No strong best issue for clustering; none of the items clearly represents the whole set, since they span distinct failure modes.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:41628", "right": "issue:42548", "accept": false, "reason": "Different import failures for different symbols (`AutoImageProcessor` vs `PreTrainedModel`); not the same underlying bug." }, { "left": "issue:31795", "right": "issue:38617", "accept": false, "reason": "One is a documentation clarity issue, the other is an import error for `layer_type_validation`; unrelated." }, { "left": "issue:43576", "right": "issue:44351", "accept": false, "reason": "`transformers env` CLI breakage is unrelated to the `HybridCache` import failure." }, { "left": "issue:44908", "right": "issue:44961", "accept": false, "reason": "An LR scheduler kwargs bug and an unrelated issue titled `racoon`; no evidence of a shared code-path bug." 
}, { "left": "issue:43576", "right": "issue:44938", "accept": false, "reason": "CLI/env command breakage in v5 is unrelated to Python 3.14 loading failure." }, { "left": "issue:44485", "right": "issue:44961", "accept": false, "reason": "GLM-5 RoPE implementation discussion is unrelated to the `racoon` issue." }, { "left": "issue:44273", "right": "issue:45003", "accept": false, "reason": "Lazy loading behavior and unsafe `sys.modules[]` access are different bugs in different code paths." }, { "left": "issue:36296", "right": "issue:43901", "accept": false, "reason": "Tensor parallel training bug is unrelated to docs still mentioning `return_all_scores`." }, { "left": "issue:36296", "right": "issue:43012", "accept": false, "reason": "Tensor parallel training bug and bfloat16 compilation warning are distinct runtime concerns." }, { "left": "issue:36296", "right": "issue:43526", "accept": false, "reason": "Tensor parallel training bug is unrelated to BEiT image processor label handling." }, { "left": "issue:41628", "right": "issue:44355", "accept": false, "reason": "`AutoImageProcessor` import failure and compiled Python file errors are separate import/loading problems." }, { "left": "issue:44261", "right": "issue:44961", "accept": false, "reason": "MLA q_a_layernorm precision/config bug is unrelated to the `racoon` issue." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 127, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:41669", "issue:42371", "issue:42890", "issue:43576", "issue:43673", "issue:44038", "issue:44261", "issue:44360", "issue:44393", "issue:44492", "issue:44556", "issue:44623", "issue:44908", "issue:45071", "issue:45092", "issue:45125", "issue:45230" ], "soft_pairs": [ "issue:44261|issue:44360", "issue:43576|issue:44556", "issue:44623|issue:45230", "issue:41669|issue:43673", "issue:44393|issue:45071", "issue:44038|issue:45125", "issue:44556|issue:45092", "issue:42371|issue:44492", "issue:42890|issue:44908" ], "prepared_review_unit_hash": "586ef98b37d921027c3424ec6e370008942dfc222c831aae27488f3e18b36008", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 9, "serialized_chars": 12310, "estimated_input_tokens": 3078, "estimated_eval_tokens": 6412 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster looks heterogeneous: the issues share broad Transformers/v5/modeling vocabulary, but they describe different bugs, subsystems, and likely fixes. 
All soft pairs are false positives rather than true duplicates.", "confidence": 0.79, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": "Issue 45092 is the clearest standalone bug report here: it names a specific incompatibility (old InternVL2 remote-code checkpoints vs v5 meta initialization) and provides a concrete failure mode, even though the cluster is too mixed to justify a true canonical duplicate set.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44261", "right": "issue:44360", "accept": false, "reason": "Different underlying problems: one is an MLA q_a_layernorm precision/config epsilon issue, the other is a DSA indexer missing ReLU." }, { "left": "issue:43576", "right": "issue:44556", "accept": false, "reason": "Both mention v5 regressions, but one is about the `transformers env` command and the other about loading old checkpoints after upgrade; different code paths and fixes." }, { "left": "issue:44623", "right": "issue:45230", "accept": false, "reason": "`processor.save_pretrained(...) missing files` is a specific serialization bug, while `Bug report` is too generic to establish the same underlying issue." }, { "left": "issue:41669", "right": "issue:43673", "accept": false, "reason": "Import-star model startup slowdown and missing GenerationMixin cache during chunked prefill are unrelated failures." }, { "left": "issue:44393", "right": "issue:45071", "accept": false, "reason": "Qwen3-VL 2D bbox hallucination/error is a model-output bug, while PretrainedConfig type checking is a config validation regression." }, { "left": "issue:44038", "right": "issue:45125", "accept": false, "reason": "Both involve Qwen3/MoE, but one is a generic v5/Qwen3-VL-Moe bug and the other is a missing `_tp_plan` for tensor parallelism; not the same concrete fix." 
}, { "left": "issue:44556", "right": "issue:45092", "accept": false, "reason": "Both are v5 compatibility complaints, but one is general checkpoint reload failure after upgrading and the other is a remote-code/meta-initialization incompatibility for InternVL2; too different to merge as one bug." }, { "left": "issue:42371", "right": "issue:44492", "accept": false, "reason": "TF32 API settings guidance and a typo in cache strategies are unrelated." }, { "left": "issue:42890", "right": "issue:44908", "accept": false, "reason": "Missing `set_seed()` in integration tests versus `inverse_sqrt` ignoring `lr_scheduler_kwargs` are distinct issues affecting different parts of the codebase." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 128, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:41669", "issue:43352", "issue:43408", "issue:43576", "issue:43976", "issue:44246", "issue:44273", "issue:44297", "issue:44315", "issue:44351", "issue:44393", "issue:44492", "issue:44704", "issue:44829", "issue:44908", "issue:45071", "issue:45230", "issue:45341" ], "soft_pairs": [ "issue:44246|issue:44273", "issue:44704|issue:44908", "issue:43408|issue:44315", "issue:43576|issue:44908", "issue:43976|issue:44393", "issue:43352|issue:44393", "issue:44908|issue:45071", "issue:41669|issue:44351", "issue:44315|issue:44829", "issue:44297|issue:45230", "issue:41669|issue:45341", "issue:41669|issue:44492" ], "prepared_review_unit_hash": "3844b4216f0fbada57e2d653ce2b1bc08aa5bc8f89ffdb326ceb2b92aabe37d3", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, 
"estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 12, "serialized_chars": 13743, "estimated_input_tokens": 3436, "estimated_eval_tokens": 7128 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These items are mostly unrelated issue reports across different subsystems: import performance, lazy loading, model-type warnings, scheduler kwargs, tokenizer save behavior, and model-specific incompatibilities. None of the soft pairs look like the same underlying bug.", "confidence": 0.91, "canonical_issue_reason": "issue:41669 is the clearest, most concrete anchor because it describes a specific import-time performance regression with measurable impact; the others are broader symptom reports or different code paths.", "canonical_pr_reason": null, "best_issue_reason": "issue:41669 is the best representative issue in this set, but only as an anchor for the import/performance subtheme\u2014not as a duplicate of the other issues.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44246", "right": "issue:44273", "accept": false, "reason": "Both concern import/lazy-loading behavior, but one is about intermittent import slowness and the other about lazy loading being incorrect; different bugs and likely different fixes." }, { "left": "issue:44704", "right": "issue:44908", "accept": false, "reason": "Both involve kwargs propagation, but they affect different APIs and different code paths (AutoProcessor vs inverse_sqrt scheduler)." }, { "left": "issue:43408", "right": "issue:44315", "accept": false, "reason": "A model-type mismatch warning and a Liger Kernel/model_init application bug are unrelated issues in different flows." }, { "left": "issue:43576", "right": "issue:44908", "accept": false, "reason": "The env command regression is unrelated to scheduler kwargs handling; no shared underlying defect." 
}, { "left": "issue:43976", "right": "issue:44393", "accept": false, "reason": "Python version compatibility and Qwen3-VL bounding-box output errors are unrelated." }, { "left": "issue:43352", "right": "issue:44393", "accept": false, "reason": "Flash Attention support for Nemotron and Qwen3-VL 2D box hallucinations are different model-specific problems." }, { "left": "issue:44908", "right": "issue:45071", "accept": false, "reason": "Scheduler kwargs omission and PretrainedConfig type checking are separate regressions in different components." }, { "left": "issue:41669", "right": "issue:44351", "accept": false, "reason": "Both mention imports, but one is import-time slowdown from import * usage and the other is a missing exported symbol; different root causes." }, { "left": "issue:44315", "right": "issue:44829", "accept": false, "reason": "Both touch model initialization/attention features, but the reported failures are distinct: Liger Kernel not applied vs degenerate training with flash_attention_3." }, { "left": "issue:44297", "right": "issue:45230", "accept": false, "reason": "The tokenizer_class mismatch bug is specific and concrete; the other issue is too vague to establish the same underlying problem." }, { "left": "issue:41669", "right": "issue:45341", "accept": false, "reason": "An import performance regression and a small testing_utils bug do not share a concrete code-path defect." }, { "left": "issue:41669", "right": "issue:44492", "accept": false, "reason": "Import slowdown from wildcard imports is unrelated to a typo in cache strategies." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 129, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:31795", "issue:33453", "issue:41669", "issue:43576", "issue:43704", "issue:43723", "issue:43827", "issue:43937", "issue:44077", "issue:44273", "issue:44295", "issue:44393", "issue:44492", "issue:44655", "issue:44861", "issue:44908", "issue:44945", "issue:45230" ], "soft_pairs": [ "issue:44295|issue:45230", "issue:44492|issue:44945", "issue:43827|issue:44908", "issue:44077|issue:44861", "issue:31795|issue:33453", "issue:41669|issue:43704", "issue:43723|issue:44393", "issue:44655|issue:44861", "issue:43576|issue:44273", "issue:43937|issue:44393" ], "prepared_review_unit_hash": "422c5891769b9117b53cd7462b977396edd092c8326ec056dda1f8807d6490bc", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 10, "serialized_chars": 13061, "estimated_input_tokens": 3266, "estimated_eval_tokens": 6788 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is heterogeneous: it mixes unrelated issues about docs, tokenizer loading, env tooling, lazy loading, VRAM leaks, cache typos, schedulers, and model-specific bugs. 
The soft pairs look like title-level similarity only, not shared underlying defects or changes.", "confidence": 0.91, "canonical_issue_reason": "No single canonical issue fits the cluster: the items do not share one concrete bug or code path.", "canonical_pr_reason": null, "best_issue_reason": "Issue 43723 is the most concrete and specific report, but it only represents a small tokenizer-loading subset rather than the cluster as a whole.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44295", "right": "issue:45230", "accept": false, "reason": "Specific buffer/position_ids bug vs a generic bug report; no shared code path is evident." }, { "left": "issue:44492", "right": "issue:44945", "accept": false, "reason": "A typo in cache strategy docs/code is unrelated to pipeline-parallelism output errors." }, { "left": "issue:43827", "right": "issue:44908", "accept": false, "reason": "Deprecated pipeline docs vs scheduler kwarg handling are different subsystems and bugs." }, { "left": "issue:44077", "right": "issue:44861", "accept": false, "reason": "PatchTSMixer post_init validation is unrelated to tied-weight key handling." }, { "left": "issue:31795", "right": "issue:33453", "accept": false, "reason": "Model.forward docs confusion is not the same as a tokenizer-loading regression." }, { "left": "issue:41669", "right": "issue:43704", "accept": false, "reason": "Import-star performance issue and a VRAM leak in Qwen3ForCausalLM are distinct problems." }, { "left": "issue:43723", "right": "issue:44393", "accept": false, "reason": "Tokenizer loading in v5 is unrelated to Qwen3-VL bounding-box generation errors." }, { "left": "issue:44655", "right": "issue:44861", "accept": false, "reason": "Saving Pipeline objects and tied-weight key crashes concern different save/serialization paths." }, { "left": "issue:43576", "right": "issue:44273", "accept": false, "reason": "Broken 'transformers env' command is not the same as lazy loading behavior." 
}, { "left": "issue:43937", "right": "issue:44393", "accept": false, "reason": "Invalid GenerationConfig handling and Qwen3-VL bbox output issues are different model-specific bugs." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 130, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:31795", "issue:34689", "issue:37428", "issue:42371", "issue:43502", "issue:43519", "issue:43704", "issue:44273", "issue:44315", "issue:44393", "issue:44485", "issue:44623", "issue:44908", "issue:45071", "issue:45125", "issue:45230", "issue:45310" ], "soft_pairs": [ "issue:45125|issue:45310", "issue:44273|issue:44908", "issue:45230|issue:45310", "issue:43519|issue:44485", "issue:43519|issue:44908", "issue:31795|issue:34689", "issue:44315|issue:44393", "issue:31795|issue:37428", "issue:44273|issue:45071", "issue:43704|issue:44485", "issue:42371|issue:43704", "issue:43502|issue:43519", "issue:43519|issue:44623", "issue:43519|issue:43704" ], "prepared_review_unit_hash": "f3eb05e668511c252560ff067b6c12b8d6ccdb2137b377faec12e0a48c9edfc0", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 14, "serialized_chars": 13757, "estimated_input_tokens": 3440, "estimated_eval_tokens": 7136 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The set is mostly unrelated issues. 
The only plausible duplicate pair is the Qwen3.5 MoE regression around from_pretrained/tensor-parallel handling; everything else looks distinct.", "confidence": 0.83, "canonical_issue_reason": "issue:45310 is the broadest and most user-facing report in the only plausible duplicate group, while issue:45125 is the narrower technical symptom.", "canonical_pr_reason": null, "best_issue_reason": "issue:45310 is the best representative issue for triage because it captures the concrete loading failure and has a direct inbound reference.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:45125", "right": "issue:45310", "accept": true, "reason": "Same underlying Qwen3.5 MoE loading/tensor-parallel regression; one report is the narrower missing _tp_plan symptom, the other is the broader from_pretrained failure." }, { "left": "issue:44273", "right": "issue:44908", "accept": false, "reason": "Lazy loading behavior and inverse_sqrt scheduler kwargs are unrelated subsystems and bugs." }, { "left": "issue:45230", "right": "issue:45310", "accept": false, "reason": "The generic title gives no evidence it is the same loading bug; too little concrete overlap to merge." }, { "left": "issue:43519", "right": "issue:44485", "accept": false, "reason": "Qwen3VL timestamp calculation and GLM-5 RoPE implementation are different model/component issues." }, { "left": "issue:43519", "right": "issue:44908", "accept": false, "reason": "Processor timestamp math and scheduler kwargs handling are unrelated code paths." }, { "left": "issue:31795", "right": "issue:34689", "accept": false, "reason": "Documentation confusion in model.forward is not the same as a model loading breakage." }, { "left": "issue:44315", "right": "issue:44393", "accept": false, "reason": "Liger Kernel application during model_init and Qwen3-VL bbox output errors are different problems." 
}, { "left": "issue:31795", "right": "issue:37428", "accept": false, "reason": "Forward-doc wording and a flash-attention import error are unrelated." }, { "left": "issue:44273", "right": "issue:45071", "accept": false, "reason": "Lazy loading and PretrainedConfig type-checking regression do not describe the same bug." }, { "left": "issue:43704", "right": "issue:44485", "accept": false, "reason": "VRAM leak in multithreaded dataloader use is unrelated to RoPE implementation discussion." }, { "left": "issue:42371", "right": "issue:43704", "accept": false, "reason": "TF32 API guidance and VRAM leakage are different issues with no shared failure mode." }, { "left": "issue:43502", "right": "issue:43519", "accept": false, "reason": "local_files_only network requests and Qwen3VL timestamp calculation are separate bugs." }, { "left": "issue:43519", "right": "issue:44623", "accept": false, "reason": "Processor timestamp calculation and processor.save_pretrained missing files are different processor issues." }, { "left": "issue:43519", "right": "issue:43704", "accept": false, "reason": "Timestamp calculation in a processor is unrelated to Qwen3ForCausalLM VRAM leakage." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 131, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:31515", "issue:38617", "issue:42890", "issue:42913", "issue:43066", "issue:43525", "issue:43572", "issue:43618", "issue:43704", "issue:44230", "issue:44492", "issue:44556", "issue:44623", "issue:44861", "issue:44908", "issue:44998", "issue:45290", "issue:45341" ], "soft_pairs": [ "issue:42913|issue:43066", "issue:42890|issue:43704", "issue:44908|issue:44998", "issue:44861|issue:45290", "issue:44998|issue:45341", "issue:44492|issue:44998", "issue:43618|issue:44998", "issue:44556|issue:44998", "issue:43525|issue:44861", "issue:43572|issue:44861", "issue:31515|issue:38617", "issue:44230|issue:44623" ], "prepared_review_unit_hash": "275e0ab1ea62961d4c4c7f55eb3c06d9c57803720c4253fad56cf4f85bf85c62", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 18, "item_count": 18, "soft_pair_count": 12, "serialized_chars": 13769, "estimated_input_tokens": 3443, "estimated_eval_tokens": 7142 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is mostly unrelated issue reports that only share broad Transformers vocabulary; none of the suggested soft edges look like the same underlying bug or change.", "confidence": 0.95, "canonical_issue_reason": "No clear duplicate center emerges: the items span unrelated areas (tokenizer behavior, config attributes, cache strategy, test flakiness, 
checkpoint loading, etc.), so there is no single canonical issue for the cluster.", "canonical_pr_reason": null, "best_issue_reason": "If one issue must be used as the standalone representative, issue 44556 is the most concrete and broadly actionable regression report (checkpoint reload failure after v4.57 -> v5.2/v5.3), but it is not a duplicate anchor for the rest of the set.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:42913", "right": "issue:43066", "accept": false, "reason": "Both mention tokenizer v5, but one reports generic behavior differences while the other is about the wrong decoder type; different failure modes and code paths." }, { "left": "issue:42890", "right": "issue:43704", "accept": false, "reason": "Test flakiness from missing set_seed is unrelated to a VRAM leak in Qwen3ForCausalLM with multiple dataloader threads." }, { "left": "issue:44908", "right": "issue:44998", "accept": false, "reason": "A scheduler kwargs bug and an unrelated placeholder title ('Unemployment') are not the same issue." }, { "left": "issue:44861", "right": "issue:45290", "accept": false, "reason": "_get_tied_weight_keys AttributeError and apply_chat_template crashing on tool-call assistant messages are separate subsystems and bugs." }, { "left": "issue:44998", "right": "issue:45341", "accept": false, "reason": "The placeholder/irrelevant 'Unemployment' issue is unrelated to a testing_utils.py bug." }, { "left": "issue:44492", "right": "issue:44998", "accept": false, "reason": "A cache-strategy typo is unrelated to the unrelated 'Unemployment' issue." }, { "left": "issue:43618", "right": "issue:44998", "accept": false, "reason": "CLIPOutput attentions regression and the unrelated 'Unemployment' issue are not duplicates." }, { "left": "issue:44556", "right": "issue:44998", "accept": false, "reason": "Checkpoint reload breakage after upgrading to v5.x is unrelated to the unrelated 'Unemployment' issue." 
}, { "left": "issue:43525", "right": "issue:44861", "accept": false, "reason": "Missing pad_token_id in Llama4Config and AttributeError in tied-weight-key collection affect different code paths and model/config classes." }, { "left": "issue:43572", "right": "issue:44861", "accept": false, "reason": "Missing pad_token_idx in StableLmConfig is a config-compatibility issue, not the tied-weight-key crash described in 44861." }, { "left": "issue:31515", "right": "issue:38617", "accept": false, "reason": "Slow from_pretrained checkpoint loading and an ImportError for layer_type_validation are unrelated problems." }, { "left": "issue:44230", "right": "issue:44623", "accept": false, "reason": "fp8 inference support for Qwen3-VL is a feature/support request, while processor.save_pretrained missing files is a separate save-path bug." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 2, "component_count": 12, "review_unit_index": 132, "review_unit_count": 132, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-15354-353", "nodes": [ "issue:31515", "issue:31795", "issue:33453", "issue:36296", "issue:36683", "issue:43298", "issue:44230", "issue:45310" ], "soft_pairs": [ "issue:31515|issue:36296", "issue:31515|issue:31795", "issue:31515|issue:33453", "issue:44230|issue:45310", "issue:31515|issue:36683", "issue:43298|issue:44230" ], "prepared_review_unit_hash": "a484ce0df14bb18e13bb94f34434f95a7d7a2235fd87067e59a2b203a1de956b", "component_budget": { "node_count": 353, "item_count": 353, "soft_pair_count": 1317, "serialized_chars": 679748, "estimated_input_tokens": 169937, "estimated_eval_tokens": 340130 }, "budget": { "node_count": 8, "item_count": 8, "soft_pair_count": 6, "serialized_chars": 6280, "estimated_input_tokens": 1570, 
"estimated_eval_tokens": 3396 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These items are not duplicates of one another; they span separate problems such as slow checkpoint loading, tokenizer loading regressions, documentation confusion, tensor-parallel training, missing config attributes, continuous batching audio support, and FP8 inference support.", "confidence": 0.92, "canonical_issue_reason": "No single canonical issue is justified here: the soft-linked issues are thematically related to model loading/inference, but the actual bug reports are different enough that they should remain separate.", "canonical_pr_reason": null, "best_issue_reason": "If one issue had to serve as the broadest representative, issue 45310 is the most concrete/reproducible code bug, but it still does not subsume the others; overall this cluster does not have a strong best issue.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:31515", "right": "issue:36296", "accept": false, "reason": "Slow from_pretrained checkpoint loading and a tensor-parallel training bug are different failures in different code paths." }, { "left": "issue:31515", "right": "issue:31795", "accept": false, "reason": "One is a performance/load-time complaint, the other is documentation confusion about forward inputs; not the same underlying bug." }, { "left": "issue:31515", "right": "issue:33453", "accept": false, "reason": "Both mention loading, but checkpoint loading slowness and tokenizer-loading regression are distinct issues." }, { "left": "issue:44230", "right": "issue:45310", "accept": false, "reason": "FP8 MoE inference support and a Qwen3.5 MoE from_pretrained error are different feature/bug reports, not one concrete fix." 
}, { "left": "issue:31515", "right": "issue:36683", "accept": false, "reason": "A slow checkpoint load issue and a Gemma3Config missing vocab_size AttributeError are separate problems." }, { "left": "issue:43298", "right": "issue:44230", "accept": false, "reason": "Continuous batching support for audio models and FP8 support for Qwen3-VL/Qwen3.5 MoE are unrelated feature requests/bugs." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 3, "component_count": 12, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-27088-4", "nodes": [ "issue:27088", "issue:43834", "issue:44365", "issue:44370" ], "soft_pairs": [ "issue:27088|issue:44370", "issue:27088|issue:44365", "issue:27088|issue:43834", "issue:43834|issue:44370", "issue:43834|issue:44365", "issue:44365|issue:44370" ], "prepared_review_unit_hash": "a3baa66527ce261ac9aec7cf414cff34563814de7088916e46a9249e6993a3b2", "component_budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, "serialized_chars": 4454, "estimated_input_tokens": 1114, "estimated_eval_tokens": 2484 }, "budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, "serialized_chars": 4454, "estimated_input_tokens": 1114, "estimated_eval_tokens": 2484 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "This cluster is dominated by near-identical i18n template issues, but the only concrete request is for Turkish. 
The placeholder-style issues look like separate language requests rather than duplicates of the Turkish one, so I would not merge any pair.", "confidence": 0.88, "canonical_issue_reason": "issue:27088 is the only specific, non-placeholder request ([i18n-TR] Translating docs to Turkish), so it is the clearest representative of the actual translation task in this cluster.", "canonical_pr_reason": null, "best_issue_reason": "issue:27088 is the most actionable and specific issue; the others are generic template variants with no explicit language named in the visible title.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:27088", "right": "issue:44370", "accept": false, "reason": "High textual similarity, but the right side is a generic placeholder and not clearly the same language/task as Turkish." }, { "left": "issue:27088", "right": "issue:44365", "accept": false, "reason": "High textual similarity, but this looks like a separate placeholder i18n request rather than the same Turkish translation issue." }, { "left": "issue:27088", "right": "issue:43834", "accept": false, "reason": "The visible titles/body patterns are templated i18n requests; no evidence this is the same underlying Turkish request." }, { "left": "issue:43834", "right": "issue:44370", "accept": false, "reason": "These appear to be separate placeholder translation issues, likely for different languages, not one concrete bug/change." }, { "left": "issue:43834", "right": "issue:44365", "accept": false, "reason": "Similarity is template-level only; there is no concrete evidence they target the same translation task." }, { "left": "issue:44365", "right": "issue:44370", "accept": false, "reason": "Both are generic i18n placeholders, but the data does not show they are the same underlying request." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 4, "component_count": 12, "review_unit_index": 1, "review_unit_count": 2, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-36817-13", "nodes": [ "issue:36817", "issue:37015", "issue:41211", "issue:42503", "issue:42738", "issue:43671", "issue:43837", "issue:43909", "issue:44121", "issue:44205", "issue:45027", "issue:45177", "issue:45322" ], "soft_pairs": [ "issue:43909|issue:44205", "issue:43671|issue:43837", "issue:44121|issue:45322", "issue:43837|issue:43909", "issue:43837|issue:45322", "issue:37015|issue:43909", "issue:43671|issue:45177", "issue:37015|issue:43837", "issue:43671|issue:45027", "issue:37015|issue:44121", "issue:43837|issue:44121", "issue:43837|issue:45177", "issue:43671|issue:43909", "issue:43671|issue:44121", "issue:41211|issue:43671", "issue:36817|issue:41211", "issue:37015|issue:42503", "issue:36817|issue:43671", "issue:37015|issue:45027", "issue:42738|issue:45027", "issue:43837|issue:45027", "issue:42503|issue:43837", "issue:42503|issue:42738", "issue:42503|issue:43671" ], "prepared_review_unit_hash": "c10d2074d217f376ef8a927e824bb6ccc955b3583a34a842861357b258ffbf56", "component_budget": { "node_count": 13, "item_count": 13, "soft_pair_count": 25, "serialized_chars": 16011, "estimated_input_tokens": 4003, "estimated_eval_tokens": 8262 }, "budget": { "node_count": 13, "item_count": 13, "soft_pair_count": 24, "serialized_chars": 14125, "estimated_input_tokens": 3532, "estimated_eval_tokens": 7320 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster contains many unrelated model-request issues that only share a broad 'add model support' theme. 
The apparent similarities are mostly by model family or modality (e.g., TTS, ASR, OCR, BERT-like), not the same concrete change, so no soft edges should be merged.", "confidence": 0.97, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43909", "right": "issue:44205", "accept": false, "reason": "Different models and scopes: LFM2.5 Audio 1.5B vs SAM3-LiteText. Both are model requests, but they target different architectures and features, not the same underlying issue." }, { "left": "issue:43671", "right": "issue:43837", "accept": false, "reason": "Both are Qwen3 requests, but one is TTS and the other is ASR. Different code paths and model capabilities, so not duplicates." }, { "left": "issue:44121", "right": "issue:45322", "accept": false, "reason": "Weight-sparse transformer support request vs EUPE perception encoder request. They are unrelated model additions with different implementation requirements." }, { "left": "issue:43837", "right": "issue:43909", "accept": false, "reason": "ASR support vs a generic audio model request. Similar domain, but distinct model and task, so not the same change." }, { "left": "issue:43837", "right": "issue:45322", "accept": false, "reason": "Qwen3-ASR support and EUPE are different model families and modalities; no shared concrete bug or code path." }, { "left": "issue:37015", "right": "issue:43909", "accept": false, "reason": "NeoBERT and LFM2.5 Audio are unrelated models. Broadly both are add-model requests, but they do not describe the same feature." }, { "left": "issue:43671", "right": "issue:45177", "accept": false, "reason": "Qwen3-TTS support vs DeepSeek-OCR2. Different modality and integration path, so not duplicates." }, { "left": "issue:37015", "right": "issue:43837", "accept": false, "reason": "NeoBERT vs Qwen3-ASR are unrelated model additions. Same general 'support a model' theme only." 
}, { "left": "issue:43671", "right": "issue:45027", "accept": false, "reason": "Both are TTS-related requests, but for different models (Qwen3-TTS vs Voxtral-4B-TTS-2603) and likely different integration details." }, { "left": "issue:37015", "right": "issue:44121", "accept": false, "reason": "NeoBERT request and weight-sparse transformer request target different model types and functionality." }, { "left": "issue:43837", "right": "issue:44121", "accept": false, "reason": "ASR support for Qwen3 is not the same as adding an OpenAI weight-sparse transformer model." }, { "left": "issue:43837", "right": "issue:45177", "accept": false, "reason": "Qwen3-ASR and DeepSeek-OCR2 are different tasks and model families; no duplicate evidence beyond generic model-support similarity." }, { "left": "issue:43671", "right": "issue:43909", "accept": false, "reason": "TTS support request vs audio model addition. Related domain, but not the same model or implementation target." }, { "left": "issue:43671", "right": "issue:44121", "accept": false, "reason": "Qwen3-TTS and the OpenAI weight-sparse transformer request are different model additions with different code paths." }, { "left": "issue:41211", "right": "issue:43671", "accept": false, "reason": "DEIMv2 and Qwen3-TTS are unrelated model requests; similarity is only at the generic 'add support' level." }, { "left": "issue:36817", "right": "issue:41211", "accept": false, "reason": "EuroBert config support vs DEIMv2 model support. Different models and different integration work." }, { "left": "issue:37015", "right": "issue:42503", "accept": false, "reason": "NeoBERT and ModernVBERT are both BERT-family requests, but they are distinct models with different integration requirements." }, { "left": "issue:36817", "right": "issue:43671", "accept": false, "reason": "EuroBert model config addition and Qwen3-TTS support are unrelated changes across different model families and modalities." 
}, { "left": "issue:37015", "right": "issue:45027", "accept": false, "reason": "NeoBERT vs Voxtral-4B-TTS-2603 are different model requests, one text encoder-style and one speech/TTS." }, { "left": "issue:42738", "right": "issue:45027", "accept": false, "reason": "BERT-like models with RoPE is a broad conceptual request, but Voxtral-4B-TTS-2603 is a specific TTS model. Not the same concrete issue." }, { "left": "issue:43837", "right": "issue:45027", "accept": false, "reason": "Qwen3-ASR and Voxtral TTS are different speech tasks and model families; they should not be merged." }, { "left": "issue:42503", "right": "issue:43837", "accept": false, "reason": "ModernVBERT models vs Qwen3-ASR support are distinct model additions with different target code paths." }, { "left": "issue:42503", "right": "issue:42738", "accept": false, "reason": "ModernVBERT models may relate to RoPE usage, but the issues are not the same request: one is a specific model family, the other a broader architectural pattern." }, { "left": "issue:42503", "right": "issue:43671", "accept": false, "reason": "ModernVBERT and Qwen3-TTS are unrelated model requests; no shared concrete change." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 4, "component_count": 12, "review_unit_index": 2, "review_unit_count": 2, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-36817-13", "nodes": [ "issue:42503", "issue:45027" ], "soft_pairs": [ "issue:42503|issue:45027" ], "prepared_review_unit_hash": "7e9ea9315e6f01a459b6af18b13d615bf3453b3824dc010e7211948e0e5b694b", "component_budget": { "node_count": 13, "item_count": 13, "soft_pair_count": 25, "serialized_chars": 16011, "estimated_input_tokens": 4003, "estimated_eval_tokens": 8262 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 1434, "estimated_input_tokens": 359, "estimated_eval_tokens": 974 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These are two separate model-support requests in the same broad area, but they target different models and likely different implementation work. 
They should not be merged as duplicates.", "confidence": 0.91, "canonical_issue_reason": "issue:45027 is the more specific and active request: it targets Voxtral-4B-TTS-2603, is open, and has discussion/inbound activity, making it the better representative of the current support request cluster.", "canonical_pr_reason": null, "best_issue_reason": "issue:45027 is the strongest issue to keep as the cluster representative because it is open, concrete, and likely to drive the actual implementation discussion for a specific model.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:42503", "right": "issue:45027", "accept": false, "reason": "Same broad topic of adding model support, but they request different models (ModernVBERT vs Voxtral-4B-TTS-2603) and do not appear to describe the same underlying bug or change." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 5, "component_count": 12, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-37934-6", "nodes": [ "issue:37934", "issue:42936", "issue:43240", "issue:43668", "issue:43813", "issue:44031" ], "soft_pairs": [ "issue:37934|issue:43668", "issue:42936|issue:43240", "issue:42936|issue:44031", "issue:42936|issue:43813", "issue:42936|issue:43668", "issue:43240|issue:43668", "issue:43668|issue:44031", "issue:43668|issue:43813" ], "prepared_review_unit_hash": "5fce2c6946c6eb9341ec0f9ee15534af159ab3a0a5667b4bf7ce33ac636e065c", "component_budget": { "node_count": 6, "item_count": 6, "soft_pair_count": 8, "serialized_chars": 6293, "estimated_input_tokens": 1574, "estimated_eval_tokens": 3404 }, "budget": { "node_count": 6, "item_count": 6, "soft_pair_count": 8, "serialized_chars": 6293, "estimated_input_tokens": 1574, 
"estimated_eval_tokens": 3404 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "These issues are not duplicates of one another; they span unrelated areas (model behavior, loss plumbing, config typing, a typo, and a tokenizer regression).", "confidence": 0.97, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": "issue:44031 is the strongest standalone issue here because it describes a concrete, user-facing regression affecting all tokenizers, with clear impact and discussion activity.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:37934", "right": "issue:43668", "accept": false, "reason": "Different problems: one questions Llama4TextL2Norm semantics, the other reports an incorrect type hint in ModernBERTConfig." }, { "left": "issue:42936", "right": "issue:43240", "accept": false, "reason": "Both are model-related bugs, but they affect different code paths: Mask2Former ignore_value handling vs loss-function kwargs propagation." }, { "left": "issue:42936", "right": "issue:44031", "accept": false, "reason": "Unrelated subsystems: a Mask2Former model option bug versus a tokenizer regex warning regression." }, { "left": "issue:42936", "right": "issue:43813", "accept": false, "reason": "Different scope and failure mode: missing use of ignore_value vs a typo in a quantization attribute name." }, { "left": "issue:42936", "right": "issue:43668", "accept": false, "reason": "No shared underlying bug: Mask2Former ignore_value behavior is unrelated to a ModernBERTConfig annotation error." }, { "left": "issue:43240", "right": "issue:43668", "accept": false, "reason": "Loss kwargs forwarding and a config type-hint mistake are distinct issues with no common code path." }, { "left": "issue:43668", "right": "issue:44031", "accept": false, "reason": "A ModernBERT config typing issue is unrelated to the tokenizer regex-warning regression." 
}, { "left": "issue:43668", "right": "issue:43813", "accept": false, "reason": "Different classes of change: type-hint correction versus a literal typo in a conversion attribute name." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 6, "component_count": 12, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-41084-2", "nodes": [ "issue:41084", "issue:43037" ], "soft_pairs": [ "issue:41084|issue:43037" ], "prepared_review_unit_hash": "eeaff9a73cee7a8441ade7a6e29ebebe7f15fd4d5cdb2b7eaa793f2e8c5171c3", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 1480, "estimated_input_tokens": 370, "estimated_eval_tokens": 996 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 1480, "estimated_input_tokens": 370, "estimated_eval_tokens": 996 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "These two issues are not duplicates: one is a vague request about block decoding, while the other is a specific feature/support request for DeepSeek v3.2. 
They don\u2019t describe the same underlying bug or change.", "confidence": 0.91, "canonical_issue_reason": "Issue #43037 is the more concrete and actionable issue: it names a specific model support request, whereas #41084 is too vague to serve as a strong canonical reference.", "canonical_pr_reason": null, "best_issue_reason": "#43037 is the best representative issue in this pair because it has a clearer scope and intent; #41084 is too underspecified.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:41084", "right": "issue:43037", "accept": false, "reason": "Different problem statements: 'Set Block Decoding' suggests an implementation detail, while 'DeepSeek v3.2 support' is a model-support request. No shared targets, files, or concrete evidence that they refer to the same change." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 7, "component_count": 12, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-43598-2", "nodes": [ "issue:43598", "issue:43600" ], "soft_pairs": [ "issue:43598|issue:43600" ], "prepared_review_unit_hash": "c08e1759b2e5beb6b9b88c0827c1219cf24bd97229e13597fe2c0570c896b941", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 1583, "estimated_input_tokens": 396, "estimated_eval_tokens": 1048 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 1583, "estimated_input_tokens": 396, "estimated_eval_tokens": 1048 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "These two issues are related to Trainer internals and customization, but they describe different problems: one 
asks to revisit `remove_unused_column` behavior, while the other asks FLOPS estimation to account for custom trainers. They should not be merged as duplicates.", "confidence": 0.92, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": "No single canonical issue stands out because the issues target different Trainer behaviors and different underlying fixes.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43598", "right": "issue:43600", "accept": false, "reason": "The titles and likely scopes differ: one concerns column pruning customizability, the other concerns FLOPS counting for custom trainers. They share a broad Trainer theme but not the same underlying bug or change." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 8, "component_count": 12, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-43684-2", "nodes": [ "issue:43684", "issue:44405" ], "soft_pairs": [ "issue:43684|issue:44405" ], "prepared_review_unit_hash": "db892e2a8fc708401ffbd729d964056f57964ad7680f684302f92e7fafb8767d", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 1612, "estimated_input_tokens": 403, "estimated_eval_tokens": 1062 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 1612, "estimated_input_tokens": 403, "estimated_eval_tokens": 1062 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "These issues are both about missing AutoModel registration/support, but they target different model families and different auto classes, so they do not look like the same underlying bug.", 
"confidence": 0.93, "canonical_issue_reason": "Not a duplicate cluster: one asks for Qwen3-Omni registration in AutoModel/AutoModelForConditionalGeneration, while the other asks for AutoModelForSequenceClassification support for Qwen3.5/Qwen3_5Config.", "canonical_pr_reason": null, "best_issue_reason": "Neither issue is a clear canonical for the other because they address different model/config combinations and different auto-model entry points.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43684", "right": "issue:44405", "accept": false, "reason": "Different model families (Qwen3-Omni vs Qwen3.5) and different tasks/auto classes (conditional generation vs sequence classification); not the same fixable code-path problem." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 9, "component_count": 12, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-43835-4", "nodes": [ "issue:43835", "issue:43845", "issue:43846", "issue:43859" ], "soft_pairs": [ "issue:43835|issue:43845", "issue:43835|issue:43846", "issue:43835|issue:43859", "issue:43845|issue:43846", "issue:43845|issue:43859", "issue:43846|issue:43859" ], "prepared_review_unit_hash": "6d16fe822b94b1410b1d0c920dc65676846cf21d66e0fb391a4da59b9babb5b7", "component_budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, "serialized_chars": 4267, "estimated_input_tokens": 1067, "estimated_eval_tokens": 2390 }, "budget": { "node_count": 4, "item_count": 4, "soft_pair_count": 6, "serialized_chars": 4267, "estimated_input_tokens": 1067, "estimated_eval_tokens": 2390 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "Fett 
looks unrelated. The three \"huggingface\" issues are only weakly similar by title and timing; without body text or explicit links, they should not be treated as confirmed duplicates.", "confidence": 0.67, "canonical_issue_reason": "43845 is the earliest of the similarly titled \"huggingface\" issues, so it is the best provisional representative if this later proves to be a duplicate cluster.", "canonical_pr_reason": null, "best_issue_reason": "43845 is the best global issue candidate only in a provisional sense: it is the earliest issue with the repeated title, but the evidence is too thin to confirm a true duplicate set.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:43835", "right": "issue:43845", "accept": false, "reason": "Different title and no concrete evidence that they report the same underlying problem." }, { "left": "issue:43835", "right": "issue:43846", "accept": false, "reason": "Different title and no concrete evidence that they report the same underlying problem." }, { "left": "issue:43835", "right": "issue:43859", "accept": false, "reason": "Different title and no concrete evidence that they report the same underlying problem." }, { "left": "issue:43845", "right": "issue:43846", "accept": false, "reason": "Same title and close timing are not enough without body text, links, or target/code-path evidence." }, { "left": "issue:43845", "right": "issue:43859", "accept": false, "reason": "Same title and close timing are not enough without body text, links, or target/code-path evidence." }, { "left": "issue:43846", "right": "issue:43859", "accept": false, "reason": "Same title and close timing are not enough without body text, links, or target/code-path evidence." } ] }, "evaluator_result": { "accept": false, "feedback": "The grouping is a bit too strong for the evidence in the packet. 
We only see matching titles, close timestamps, and soft-similarity scores/body lengths; there is no body text or explicit link showing the three \"huggingface\" issues are the same underlying report. The soft-edge accepts therefore overstate certainty, especially the claims about \"same general report text pattern\" and \"same underlying issue report.\" The \"Fett\" issue being unrelated is reasonable, but the duplicate-set verdicts should be more conservative." }, "evaluator_used": true, "retried": true, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 10, "component_count": 12, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-44371-5", "nodes": [ "issue:44371", "issue:44418", "issue:44572", "issue:44573", "issue:44937" ], "soft_pairs": [ "issue:44371|issue:44572", "issue:44371|issue:44573", "issue:44371|issue:44418", "issue:44371|issue:44937", "issue:44418|issue:44572", "issue:44418|issue:44573", "issue:44572|issue:44573", "issue:44572|issue:44937", "issue:44573|issue:44937" ], "prepared_review_unit_hash": "8493ca49148c5b94fd01c93e8ce07d61a76fe067805e0db0fc93f657a7ec2113", "component_budget": { "node_count": 5, "item_count": 5, "soft_pair_count": 9, "serialized_chars": 5794, "estimated_input_tokens": 1449, "estimated_eval_tokens": 3154 }, "budget": { "node_count": 5, "item_count": 5, "soft_pair_count": 9, "serialized_chars": 5794, "estimated_input_tokens": 1449, "estimated_eval_tokens": 3154 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "All items look like unrelated low-signal spam or placeholder issues, not duplicates of a single underlying bug or request. 
No soft edges should be accepted.", "confidence": 0.98, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": "issue:44371 is the best placeholder only because it is an early, generic spam-like report; however, none of the issues form a meaningful canonical root cause.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:44371", "right": "issue:44572", "accept": false, "reason": "Both are generic spam placeholders, but there is no concrete shared bug or request to unify." }, { "left": "issue:44371", "right": "issue:44573", "accept": false, "reason": "Same as above: similar low-content spam titles, not the same underlying issue." }, { "left": "issue:44371", "right": "issue:44418", "accept": false, "reason": "One is a spam placeholder and the other is a documentation enhancement request; these are unrelated." }, { "left": "issue:44371", "right": "issue:44937", "accept": false, "reason": "A spam placeholder and a named-entity style issue title are not the same bug/change." }, { "left": "issue:44418", "right": "issue:44572", "accept": false, "reason": "Documentation enhancement and spam placeholder do not describe the same underlying task." }, { "left": "issue:44418", "right": "issue:44573", "accept": false, "reason": "Documentation enhancement and spam placeholder are unrelated." }, { "left": "issue:44572", "right": "issue:44573", "accept": false, "reason": "Both are spam placeholders, but there is no evidence of a shared concrete issue beyond generic junk titles." }, { "left": "issue:44572", "right": "issue:44937", "accept": false, "reason": "Spam placeholder and unrelated named-entity title do not match the same problem." }, { "left": "issue:44573", "right": "issue:44937", "accept": false, "reason": "Spam placeholder and unrelated named-entity title do not match the same problem." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 11, "component_count": 12, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-45145-2", "nodes": [ "issue:45145", "issue:45151" ], "soft_pairs": [ "issue:45145|issue:45151" ], "prepared_review_unit_hash": "9220369c4793908cf596f90434c4631e9ad14d20a52ead1a0f4fab7a60708a40", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 1646, "estimated_input_tokens": 412, "estimated_eval_tokens": 1080 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 1646, "estimated_input_tokens": 412, "estimated_eval_tokens": 1080 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "The pair looks like a likely duplicate issue cluster: both are closed, share an identical title, and were opened within hours of each other. #45145 is the best canonical/representative issue because it was created first and has the only inbound reference.", "confidence": 0.96, "canonical_issue_reason": "Issue #45145 is the better canonical issue because it was created first and has an inbound reference, making it the most likely original thread.", "canonical_pr_reason": null, "best_issue_reason": "#45145 is the best representative issue for this cluster: earliest timestamp, identical title to #45151, and slightly stronger evidence of being the original discussion.", "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "issue:45145", "right": "issue:45151", "accept": true, "reason": "Likely duplicate: the issues have identical titles, were opened close together, and have similar body lengths. 
The packet does not show body-content comparison, so this is a conservative duplicate acceptance based on metadata." } ] }, "evaluator_result": { "accept": false, "feedback": "The overall clustering judgment is plausible, but the soft-edge verdict is a bit too strong for the evidence in the packet. We only have identical titles, similar body lengths, and close creation times; we do not have body-content comparison, so saying the issues are 'evidently the same topic/body' overstates what is shown. Please keep the verdict more conservative (e.g., 'likely duplicate' based on identical title and timing)." }, "evaluator_used": true, "retried": true, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "issue", "component_index": 12, "component_count": 12, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-45295-2", "nodes": [ "issue:45295", "issue:45373" ], "soft_pairs": [], "prepared_review_unit_hash": "f5fd95fada2f530ab0999e343c17c16266cde0c7182bb22509972ca44827cd49", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 0, "serialized_chars": 1286, "estimated_input_tokens": 322, "estimated_eval_tokens": 900 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 0, "serialized_chars": 1286, "estimated_input_tokens": 322, "estimated_eval_tokens": 900 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "Both items are the same missing feature request for Gemma 4 sequence classification support; the newer issue is more specific and better documented.", "confidence": 0.97, "canonical_issue_reason": "Issue #45373 is the clearest canonical issue because it precisely names the missing class (`Gemma4ForSequenceClassification`) and states the gap in the `gemma4` module.", 
"canonical_pr_reason": null, "best_issue_reason": "Issue #45373 is the best representative: it is open, more detailed, and directly describes the exact API missing from Gemma 4.", "best_pr_reason": null, "soft_edge_verdicts": [] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 1, "component_count": 23, "review_unit_index": 1, "review_unit_count": 2, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-39895-14", "nodes": [ "pull_request:39895", "pull_request:42668", "pull_request:43251", "pull_request:43254", "pull_request:43424", "pull_request:43823", "pull_request:43995", "pull_request:44827", "pull_request:45078", "pull_request:45170", "pull_request:45213", "pull_request:45433", "pull_request:45435", "pull_request:45454" ], "soft_pairs": [ "pull_request:43251|pull_request:43254", "pull_request:44827|pull_request:45433", "pull_request:42668|pull_request:43823", "pull_request:42668|pull_request:45078", "pull_request:45078|pull_request:45454", "pull_request:45213|pull_request:45435", "pull_request:43823|pull_request:45454", "pull_request:44827|pull_request:45170", "pull_request:43424|pull_request:44827", "pull_request:43823|pull_request:45435", "pull_request:43823|pull_request:43995", "pull_request:43251|pull_request:43823", "pull_request:43995|pull_request:45078", "pull_request:45078|pull_request:45435", "pull_request:43995|pull_request:45435", "pull_request:43424|pull_request:45435", "pull_request:43251|pull_request:44827", "pull_request:43424|pull_request:43823", "pull_request:39895|pull_request:43424", "pull_request:43995|pull_request:45213", "pull_request:44827|pull_request:45213", "pull_request:43995|pull_request:44827", "pull_request:43823|pull_request:44827", "pull_request:43823|pull_request:45213" ], 
"prepared_review_unit_hash": "cfa19812d5587faea9e811c8e8eac0357ca101befdf21d252cfc4655a578d81f", "component_budget": { "node_count": 14, "item_count": 14, "soft_pair_count": 26, "serialized_chars": 40440, "estimated_input_tokens": 10110, "estimated_eval_tokens": 20476 }, "budget": { "node_count": 14, "item_count": 14, "soft_pair_count": 24, "serialized_chars": 37801, "estimated_input_tokens": 9451, "estimated_eval_tokens": 19158 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "Mostly unrelated pull requests across different models and subsystems; only one soft edge looks like a true duplicate fix pair in `fixed_cross_entropy` for issue 43240.", "confidence": 0.93, "canonical_issue_reason": null, "canonical_pr_reason": "PR 43251 is the best representative of the duplicate pair because it explicitly targets issue 43240 and contains the more complete `fixed_cross_entropy` kwargs pass-through fix in the shared code path.", "best_issue_reason": null, "best_pr_reason": "PR 43251 is the strongest canonical PR candidate in this cluster: explicit issue linkage, direct utility-function fix, and the clearest match to the underlying bug. The rest of the cluster is mostly unrelated model additions or subsystem changes.", "soft_edge_verdicts": [ { "left": "pull_request:43251", "right": "pull_request:43254", "accept": true, "reason": "Same bug and same code path: both patch `src/transformers/loss/loss_utils.py` to forward supported `cross_entropy` kwargs for issue 43240, so they are functionally the same fix." }, { "left": "pull_request:44827", "right": "pull_request:45433", "accept": false, "reason": "Different changes: Mistral4 test/integration fixes versus adding SonicMoe kernels/MoE integration; same broad MoE area but not the same underlying bug." 
}, { "left": "pull_request:42668", "right": "pull_request:43823", "accept": false, "reason": "Unrelated: processor/auto-loading robustness changes versus adding a new MobileLLM model." }, { "left": "pull_request:42668", "right": "pull_request:45078", "accept": false, "reason": "Different problems in `auto` loading logic, but one is processor handling and the other is tokenizer conversion/error behavior; not the same fix." }, { "left": "pull_request:45078", "right": "pull_request:45454", "accept": false, "reason": "Tokenizer conversion/error handling versus Gemma4 text-only training behavior; no shared underlying bug." }, { "left": "pull_request:45213", "right": "pull_request:45435", "accept": false, "reason": "Model-creation skill/docs scaffolding is unrelated to the Whisper tokenizer off-by-one fix." }, { "left": "pull_request:43823", "right": "pull_request:45454", "accept": false, "reason": "New MobileLLM model addition versus Gemma4 training-mask logic; different models and different code paths." }, { "left": "pull_request:44827", "right": "pull_request:45170", "accept": false, "reason": "Mistral4 test fixes and SonicMoe renaming/kernels are unrelated changes." }, { "left": "pull_request:43424", "right": "pull_request:44827", "accept": false, "reason": "Executorch export dynamic-shape test is unrelated to Mistral4 test/runtime fixes." }, { "left": "pull_request:43823", "right": "pull_request:45435", "accept": false, "reason": "New model implementation versus Whisper tokenizer special-token indexing fix; unrelated." }, { "left": "pull_request:43823", "right": "pull_request:43995", "accept": false, "reason": "Different model additions/refactors: MobileLLM versus Falcon output-collection refactor." }, { "left": "pull_request:43251", "right": "pull_request:43823", "accept": false, "reason": "Loss utility kwargs fix versus a new model implementation; no overlap in underlying bug." 
}, { "left": "pull_request:43995", "right": "pull_request:45078", "accept": false, "reason": "Falcon output-capture refactor and tokenizer conversion behavior are different code paths and issues." }, { "left": "pull_request:45078", "right": "pull_request:45435", "accept": false, "reason": "Tokenizer auto-loading error handling versus Whisper tokenization bug; different behavior and files." }, { "left": "pull_request:43995", "right": "pull_request:45435", "accept": false, "reason": "Falcon model refactor is unrelated to Whisper tokenizer boundary checking." }, { "left": "pull_request:43424", "right": "pull_request:45435", "accept": false, "reason": "Executorch export test and Whisper tokenizer fix are unrelated." }, { "left": "pull_request:43251", "right": "pull_request:44827", "accept": false, "reason": "Cross-entropy kwargs fix is unrelated to Mistral4/MoE test changes." }, { "left": "pull_request:43424", "right": "pull_request:43823", "accept": false, "reason": "Executorch exportability testing is unrelated to adding MobileLLM." }, { "left": "pull_request:39895", "right": "pull_request:43424", "accept": false, "reason": "VideoPrism model addition and Executorch dynamic-shape export testing are separate, unrelated efforts." }, { "left": "pull_request:43995", "right": "pull_request:45213", "accept": false, "reason": "Falcon refactor and model-creation skill PR are unrelated and not the same code change." }, { "left": "pull_request:44827", "right": "pull_request:45213", "accept": false, "reason": "Mistral4 tests and model-creation skill scaffolding are unrelated." }, { "left": "pull_request:43995", "right": "pull_request:44827", "accept": false, "reason": "Falcon standardized output collection refactor is unrelated to Mistral4 test fixes." }, { "left": "pull_request:43823", "right": "pull_request:44827", "accept": false, "reason": "MobileLLM addition and Mistral4 test fixes are different model areas and different changes." 
}, { "left": "pull_request:43823", "right": "pull_request:45213", "accept": false, "reason": "MobileLLM model addition and model-creation skill PR are not the same underlying bug/change." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 1, "component_count": 23, "review_unit_index": 2, "review_unit_count": 2, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-39895-14", "nodes": [ "pull_request:43823", "pull_request:45078", "pull_request:45213" ], "soft_pairs": [ "pull_request:45078|pull_request:45213", "pull_request:43823|pull_request:45078" ], "prepared_review_unit_hash": "298804c3f8b113393543327142eafa6e72c43cfc87f51bd8097a26c2b26f09e0", "component_budget": { "node_count": 14, "item_count": 14, "soft_pair_count": 26, "serialized_chars": 40440, "estimated_input_tokens": 10110, "estimated_eval_tokens": 20476 }, "budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 2, "serialized_chars": 7517, "estimated_input_tokens": 1880, "estimated_eval_tokens": 4016 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "The three PRs are unrelated: one adds MobileLLM, one changes tokenizer auto-selection logic, and one is a broad model-creation skill/meta update. None look like duplicates or the same underlying change.", "confidence": 0.98, "canonical_issue_reason": null, "canonical_pr_reason": "PR 43823 is the most concrete standalone code contribution in the set: a new model implementation with tests. 
The other PRs are a narrow tokenizer behavior fix and a broad scaffolding/meta workflow change, so they are not better representatives of a shared change.", "best_issue_reason": null, "best_pr_reason": "PR 43823 is the best representative artifact here because it is a focused, self-contained feature addition with model code and tests. PR 45078 is a separate tokenizer fix, and PR 45213 is mostly tooling/documentation scaffolding.", "soft_edge_verdicts": [ { "left": "pull_request:45078", "right": "pull_request:45213", "accept": false, "reason": "Tokenizer auto-selection bug fix and model-creation skill/meta PR are different changes in different code paths; they do not fix the same concrete problem." }, { "left": "pull_request:43823", "right": "pull_request:45078", "accept": false, "reason": "New MobileLLM model implementation vs tokenizer auto behavior change are unrelated features/fixes with no shared underlying bug." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 2, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-40546-2", "nodes": [ "pull_request:40546", "pull_request:42781" ], "soft_pairs": [ "pull_request:40546|pull_request:42781" ], "prepared_review_unit_hash": "b5e7369a3c793c671281900ca8310e502019eb287687dd41004b7fc43300f4c8", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 7225, "estimated_input_tokens": 1807, "estimated_eval_tokens": 3870 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 7225, "estimated_input_tokens": 1807, "estimated_eval_tokens": 3870 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, 
"split": false, "analyst_result": { "summary": "These are related VibeVoice PRs, but they are not duplicates: #40546 adds the base VibeVoice model integration, while #42781 adds a separate realtime variant with additional tokenizer/feature-extractor and generation changes. They share a model family and several files, but they target different functionality and code paths.", "confidence": 0.93, "canonical_issue_reason": null, "canonical_pr_reason": "#40546 is the better canonical PR because it is the original, broader VibeVoice integration and provides the baseline implementation that the later realtime work builds on.", "best_issue_reason": null, "best_pr_reason": "#40546 is the best PR to represent the cluster since it is the main implementation PR; #42781 is a follow-on extension rather than the same change.", "soft_edge_verdicts": [ { "left": "pull_request:40546", "right": "pull_request:42781", "accept": false, "reason": "Reject: same model family and overlapping files, but #40546 implements core VibeVoice support while #42781 adds realtime-specific functionality and extra components, so they are not the same concrete change." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 3, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-40755-2", "nodes": [ "pull_request:40755", "pull_request:41224" ], "soft_pairs": [ "pull_request:40755|pull_request:41224" ], "prepared_review_unit_hash": "9b213186e856bbb96d838f11e85b2bf1b94cf5965d5db2e697629888e1be78b1", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 4675, "estimated_input_tokens": 1169, "estimated_eval_tokens": 2594 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 4675, "estimated_input_tokens": 1169, "estimated_eval_tokens": 2594 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "The cluster contains two unrelated feature PRs: TimesFM covariate forecasting and DINOv3 image-classification support. They do not duplicate each other.", "confidence": 0.99, "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: the PRs target different models and code paths, so there is no duplicate pair to collapse.", "best_issue_reason": null, "best_pr_reason": "41224 is the more developed and reviewed PR, so it is the better standalone representative, but it is not a duplicate of 40755.", "soft_edge_verdicts": [ { "left": "pull_request:40755", "right": "pull_request:41224", "accept": false, "reason": "Different model families and code paths: TimesFM covariate forecasting vs DINOv3 image-classification support. They share only a broad 'add model support' theme, not the same underlying change." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 4, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-41356-2", "nodes": [ "pull_request:41356", "pull_request:44339" ], "soft_pairs": [ "pull_request:41356|pull_request:44339" ], "prepared_review_unit_hash": "ba02369a73fdb5d12f38ed566851f1904efdf0a4b385e10a04bf2c7968ceccf8", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 5934, "estimated_input_tokens": 1484, "estimated_eval_tokens": 3224 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 5934, "estimated_input_tokens": 1484, "estimated_eval_tokens": 3224 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "Both PRs are for DEIMv2 integration, with substantial overlap in model/docs/tests and the same tracking issue. 
PR 44339 is the broader, more complete implementation, while 41356 looks like an earlier partial version.", "confidence": 0.78, "canonical_issue_reason": null, "canonical_pr_reason": "PR 44339 is the better canonical PR because it is the more complete DEIMv2 integration: it adds the model, loss, auto-registry wiring, converter, docs, and tests, whereas 41356 is an earlier smaller implementation.", "best_issue_reason": null, "best_pr_reason": "PR 44339 is the best representative of the cluster because it covers the full Transformers integration for DEIMv2 and subsumes the narrower initial add in 41356.", "soft_edge_verdicts": [ { "left": "pull_request:41356", "right": "pull_request:44339", "accept": true, "reason": "These look like the same underlying feature change: adding DEIMv2 to Transformers. They share the same issue target, overlap on core model/doc/test files, and 44339 appears to be the later, expanded version of the same integration rather than a different change." } ] }, "evaluator_result": { "accept": true, "feedback": "The summary is grounded in the packet: both PRs target issue 41211, share core DEIMv2 files, and 44339 clearly adds a broader set of integration pieces (loss, auto-registry, converter, modular code, docs, tests). The mergeability judgment is conservative enough for a same-feature follow-up rather than an unsupported leap." 
}, "evaluator_used": true, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 5, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-41797-2", "nodes": [ "pull_request:41797", "pull_request:45186" ], "soft_pairs": [ "pull_request:41797|pull_request:45186" ], "prepared_review_unit_hash": "c255482e2d75fcab21760b94af16c25a65ec36690463ecb3e80fe24f84ca466f", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 6723, "estimated_input_tokens": 1681, "estimated_eval_tokens": 3618 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 6723, "estimated_input_tokens": 1681, "estimated_eval_tokens": 3618 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "These are both new-model integration PRs, but for different models and codepaths (DeepSeekOCR vs Isaac). The overlap is mostly boilerplate for registering a new model, not a shared underlying bug or change.", "confidence": 0.98, "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: they introduce different model families with distinct configs, modeling, processing, and tests.", "best_issue_reason": null, "best_pr_reason": "45186 is the slightly better representative of the cluster because it is a fuller integration with model, image processing, and extra utility coverage, but it is not a duplicate of 41797.", "soft_edge_verdicts": [ { "left": "pull_request:41797", "right": "pull_request:45186", "accept": false, "reason": "Different model additions for different architectures (DeepSeekOCR vs Isaac). 
Shared files like auto registries and repo checks are expected boilerplate for new model PRs, not evidence of the same underlying change." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 6, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-42166-2", "nodes": [ "pull_request:42166", "pull_request:45426" ], "soft_pairs": [ "pull_request:42166|pull_request:45426" ], "prepared_review_unit_hash": "2dd92b3a46304258998dcb98e9055eff0bc52685ebcbfa34f509f08f16a42355", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 5407, "estimated_input_tokens": 1352, "estimated_eval_tokens": 2960 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 5407, "estimated_input_tokens": 1352, "estimated_eval_tokens": 2960 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "These are two separate model-addition pull requests for different architectures (InternVLFlash vs AXK1). They share some auto-registration scaffolding, but they do not address the same underlying change and should not be deduplicated.", "confidence": 0.96, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": null, "soft_edge_verdicts": [ { "left": "pull_request:42166", "right": "pull_request:45426", "accept": false, "reason": "Different model integrations with different files, configs, and code paths; shared registry edits are incidental and not enough to treat them as the same PR or merge into one change." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 7, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-42227-5", "nodes": [ "pull_request:42227", "pull_request:43291", "pull_request:43665", "pull_request:44815", "pull_request:45448" ], "soft_pairs": [ "pull_request:42227|pull_request:43291", "pull_request:42227|pull_request:43665", "pull_request:42227|pull_request:44815", "pull_request:42227|pull_request:45448", "pull_request:43291|pull_request:43665", "pull_request:43291|pull_request:44815", "pull_request:43291|pull_request:45448", "pull_request:43665|pull_request:44815", "pull_request:43665|pull_request:45448", "pull_request:44815|pull_request:45448" ], "prepared_review_unit_hash": "f3f6a311a097649c7d15a1f2978103d2557cc9bb2e0ebe912e494e623a047175", "component_budget": { "node_count": 5, "item_count": 5, "soft_pair_count": 10, "serialized_chars": 13679, "estimated_input_tokens": 3420, "estimated_eval_tokens": 7096 }, "budget": { "node_count": 5, "item_count": 5, "soft_pair_count": 10, "serialized_chars": 13679, "estimated_input_tokens": 3420, "estimated_eval_tokens": 7096 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "These PRs are not duplicates overall; they split into unrelated fixes: Whisper return-language plumbing, Whisper tokenizer tests, placeholder/bogus test edits, FP8 dequant loading fixes, and CLIP checkpoint conversion fixes.", "confidence": 0.93, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": "PR 42227 is the strongest standalone candidate: it has a concrete Whisper bug fix, an explicit 
linked issue, and a focused code change in the ASR pipeline.", "soft_edge_verdicts": [ { "left": "pull_request:42227", "right": "pull_request:43291", "accept": false, "reason": "Both touch Whisper, but 42227 fixes return_language forwarding in the ASR pipeline while 43291 changes tokenizer decode/test behavior; different underlying bugs." }, { "left": "pull_request:42227", "right": "pull_request:43665", "accept": false, "reason": "Unrelated: 42227 is a Whisper pipeline bug fix, while 43665 only adds/edits tests for CLIP and ViT with no shared concrete code-path problem." }, { "left": "pull_request:42227", "right": "pull_request:44815", "accept": false, "reason": "Different subsystems and failures: Whisper ASR parameter handling versus FP8/core model loading dequantization logic." }, { "left": "pull_request:42227", "right": "pull_request:45448", "accept": false, "reason": "Unrelated changes: Whisper return-language fix versus CLIP checkpoint conversion mapping and loading behavior." }, { "left": "pull_request:43291", "right": "pull_request:43665", "accept": false, "reason": "Test-focused edits in different models, but they do not address the same concrete bug or code path." }, { "left": "pull_request:43291", "right": "pull_request:44815", "accept": false, "reason": "Whisper tokenizer/test changes are unrelated to FP8 loading/dequantization fixes." }, { "left": "pull_request:43291", "right": "pull_request:45448", "accept": false, "reason": "Different targets: Whisper decode/test adjustments versus CLIP conversion/loading fixes." }, { "left": "pull_request:43665", "right": "pull_request:44815", "accept": false, "reason": "No shared underlying bug; one is bogus model test edits, the other is a substantive FP8 dequant/loading change." }, { "left": "pull_request:43665", "right": "pull_request:45448", "accept": false, "reason": "Different code paths and goals: placeholder test edits versus CLIP checkpoint conversion mapping." 
}, { "left": "pull_request:44815", "right": "pull_request:45448", "accept": false, "reason": "They both mention core_model_loading.py, but 44815 is FP8 dequant logic while 45448 is CLIP prefix/conversion mapping; not the same fix and not mergeable into one PR." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 8, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-42975-2", "nodes": [ "pull_request:42975", "pull_request:42976" ], "soft_pairs": [ "pull_request:42975|pull_request:42976" ], "prepared_review_unit_hash": "d8f875bf0db77bab5a32b47689b90e4603768efccf86616b34a63cb7aee63190", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 5934, "estimated_input_tokens": 1484, "estimated_eval_tokens": 3224 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 5934, "estimated_input_tokens": 1484, "estimated_eval_tokens": 3224 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "Two open PRs both update GitHub Actions versions in workflow files, but they target different action families and different change scopes: one is about Node 24 compatibility for core actions, the other is a broader/latest-version bump centered on Docker-related actions. 
They are related maintenance, not the same underlying change.", "confidence": 0.91, "canonical_issue_reason": null, "canonical_pr_reason": "PR #42975 is the better canonical representative because it has the broader workflow-action compatibility upgrade scope and a clearer single theme (Node 24 compatibility across multiple GitHub Actions).", "best_issue_reason": null, "best_pr_reason": "PR #42975 is the best representative of the cluster overall: it spans more workflows and a more coherent dependency upgrade objective than #42976, which is narrower and more piecemeal.", "soft_edge_verdicts": [ { "left": "pull_request:42975", "right": "pull_request:42976", "accept": false, "reason": "Both are workflow dependency bumps, but they change different actions and do not appear to fix the same concrete bug or be safely mergeable into one PR without conflating distinct upgrade tracks." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 9, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-42978-3", "nodes": [ "pull_request:42978", "pull_request:43448", "pull_request:43451" ], "soft_pairs": [ "pull_request:42978|pull_request:43448", "pull_request:42978|pull_request:43451", "pull_request:43448|pull_request:43451" ], "prepared_review_unit_hash": "5e6f6d668a9c602eecd1717890fb4fd662859b7c0a10ddb9407083169910ac69", "component_budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 3, "serialized_chars": 10180, "estimated_input_tokens": 2545, "estimated_eval_tokens": 5346 }, "budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 3, "serialized_chars": 10180, "estimated_input_tokens": 2545, "estimated_eval_tokens": 5346 }, "overflow_policy": 
"truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "These are three separate model-addition PRs: ViT NEPA, Molmo, and Molmo2. Their overlap is limited to shared docs/auto-registration boilerplate, not the same underlying code change, so none should be merged as duplicates.", "confidence": 0.98, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": "pull_request:43451 is the most substantial and specific PR in the set, but it is still a distinct Molmo2 addition rather than a duplicate of the others.", "soft_edge_verdicts": [ { "left": "pull_request:42978", "right": "pull_request:43448", "accept": false, "reason": "Different models and code paths: ViT NEPA vs Molmo. Shared docs/auto-registry edits are boilerplate only." }, { "left": "pull_request:42978", "right": "pull_request:43451", "accept": false, "reason": "Different model additions with unrelated implementations. Similarities are limited to generic docs/registration files." }, { "left": "pull_request:43448", "right": "pull_request:43451", "accept": false, "reason": "Related family names, but Molmo and Molmo2 are separate model integrations with different files and behavior; not the same concrete change." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 10, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-43488-2", "nodes": [ "pull_request:43488", "pull_request:43532" ], "soft_pairs": [ "pull_request:43488|pull_request:43532" ], "prepared_review_unit_hash": "c5cff6f71c7ec8e7f20e7bb227a031abcf12197a6fcb47c8cb5771e0ec4bee2c", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 4563, "estimated_input_tokens": 1141, "estimated_eval_tokens": 2538 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 4563, "estimated_input_tokens": 1141, "estimated_eval_tokens": 2538 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "The two PRs are not the same underlying change: #43488 is a repo-bot/check-format style change touching packaging and a VIT test, while #43532 is a broader CI/workflow and testing utility change. 
They only overlap on one test file and do not appear mergeable into a single fix.", "confidence": 0.95, "canonical_issue_reason": null, "canonical_pr_reason": "PR #43532 is the more substantive and representative change in this cluster: it makes a concrete CI/testing workflow adjustment across workflow files and testing utilities, whereas #43488 looks like a synthetic repo-bot check with unrelated packaging edits.", "best_issue_reason": null, "best_pr_reason": "PR #43532 is the best representative because it has a clear, coherent codepath change centered on CI/test infrastructure and broader impact than the narrow, likely synthetic edits in #43488.", "soft_edge_verdicts": [ { "left": "pull_request:43488", "right": "pull_request:43532", "accept": false, "reason": "Reject: the PRs target different problems. #43488 changes setup/versioning and test dependency plumbing for a repo-bot-style check, while #43532 rewires comment-driven CI/workflow behavior and related utilities. The only shared file is a test file, which is insufficient to treat them as the same underlying change." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 11, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-43612-2", "nodes": [ "pull_request:43612", "pull_request:43613" ], "soft_pairs": [ "pull_request:43612|pull_request:43613" ], "prepared_review_unit_hash": "2f571f69b82f0e8b4eb52552cb19c117326f47abc8d47b22f9892777bb0fb263", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 6083, "estimated_input_tokens": 1521, "estimated_eval_tokens": 3298 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 6083, "estimated_input_tokens": 1521, "estimated_eval_tokens": 3298 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "These are related but distinct PRs: one adds a Promptable Concept Segmentation pipeline, the other adds a Promptable Visual Segmentation pipeline. 
They share documentation/scaffolding patterns, but the underlying tasks, model integrations, and test targets differ, so they should not be deduplicated.", "confidence": 0.95, "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR: the two changes implement different pipeline features rather than the same fix/change.", "best_issue_reason": null, "best_pr_reason": "Neither PR is a duplicate of the other; if forced to pick a representative, PR #43613 is slightly broader in model coverage, but it still addresses a different feature than #43612.", "soft_edge_verdicts": [ { "left": "pull_request:43612", "right": "pull_request:43613", "accept": false, "reason": "Different pipeline features (concept vs visual segmentation) with different filenames/tests and model integrations; shared docs and pipeline scaffolding are not enough to treat them as the same change." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 12, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-43651-2", "nodes": [ "pull_request:43651", "pull_request:43663" ], "soft_pairs": [ "pull_request:43651|pull_request:43663" ], "prepared_review_unit_hash": "8a8bd0a1f8b61b9e8eaec9ccd1c006848dd5a53c1ca7d730bbc86880670d0328", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 4292, "estimated_input_tokens": 1073, "estimated_eval_tokens": 2402 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 4292, "estimated_input_tokens": 1073, "estimated_eval_tokens": 2402 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { 
"summary": "The two PRs are both small trainer refactors that add override hooks, but they address different behaviors: one changes gradient-accumulation loss scaling, the other changes column filtering based on model signature. They are not the same underlying fix and should not be merged as duplicates.", "confidence": 0.97, "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR for a duplicate set: the PRs target different code paths and different extensibility points in `trainer.py`.", "best_issue_reason": null, "best_pr_reason": "PR 43651 is the cleaner representative of the cluster because it introduces a focused trainer override hook for loss scaling; however, it is not a duplicate of 43663.", "soft_edge_verdicts": [ { "left": "pull_request:43651", "right": "pull_request:43663", "accept": false, "reason": "Both touch `trainer.py`, but one addresses gradient accumulation loss normalization and the other changes signature-column filtering for PEFT/custom trainers. Different behavior, different fix." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 13, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-43656-3", "nodes": [ "pull_request:43656", "pull_request:43836", "pull_request:43842" ], "soft_pairs": [ "pull_request:43656|pull_request:43842", "pull_request:43836|pull_request:43842" ], "prepared_review_unit_hash": "c93599a405d213483d44b572c5c9c7fe8705f252347a42f5feb48056f9b16637", "component_budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 2, "serialized_chars": 5992, "estimated_input_tokens": 1498, "estimated_eval_tokens": 3252 }, "budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 2, "serialized_chars": 5992, "estimated_input_tokens": 1498, "estimated_eval_tokens": 3252 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "PR 43842 is still the best representative: it directly fixes the optional-`pydantic` `TypeAdapter` NameError with a minimal, focused change. 
The other two PRs are too weakly grounded as duplicates: 43656 looks like a broader annotation/`__future__` cleanup with a different issue target, and 43836 appears to be a larger Serve refactor that merely includes a TypeAdapter-related edit.", "confidence": 0.78, "canonical_issue_reason": null, "canonical_pr_reason": "PR 43842 is the most direct and minimal fix for the `TypeAdapter` NameError, with an explicit issue link and a narrow change set concentrated on the failing import/annotation path.", "best_issue_reason": null, "best_pr_reason": "PR 43842 is the best overall representative because it targets the concrete runtime failure directly and avoids the broader refactor/annotation cleanup noise seen in the other PRs.", "soft_edge_verdicts": [ { "left": "pull_request:43656", "right": "pull_request:43842", "accept": false, "reason": "The evidence is not strong enough to treat these as the same bugfix. 43656 has a different issue target and the preview shows broader `from __future__ import annotations` / annotation cleanup in `TimedModel`, not a clearly identical `TypeAdapter` NameError fix." }, { "left": "pull_request:43836", "right": "pull_request:43842", "accept": false, "reason": "Although both touch `src/transformers/cli/serve.py` and share an issue target, the preview for 43836 suggests a wider Serve refactor rather than a narrowly equivalent fix for the same concrete `TypeAdapter` NameError path." } ] }, "evaluator_result": { "accept": false, "feedback": "The conclusion is a bit too strong for the packet. PR 43842 is well-supported as the most direct fix: it has an explicit issue link and a narrow change around `TypeAdapter` import/annotation handling. But the claim that all three PRs are the same `TypeAdapter` NameError fix is not fully grounded. For 43656, the preview only shows a broader `from __future__ import annotations` / annotation cleanup in `TimedModel`, with a different issue target and no direct evidence it is the same bug. 
For 43836, the shared issue target helps, but the diff preview suggests a wider Serve refactor rather than just the minimal bugfix. The soft-edge accepts therefore feel a bit non-conservative, especially for 43656 vs 43842." }, "evaluator_used": true, "retried": true, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 14, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-43757-2", "nodes": [ "pull_request:43757", "pull_request:45118" ], "soft_pairs": [ "pull_request:43757|pull_request:45118" ], "prepared_review_unit_hash": "43264d72e6cd1756d46db3cf65a63f221a2650537b445d3e04fc90799a43b383", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 4609, "estimated_input_tokens": 1153, "estimated_eval_tokens": 2562 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 4609, "estimated_input_tokens": 1153, "estimated_eval_tokens": 2562 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "These PRs are related to GPT-OSS GGUF loading, but they are not duplicates: #43757 is a stopgap fallback to avoid hard failure, while #45118 adds full GGUF loading support with docs, integration, and tests. 
The later PR is the better representative of the underlying fix.", "confidence": 0.91, "canonical_issue_reason": null, "canonical_pr_reason": "#45118 is the broader, final implementation for GPT-OSS GGUF loading support; it supersedes the earlier fallback-only workaround in #43757.", "best_issue_reason": null, "best_pr_reason": "#45118 is the best PR because it addresses the concrete loading path end-to-end, not just a temporary compatibility fallback.", "soft_edge_verdicts": [ { "left": "pull_request:43757", "right": "pull_request:45118", "accept": false, "reason": "Same general issue target, but different changes: #43757 only adds a fallback to gpt-neox for unsupported GPT-OSS GGUF, while #45118 implements full GGUF loading support. They are related, but not the same underlying code change." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 15, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-43779-2", "nodes": [ "pull_request:43779", "pull_request:43816" ], "soft_pairs": [ "pull_request:43779|pull_request:43816" ], "prepared_review_unit_hash": "e7c674adcc031aa8883109a5dfc678b4407fb20b10764302092e384d031ba83a", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 4362, "estimated_input_tokens": 1091, "estimated_eval_tokens": 2438 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 4362, "estimated_input_tokens": 1091, "estimated_eval_tokens": 2438 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "Both PRs target the same SwanLabCallback enhancement in the same file and appear 
to implement the same underlying run-resume/id support for SwanLab init. PR 43816 looks like the more complete formulation, with explicit parameter docs plus the code-path change.", "confidence": 0.79, "canonical_issue_reason": null, "canonical_pr_reason": "PR 43816 is the better canonical representative because it more clearly and completely describes the same SwanLab resume/id feature, including documentation for the new `SWANLAB_RUN_ID` and `SWANLAB_RESUME` parameters alongside the integration change.", "best_issue_reason": null, "best_pr_reason": "PR 43816 is the strongest overall candidate: it targets the same issue, touches the same code path, and appears to be the more complete implementation of the SwanLab `id`/`resume` support.", "soft_edge_verdicts": [ { "left": "pull_request:43779", "right": "pull_request:43816", "accept": true, "reason": "Same underlying SwanLabCallback change: both modify `integration_utils.py` to support `id`/`resume`-style run resumption, share the same issue target, and could plausibly be merged into one PR." } ] }, "evaluator_result": { "accept": true, "feedback": "The packet supports the analyst\u2019s conclusion: both PRs target issue 43698, touch the same file, and are clearly about the same SwanLab `id`/`resume` resume-support bugfix. The soft-edge mergeability verdict is conservative enough here. The only caveat is that PR 43816\u2019s preview is truncated, so the \u201cmore complete\u201d claim should be treated as tentative, but it is still grounded by the titles and diff previews." 
}, "evaluator_used": true, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 16, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-43838-3", "nodes": [ "pull_request:43838", "pull_request:43973", "pull_request:45283" ], "soft_pairs": [ "pull_request:43838|pull_request:45283", "pull_request:43973|pull_request:45283" ], "prepared_review_unit_hash": "122912ebfe058fba12ba1a6c85c95f103d29cb580101dfe00b661a95c9806156", "component_budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 2, "serialized_chars": 8701, "estimated_input_tokens": 2176, "estimated_eval_tokens": 4608 }, "budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 2, "serialized_chars": 8701, "estimated_input_tokens": 2176, "estimated_eval_tokens": 4608 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "These are three unrelated model-support pull requests: Qwen3-ASR integration, Lfm2Audio support, and Qwen3.5 GGUF loading. 
They share only broad audio/LLM-adjacent infrastructure patterns, not the same underlying bug or change.", "confidence": 0.97, "canonical_issue_reason": null, "canonical_pr_reason": "No single PR here is a clear canonical duplicate target; the closest to a substantial implementation PR is #43838 because it adds a full new model family with docs, auto-registration, modeling, processing, and tests.", "best_issue_reason": null, "best_pr_reason": "#43838 is the strongest standalone PR: it has the broadest and most complete model integration surface plus explicit issue linkage, making it the best representative of this cluster even though the other PRs are unrelated.", "soft_edge_verdicts": [ { "left": "pull_request:43838", "right": "pull_request:45283", "accept": false, "reason": "Different changes: #43838 adds Qwen3-ASR model support and docs/tests, while #45283 adds GGUF loading mappings for Qwen3.5. They do not fix the same code-path problem and would not plausibly be merged as one PR." }, { "left": "pull_request:43973", "right": "pull_request:45283", "accept": false, "reason": "Different model families and different purposes: #43973 adds Lfm2Audio support, while #45283 changes GGUF integration for Qwen3.5. Shared audio/serialization infrastructure is too broad to treat as the same underlying change." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 17, "component_count": 23, "review_unit_index": 1, "review_unit_count": 4, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-43996-28", "nodes": [ "pull_request:43996", "pull_request:44007", "pull_request:44013", "pull_request:44018", "pull_request:44019", "pull_request:44044", "pull_request:44066", "pull_request:44068", "pull_request:44071", "pull_request:44072", "pull_request:44085", "pull_request:44086", "pull_request:44116", "pull_request:44722" ], "soft_pairs": [ "pull_request:43996|pull_request:44085", "pull_request:43996|pull_request:44044", "pull_request:44066|pull_request:44085", "pull_request:44007|pull_request:44072", "pull_request:44072|pull_request:44722", "pull_request:44066|pull_request:44072", "pull_request:44013|pull_request:44044", "pull_request:44066|pull_request:44086", "pull_request:44066|pull_request:44071", "pull_request:44018|pull_request:44068", "pull_request:44066|pull_request:44068", "pull_request:44018|pull_request:44066", "pull_request:44019|pull_request:44722", "pull_request:44086|pull_request:44722", "pull_request:44019|pull_request:44085", "pull_request:44018|pull_request:44019", "pull_request:44019|pull_request:44071", "pull_request:44068|pull_request:44116", "pull_request:44068|pull_request:44722", "pull_request:44018|pull_request:44116", "pull_request:44071|pull_request:44722", "pull_request:44018|pull_request:44086", "pull_request:44018|pull_request:44085", "pull_request:44068|pull_request:44086" ], "prepared_review_unit_hash": "1c490e983cba6da6abadee9af7cc465830355935f9f969a3370443c5196e3f87", "component_budget": { "node_count": 28, "item_count": 28, "soft_pair_count": 77, "serialized_chars": 84505, "estimated_input_tokens": 21127, 
"estimated_eval_tokens": 42510 }, "budget": { "node_count": 14, "item_count": 14, "soft_pair_count": 24, "serialized_chars": 34759, "estimated_input_tokens": 8690, "estimated_eval_tokens": 17636 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is mostly a set of model-specific output-tracing refactors, but only the GPT-Neo pair and the GPT-J family variants look like true duplicates of the same concrete code change. The rest are broader umbrella refactors across different architectures and should stay separate.", "confidence": 0.78, "canonical_issue_reason": null, "canonical_pr_reason": "PR 44066 is the best representative of the cluster: it has the explicit tracking issue, targets the standardized output-tracing migration directly, and covers the core GPT-J/CodeGen path cleanly.", "best_issue_reason": null, "best_pr_reason": "44066 is the most canonical single PR in this set because it is the clearest, broadest example of the shared refactor pattern and is explicitly tied to the umbrella issue.", "soft_edge_verdicts": [ { "left": "pull_request:43996", "right": "pull_request:44085", "accept": false, "reason": "Different model families and files; both are output-tracing refactors, but not the same concrete change." }, { "left": "pull_request:43996", "right": "pull_request:44044", "accept": false, "reason": "CVT/FNet vs DeBERTa V2; same umbrella theme only, not a duplicate patch." }, { "left": "pull_request:44066", "right": "pull_request:44085", "accept": true, "reason": "Same GPT-J output-tracing decorator migration in the same core file; 44085 looks like a narrower version of the same change." }, { "left": "pull_request:44007", "right": "pull_request:44072", "accept": false, "reason": "ResNet/RT-DETR vs EfficientNet; shared theme only, not the same code path." 
}, { "left": "pull_request:44072", "right": "pull_request:44722", "accept": false, "reason": "EfficientNet vs GPT-J/CodeGen; unrelated implementations despite similar refactor wording." }, { "left": "pull_request:44066", "right": "pull_request:44072", "accept": false, "reason": "GPT-J/CodeGen vs EfficientNet; same refactor style, different underlying model code." }, { "left": "pull_request:44013", "right": "pull_request:44044", "accept": false, "reason": "MobileNetV2 vs DeBERTa V2; not the same bug or patch." }, { "left": "pull_request:44066", "right": "pull_request:44086", "accept": false, "reason": "GPT-J/CodeGen vs MGP-STR; broad output-tracing work only, no shared concrete code path." }, { "left": "pull_request:44066", "right": "pull_request:44071", "accept": false, "reason": "GPT-J/CodeGen vs MPT; same infrastructure change, different model implementation." }, { "left": "pull_request:44018", "right": "pull_request:44068", "accept": true, "reason": "Both are GPT-Neo output-tracing decorator refactors in the same file/test area, so they look like the same underlying change." }, { "left": "pull_request:44066", "right": "pull_request:44068", "accept": false, "reason": "GPT-J/CodeGen vs GPT-Neo; similar refactor pattern, but different model-specific code paths." }, { "left": "pull_request:44018", "right": "pull_request:44066", "accept": false, "reason": "GPT-Neo vs GPT-J/CodeGen; same umbrella initiative, but not the same concrete patch." }, { "left": "pull_request:44019", "right": "pull_request:44722", "accept": false, "reason": "ResNet vs GPT-J/CodeGen; no shared code path." }, { "left": "pull_request:44086", "right": "pull_request:44722", "accept": false, "reason": "MGP-STR vs GPT-J/CodeGen; only the refactor pattern matches, not the implementation." }, { "left": "pull_request:44019", "right": "pull_request:44085", "accept": false, "reason": "ResNet vs GPT-J; different architecture and file set." 
}, { "left": "pull_request:44018", "right": "pull_request:44019", "accept": false, "reason": "GPT-Neo vs ResNet; same output-tracing theme, different concrete change." }, { "left": "pull_request:44019", "right": "pull_request:44071", "accept": false, "reason": "ResNet vs MPT; not the same code path." }, { "left": "pull_request:44068", "right": "pull_request:44116", "accept": false, "reason": "GPT-Neo vs Flaubert; shared refactor style only." }, { "left": "pull_request:44068", "right": "pull_request:44722", "accept": false, "reason": "GPT-Neo vs GPT-J/CodeGen; different model families." }, { "left": "pull_request:44071", "right": "pull_request:44722", "accept": false, "reason": "MPT vs GPT-J/CodeGen; not a duplicate." }, { "left": "pull_request:44018", "right": "pull_request:44086", "accept": false, "reason": "GPT-Neo vs MGP-STR; same umbrella work, different code paths." }, { "left": "pull_request:44018", "right": "pull_request:44085", "accept": false, "reason": "GPT-Neo vs GPT-J; different models and files." }, { "left": "pull_request:44068", "right": "pull_request:44086", "accept": false, "reason": "GPT-Neo vs MGP-STR; not the same underlying change." }, { "left": "pull_request:44066", "right": "pull_request:44722", "accept": true, "reason": "Both target the GPT-J output-tracing decorator refactor on the same core file family; they look mergeable into one PR." }, { "left": "pull_request:44085", "right": "pull_request:44722", "accept": true, "reason": "Same GPT-J output-tracing refactor in overlapping code; 44722 is a broader variant of the same change." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": true, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 17, "component_count": 23, "review_unit_index": 2, "review_unit_count": 4, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-43996-28", "nodes": [ "pull_request:43997", "pull_request:44017", "pull_request:44018", "pull_request:44056", "pull_request:44059", "pull_request:44068", "pull_request:44071", "pull_request:44073", "pull_request:44074", "pull_request:44076", "pull_request:44085", "pull_request:44086", "pull_request:44098", "pull_request:44101", "pull_request:44161", "pull_request:44722" ], "soft_pairs": [ "pull_request:44071|pull_request:44085", "pull_request:44059|pull_request:44086", "pull_request:44059|pull_request:44722", "pull_request:44085|pull_request:44086", "pull_request:44098|pull_request:44101", "pull_request:44018|pull_request:44071", "pull_request:44017|pull_request:44722", "pull_request:44017|pull_request:44161", "pull_request:44017|pull_request:44073", "pull_request:44068|pull_request:44071", "pull_request:44071|pull_request:44086", "pull_request:44059|pull_request:44071", "pull_request:44017|pull_request:44071", "pull_request:43997|pull_request:44073", "pull_request:44059|pull_request:44068", "pull_request:44018|pull_request:44059", "pull_request:44073|pull_request:44101", "pull_request:44056|pull_request:44101", "pull_request:44073|pull_request:44161", "pull_request:44101|pull_request:44161", "pull_request:44017|pull_request:44018", "pull_request:43997|pull_request:44056", "pull_request:44071|pull_request:44076", "pull_request:44074|pull_request:44101" ], "prepared_review_unit_hash": "d75994d68e416067626cc8c5513906763535efcf262df1ebbd47be44b25ebbb5", "component_budget": { "node_count": 28, "item_count": 28, "soft_pair_count": 77, "serialized_chars": 
84505, "estimated_input_tokens": 21127, "estimated_eval_tokens": 42510 }, "budget": { "node_count": 16, "item_count": 16, "soft_pair_count": 24, "serialized_chars": 38248, "estimated_input_tokens": 9562, "estimated_eval_tokens": 19380 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is a set of similar but model-specific refactors for standardized output tracing/capture_outputs. The items mostly share the same refactoring theme, but they are not duplicate fixes of the same concrete code path, so the soft edges should be rejected.", "confidence": 0.89, "canonical_issue_reason": null, "canonical_pr_reason": "44161 is the cleanest representative of the shared theme: it explicitly targets #43979 and focuses on a single model\u2019s transition to @capture_outputs/@can_return_tuple with a clear, self-contained refactor.", "best_issue_reason": null, "best_pr_reason": "44161 is the best representative PR in the cluster because it is explicitly issue-linked, narrowly scoped, and directly reflects the standardized output-handling change pattern without mixing in unrelated model changes.", "soft_edge_verdicts": [ { "left": "pull_request:44071", "right": "pull_request:44085", "accept": false, "reason": "Different models/code paths (MPT vs GPT-J). Same refactor theme, but not the same underlying change." }, { "left": "pull_request:44059", "right": "pull_request:44086", "accept": false, "reason": "GPT-2 and MGP-STR are unrelated model implementations; this is only a broad architectural similarity." }, { "left": "pull_request:44059", "right": "pull_request:44722", "accept": false, "reason": "GPT-2 vs GPT-J/CodeGen. Shared output-tracing theme is too broad to treat as a duplicate change." }, { "left": "pull_request:44085", "right": "pull_request:44086", "accept": false, "reason": "GPT-J and MGP-STR touch different code paths and models." 
}, { "left": "pull_request:44098", "right": "pull_request:44101", "accept": false, "reason": "ViLT vs Flaubert/XLM are different model families; same refactor pattern is not enough." }, { "left": "pull_request:44018", "right": "pull_request:44071", "accept": false, "reason": "GPT-Neo and MPT are unrelated model-specific refactors." }, { "left": "pull_request:44017", "right": "pull_request:44722", "accept": false, "reason": "SegFormer vs GPT-J/CodeGen are different model changes with no shared concrete code path." }, { "left": "pull_request:44017", "right": "pull_request:44161", "accept": false, "reason": "SegFormer and LongT5 are separate implementations; this is only a similar output-handling migration." }, { "left": "pull_request:44017", "right": "pull_request:44073", "accept": false, "reason": "Different models (SegFormer vs VisualBert); not the same bug or fix." }, { "left": "pull_request:44068", "right": "pull_request:44071", "accept": false, "reason": "GPT-Neo and MPT are separate model refactors, not one duplicate change." }, { "left": "pull_request:44071", "right": "pull_request:44086", "accept": false, "reason": "MPT vs MGP-STR are unrelated code paths." }, { "left": "pull_request:44059", "right": "pull_request:44071", "accept": false, "reason": "GPT-2 and MPT are different models; shared decorator migration is too broad." }, { "left": "pull_request:44017", "right": "pull_request:44071", "accept": false, "reason": "SegFormer vs MPT are not the same underlying fix." }, { "left": "pull_request:43997", "right": "pull_request:44073", "accept": false, "reason": "RegNet and VisualBert are unrelated model changes." }, { "left": "pull_request:44059", "right": "pull_request:44068", "accept": false, "reason": "GPT-2 and GPT-Neo are distinct model code paths; same refactor family is not a duplicate." 
}, { "left": "pull_request:44018", "right": "pull_request:44059", "accept": false, "reason": "GPT-Neo vs GPT-2 are different implementations and should not be merged as duplicates." }, { "left": "pull_request:44073", "right": "pull_request:44101", "accept": false, "reason": "VisualBert vs Flaubert/XLM are different model families and fixes." }, { "left": "pull_request:44056", "right": "pull_request:44101", "accept": false, "reason": "MPNet and Flaubert/XLM are unrelated model-specific refactors." }, { "left": "pull_request:44073", "right": "pull_request:44161", "accept": false, "reason": "VisualBert and LongT5 are different model code paths; only the refactor style matches." }, { "left": "pull_request:44101", "right": "pull_request:44161", "accept": false, "reason": "Different model families and different output-tracing implementations." }, { "left": "pull_request:44017", "right": "pull_request:44018", "accept": false, "reason": "SegFormer vs GPT-Neo are unrelated model changes." }, { "left": "pull_request:43997", "right": "pull_request:44056", "accept": false, "reason": "RegNet and MPNet are separate model implementations." }, { "left": "pull_request:44071", "right": "pull_request:44076", "accept": false, "reason": "MPT and ImageGPT are different models; this is only the same refactoring pattern." }, { "left": "pull_request:44074", "right": "pull_request:44101", "accept": false, "reason": "TextNet vs Flaubert/XLM are different model-specific output handling refactors." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 17, "component_count": 23, "review_unit_index": 3, "review_unit_count": 4, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-43996-28", "nodes": [ "pull_request:43996", "pull_request:43997", "pull_request:44007", "pull_request:44010", "pull_request:44017", "pull_request:44018", "pull_request:44044", "pull_request:44056", "pull_request:44066", "pull_request:44071", "pull_request:44073", "pull_request:44074", "pull_request:44085", "pull_request:44129", "pull_request:44154", "pull_request:44161", "pull_request:44722" ], "soft_pairs": [ "pull_request:44056|pull_request:44161", "pull_request:43997|pull_request:44018", "pull_request:43997|pull_request:44161", "pull_request:44074|pull_request:44161", "pull_request:43997|pull_request:44071", "pull_request:44010|pull_request:44071", "pull_request:44010|pull_request:44073", "pull_request:44018|pull_request:44129", "pull_request:43997|pull_request:44074", "pull_request:43997|pull_request:44129", "pull_request:44010|pull_request:44018", "pull_request:44071|pull_request:44129", "pull_request:43996|pull_request:44161", "pull_request:43996|pull_request:44017", "pull_request:44010|pull_request:44044", "pull_request:44044|pull_request:44074", "pull_request:44066|pull_request:44722", "pull_request:44085|pull_request:44722", "pull_request:44085|pull_request:44129", "pull_request:43996|pull_request:44007", "pull_request:43996|pull_request:44129", "pull_request:44044|pull_request:44129", "pull_request:44044|pull_request:44154" ], "prepared_review_unit_hash": "26b1afd0c7d354cf3cee80c0afb1064b2493655c23e0c56a977653a1dd850304", "component_budget": { "node_count": 28, "item_count": 28, "soft_pair_count": 77, "serialized_chars": 84505, 
"estimated_input_tokens": 21127, "estimated_eval_tokens": 42510 }, "budget": { "node_count": 17, "item_count": 17, "soft_pair_count": 23, "serialized_chars": 40363, "estimated_input_tokens": 10091, "estimated_eval_tokens": 20438 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "Most items are related but distinct model-by-model refactors for the shared output-tracing initiative. The only true duplicate pair here is the GPT-J/CodeGen PR line (44066 and 44722); the rest are separate model ports and should not be merged as duplicates.", "confidence": 0.93, "canonical_issue_reason": null, "canonical_pr_reason": "44066 is the cleanest representative: it has the explicit #43979 linkage, a clear GPT-J/CodeGen output-tracing refactor, and is the strongest anchor for the duplicated GPT-J PR variant (44722).", "best_issue_reason": null, "best_pr_reason": "44066 is the best overall PR to represent this cluster because it is explicit about the target issue, covers the core code-path change, and most clearly matches the duplicate GPT-J/CodeGen follow-up PR.", "soft_edge_verdicts": [ { "left": "pull_request:44056", "right": "pull_request:44161", "accept": false, "reason": "Both are output-tracing refactors, but for different models (MPNet vs LongT5); same initiative, not the same concrete change." }, { "left": "pull_request:43997", "right": "pull_request:44018", "accept": false, "reason": "RegNet and GPT-Neo are separate model implementations with different code paths; issue similarity alone is not enough." }, { "left": "pull_request:43997", "right": "pull_request:44161", "accept": false, "reason": "Different model families (RegNet vs LongT5) and different refactor surfaces; not a duplicate change." 
}, { "left": "pull_request:44074", "right": "pull_request:44161", "accept": false, "reason": "TextNet and LongT5 are unrelated implementations; both migrate to the same tracing pattern but are not the same bug/change." }, { "left": "pull_request:43997", "right": "pull_request:44071", "accept": false, "reason": "RegNet vs MPT are distinct model code paths; this is only a shared refactor theme." }, { "left": "pull_request:44010", "right": "pull_request:44071", "accept": false, "reason": "SqueezeBert and MPT are separate model-specific migrations, not one mergeable fix." }, { "left": "pull_request:44010", "right": "pull_request:44073", "accept": false, "reason": "SqueezeBert and VisualBert touch different architectures and tracing code; not the same underlying change." }, { "left": "pull_request:44018", "right": "pull_request:44129", "accept": false, "reason": "GPT-Neo and SpeechT5 are different models with different output-tracing plumbing; do not treat as duplicates." }, { "left": "pull_request:43997", "right": "pull_request:44074", "accept": false, "reason": "RegNet and TextNet are separate model refactors, even if both reference the same broader initiative." }, { "left": "pull_request:43997", "right": "pull_request:44129", "accept": false, "reason": "RegNet and SpeechT5 have different concrete code paths and should not be merged as one PR." }, { "left": "pull_request:44010", "right": "pull_request:44018", "accept": false, "reason": "SqueezeBert and GPT-Neo are unrelated model implementations; same theme, not same fix." }, { "left": "pull_request:44071", "right": "pull_request:44129", "accept": false, "reason": "MPT and SpeechT5 are different subsystems; too broad to call a duplicate." }, { "left": "pull_request:43996", "right": "pull_request:44161", "accept": false, "reason": "CVT/FNet refactors and LongT5 refactors are separate model-specific changes." 
}, { "left": "pull_request:43996", "right": "pull_request:44017", "accept": false, "reason": "CVT/FNet vs SegFormer are different model ports of the same tracing pattern, not duplicates." }, { "left": "pull_request:44010", "right": "pull_request:44044", "accept": false, "reason": "SqueezeBert and DeBERTaV2 are distinct code paths; no evidence of the same concrete bug/change." }, { "left": "pull_request:44044", "right": "pull_request:44074", "accept": false, "reason": "DeBERTaV2 and TextNet are unrelated model implementations; reject despite shared topic." }, { "left": "pull_request:44066", "right": "pull_request:44722", "accept": true, "reason": "Same GPT-J/CodeGen output-tracing refactor on the same files and same issue target; this looks like a direct duplicate or rebased copy." }, { "left": "pull_request:44085", "right": "pull_request:44722", "accept": true, "reason": "Both appear to implement the same GPT-J output-tracing change on the same code path, with overlapping files and shared issue target." }, { "left": "pull_request:44085", "right": "pull_request:44129", "accept": false, "reason": "GPT-J and SpeechT5 are different models; shared issue target is not enough to merge them." }, { "left": "pull_request:43996", "right": "pull_request:44007", "accept": false, "reason": "CVT/FNet and RegNet are separate model-specific tracing refactors, not the same fix." }, { "left": "pull_request:43996", "right": "pull_request:44129", "accept": false, "reason": "CVT/FNet vs SpeechT5 are different code paths and architectures; not duplicates." }, { "left": "pull_request:44044", "right": "pull_request:44129", "accept": false, "reason": "DeBERTaV2 and SpeechT5 are distinct model implementations; the overlap is only in refactor style." }, { "left": "pull_request:44044", "right": "pull_request:44154", "accept": false, "reason": "DeBERTaV2 and VITS are different models and different tracing paths; do not merge as duplicates." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 17, "component_count": 23, "review_unit_index": 4, "review_unit_count": 4, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-43996-28", "nodes": [ "pull_request:43997", "pull_request:44010", "pull_request:44024", "pull_request:44056", "pull_request:44073", "pull_request:44074", "pull_request:44076", "pull_request:44129" ], "soft_pairs": [ "pull_request:44024|pull_request:44076", "pull_request:44073|pull_request:44074", "pull_request:44056|pull_request:44073", "pull_request:44056|pull_request:44074", "pull_request:43997|pull_request:44010", "pull_request:44010|pull_request:44129" ], "prepared_review_unit_hash": "48a7b711572cf17092b9bdad3ec9657dba775a711c2f9e4c1e99015680e184d9", "component_budget": { "node_count": 28, "item_count": 28, "soft_pair_count": 77, "serialized_chars": 84505, "estimated_input_tokens": 21127, "estimated_eval_tokens": 42510 }, "budget": { "node_count": 8, "item_count": 8, "soft_pair_count": 6, "serialized_chars": 17125, "estimated_input_tokens": 4282, "estimated_eval_tokens": 8820 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "These PRs are all model-specific refactors to standardized output tracing/capture, but they touch different architectures and code paths. 
They are related by theme, not duplicates.", "confidence": 0.92, "canonical_issue_reason": null, "canonical_pr_reason": "PR #44129 is the strongest representative: it has an explicit issue target, a clear standardized output-capture refactor, and enough implementation detail to anchor the cluster theme.", "best_issue_reason": null, "best_pr_reason": "PR #44129 is the best single representative of the cluster because it is issue-linked and clearly embodies the output-tracing migration pattern without being unusually narrow or incomplete.", "soft_edge_verdicts": [ { "left": "pull_request:44024", "right": "pull_request:44076", "accept": false, "reason": "Both are standardized-output refactors, but for different models (FocalNet vs ImageGPT) and different forward paths. Same theme, not the same change." }, { "left": "pull_request:44073", "right": "pull_request:44074", "accept": false, "reason": "VisualBert and TextNet are separate model implementations with distinct output-capture plumbing; they are not the same concrete bug or patch." }, { "left": "pull_request:44056", "right": "pull_request:44073", "accept": false, "reason": "MPNet and VisualBert both refactor tracing, but they modify different model internals and hook mappings. No evidence they could be merged as one PR." }, { "left": "pull_request:44056", "right": "pull_request:44074", "accept": false, "reason": "These are different model-specific refactors with different affected files and output shapes; similarity is only at the framework pattern level." }, { "left": "pull_request:43997", "right": "pull_request:44010", "accept": false, "reason": "RegNet and SqueezeBert are unrelated model code paths; both mention standardized output handling, but they are not the same underlying fix." }, { "left": "pull_request:44010", "right": "pull_request:44129", "accept": false, "reason": "SqueezeBert and SpeechT5 touch different models and different tracing logic. 
Shared output-capture vocabulary is not enough to treat them as duplicates." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 18, "component_count": 23, "review_unit_index": 1, "review_unit_count": 2, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-43998-13", "nodes": [ "pull_request:43998", "pull_request:43999", "pull_request:44000", "pull_request:44001", "pull_request:44002", "pull_request:44003", "pull_request:44004", "pull_request:44025", "pull_request:44026", "pull_request:44027", "pull_request:44028", "pull_request:44029", "pull_request:44030" ], "soft_pairs": [ "pull_request:43998|pull_request:44003", "pull_request:44001|pull_request:44003", "pull_request:44002|pull_request:44003", "pull_request:44000|pull_request:44003", "pull_request:44003|pull_request:44004", "pull_request:44000|pull_request:44004", "pull_request:43999|pull_request:44001", "pull_request:43999|pull_request:44002", "pull_request:43999|pull_request:44025", "pull_request:43999|pull_request:44026", "pull_request:43999|pull_request:44027", "pull_request:43998|pull_request:44002", "pull_request:43998|pull_request:44000", "pull_request:44026|pull_request:44029", "pull_request:43998|pull_request:44001", "pull_request:44029|pull_request:44030", "pull_request:44025|pull_request:44026", "pull_request:44025|pull_request:44027", "pull_request:44025|pull_request:44028", "pull_request:44026|pull_request:44027", "pull_request:44026|pull_request:44028", "pull_request:44027|pull_request:44028", "pull_request:44001|pull_request:44002", "pull_request:44000|pull_request:44001" ], "prepared_review_unit_hash": "83a474e530d27588af6420aad24b1d2df35289d582e041346efe0e1ab9d1aa53", "component_budget": { "node_count": 13, "item_count": 13, "soft_pair_count": 42, 
"serialized_chars": 40929, "estimated_input_tokens": 10233, "estimated_eval_tokens": 20722 }, "budget": { "node_count": 13, "item_count": 13, "soft_pair_count": 24, "serialized_chars": 32097, "estimated_input_tokens": 8025, "estimated_eval_tokens": 16306 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, "analyst_result": { "summary": "This cluster is a loose theme cluster of model-specific output-tracing refactors, not true duplicates. The PRs touch different model families and forward paths, so they should stay separate.", "confidence": 0.94, "canonical_issue_reason": null, "canonical_pr_reason": "PR 44003 is the best representative of the shared theme because it is a larger output-tracing refactor spanning two related Mamba variants and introducing the new tracing helpers, but it is still not a duplicate of the other model-specific refactors.", "best_issue_reason": null, "best_pr_reason": "PR 44003 is the strongest single representative of this cluster\u2019s pattern, but only as a thematic exemplar, not as a merge target for the rest.", "soft_edge_verdicts": [ { "left": "pull_request:43998", "right": "pull_request:44003", "accept": false, "reason": "Different models and code paths: timm_backbone vs falcon_mamba/mamba. Same refactor style, not the same change." }, { "left": "pull_request:44001", "right": "pull_request:44003", "accept": false, "reason": "UnivNet audio generation and Mamba state/output tracing are unrelated implementations." }, { "left": "pull_request:44002", "right": "pull_request:44003", "accept": false, "reason": "UperNet segmentation wrapper vs Mamba model tracing; shared wording only." }, { "left": "pull_request:44000", "right": "pull_request:44003", "accept": false, "reason": "Vision-text dual encoder and Mamba are different model families with different forward contracts." 
}, { "left": "pull_request:44003", "right": "pull_request:44004", "accept": false, "reason": "Falcon/Mamba tracing refactor and CodeGen cache/output refactor affect different code paths." }, { "left": "pull_request:44000", "right": "pull_request:44004", "accept": false, "reason": "Vision-text encoder-decoder and CodeGen are unrelated model architectures." }, { "left": "pull_request:43999", "right": "pull_request:44001", "accept": false, "reason": "MobileNetV1 and UnivNet touch different modalities and output-tracing mechanics." }, { "left": "pull_request:43999", "right": "pull_request:44002", "accept": false, "reason": "MobileNetV1 classification/backbone refactor is not the same bug as UperNet segmentation refactor." }, { "left": "pull_request:43999", "right": "pull_request:44025", "accept": false, "reason": "MobileNetV1 and Depth Anything are separate vision models with different forward implementations." }, { "left": "pull_request:43999", "right": "pull_request:44026", "accept": false, "reason": "MobileNetV1 output tracing is unrelated to VisionEncoderDecoder wrapper changes." }, { "left": "pull_request:43999", "right": "pull_request:44027", "accept": false, "reason": "MobileNetV1 and SpeechEncoderDecoder are different domains and code paths." }, { "left": "pull_request:43998", "right": "pull_request:44002", "accept": false, "reason": "Timm backbone tracing and UperNet tracing are distinct model-specific refactors." }, { "left": "pull_request:43998", "right": "pull_request:44000", "accept": false, "reason": "Timm backbone and vision-text dual encoder do not share the same concrete implementation change." }, { "left": "pull_request:44026", "right": "pull_request:44029", "accept": false, "reason": "VisionEncoderDecoder wrapper changes are unrelated to RWKV layer output tracing." }, { "left": "pull_request:43998", "right": "pull_request:44001", "accept": false, "reason": "Timm backbone and UnivNet are unrelated models; same helper import is not enough." 
}, { "left": "pull_request:44029", "right": "pull_request:44030", "accept": false, "reason": "RWKV and DPR are different architectures; these are separate refactors, not one fix." }, { "left": "pull_request:44025", "right": "pull_request:44026", "accept": false, "reason": "Depth Anything and VisionEncoderDecoder are different code paths and different model wrappers." }, { "left": "pull_request:44025", "right": "pull_request:44027", "accept": false, "reason": "Depth Anything and SpeechEncoderDecoder are unrelated model families." }, { "left": "pull_request:44025", "right": "pull_request:44028", "accept": false, "reason": "Depth Anything and SuperPoint are separate vision models with different output schemas." }, { "left": "pull_request:44026", "right": "pull_request:44027", "accept": false, "reason": "VisionEncoderDecoder and SpeechEncoderDecoder are analogous wrappers but not the same concrete change." }, { "left": "pull_request:44026", "right": "pull_request:44028", "accept": false, "reason": "VisionEncoderDecoder wrapper refactor is unrelated to SuperPoint output tracing." }, { "left": "pull_request:44027", "right": "pull_request:44028", "accept": false, "reason": "SpeechEncoderDecoder and SuperPoint are entirely different model paths." }, { "left": "pull_request:44001", "right": "pull_request:44002", "accept": false, "reason": "UnivNet and UperNet share no concrete implementation path beyond the generic tracing theme." }, { "left": "pull_request:44000", "right": "pull_request:44001", "accept": false, "reason": "Vision-text dual encoder and UnivNet are unrelated architectures and should not be merged as duplicates." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 18, "component_count": 23, "review_unit_index": 2, "review_unit_count": 2, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-43998-13", "nodes": [ "pull_request:43998", "pull_request:44000", "pull_request:44001", "pull_request:44002", "pull_request:44004", "pull_request:44025", "pull_request:44026", "pull_request:44027", "pull_request:44028", "pull_request:44029", "pull_request:44030" ], "soft_pairs": [ "pull_request:44000|pull_request:44002", "pull_request:43998|pull_request:44029", "pull_request:44002|pull_request:44029", "pull_request:43998|pull_request:44004", "pull_request:44001|pull_request:44004", "pull_request:44002|pull_request:44004", "pull_request:44025|pull_request:44029", "pull_request:44025|pull_request:44030", "pull_request:44026|pull_request:44030", "pull_request:44027|pull_request:44029", "pull_request:44027|pull_request:44030", "pull_request:44028|pull_request:44029", "pull_request:44028|pull_request:44030", "pull_request:44001|pull_request:44025", "pull_request:44001|pull_request:44026", "pull_request:44002|pull_request:44025", "pull_request:44000|pull_request:44025", "pull_request:44000|pull_request:44026" ], "prepared_review_unit_hash": "f440285ebf4bf813366ab6114de57b0c9d49721c8671fd65ea721a56789669e6", "component_budget": { "node_count": 13, "item_count": 13, "soft_pair_count": 42, "serialized_chars": 40929, "estimated_input_tokens": 10233, "estimated_eval_tokens": 20722 }, "budget": { "node_count": 11, "item_count": 11, "soft_pair_count": 18, "serialized_chars": 26340, "estimated_input_tokens": 6585, "estimated_eval_tokens": 13426 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": true, 
"analyst_result": { "summary": "These are thematically similar refactors around output tracing/can_return_tuple, but they target different model code paths and files, so they are not duplicates of the same underlying bug/change. No single canonical issue exists in the set.", "confidence": 0.95, "canonical_issue_reason": null, "canonical_pr_reason": null, "best_issue_reason": null, "best_pr_reason": "PR 44025 is the broadest self-contained refactor in the set (it touches two related Depth Anything files), but it is still model-specific and not a duplicate representative for the others.", "soft_edge_verdicts": [ { "left": "pull_request:44000", "right": "pull_request:44002", "accept": false, "reason": "Both are output-tracing refactors, but one is timm_backbone and the other is upernet; different model implementations, different forward signatures, no shared concrete bug." }, { "left": "pull_request:43998", "right": "pull_request:44029", "accept": false, "reason": "Both mention output tracing, but timm_backbone and rwkv are unrelated code paths and fixes; too broad to be one mergeable PR." }, { "left": "pull_request:44002", "right": "pull_request:44029", "accept": false, "reason": "upernet vs rwkv are distinct model-specific refactors; same theme, not the same underlying change." }, { "left": "pull_request:43998", "right": "pull_request:44004", "accept": false, "reason": "timm_backbone and codegen touch unrelated forward/capture logic in different models; not the same bug or change." }, { "left": "pull_request:44001", "right": "pull_request:44004", "accept": false, "reason": "univnet and codegen are separate model refactors with different call signatures and outputs; only the umbrella theme matches." }, { "left": "pull_request:44002", "right": "pull_request:44004", "accept": false, "reason": "upernet and codegen are unrelated model paths; this is not a single concrete fix." 
}, { "left": "pull_request:44025", "right": "pull_request:44029", "accept": false, "reason": "Depth Anything and RWKV are different models with different output-tracing implementations; too dissimilar to merge as duplicates." }, { "left": "pull_request:44025", "right": "pull_request:44030", "accept": false, "reason": "Depth Anything vs DPR are separate model-specific forward refactors, not one underlying issue." }, { "left": "pull_request:44026", "right": "pull_request:44030", "accept": false, "reason": "vision_encoder_decoder and DPR are different architectures and code paths; same pattern, not same change." }, { "left": "pull_request:44027", "right": "pull_request:44029", "accept": false, "reason": "speech_encoder_decoder and rwkv are unrelated implementations; no shared concrete defect or patch." }, { "left": "pull_request:44027", "right": "pull_request:44030", "accept": false, "reason": "speech_encoder_decoder and DPR are distinct model refactors, so they should not be deduped." }, { "left": "pull_request:44028", "right": "pull_request:44029", "accept": false, "reason": "superpoint and rwkv affect different modules and output shapes; same refactor motif only." }, { "left": "pull_request:44028", "right": "pull_request:44030", "accept": false, "reason": "superpoint and DPR are unrelated model code paths; not the same bug or change." }, { "left": "pull_request:44001", "right": "pull_request:44025", "accept": false, "reason": "univnet and Depth Anything are separate model implementations; this is a shared refactor style, not a duplicate fix." }, { "left": "pull_request:44001", "right": "pull_request:44026", "accept": false, "reason": "univnet vs vision_encoder_decoder are different model-specific output-tracing changes and could not plausibly be one PR." }, { "left": "pull_request:44002", "right": "pull_request:44025", "accept": false, "reason": "upernet and Depth Anything are related only by the generic refactor theme; different files, different model behavior." 
}, { "left": "pull_request:44000", "right": "pull_request:44025", "accept": false, "reason": "timm_backbone and Depth Anything are separate model-specific changes, not the same underlying issue." }, { "left": "pull_request:44000", "right": "pull_request:44026", "accept": false, "reason": "timm_backbone vs vision_encoder_decoder is too broad; these are independent refactors in different subsystems." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 19, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-44662-3", "nodes": [ "pull_request:44662", "pull_request:45075", "pull_request:45401" ], "soft_pairs": [ "pull_request:44662|pull_request:45401", "pull_request:44662|pull_request:45075", "pull_request:45075|pull_request:45401" ], "prepared_review_unit_hash": "c4435d4e753020ae16291e100e4dd73db52c4c847c6325738eeae1c965ac2040", "component_budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 3, "serialized_chars": 11071, "estimated_input_tokens": 2768, "estimated_eval_tokens": 5792 }, "budget": { "node_count": 3, "item_count": 3, "soft_pair_count": 3, "serialized_chars": 11071, "estimated_input_tokens": 2768, "estimated_eval_tokens": 5792 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "These are three distinct model-addition PRs that overlap on Transformers scaffolding, but each implements a different model family (PenguinVL, DeepSeek-OCR-2, Voxtral TTS). 
They are not duplicates and should not be merged as one change.", "confidence": 0.98, "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR: the three PRs add different models with different configs, processors, and model code paths.", "best_issue_reason": null, "best_pr_reason": "PR #45075 is the strongest representative of the set because it is the most complete standalone model addition and includes both implementation and conversion tooling; however, it is still not a duplicate of the others.", "soft_edge_verdicts": [ { "left": "pull_request:44662", "right": "pull_request:45401", "accept": false, "reason": "Different underlying changes: PenguinVL is a vision-language model, while Voxtral TTS is a text-to-speech model. Shared auto-registration/docs files are generic scaffolding, not the same bug or fix." }, { "left": "pull_request:44662", "right": "pull_request:45075", "accept": false, "reason": "Both add new model integrations, but for unrelated models with different architectures and code paths (PenguinVL vs DeepSeek-OCR-2). Overlap is only in shared framework wiring." }, { "left": "pull_request:45075", "right": "pull_request:45401", "accept": false, "reason": "Distinct model families and concrete implementations: OCR vs TTS. They touch some of the same registry/docs files, but they do not fix the same issue or represent one mergeable change." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 20, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-44979-2", "nodes": [ "pull_request:44979", "pull_request:45363" ], "soft_pairs": [ "pull_request:44979|pull_request:45363" ], "prepared_review_unit_hash": "34d2dfff27ded10dd12bed516986227958efea61dedb27f5ec8c36bf933a835e", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 4408, "estimated_input_tokens": 1102, "estimated_eval_tokens": 2460 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 4408, "estimated_input_tokens": 1102, "estimated_eval_tokens": 2460 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "These PRs are related by the general idea of model/module fusion, but they target different layers: #44979 introduces a generic Module Fusion API, while #45363 adds a specific n-to-1 KernelConfig-based fusion integration and example. 
They are conceptually adjacent, not the same underlying change.", "confidence": 0.87, "canonical_issue_reason": null, "canonical_pr_reason": "#44979 is the broader, earlier core API proposal that defines the fusion abstraction; #45363 is a narrower integration built around KernelConfig and specific fused modules.", "best_issue_reason": null, "best_pr_reason": "#44979 is the best cluster representative because it captures the foundational fusion API, while #45363 is a specific application of that idea rather than the same patch.", "soft_edge_verdicts": [ { "left": "pull_request:44979", "right": "pull_request:45363", "accept": false, "reason": "Do not merge: one PR adds a generic module-fusion API and tests, the other wires a concrete KernelConfig n-to-1 fusion path plus an example. Same broad subsystem, but not the same code-path fix or mergeable into one PR as a duplicate." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 21, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-45086-2", "nodes": [ "pull_request:45086", "pull_request:45317" ], "soft_pairs": [ "pull_request:45086|pull_request:45317" ], "prepared_review_unit_hash": "46f6e8e743afe527bdfbefbe5ab391331d9b42d259518b7f6eaa66b3c061478f", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 4414, "estimated_input_tokens": 1104, "estimated_eval_tokens": 2464 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 4414, "estimated_input_tokens": 1104, "estimated_eval_tokens": 2464 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": 
"Both PRs address the same Mistral pre-tokenizer AttributeError in `tokenization_utils_tokenizers.py`; `45317` is the fuller follow-up with tests and the same fix path.", "confidence": 0.98, "canonical_issue_reason": null, "canonical_pr_reason": "`45317` is the stronger canonical PR: same code fix as `45086`, clearer title, and it adds a test file, making it the more complete patch.", "best_issue_reason": null, "best_pr_reason": "`45317` best represents the cluster because it contains the concrete fix plus validation, while `45086` is a smaller precursor/subset of the same change.", "soft_edge_verdicts": [ { "left": "pull_request:45086", "right": "pull_request:45317", "accept": true, "reason": "Same underlying bug and same code-path fix in the same file; `45317` appears to extend `45086` with tests rather than introduce a different change." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 22, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-45332-2", "nodes": [ "pull_request:45332", "pull_request:45333" ], "soft_pairs": [ "pull_request:45332|pull_request:45333" ], "prepared_review_unit_hash": "0a9d48d5f30893e38a5b0e99ed597331f3237f840ad7007491ace09e5baaeaf6", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 5398, "estimated_input_tokens": 1350, "estimated_eval_tokens": 2956 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 5398, "estimated_input_tokens": 1350, "estimated_eval_tokens": 2956 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "These are related but distinct 
PRs: one adds heterogeneous per-layer configuration plumbing, and the other builds on that to add heterogeneous model/runtime support. They share helper/config files, but they are not the same concrete change or bug fix.", "confidence": 0.91, "canonical_issue_reason": null, "canonical_pr_reason": "PR #45332 is the better canonical PR because it is the broader end-to-end feature implementation, covering both per-layer config handling and the downstream modeling/cache changes that use it.", "best_issue_reason": null, "best_pr_reason": "PR #45332 is the best representative of the cluster since it subsumes the config-only work in #45333 and extends it into actual heterogeneous model support.", "soft_edge_verdicts": [ { "left": "pull_request:45332", "right": "pull_request:45333", "accept": false, "reason": "Reject: #45333 appears to implement the per-layer configuration layer, while #45332 adds the downstream heterogeneous modeling/cache support that consumes it. They are complementary, not duplicate fixes, and would not be the same single PR." 
} ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null }, { "label": "pull_request", "component_index": 23, "component_count": 23, "review_unit_index": 1, "review_unit_count": 1, "status": "reviewed", "reason": null, "source": "llm", "cache_hit": false, "model": "gpt-5.4-mini?service_tier=flex", "cluster_id": "cluster-45415-2", "nodes": [ "pull_request:45415", "pull_request:45425" ], "soft_pairs": [ "pull_request:45415|pull_request:45425" ], "prepared_review_unit_hash": "4658a41dfe9952dc9cb92343291d9ac9c4ae34b486521a298e6b3549f59c695a", "component_budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 5297, "estimated_input_tokens": 1325, "estimated_eval_tokens": 2906 }, "budget": { "node_count": 2, "item_count": 2, "soft_pair_count": 1, "serialized_chars": 5297, "estimated_input_tokens": 1325, "estimated_eval_tokens": 2906 }, "overflow_policy": "truncate_then_skip", "trimmed": false, "aggressively_trimmed": false, "split": false, "analyst_result": { "summary": "Two typing-related PRs, but they cover different scopes: one is a broad repo-wide typing sweep, the other is a focused modeling_utils typing/check-types update. 
They are not the same underlying change.", "confidence": 0.86, "canonical_issue_reason": null, "canonical_pr_reason": "PR #45425 is the better representative because it targets a specific typing fix centered on modeling_utils/check_types, rather than a broad multi-file typing sweep.", "best_issue_reason": null, "best_pr_reason": "PR #45425 is the best fit for the cluster since it is the more concrete, self-contained typing change; PR #45415 is broader and touches many unrelated files, so it is less suitable as the canonical representative.", "soft_edge_verdicts": [ { "left": "pull_request:45415", "right": "pull_request:45425", "accept": false, "reason": "Both are typing-related, but they do not appear to fix the same concrete code-path problem. #45415 is a broad typing enablement across many files, while #45425 is a focused modeling_utils/type-checking tweak; shared filenames are incidental and they would not naturally merge into one PR." } ] }, "evaluator_result": null, "evaluator_used": false, "retried": false, "accepted_nontrivial_soft_edge": false, "error_kind": null, "error_message": null } ] }