diff --git a/analysis/current/analysis-report-hybrid.json b/analysis/current/analysis-report-hybrid.json new file mode 100644 index 0000000000000000000000000000000000000000..71f10390197d6b7b3e60ff6748e7abfb366ebf9a --- /dev/null +++ b/analysis/current/analysis-report-hybrid.json @@ -0,0 +1,2002 @@ +{ + "schema_version": "1.0", + "repo": "huggingface/transformers", + "snapshot_id": "20260418T150536Z", + "generated_at": "2026-04-18T15:42:30Z", + "evidence_quality": "full", + "llm_enrichment": true, + "meta_bugs": [ + { + "cluster_id": "cluster-43979-11", + "summary": "Cluster of 1 issues and 10 PRs centered on issue #43979.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 43979, + "canonical_pr_number": 44007, + "issue_numbers": [ + 43979 + ], + "pr_numbers": [ + 43996, + 44007, + 44013, + 44044, + 44066, + 44072, + 44085, + 44129, + 44154, + 44722 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 43996, + "right_pr_number": 44007, + "code_similarity": 0.179, + "size_similarity": 0.576, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.429, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44013, + "code_similarity": 0.122, + "size_similarity": 0.318, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.392, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44044, + "code_similarity": 0.245, + "size_similarity": 0.864, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.479, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44066, + "code_similarity": 0.225, + "size_similarity": 0.818, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.408, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + 
"left_pr_number": 43996, + "right_pr_number": 44072, + "code_similarity": 0.14, + "size_similarity": 0.303, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.528, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44085, + "code_similarity": 0.216, + "size_similarity": 0.783, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.398, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44129, + "code_similarity": 0.163, + "size_similarity": 0.643, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.229, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44154, + "code_similarity": 0.153, + "size_similarity": 0.535, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.31, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44722, + "code_similarity": 0.225, + "size_similarity": 0.848, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.368, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44013, + "code_similarity": 0.19, + "size_similarity": 0.553, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.531, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44044, + "code_similarity": 0.186, + "size_similarity": 0.667, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.354, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44066, + "code_similarity": 0.188, + "size_similarity": 0.704, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.315, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + 
"right_pr_number": 44072, + "code_similarity": 0.212, + "size_similarity": 0.526, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.708, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44085, + "code_similarity": 0.195, + "size_similarity": 0.735, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.318, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44129, + "code_similarity": 0.103, + "size_similarity": 0.37, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.191, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44154, + "code_similarity": 0.238, + "size_similarity": 0.93, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.344, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44722, + "code_similarity": 0.178, + "size_similarity": 0.679, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.28, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44044, + "code_similarity": 0.126, + "size_similarity": 0.368, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.351, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44066, + "code_similarity": 0.127, + "size_similarity": 0.389, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.325, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44072, + "code_similarity": 0.29, + "size_similarity": 0.952, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.667, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44085, + 
"code_similarity": 0.131, + "size_similarity": 0.406, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.329, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44129, + "code_similarity": 0.07, + "size_similarity": 0.205, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.192, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44154, + "code_similarity": 0.177, + "size_similarity": 0.594, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.389, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44722, + "code_similarity": 0.118, + "size_similarity": 0.375, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.287, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44066, + "code_similarity": 0.25, + "size_similarity": 0.947, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.404, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44072, + "code_similarity": 0.136, + "size_similarity": 0.351, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.442, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44085, + "code_similarity": 0.24, + "size_similarity": 0.906, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.394, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44129, + "code_similarity": 0.147, + "size_similarity": 0.555, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.243, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44154, + "code_similarity": 0.17, + 
"size_similarity": 0.62, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.306, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44722, + "code_similarity": 0.257, + "size_similarity": 0.982, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.402, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44072, + "code_similarity": 0.133, + "size_similarity": 0.37, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.393, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44085, + "code_similarity": 0.763, + "size_similarity": 0.957, + "file_overlap": 0.5, + "area_overlap": 0.825, + "patch_similarity": 0.887, + "shared_filenames": [ + "src/transformers/models/gptj/modeling_gptj.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/gptj/modeling_gptj.py", + "left_ranges": [ + [ + 33, + 41 + ], + [ + 174, + 181 + ], + [ + 250, + 257 + ], + [ + 398, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 466 + ], + [ + 471, + 488 + ], + [ + 518, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 606 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ], + "right_ranges": [ + [ + 33, + 40 + ], + [ + 173, + 180 + ], + [ + 249, + 256 + ], + [ + 397, + 405 + ], + [ + 408, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 465 + ], + [ + 470, + 489 + ], + [ + 517, + 539 + ], + [ + 553, + 559 + ], + [ + 565, + 574 + ], + [ + 579, + 597 + ], + [ + 600, + 611 + ], + [ + 633, + 639 + ], + [ + 645, + 652 + ], + [ + 657, + 673 + ], + [ + 716, + 728 + ], + [ + 737, + 743 + ], + [ + 748, + 771 + ], + [ + 789, + 794 + ] + ] + } + ] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44129, + 
"code_similarity": 0.145, + "size_similarity": 0.526, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.263, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44154, + "code_similarity": 0.174, + "size_similarity": 0.654, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.286, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44722, + "code_similarity": 0.907, + "size_similarity": 0.964, + "file_overlap": 1.0, + "area_overlap": 0.808, + "patch_similarity": 0.874, + "shared_filenames": [ + "src/transformers/models/codegen/modeling_codegen.py", + "src/transformers/models/gptj/modeling_gptj.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/codegen/modeling_codegen.py", + "left_ranges": [ + [ + 245, + 266 + ] + ], + "right_ranges": [ + [ + 228, + 234 + ] + ] + }, + { + "filename": "src/transformers/models/gptj/modeling_gptj.py", + "left_ranges": [ + [ + 33, + 41 + ], + [ + 174, + 181 + ], + [ + 250, + 257 + ], + [ + 398, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 466 + ], + [ + 471, + 488 + ], + [ + 518, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 606 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ], + "right_ranges": [ + [ + 33, + 42 + ], + [ + 175, + 182 + ], + [ + 251, + 258 + ], + [ + 399, + 420 + ], + [ + 426, + 435 + ], + [ + 460, + 467 + ], + [ + 472, + 489 + ], + [ + 519, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 612 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 663 + ], + [ + 666, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ] + } + ] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44085, + 
"code_similarity": 0.137, + "size_similarity": 0.387, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.398, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44129, + "code_similarity": 0.074, + "size_similarity": 0.195, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.231, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44154, + "code_similarity": 0.175, + "size_similarity": 0.566, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.414, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44722, + "code_similarity": 0.124, + "size_similarity": 0.357, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.347, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44085, + "right_pr_number": 44129, + "code_similarity": 0.141, + "size_similarity": 0.503, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.272, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44085, + "right_pr_number": 44154, + "code_similarity": 0.18, + "size_similarity": 0.684, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.289, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44085, + "right_pr_number": 44722, + "code_similarity": 0.728, + "size_similarity": 0.923, + "file_overlap": 0.5, + "area_overlap": 0.791, + "patch_similarity": 0.78, + "shared_filenames": [ + "src/transformers/models/gptj/modeling_gptj.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/gptj/modeling_gptj.py", + "left_ranges": [ + [ + 33, + 40 + ], + [ + 173, + 180 + ], + [ + 249, + 256 + ], + [ + 397, + 405 + ], + [ + 408, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 465 + ], + [ + 470, + 489 + ], + [ + 517, + 539 + ], + 
[ + 553, + 559 + ], + [ + 565, + 574 + ], + [ + 579, + 597 + ], + [ + 600, + 611 + ], + [ + 633, + 639 + ], + [ + 645, + 652 + ], + [ + 657, + 673 + ], + [ + 716, + 728 + ], + [ + 737, + 743 + ], + [ + 748, + 771 + ], + [ + 789, + 794 + ] + ], + "right_ranges": [ + [ + 33, + 42 + ], + [ + 175, + 182 + ], + [ + 251, + 258 + ], + [ + 399, + 420 + ], + [ + 426, + 435 + ], + [ + 460, + 467 + ], + [ + 472, + 489 + ], + [ + 519, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 612 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 663 + ], + [ + 666, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ] + } + ] + }, + { + "left_pr_number": 44129, + "right_pr_number": 44154, + "code_similarity": 0.099, + "size_similarity": 0.344, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.199, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44129, + "right_pr_number": 44722, + "code_similarity": 0.146, + "size_similarity": 0.545, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.247, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44154, + "right_pr_number": 44722, + "code_similarity": 0.164, + "size_similarity": 0.631, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.255, + "shared_filenames": [], + "shared_file_areas": [] + } + ] + }, + { + "cluster_id": "cluster-41211-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #41211.", + "status": "open", + "confidence": 0.75, + "canonical_issue_number": 41211, + "canonical_pr_number": 44339, + "issue_numbers": [ + 41211 + ], + "pr_numbers": [ + 41356, + 44339 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target" + ], + "pr_comparisons": [ + { + "left_pr_number": 41356, + "right_pr_number": 44339, + "code_similarity": 0.164, + "size_similarity": 0.096, + "file_overlap": 0.3, + 
"area_overlap": 0.078, + "patch_similarity": 0.182, + "shared_filenames": [ + "docs/source/en/model_doc/deimv2.md", + "src/transformers/models/deimv2/__init__.py", + "src/transformers/models/deimv2/configuration_deimv2.py", + "src/transformers/models/deimv2/modeling_deimv2.py", + "tests/models/deimv2/__init__.py", + "tests/models/deimv2/test_modeling_deimv2.py" + ], + "shared_file_areas": [ + { + "filename": "docs/source/en/model_doc/deimv2.md", + "left_ranges": [ + [ + 1, + 132 + ] + ], + "right_ranges": [ + [ + 1, + 65 + ] + ] + }, + { + "filename": "src/transformers/models/deimv2/__init__.py", + "left_ranges": [ + [ + 1, + 15 + ] + ], + "right_ranges": [ + [ + 1, + 29 + ] + ] + }, + { + "filename": "src/transformers/models/deimv2/configuration_deimv2.py", + "left_ranges": [ + [ + 1, + 74 + ] + ], + "right_ranges": [ + [ + 1, + 266 + ] + ] + }, + { + "filename": "tests/models/deimv2/test_modeling_deimv2.py", + "left_ranges": [ + [ + 1, + 15 + ] + ], + "right_ranges": [ + [ + 1, + 1753 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-36246-4", + "summary": "Cluster of 2 issues and 2 PRs centered on issue #43824.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 43824, + "canonical_pr_number": 43836, + "issue_numbers": [ + 36246, + 43824 + ], + "pr_numbers": [ + 43836, + 43842 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 43836, + "right_pr_number": 43842, + "code_similarity": 0.332, + "size_similarity": 0.088, + "file_overlap": 1.0, + "area_overlap": 0.033, + "patch_similarity": 0.017, + "shared_filenames": [ + "src/transformers/cli/serve.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/cli/serve.py", + "left_ranges": [ + [ + 11, + 18 + ], + [ + 359, + 429 + ], + [ + 584, + 590 + ], + [ + 1892, + 1910 + ], + [ + 1917, + 1923 + ] + ], + "right_ranges": [ + [ + 54, + 61 + ], + [ + 587, + 593 + ] + ] + } + ] + } + ] + 
}, + { + "cluster_id": "cluster-43366-4", + "summary": "Cluster of 1 issues and 3 PRs centered on issue #43366.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 43366, + "canonical_pr_number": 43757, + "issue_numbers": [ + 43366 + ], + "pr_numbers": [ + 43757, + 45500, + 45506 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 43757, + "right_pr_number": 45500, + "code_similarity": 0.096, + "size_similarity": 0.149, + "file_overlap": 0.2, + "area_overlap": 0.005, + "patch_similarity": 0.031, + "shared_filenames": [ + "src/transformers/modeling_gguf_pytorch_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/modeling_gguf_pytorch_utils.py", + "left_ranges": [ + [ + 462, + 474 + ] + ], + "right_ranges": [ + [ + 171, + 277 + ], + [ + 456, + 462 + ], + [ + 518, + 525 + ], + [ + 620, + 627 + ], + [ + 709, + 753 + ] + ] + } + ] + }, + { + "left_pr_number": 43757, + "right_pr_number": 45506, + "code_similarity": 0.096, + "size_similarity": 0.149, + "file_overlap": 0.2, + "area_overlap": 0.005, + "patch_similarity": 0.031, + "shared_filenames": [ + "src/transformers/modeling_gguf_pytorch_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/modeling_gguf_pytorch_utils.py", + "left_ranges": [ + [ + 462, + 474 + ] + ], + "right_ranges": [ + [ + 171, + 277 + ], + [ + 456, + 462 + ], + [ + 518, + 525 + ], + [ + 620, + 627 + ], + [ + 709, + 753 + ] + ] + } + ] + }, + { + "left_pr_number": 45500, + "right_pr_number": 45506, + "code_similarity": 1.0, + "size_similarity": 1.0, + "file_overlap": 1.0, + "area_overlap": 1.0, + "patch_similarity": 1.0, + "shared_filenames": [ + "docs/source/en/model_doc/gpt_oss.md", + "src/transformers/integrations/ggml.py", + "src/transformers/modeling_gguf_pytorch_utils.py", + "tests/quantization/ggml/test_ggml.py" + ], + "shared_file_areas": [ + { + "filename": 
"docs/source/en/model_doc/gpt_oss.md", + "left_ranges": [ + [ + 70, + 93 + ] + ], + "right_ranges": [ + [ + 70, + 93 + ] + ] + }, + { + "filename": "src/transformers/integrations/ggml.py", + "left_ranges": [ + [ + 89, + 109 + ] + ], + "right_ranges": [ + [ + 89, + 109 + ] + ] + }, + { + "filename": "src/transformers/modeling_gguf_pytorch_utils.py", + "left_ranges": [ + [ + 171, + 277 + ], + [ + 456, + 462 + ], + [ + 518, + 525 + ], + [ + 620, + 627 + ], + [ + 709, + 753 + ] + ], + "right_ranges": [ + [ + 171, + 277 + ], + [ + 456, + 462 + ], + [ + 518, + 525 + ], + [ + 620, + 627 + ], + [ + 709, + 753 + ] + ] + }, + { + "filename": "tests/quantization/ggml/test_ggml.py", + "left_ranges": [ + [ + 351, + 358 + ], + [ + 386, + 406 + ] + ], + "right_ranges": [ + [ + 351, + 358 + ], + [ + 386, + 406 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-43240-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #43240.", + "status": "open", + "confidence": 0.75, + "canonical_issue_number": 43240, + "canonical_pr_number": 43251, + "issue_numbers": [ + 43240 + ], + "pr_numbers": [ + 43251, + 43254 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target" + ], + "pr_comparisons": [ + { + "left_pr_number": 43251, + "right_pr_number": 43254, + "code_similarity": 0.794, + "size_similarity": 0.64, + "file_overlap": 1.0, + "area_overlap": 0.667, + "patch_similarity": 0.882, + "shared_filenames": [ + "src/transformers/loss/loss_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/loss/loss_utils.py", + "left_ranges": [ + [ + 30, + 50 + ] + ], + "right_ranges": [ + [ + 30, + 43 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-45081-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #45081.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 45081, + "canonical_pr_number": 45317, + "issue_numbers": [ + 45081 + ], + "pr_numbers": [ + 45086, + 45317 + ], + "evidence_types": [ + 
"closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 45086, + "right_pr_number": 45317, + "code_similarity": 0.589, + "size_similarity": 0.34, + "file_overlap": 0.5, + "area_overlap": 1.0, + "patch_similarity": 0.136, + "shared_filenames": [ + "src/transformers/tokenization_utils_tokenizers.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/tokenization_utils_tokenizers.py", + "left_ranges": [ + [ + 1360, + 1370 + ], + [ + 1374, + 1380 + ] + ], + "right_ranges": [ + [ + 1360, + 1370 + ], + [ + 1374, + 1380 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-43698-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #43698.", + "status": "open", + "confidence": 0.75, + "canonical_issue_number": 43698, + "canonical_pr_number": 43779, + "issue_numbers": [ + 43698 + ], + "pr_numbers": [ + 43779, + 43816 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target" + ], + "pr_comparisons": [ + { + "left_pr_number": 43779, + "right_pr_number": 43816, + "code_similarity": 0.418, + "size_similarity": 0.538, + "file_overlap": 1.0, + "area_overlap": 0.02, + "patch_similarity": 0.02, + "shared_filenames": [ + "src/transformers/integrations/integration_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/integrations/integration_utils.py", + "left_ranges": [ + [ + 2227, + 2241 + ], + [ + 2303, + 2309 + ] + ], + "right_ranges": [ + [ + 2278, + 2291 + ], + [ + 2309, + 2322 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-44018-2", + "summary": "Cluster of 2 related pull requests linked by soft_similarity.", + "status": "open", + "confidence": 0.5, + "canonical_issue_number": null, + "canonical_pr_number": 44068, + "issue_numbers": [], + "pr_numbers": [ + 44018, + 44068 + ], + "evidence_types": [ + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 44018, + "right_pr_number": 44068, + "code_similarity": 0.766, + 
"size_similarity": 0.939, + "file_overlap": 1.0, + "area_overlap": 0.425, + "patch_similarity": 0.866, + "shared_filenames": [ + "src/transformers/models/gpt_neo/modeling_gpt_neo.py", + "tests/models/gpt_neo/test_modeling_gpt_neo.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/gpt_neo/modeling_gpt_neo.py", + "left_ranges": [ + [ + 26, + 46 + ], + [ + 138, + 143 + ], + [ + 182, + 187 + ], + [ + 283, + 295 + ], + [ + 327, + 341 + ], + [ + 348, + 354 + ], + [ + 360, + 369 + ], + [ + 399, + 406 + ], + [ + 411, + 419 + ], + [ + 428, + 433 + ], + [ + 472, + 492 + ], + [ + 507, + 513 + ], + [ + 519, + 528 + ], + [ + 541, + 559 + ], + [ + 562, + 567 + ], + [ + 595, + 601 + ], + [ + 607, + 614 + ], + [ + 627, + 643 + ], + [ + 685, + 690 + ], + [ + 708, + 714 + ], + [ + 720, + 727 + ], + [ + 740, + 757 + ], + [ + 761, + 766 + ], + [ + 780, + 786 + ], + [ + 791, + 798 + ], + [ + 807, + 822 + ], + [ + 840, + 845 + ] + ], + "right_ranges": [ + [ + 26, + 31 + ], + [ + 34, + 44 + ], + [ + 136, + 141 + ], + [ + 180, + 185 + ], + [ + 281, + 293 + ], + [ + 325, + 339 + ], + [ + 346, + 352 + ], + [ + 358, + 367 + ], + [ + 397, + 404 + ], + [ + 409, + 417 + ], + [ + 426, + 434 + ], + [ + 463, + 483 + ], + [ + 498, + 504 + ], + [ + 510, + 519 + ], + [ + 532, + 550 + ], + [ + 553, + 564 + ], + [ + 586, + 592 + ], + [ + 598, + 605 + ], + [ + 618, + 634 + ], + [ + 676, + 688 + ], + [ + 699, + 705 + ], + [ + 711, + 718 + ], + [ + 731, + 748 + ], + [ + 752, + 762 + ], + [ + 771, + 777 + ], + [ + 782, + 789 + ], + [ + 798, + 813 + ], + [ + 831, + 836 + ] + ] + }, + { + "filename": "tests/models/gpt_neo/test_modeling_gpt_neo.py", + "left_ranges": [ + [ + 458, + 464 + ] + ], + "right_ranges": [ + [ + 458, + 464 + ] + ] + } + ] + } + ] + } + ], + "duplicate_issues": [ + { + "cluster_id": "cluster-36246-4", + "canonical_issue_number": 43824, + "duplicate_issue_numbers": [ + 36246 + ], + "reason": "Issues in cluster-36246-4 are treated as duplicates because they share 
closing_reference, shared_issue_target, soft_similarity evidence." + } + ], + "duplicate_prs": [ + { + "cluster_id": "cluster-36246-4", + "canonical_pr_number": 43836, + "duplicate_pr_numbers": [ + 43842 + ], + "target_issue_number": 43824, + "reason": "PRs in cluster-36246-4 are treated as duplicates because they converge on issue #43824 with closing_reference, shared_issue_target, soft_similarity evidence." + }, + { + "cluster_id": "cluster-41211-3", + "canonical_pr_number": 44339, + "duplicate_pr_numbers": [ + 41356 + ], + "target_issue_number": 41211, + "reason": "PRs in cluster-41211-3 are treated as duplicates because they converge on issue #41211 with closing_reference, shared_issue_target evidence." + }, + { + "cluster_id": "cluster-43240-3", + "canonical_pr_number": 43251, + "duplicate_pr_numbers": [ + 43254 + ], + "target_issue_number": 43240, + "reason": "PRs in cluster-43240-3 are treated as duplicates because they converge on issue #43240 with closing_reference, shared_issue_target evidence." + }, + { + "cluster_id": "cluster-43366-4", + "canonical_pr_number": 43757, + "duplicate_pr_numbers": [ + 45500, + 45506 + ], + "target_issue_number": 43366, + "reason": "PRs in cluster-43366-4 are treated as duplicates because they converge on issue #43366 with closing_reference, shared_issue_target, soft_similarity evidence." + }, + { + "cluster_id": "cluster-43698-3", + "canonical_pr_number": 43779, + "duplicate_pr_numbers": [ + 43816 + ], + "target_issue_number": 43698, + "reason": "PRs in cluster-43698-3 are treated as duplicates because they converge on issue #43698 with closing_reference, shared_issue_target evidence." 
+ }, + { + "cluster_id": "cluster-43979-11", + "canonical_pr_number": 44007, + "duplicate_pr_numbers": [ + 43996, + 44013, + 44044, + 44066, + 44072, + 44085, + 44129, + 44154, + 44722 + ], + "target_issue_number": 43979, + "reason": "PRs in cluster-43979-11 are treated as duplicates because they converge on issue #43979 with closing_reference, shared_issue_target, soft_similarity evidence." + }, + { + "cluster_id": "cluster-44018-2", + "canonical_pr_number": 44068, + "duplicate_pr_numbers": [ + 44018 + ], + "target_issue_number": null, + "reason": "PRs in cluster-44018-2 are treated as duplicates because they share soft_similarity evidence." + }, + { + "cluster_id": "cluster-45081-3", + "canonical_pr_number": 45317, + "duplicate_pr_numbers": [ + 45086 + ], + "target_issue_number": 45081, + "reason": "PRs in cluster-45081-3 are treated as duplicates because they converge on issue #45081 with closing_reference, shared_issue_target, soft_similarity evidence." + } + ], + "best_issue": { + "cluster_id": "cluster-43366-4", + "issue_number": 43366, + "reason": "Issue #43366 is the strongest global issue candidate because it is open, belongs to a cluster with 4 artifacts, and carries 5 discussion comments plus 7 inbound references.", + "score": 157.0 + }, + "best_pr": { + "cluster_id": "cluster-43979-11", + "pr_number": 44007, + "reason": "PR #44007 is the strongest global PR candidate because it is open, belongs to a cluster with 11 artifacts, links to 1 issues, and carries 2 review/discussion events.", + "score": 238.886 + } +} diff --git a/analysis/current/analysis-report-hybrid.llm-reviews.json b/analysis/current/analysis-report-hybrid.llm-reviews.json new file mode 100644 index 0000000000000000000000000000000000000000..ad1972b821b55694c7847d20c20d71fecc98274b --- /dev/null +++ b/analysis/current/analysis-report-hybrid.llm-reviews.json @@ -0,0 +1,23066 @@ +{ + "schema_version": "1.0", + "repo": "huggingface/transformers", + "snapshot_id": "20260418T150536Z", + 
"generated_at": "2026-04-18T15:42:30Z", + "model": "gpt-5.4-mini?service_tier=flex", + "reviews": [ + { + "label": "issue", + "component_index": 1, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-13244-3", + "nodes": [ + "issue:13244", + "issue:15980", + "issue:16618" + ], + "soft_pairs": [ + "issue:13244|issue:16618", + "issue:15980|issue:16618" + ], + "prepared_review_unit_hash": "dcd9f595a958e37b4890a6765530239dc1f6801a44b3ae39d5bee95b8f597333", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 2479, + "estimated_input_tokens": 620, + "estimated_eval_tokens": 1496 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 2479, + "estimated_input_tokens": 620, + "estimated_eval_tokens": 1496 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These issues are not duplicates: one is about Tapas tokenization vs TensorFlow behavior, one is about a misleading private-model auth error, and one is a generic model-loading failure for bert-base-uncased. 
The only plausible cluster center is the broad model-loading report (#16618), but the soft links should be rejected.", + "confidence": 0.79, + "canonical_issue_reason": "#16618 is the broadest and most central of the three, focused on model loading, while the other two are narrower and clearly different problems.", + "canonical_pr_reason": null, + "best_issue_reason": "#16618 is the best representative for this cluster because it is the most general issue title and the only one plausibly related to the soft-similarity links.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:13244", + "right": "issue:16618", + "accept": false, + "reason": "Tapas tokenization mismatch is a different code path and symptom than failing to load bert-base-uncased; they are not the same underlying bug." + }, + { + "left": "issue:15980", + "right": "issue:16618", + "accept": false, + "reason": "A bad error when downloading a private model without login is an authentication/error-reporting issue, not the same as a generic model-loading failure." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:30277", + "issue:35545", + "issue:36010", + "issue:41720", + "issue:42915", + "issue:43596", + "issue:43638", + "issue:43716", + "issue:43828", + "issue:44292", + "issue:44322", + "issue:44509", + "issue:44512", + "issue:44805", + "issue:44918", + "issue:45161", + "issue:45237", + "issue:45464" + ], + "soft_pairs": [ + "issue:44322|issue:45464", + "issue:43716|issue:45237", + "issue:43828|issue:45237", + "issue:44292|issue:45237", + "issue:30277|issue:35545", + "issue:35545|issue:42915", + "issue:35545|issue:41720", + "issue:44509|issue:44512", + "issue:43638|issue:44805", + "issue:35545|issue:36010", + "issue:43596|issue:44805", + "issue:44805|issue:45161", + "issue:44805|issue:44918" + ], + "prepared_review_unit_hash": "56f2c4dcf5377f5ae1f4ecea8ce3c641dc1271d3fa3d5658198cfd0f2f793b44", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 13, + "serialized_chars": 14253, + "estimated_input_tokens": 3564, + "estimated_eval_tokens": 7384 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous. The only near-duplicate pair is the v5 docs cleanup pair, but even that is not the same concrete change. 
Most other soft links are model-specific runtime errors that share broad symptoms, not the same underlying bug.", + "confidence": 0.7, + "canonical_issue_reason": "issue:44805 is the best representative of the most generic failure mode in the cluster: a shape/indexing runtime error. It is broader than the more model-specific zero3/BERT or Qwen-specific reports, so it is the most plausible anchor if one issue must represent the set.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44805 is the strongest global representative because it is the least model-specific and closest to a reusable bug pattern. The rest are either narrower variants or unrelated runtime/doc issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44322", + "right": "issue:45464", + "accept": false, + "reason": "Both are Qwen3.5-related, but one is a config attribute error and the other is a streaming chat/completions failure; different code paths and symptoms." + }, + { + "left": "issue:43716", + "right": "issue:45237", + "accept": false, + "reason": "Both mention Qwen/GPT-OSS-style model execution problems, but one is a dtype mismatch in preprocessing and the other is an AMD GPU compatibility failure." + }, + { + "left": "issue:43828", + "right": "issue:45237", + "accept": false, + "reason": "A torch.autocast dtype mismatch for Phi-tiny-MoE is not the same underlying bug as GPT-OSS failing on AMD GPUs." + }, + { + "left": "issue:44292", + "right": "issue:45237", + "accept": false, + "reason": "Qwen-3 NVFP4 execution and GPT-OSS-on-AMD are different model/backends failures with no clear shared code path." + }, + { + "left": "issue:30277", + "right": "issue:35545", + "accept": false, + "reason": "Jamba + DeepSpeed ZeRO3 collective mismatch and ModernBERT ONNX export are unrelated bugs." 
+ }, + { + "left": "issue:35545", + "right": "issue:42915", + "accept": false, + "reason": "ONNX export error for ModernBERT and Qwen3Moe FP8 failure are different features and failure modes." + }, + { + "left": "issue:35545", + "right": "issue:41720", + "accept": false, + "reason": "ModernBERT ONNX export and Qwen3 auto-device-map CUDA assert are unrelated model/runtime paths." + }, + { + "left": "issue:44509", + "right": "issue:44512", + "accept": false, + "reason": "Both are v5 docs removals, but they target different removed entries and likely need separate documentation edits." + }, + { + "left": "issue:43638", + "right": "issue:44805", + "accept": false, + "reason": "Both are indexing errors, but one is a DeepSpeed ZeRO3/BERT initialization issue while the other is a mask-shape mismatch; not the same concrete bug." + }, + { + "left": "issue:35545", + "right": "issue:36010", + "accept": false, + "reason": "ModernBERT export to ONNX and GenerationMixin import failure are distinct issues." + }, + { + "left": "issue:43596", + "right": "issue:44805", + "accept": false, + "reason": "DeepSpeed ZeRO3 + BertModel out-of-bounds index and a tensor mask shape mismatch are different error conditions and code paths." + }, + { + "left": "issue:44805", + "right": "issue:45161", + "accept": false, + "reason": "A generic mask/tensor indexing mismatch is not the same bug as GPT-OSS MoE tensor-parallel-only failure." + }, + { + "left": "issue:44805", + "right": "issue:44918", + "accept": false, + "reason": "TRL SFT unpacking failure for Qwen3.5 and a mask-shape IndexError are different training-time bugs." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 2, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:24643", + "issue:30277", + "issue:34634", + "issue:34928", + "issue:35545", + "issue:36331", + "issue:39290", + "issue:43039", + "issue:43596", + "issue:43638", + "issue:43975", + "issue:44292", + "issue:44457", + "issue:44661", + "issue:44805", + "issue:45081", + "issue:45161", + "issue:45237" + ], + "soft_pairs": [ + "issue:44661|issue:44805", + "issue:30277|issue:43638", + "issue:34634|issue:35545", + "issue:43596|issue:43638", + "issue:43975|issue:44457", + "issue:43039|issue:45161", + "issue:44292|issue:44457", + "issue:34928|issue:44805", + "issue:24643|issue:30277", + "issue:43975|issue:45237", + "issue:43638|issue:45161", + "issue:34928|issue:36331", + "issue:43596|issue:45161", + "issue:39290|issue:45081" + ], + "prepared_review_unit_hash": "a273f6ab58c2c009e9fa5a99599110ad7ea3bccb669713d081a342feb93f55f6", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 14, + "serialized_chars": 14841, + "estimated_input_tokens": 3711, + "estimated_eval_tokens": 7678 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous. 
The only likely duplicate pair is the two DeepSpeed ZeRO3 + Bert IndexError reports (43596/43638); the rest describe unrelated models, tokenizer, training, or export issues.", + "confidence": 0.79, + "canonical_issue_reason": "Issue 43596 is the cleaner representative of the shared DeepSpeed ZeRO3/Bert zero-sized tensor IndexError; 43638 appears to be a narrower variant of the same failure mode.", + "canonical_pr_reason": null, + "best_issue_reason": "43596 best captures the underlying bug with a concise, general title and clear error signature, making it the best canonical issue for the one true duplicate pair.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44661", + "right": "issue:44805", + "accept": false, + "reason": "Different subsystems and failure modes: tokenizer mapping/add-new-model support vs a mask/tensor shape mismatch IndexError." + }, + { + "left": "issue:30277", + "right": "issue:43638", + "accept": false, + "reason": "Both involve DeepSpeed, but the concrete errors differ: collective mismatch in Jamba-v01 vs zero-sized tensor IndexError in Bert init." + }, + { + "left": "issue:34634", + "right": "issue:35545", + "accept": false, + "reason": "Unrelated problems: BarkProcessor voice_preset handling vs ModernBERT ONNX export." + }, + { + "left": "issue:43596", + "right": "issue:43638", + "accept": true, + "reason": "Same concrete DeepSpeed ZeRO3/Bert initialization failure pattern with the same IndexError class; the non-pretrained Bert detail looks like a repro variant." + }, + { + "left": "issue:43975", + "right": "issue:44457", + "accept": false, + "reason": "Different model/output issues: detokenization bug vs LoRA merge/save/reload output mismatch." + }, + { + "left": "issue:43039", + "right": "issue:45161", + "accept": false, + "reason": "Different code paths: Liger Kernel cross_entropy fallback vs GPT-OSS MoE tensor-parallel behavior." 
+ }, + { + "left": "issue:44292", + "right": "issue:44457", + "accept": false, + "reason": "Qwen NVFP4 runtime failure is unrelated to LoRA merge/reload consistency." + }, + { + "left": "issue:34928", + "right": "issue:44805", + "accept": false, + "reason": "Both are tensor shape errors, but the contexts differ substantially: FSDP checkpointing recompute vs mask/indexing mismatch." + }, + { + "left": "issue:24643", + "right": "issue:30277", + "accept": false, + "reason": "Different DeepSpeed-related errors and model paths; one is a weight dimensionality runtime error, the other a collective mismatch." + }, + { + "left": "issue:43975", + "right": "issue:45237", + "accept": false, + "reason": "No shared underlying bug: detokenization in DeepSeek vs GPT-OSS AMD GPU execution." + }, + { + "left": "issue:43638", + "right": "issue:45161", + "accept": false, + "reason": "Both mention Bert/GPT-OSS style distributed setups, but the specific failures are different and not the same code-path bug." + }, + { + "left": "issue:34928", + "right": "issue:36331", + "accept": false, + "reason": "Activation-checkpointing/FSDP recompute mismatch is unrelated to CustomTrainer's unexpected num_items_in_batch argument." + }, + { + "left": "issue:43596", + "right": "issue:45161", + "accept": false, + "reason": "DeepSpeed ZeRO3 Bert IndexError is unrelated to GPT-OSS MoE tensor-parallel failure." + }, + { + "left": "issue:39290", + "right": "issue:45081", + "accept": false, + "reason": "Different tokenizer/runtime bugs: Gemma3 config missing attribute under vLLM vs Mistral regex patch crashing on tokenizer backend access." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 3, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:30277", + "issue:34928", + "issue:36331", + "issue:41720", + "issue:41762", + "issue:43039", + "issue:43425", + "issue:43541", + "issue:43638", + "issue:43716", + "issue:43827", + "issue:43828", + "issue:44292", + "issue:44512", + "issue:44560", + "issue:44805", + "issue:44918", + "issue:45161" + ], + "soft_pairs": [ + "issue:43828|issue:45161", + "issue:43425|issue:44292", + "issue:43541|issue:43716", + "issue:44918|issue:45161", + "issue:34928|issue:41720", + "issue:43828|issue:44292", + "issue:43716|issue:44292", + "issue:44292|issue:44918", + "issue:43039|issue:44918", + "issue:41762|issue:43638", + "issue:43827|issue:44512", + "issue:43716|issue:44560", + "issue:43716|issue:44918", + "issue:43039|issue:43828", + "issue:30277|issue:36331", + "issue:43039|issue:44805" + ], + "prepared_review_unit_hash": "b355ccd8c574bcbbc1eea51b8caf2e88dbea5f1e555545e32673d0be126e1773", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 16, + "serialized_chars": 15284, + "estimated_input_tokens": 3821, + "estimated_eval_tokens": 7898 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous and does not form a broad duplicate set. 
The only strong near-duplicate is the DeepSpeed ZeRO-3 \"index 0 out of bounds for dimension 0 with size 0\" pair (41762, 43638); the rest are different bugs, models, or subsystems and should stay separate.", + "confidence": 0.87, + "canonical_issue_reason": "issue:41762 is the clearest representative of the shared ZeRO-3 zero-length indexing failure and is slightly more specific/reproducible than 43638.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:41762 is the best issue to anchor the one real duplicate-like pair in this set; it captures the common failure mode most clearly.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43828", + "right": "issue:45161", + "accept": false, + "reason": "Both mention model/runtime limitations around MoE/dtype/device behavior, but they describe different failures and code paths; not the same underlying bug." + }, + { + "left": "issue:43425", + "right": "issue:44292", + "accept": false, + "reason": "Torch-version incompatibility is not the same as a Qwen NVFP4 runtime failure; too broad and unrelated." + }, + { + "left": "issue:43541", + "right": "issue:43716", + "accept": false, + "reason": "Both involve dtype-ish runtime problems, but one is a grouped_mm tracing error and the other is an image preprocessor/model mismatch; different root causes." + }, + { + "left": "issue:44918", + "right": "issue:45161", + "accept": false, + "reason": "Different model/trainer behavior: SFT embedding unpacking vs GPT-OSS TP failure. Shared MoE/model wording is not enough." + }, + { + "left": "issue:34928", + "right": "issue:41720", + "accept": false, + "reason": "Activation checkpointing/FSDP tensor-size mismatch and Qwen auto device mapping cudaErrorAssert are distinct distributed/runtime issues." + }, + { + "left": "issue:43828", + "right": "issue:44292", + "accept": false, + "reason": "One is a torch.autocast dtype mismatch on Phi-tiny-MoE, the other is a Qwen NVFP4 runtime error; not the same bug." 
+ }, + { + "left": "issue:43716", + "right": "issue:44292", + "accept": false, + "reason": "Both are model/runtime dtype-related, but one is an image preprocessor mismatch and the other is a Qwen NVFP4 execution problem." + }, + { + "left": "issue:44292", + "right": "issue:44918", + "accept": false, + "reason": "Different failure modes and subsystems; no concrete evidence they share a root cause." + }, + { + "left": "issue:43039", + "right": "issue:44918", + "accept": false, + "reason": "Liger Kernel cross_entropy routing and TRL SFT embedding unpacking are unrelated changes/failures." + }, + { + "left": "issue:41762", + "right": "issue:43638", + "accept": true, + "reason": "Both report the same ZeRO-3 failure pattern: 'index 0 is out of bounds for dimension 0 with size 0' during model loading/training, making them very likely the same underlying bug." + }, + { + "left": "issue:43827", + "right": "issue:44512", + "accept": false, + "reason": "Both are docs-related references to removed commands, but they are different documentation pages and not the same concrete issue." + }, + { + "left": "issue:43716", + "right": "issue:44560", + "accept": false, + "reason": "Both involve model-specific runtime issues, but the reported errors and affected components differ (dtype mismatch vs StopIteration in video input handling)." + }, + { + "left": "issue:43716", + "right": "issue:44918", + "accept": false, + "reason": "Image-preprocessor dtype mismatch and TRL SFT embedding unpacking are different bugs." + }, + { + "left": "issue:43039", + "right": "issue:43828", + "accept": false, + "reason": "Liger Kernel cross_entropy dispatch and a torch.autocast dtype mismatch are not the same code-path problem." + }, + { + "left": "issue:30277", + "right": "issue:36331", + "accept": false, + "reason": "DeepSpeed collective mismatch during Jamba training and a Trainer API signature mismatch are completely different issues." 
+ }, + { + "left": "issue:43039", + "right": "issue:44805", + "accept": false, + "reason": "Cross-entropy dispatch under Liger Kernel is unrelated to a mask/index shape mismatch error." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 4, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:34928", + "issue:36010", + "issue:39290", + "issue:41093", + "issue:41720", + "issue:41762", + "issue:43039", + "issue:43425", + "issue:43531", + "issue:43596", + "issue:43638", + "issue:43854", + "issue:43866", + "issue:43901", + "issue:44512", + "issue:44560", + "issue:44863", + "issue:45070" + ], + "soft_pairs": [ + "issue:34928|issue:36010", + "issue:41762|issue:43596", + "issue:43901|issue:44512", + "issue:43039|issue:44560", + "issue:39290|issue:41720", + "issue:43866|issue:44863", + "issue:43854|issue:43866", + "issue:43425|issue:45070", + "issue:39290|issue:43531", + "issue:41093|issue:43638" + ], + "prepared_review_unit_hash": "f2f70eb1ca163cbf28eea131a8016f7aa406cf3084ead543432d729dc1c7acf3", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13481, + "estimated_input_tokens": 3371, + "estimated_eval_tokens": 6998 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is heterogeneous: most items are unrelated regressions or docs issues 
that only share broad error patterns. A few pairs are superficially similar (especially zero3/index-error reports and Qwen3 sliding_window reports), but not close enough to be the same underlying bug or change.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue stands out for the whole cluster; the items span distinct bugs across checkpointing/FSDP, imports, vLLM/Gemma, masking/indexing, device mapping, DeepSpeed ZeRO-3, checkpoint corruption, docs, and config compatibility.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:41762 is the strongest representative of one real duplicate family because it gives a concrete, specific DeepSpeed ZeRO-3 index-out-of-bounds failure; among the listed items it is one of the clearest and most actionable reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:34928", + "right": "issue:36010", + "accept": false, + "reason": "Different failure modes and subsystems: activation checkpointing/FSDP tensor-size mismatch vs a GenerationMixin import error." + }, + { + "left": "issue:41762", + "right": "issue:43596", + "accept": false, + "reason": "Both involve ZeRO-3 index errors, but the affected model/setup and exact failure context differ; not enough evidence of the same underlying bug." + }, + { + "left": "issue:43901", + "right": "issue:44512", + "accept": false, + "reason": "Both are docs-related, but one is about return_all_scores behavior and the other about a removed run command; different documentation issues." + }, + { + "left": "issue:43039", + "right": "issue:44560", + "accept": false, + "reason": "Liger Kernel cross_entropy call vs Qwen3-vl-embedding video StopIteration are unrelated change areas and error paths." + }, + { + "left": "issue:39290", + "right": "issue:41720", + "accept": false, + "reason": "Both mention model/runtime errors, but Gemma3 sliding_window_pattern with vLLM is a different bug than Qwen3 auto-device-mapping cudaErrorAssert." 
+ }, + { + "left": "issue:43866", + "right": "issue:44863", + "accept": false, + "reason": "Both are model-loading problems, but they concern different models and likely different root causes; not the same concrete bug." + }, + { + "left": "issue:43854", + "right": "issue:43866", + "accept": false, + "reason": "One is a unit-test load failure for GLM-4.7-Flash; the other is a corrupted Ovis2 checkpoint. Different models and failure causes." + }, + { + "left": "issue:43425", + "right": "issue:45070", + "accept": false, + "reason": "Torch 2.10 incompatibility is a broad dependency issue, while the pydantic PretrainedConfig field regression is a separate configuration-model bug." + }, + { + "left": "issue:39290", + "right": "issue:43531", + "accept": false, + "reason": "Both reference sliding_window, but they are different models and different symptoms; not a single concrete fixable code-path problem." + }, + { + "left": "issue:41093", + "right": "issue:43638", + "accept": false, + "reason": "Both are index-related, but one is a mask/tensor shape mismatch and the other is a ZeRO-3 BertModel index-out-of-bounds issue; too different to merge." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 5, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36010", + "issue:39290", + "issue:41093", + "issue:41762", + "issue:43296", + "issue:43366", + "issue:43404", + "issue:43541", + "issue:43572", + "issue:43792", + "issue:43828", + "issue:43866", + "issue:44841", + "issue:45070", + "issue:45084", + "issue:45362", + "issue:45464" + ], + "soft_pairs": [ + "issue:43792|issue:45070", + "issue:45362|issue:45464", + "issue:43404|issue:43866", + "issue:43541|issue:43828", + "issue:36010|issue:39290", + "issue:44841|issue:45084", + "issue:41093|issue:41762", + "issue:43572|issue:45070", + "issue:43296|issue:43366" + ], + "prepared_review_unit_hash": "a52bc6fff58a03ef6e44031962dd0f1e20e164832b2a6aeaaf7d3c4feca32011", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12473, + "estimated_input_tokens": 3119, + "estimated_eval_tokens": 6494 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly unrelated model-specific regressions and API/config issues, not a true duplicate cluster. 
The only broadly similar items are the two Qwen3.5 chat-related reports, but they still appear to describe different failure modes.", + "confidence": 0.86, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:45070 is the broadest, most general regression report in the set (v5.4.0 / PretrainedConfig pydantic break) and is the best single issue to use as a loose cluster anchor, though it is not a duplicate of the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43792", + "right": "issue:45070", + "accept": false, + "reason": "Different model and failure mode: Whisper loading/runtime issue vs a PretrainedConfig pydantic regression." + }, + { + "left": "issue:45362", + "right": "issue:45464", + "accept": false, + "reason": "Both involve Qwen3.5 and chat, but one is a crash in transformers chat while the other is a streaming chat/completions API failure on a different model size; not the same concrete bug." + }, + { + "left": "issue:43404", + "right": "issue:43866", + "accept": false, + "reason": "Mistral3 weight-tying bug and Ovis2 checkpoint corruption are unrelated issues." + }, + { + "left": "issue:43541", + "right": "issue:43828", + "accept": false, + "reason": "Grouped_mm tracing error in Mixtral is a different code path from a torch.autocast dtype mismatch in Phi-tiny-MoE." + }, + { + "left": "issue:36010", + "right": "issue:39290", + "accept": false, + "reason": "ImportError for GenerationMixin is unrelated to the Gemma3 sliding_window_pattern attribute error with vLLM." + }, + { + "left": "issue:44841", + "right": "issue:45084", + "accept": false, + "reason": "Processor failure for Voxtral differs from a template-node compilation TypeError; no shared underlying bug is evident." 
+ }, + { + "left": "issue:41093", + "right": "issue:41762", + "accept": false, + "reason": "Mask shape mismatch during indexing is a different failure from zero-sized dimension errors while loading Gemma3 with DeepSpeed ZeRO-3." + }, + { + "left": "issue:43572", + "right": "issue:45070", + "accept": false, + "reason": "Missing pad_token_idx in StableLmConfig is a specific config field regression, not the broader PretrainedConfig pydantic-model break." + }, + { + "left": "issue:43296", + "right": "issue:43366", + "accept": false, + "reason": "PaddleOCR-VL load failure in vLLM and GGUF gpt-oss support are different model/support requests, not the same bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 6, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:39290", + "issue:41093", + "issue:43054", + "issue:43366", + "issue:43531", + "issue:43541", + "issue:43596", + "issue:43742", + "issue:43828", + "issue:43994", + "issue:44291", + "issue:44387", + "issue:44589", + "issue:44661", + "issue:45005", + "issue:45070", + "issue:45161", + "issue:45464" + ], + "soft_pairs": [ + "issue:44387|issue:45005", + "issue:43541|issue:45161", + "issue:43828|issue:45464", + "issue:44291|issue:45070", + "issue:43531|issue:45070", + "issue:43366|issue:45464", + "issue:44661|issue:45464", + "issue:43541|issue:44589", + "issue:39290|issue:43742", + "issue:43054|issue:43994", + "issue:41093|issue:43596" + ], + "prepared_review_unit_hash": "6716d1a3fef821f8f949b815891237d8fa3bcc3dd83c3704ddafe2d18f250a53", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + 
"serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13714, + "estimated_input_tokens": 3429, + "estimated_eval_tokens": 7114 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are a loose set of unrelated bugs and feature requests across different model families and failure modes. None of the soft pairs look like the same underlying issue or a mergeable duplicate PR cluster.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44387", + "right": "issue:45005", + "accept": false, + "reason": "Different problems: int4 quantization OOM vs tied-weights handling for translation models in v5." + }, + { + "left": "issue:43541", + "right": "issue:45161", + "accept": false, + "reason": "Both mention MoE, but one is a Mixtral torch-dynamo grouped_mm runtime error and the other is TP-only failure on GPT-OSS MoE; not the same bug." + }, + { + "left": "issue:43828", + "right": "issue:45464", + "accept": false, + "reason": "Unrelated failures: autocast dtype mismatch in Phi-tiny-MoE vs streaming chat/completions API failure on Qwen3.5." + }, + { + "left": "issue:44291", + "right": "issue:45070", + "accept": false, + "reason": "Different regressions: unexpected _is_hf_initialized argument during init_empty_weights vs pydantic PretrainedConfig field breakage." + }, + { + "left": "issue:43531", + "right": "issue:45070", + "accept": false, + "reason": "Sliding-window handling for Qwen3-MoE is unrelated to the v5.4.0 pydantic config-field issue." 
+ }, + { + "left": "issue:43366", + "right": "issue:45464", + "accept": false, + "reason": "GPT-OSS GGUF architecture support request is not the same as a streaming inference API failure." + }, + { + "left": "issue:44661", + "right": "issue:45464", + "accept": false, + "reason": "Tokenizer mapping/add-new-model-like failure is a model-registration bug, not the Qwen3.5 streaming bug." + }, + { + "left": "issue:43541", + "right": "issue:44589", + "accept": false, + "reason": "Grouped_mm tracing error and Float8 storage lookup failure are distinct runtime issues." + }, + { + "left": "issue:39290", + "right": "issue:43742", + "accept": false, + "reason": "Gemma3 sliding_window_pattern attribute error and MobileLLM key error are different loading/config bugs." + }, + { + "left": "issue:43054", + "right": "issue:43994", + "accept": false, + "reason": "Both involve SigLIP2, but one reports poor embedding quality while the other reports nonsensical AutoModel/pipeline outputs; not clearly the same defect." + }, + { + "left": "issue:41093", + "right": "issue:43596", + "accept": false, + "reason": "Mask/tensor shape mismatch and deepspeed zero3 BertModel index error are different index errors with different contexts." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 7, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:39186", + "issue:40990", + "issue:42915", + "issue:43404", + "issue:43541", + "issue:43645", + "issue:43873", + "issue:43975", + "issue:44292", + "issue:44360", + "issue:44492", + "issue:44512", + "issue:44530", + "issue:44863", + "issue:44918", + "issue:44960", + "issue:45005", + "issue:45399" + ], + "soft_pairs": [ + "issue:42915|issue:43541", + "issue:39186|issue:44918", + "issue:40990|issue:44960", + "issue:43404|issue:45005", + "issue:43645|issue:45399", + "issue:44530|issue:45005", + "issue:44863|issue:45005", + "issue:43873|issue:45005", + "issue:42915|issue:45399", + "issue:44360|issue:44512", + "issue:43975|issue:44292", + "issue:44492|issue:44512" + ], + "prepared_review_unit_hash": "b6126ea0ddb933391761634cc9223b0f4da11ae39ced67338a4b118b407fcccd", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 12, + "serialized_chars": 13740, + "estimated_input_tokens": 3435, + "estimated_eval_tokens": 7126 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous: model-specific runtime/loading bugs, one quantization/offloading issue, and a couple of documentation nits. 
None of the soft-similarity pairs look like the same underlying bug/change, so I would not merge any of them as duplicates. No PRs are present.", + "confidence": 0.93, + "canonical_issue_reason": "No true canonical duplicate stands out because the reports cover different models, error paths, and even docs-only changes. If forced to pick the closest umbrella, issue 45005 is the broadest v5/tied-weights report, but it does not represent most of the cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45005 is the best representative only in a loose sense: it is the broadest and most umbrella-like item here, centered on a v5 regression around tied weights. Even so, it is not a good fit for the majority of the other issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42915", + "right": "issue:43541", + "accept": false, + "reason": "Different models and failures: FineGrainedFP8Config setup vs torch-dynamo grouped_mm tracing. Not the same bug path." + }, + { + "left": "issue:39186", + "right": "issue:44918", + "accept": false, + "reason": "FSDP 2-D weight runtime error and Qwen3.5 embedding unpacking in TRL are unrelated paths." + }, + { + "left": "issue:40990", + "right": "issue:44960", + "accept": false, + "reason": "Perplexity degradation on gpt-oss and GLM5 loading/runtime behavior do not point to the same underlying issue." + }, + { + "left": "issue:43404", + "right": "issue:45005", + "accept": false, + "reason": "Both mention tied weights, but the concrete cases differ too much (Mistral3 image-text-to-text vs translation models) to treat as one fix." + }, + { + "left": "issue:43645", + "right": "issue:45399", + "accept": false, + "reason": "Custom model initialization in Jupyter is unrelated to the flash-attn2 fallback gating problem." 
+ }, + { + "left": "issue:44530", + "right": "issue:45005", + "accept": false, + "reason": "PagedAttentionCache linear_attention crash is a different code path from tied-weight regressions." + }, + { + "left": "issue:44863", + "right": "issue:45005", + "accept": false, + "reason": "NemotronH checkpoint-loading failure is model-loading specific and not the same as the tied-weights regression." + }, + { + "left": "issue:43873", + "right": "issue:45005", + "accept": false, + "reason": "Quantization/offloading behavior is a different subsystem from tied-weight handling." + }, + { + "left": "issue:42915", + "right": "issue:45399", + "accept": false, + "reason": "Qwen3Moe FP8 failure and flash-attn2 fallback checks are unrelated." + }, + { + "left": "issue:44360", + "right": "issue:44512", + "accept": false, + "reason": "A DSA indexer discussion is not the same as a docs note about a removed command." + }, + { + "left": "issue:43975", + "right": "issue:44292", + "accept": false, + "reason": "DeepSeek detokenization and Qwen NVFP4 runtime errors are different model-specific bugs." + }, + { + "left": "issue:44492", + "right": "issue:44512", + "accept": false, + "reason": "Docs typo in cache strategies and docs mentioning a removed command are separate documentation changes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 8, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36331", + "issue:38175", + "issue:39692", + "issue:41553", + "issue:41762", + "issue:43054", + "issue:43644", + "issue:43828", + "issue:43866", + "issue:43872", + "issue:43881", + "issue:43883", + "issue:43950", + "issue:44403", + "issue:44534", + "issue:44589", + "issue:44928" + ], + "soft_pairs": [ + "issue:43950|issue:44534", + "issue:43644|issue:43950", + "issue:43828|issue:44928", + "issue:39692|issue:43054", + "issue:41553|issue:43883", + "issue:43866|issue:44403", + "issue:36331|issue:41762", + "issue:41553|issue:43881", + "issue:43872|issue:44589", + "issue:38175|issue:43054" + ], + "prepared_review_unit_hash": "b9b5daa647367ccba6f14a4a53934f014f7e99d458cdc9cd663cf1057d2a565a", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12847, + "estimated_input_tokens": 3212, + "estimated_eval_tokens": 6680 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of unrelated issues; the only clear duplicate subcluster is the non-persistent buffer corruption regression reported in 43644/43950/44534. 
All other soft pairs look superficially similar but describe different bugs or model-specific failures.", + "confidence": 0.95, + "canonical_issue_reason": "issue:43950 is the strongest canonical for the buffer-corruption subcluster: it names the regression precisely (`from_pretrained()` corrupting `register_buffer(persistent=False)` buffers) and is the most specific/root-cause-oriented report among the three matching titles.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43950 is the best representative overall because it most clearly describes the actual bug and is the most actionable anchor for the duplicate set; the remaining items in the cluster are not duplicates of it.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43950", + "right": "issue:44534", + "accept": true, + "reason": "Same underlying regression: both report non-persistent buffers being filled/corrupted in Transformers v5." + }, + { + "left": "issue:43644", + "right": "issue:43950", + "accept": true, + "reason": "Same bug and same code path: v5 regression corrupting non-persistent buffers during load." + }, + { + "left": "issue:43828", + "right": "issue:44928", + "accept": false, + "reason": "Different failures: autocast dtype mismatch in Phi MoE vs RLHF gradient explosion from 3D position_ids/SDPA fallback." + }, + { + "left": "issue:39692", + "right": "issue:43054", + "accept": false, + "reason": "Both involve SigLIP2, but one is documentation/example errors and the other is degraded text embeddings; not the same bug." + }, + { + "left": "issue:41553", + "right": "issue:43883", + "accept": false, + "reason": "Unrelated model-loading problems: Voxtral tokenizer error message vs Molmo missing `all_tied_weights_keys`." + }, + { + "left": "issue:43866", + "right": "issue:44403", + "accept": false, + "reason": "Corrupted checkpoint for Ovis2 is not the same as generic loading noise." 
+ }, + { + "left": "issue:36331", + "right": "issue:41762", + "accept": false, + "reason": "Different errors and subsystems: Trainer API argument mismatch vs Gemma3 load failure under ZeRO-3." + }, + { + "left": "issue:41553", + "right": "issue:43881", + "accept": false, + "reason": "Different model-specific loading issues with no evidence of the same root cause." + }, + { + "left": "issue:43872", + "right": "issue:44589", + "accept": false, + "reason": "Both are bitsandbytes/storage-related, but they describe different exceptions and likely different incompatibilities." + }, + { + "left": "issue:38175", + "right": "issue:43054", + "accept": false, + "reason": "Same model family only; zero probabilities is a different symptom from worse text embeddings." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 9, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41762", + "issue:42491", + "issue:42915", + "issue:42947", + "issue:43054", + "issue:43493", + "issue:43643", + "issue:43782", + "issue:43856", + "issue:43881", + "issue:43883", + "issue:44368", + "issue:44387", + "issue:44403", + "issue:44451", + "issue:44488", + "issue:44960", + "issue:45020" + ], + "soft_pairs": [ + "issue:42947|issue:44387", + "issue:43881|issue:43883", + "issue:43054|issue:43493", + "issue:44403|issue:44451", + "issue:42947|issue:43856", + "issue:44403|issue:44488", + "issue:43643|issue:45020", + "issue:43782|issue:44960", + "issue:42491|issue:44368", + "issue:41762|issue:42915" + ], + "prepared_review_unit_hash": "c3bd6576ee565a6ea24f91305281adb5d5d4d2456ee8c821819beb26ea5ab8f6", + "component_budget": { + 
"node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13331, + "estimated_input_tokens": 3333, + "estimated_eval_tokens": 6922 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Only one soft pair looks like a true duplicate: the SigLIP2 text-embedding regression. Most other pairs share a subsystem but not the same concrete failure or code path.", + "confidence": 0.88, + "canonical_issue_reason": "issue:43493 is the clearest root-cause report for the SigLIP2 problem; issue:43054 reads like the user-facing symptom, while 43493 names the HF-vs-JAX implementation discrepancy directly.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43643 is the most actionable standalone issue in the set: it has a specific API surface (`AutoConfig.from_pretrained` with `trust_remote_code=True`) and a crisp, reproducible failure mode.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42947", + "right": "issue:44387", + "accept": false, + "reason": "Both are memory-related, but one is a LoRA/gradient-checkpointing correctness issue and the other is int4 quantization reserved-memory growth leading to OOM; different bugs." + }, + { + "left": "issue:43881", + "right": "issue:43883", + "accept": false, + "reason": "Different models and different failures (`glm-4v-9b` load failure vs Molmo missing `all_tied_weights_keys`); not the same underlying issue." + }, + { + "left": "issue:43054", + "right": "issue:43493", + "accept": true, + "reason": "Same SigLIP2 symptom space: 43054 reports worse text embeddings, and 43493 identifies the likely root cause as an HF vs JAX implementation discrepancy." 
+ }, + { + "left": "issue:44403", + "right": "issue:44451", + "accept": false, + "reason": "44403 is about loading noise/warnings, while 44451 is a concrete model load failure; not the same bug." + }, + { + "left": "issue:42947", + "right": "issue:43856", + "accept": false, + "reason": "Both mention Qwen3 MoE and memory, but one is about LoRA checkpointing behavior and the other about training memory inefficiency; too different to merge." + }, + { + "left": "issue:44403", + "right": "issue:44488", + "accept": false, + "reason": "Generic loading noise does not match a specific inability to load a model; different failure modes." + }, + { + "left": "issue:43643", + "right": "issue:45020", + "accept": false, + "reason": "Related to `trust_remote_code`, but 45020 is a broad regression report while 43643 is a specific missing-fields bug in `AutoConfig`; not enough evidence of the same concrete code-path." + }, + { + "left": "issue:43782", + "right": "issue:44960", + "accept": false, + "reason": "Different model families and different errors (`weight_only=True` load error vs a generic GLM5 issue); not the same underlying change." + }, + { + "left": "issue:42491", + "right": "issue:44368", + "accept": false, + "reason": "One is a compatibility break for a trained LoRA on hf4.x/hf5.x, the other is a warning about `tie_word_embeddings`; same ecosystem, but not the same bug." + }, + { + "left": "issue:41762", + "right": "issue:42915", + "accept": false, + "reason": "Different models and different failure modes (`IndexError` under ZeRO-3 vs FineGrainedFP8Config failure); not a duplicate." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 10, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43054", + "issue:43278", + "issue:43792", + "issue:43824", + "issue:43827", + "issue:43872", + "issue:43975", + "issue:44220", + "issue:44451", + "issue:44488", + "issue:44509", + "issue:44661", + "issue:44991", + "issue:45020", + "issue:45081", + "issue:45356", + "issue:45399" + ], + "soft_pairs": [ + "issue:43975|issue:44451", + "issue:45020|issue:45356", + "issue:43824|issue:43872", + "issue:44661|issue:45020", + "issue:43975|issue:44488", + "issue:43054|issue:43278", + "issue:44991|issue:45399", + "issue:43792|issue:44220", + "issue:45081|issue:45399", + "issue:43827|issue:44509" + ], + "prepared_review_unit_hash": "6ac239300b4ce6ad7c573470d7e15d7afead875758e1345a30beef9519e26699", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12932, + "estimated_input_tokens": 3233, + "estimated_eval_tokens": 6722 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of unrelated Transformers regression/issues around model loading, tokenizers, docs, and audio/vision paths. 
Only the two docs issues about removed pipeline task references look like true duplicates; the rest should stay separate.", + "confidence": 0.84, + "canonical_issue_reason": "issue:45020 is the broadest and most central open report in the set, covering recent-version regressions that break model loading via remote code, so it works best as the umbrella issue for this mixed cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45020 is the strongest global representative because it is broad, actively open, and describes the most general failure mode among the issues here.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43975", + "right": "issue:44451", + "accept": false, + "reason": "Different symptoms and model families: one is a detokenization bug for DeepSeek-Coder, the other is a tokenizer load failure for ScandiBERT." + }, + { + "left": "issue:45020", + "right": "issue:45356", + "accept": false, + "reason": "Both concern regressions, but the concrete bugs differ: generic remote-code loading breakage vs. Kimi-K2.5 tokenizer codec / warning behavior." + }, + { + "left": "issue:43824", + "right": "issue:43872", + "accept": false, + "reason": "ImportError for a specific VL class and a bitsandbytes Int8Params constructor mismatch are unrelated failures in different subsystems." + }, + { + "left": "issue:44661", + "right": "issue:45020", + "accept": false, + "reason": "The first is an add-new-model-like tokenizer-mapping edge case; the second is a broader remote_code loading regression. Not the same bug." + }, + { + "left": "issue:43975", + "right": "issue:44488", + "accept": false, + "reason": "Both involve loading or output issues for different models, but the reported failures are not the same code path or symptom." + }, + { + "left": "issue:43054", + "right": "issue:43278", + "accept": false, + "reason": "A quality regression in SigLIP2 text embeddings is not the same as a BF16-to-FP32 dtype mismatch in evaluation." 
+ }, + { + "left": "issue:44991", + "right": "issue:45399", + "accept": false, + "reason": "Tokenizer loading for a specific model and flash-attn fallback selection are unrelated problems." + }, + { + "left": "issue:43792", + "right": "issue:44220", + "accept": false, + "reason": "The Whisper model runtime failure may involve fbank extraction, but the issue titles and descriptions indicate different concrete bugs and no clear shared fix." + }, + { + "left": "issue:45081", + "right": "issue:45399", + "accept": false, + "reason": "Mistral regex patch crashes during tokenizer loading; the other issue is about flash-attn2 fallback being blocked by checks. Different paths, not duplicates." + }, + { + "left": "issue:43827", + "right": "issue:44509", + "accept": true, + "reason": "Both report the same docs drift: references to removed v5 pipeline tasks (summarization/translation/text2text-generation) still appearing in documentation." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 11, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41553", + "issue:42915", + "issue:43257", + "issue:43404", + "issue:43742", + "issue:43854", + "issue:43866", + "issue:43872", + "issue:43940", + "issue:43950", + "issue:44291", + "issue:44488", + "issue:44863", + "issue:44991", + "issue:45020", + "issue:45305", + "issue:45313", + "issue:45399" + ], + "soft_pairs": [ + "issue:43404|issue:43950", + "issue:42915|issue:45020", + "issue:41553|issue:44991", + "issue:43257|issue:43866", + "issue:43742|issue:45399", + "issue:43742|issue:45020", + "issue:43872|issue:44291", + "issue:43940|issue:45313", + 
"issue:42915|issue:43872", + "issue:41553|issue:44488", + "issue:43854|issue:44863", + "issue:43257|issue:45305" + ], + "prepared_review_unit_hash": "ff1c2f92c0d259e8bb0e73ad84c6fb17446b5118b99a4c71aa1db4a161f95b0d", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 12, + "serialized_chars": 14029, + "estimated_input_tokens": 3508, + "estimated_eval_tokens": 7272 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most items are separate model-loading/regression reports. The only strong duplicate match is the `_is_hf_initialized` / `init_empty_weights` TypeError pair; the rest are too model-specific or describe different failure modes.", + "confidence": 0.91, + "canonical_issue_reason": "issue:44291 is the clearest, most general report of the shared `_is_hf_initialized` regression and best represents the accepted duplicate pair.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44291 is the best global anchor because it states the underlying incompatibility most directly and broadly.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43404", + "right": "issue:43950", + "accept": false, + "reason": "Both are loading-related, but one is about lm_head weight tying in Mistral3 while the other is a `from_pretrained()` buffer corruption regression; different bugs and code paths." + }, + { + "left": "issue:42915", + "right": "issue:45020", + "accept": false, + "reason": "Qwen3Moe FP8 failure vs. remote-code loading breakage are different problems, only loosely related by model loading." 
+ }, + { + "left": "issue:41553", + "right": "issue:44991", + "accept": false, + "reason": "Both mention tokenizer loading, but they target different models and likely different tokenizer issues; not the same underlying bug." + }, + { + "left": "issue:43257", + "right": "issue:43866", + "accept": false, + "reason": "Qwen3 MOE weight conversion under accelerate+deepspeed is unrelated to a corrupted Ovis2 checkpoint." + }, + { + "left": "issue:43742", + "right": "issue:45399", + "accept": false, + "reason": "A MobileLLM key error and a flash-attn2 fallback check issue are distinct failures." + }, + { + "left": "issue:43742", + "right": "issue:45020", + "accept": false, + "reason": "MobileLLM key error is not the same as the broad remote_code regression." + }, + { + "left": "issue:43872", + "right": "issue:44291", + "accept": true, + "reason": "These describe the same `_is_hf_initialized` TypeError regression during model loading / `init_empty_weights`, with one being the bitsandbytes manifestation." + }, + { + "left": "issue:43940", + "right": "issue:45313", + "accept": false, + "reason": "Both are Qwen DeepSpeed ZeRO-3 loading failures, but the model variants and reported missing-weight symptoms differ too much to treat as the same concrete bug." + }, + { + "left": "issue:42915", + "right": "issue:43872", + "accept": false, + "reason": "FP8 config failure for Qwen3Moe is unrelated to the bitsandbytes `_is_hf_initialized` keyword incompatibility." + }, + { + "left": "issue:41553", + "right": "issue:44488", + "accept": false, + "reason": "Different tokenizer-loading failures for different models; no evidence of the same root cause." + }, + { + "left": "issue:43854", + "right": "issue:44863", + "accept": false, + "reason": "GLM-4.7-Flash test loading failure and NemotronH checkpoint loading failure are separate model-specific issues." 
+ }, + { + "left": "issue:43257", + "right": "issue:45305", + "accept": false, + "reason": "Weight conversion/loading under DeepSpeed is not the same as gradient averaging with GAS and `model_accepts_loss_kwargs=True`." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 12, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:24643", + "issue:30064", + "issue:36331", + "issue:41093", + "issue:43366", + "issue:43531", + "issue:43550", + "issue:43828", + "issue:43872", + "issue:44479", + "issue:44530", + "issue:44560", + "issue:44589", + "issue:44805", + "issue:44863", + "issue:44910", + "issue:44918", + "issue:45084" + ], + "soft_pairs": [ + "issue:36331|issue:41093", + "issue:41093|issue:44805", + "issue:44479|issue:44560", + "issue:44530|issue:44863", + "issue:43872|issue:45084", + "issue:43366|issue:43828", + "issue:24643|issue:30064", + "issue:43366|issue:43531", + "issue:43550|issue:44910", + "issue:44560|issue:44918", + "issue:43366|issue:44589" + ], + "prepared_review_unit_hash": "5d0924a5d684775dd7223fe15100e68bd9f93fbc61a1e2668242aae0dce121e8", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13761, + "estimated_input_tokens": 3441, + "estimated_eval_tokens": 7138 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "One likely 
duplicate pair stands out: the two generic mask/tensor shape-mismatch IndexError reports. The remaining items describe different models, error types, or subsystems and should stay separate.", + "confidence": 0.76, + "canonical_issue_reason": "issue:44805 is the best canonical issue for the accepted duplicate pair because it reports the same mask-vs-indexed-tensor shape mismatch as issue:41093 and provides the fuller, later report.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44805 is the strongest representative issue in this set: it matches the duplicate mask-shape bug and has the most detailed report of that failure mode.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:36331", + "right": "issue:41093", + "accept": false, + "reason": "Different failures: a Trainer API keyword-argument breakage vs a tensor mask shape mismatch. Not the same underlying bug." + }, + { + "left": "issue:41093", + "right": "issue:44805", + "accept": true, + "reason": "Same concrete error family: both report an IndexError from a mask shape not matching the indexed tensor shape, indicating the same underlying bug." + }, + { + "left": "issue:44479", + "right": "issue:44560", + "accept": false, + "reason": "Both involve video/Qwen models, but the reported regressions and error paths differ; too little evidence they are the same bug." + }, + { + "left": "issue:44530", + "right": "issue:44863", + "accept": false, + "reason": "Different subsystems and symptoms: PagedAttentionCache linear_attention crash vs inability to load NemotronH checkpoints." + }, + { + "left": "issue:43872", + "right": "issue:45084", + "accept": false, + "reason": "Unrelated errors: bitsandbytes constructor incompatibility vs a torch compile/template-node TypeError." + }, + { + "left": "issue:43366", + "right": "issue:43828", + "accept": false, + "reason": "No shared concrete code-path problem; one is GGUF architecture support, the other is a dtype mismatch under autocast." 
+ }, + { + "left": "issue:24643", + "right": "issue:30064", + "accept": false, + "reason": "Completely different areas: DeepSpeed training weight-dimension error vs image processor segmentation-map handling." + }, + { + "left": "issue:43366", + "right": "issue:43531", + "accept": false, + "reason": "Both mention model support issues, but they concern different models and different failure modes." + }, + { + "left": "issue:43550", + "right": "issue:44910", + "accept": false, + "reason": "Different attention/compile failures on different models; not enough evidence of the same underlying code-path bug." + }, + { + "left": "issue:44560", + "right": "issue:44918", + "accept": false, + "reason": "One is a video StopIteration in Qwen3-vl-embedding, the other is an SFT trainer embedding-unpacking failure; distinct errors." + }, + { + "left": "issue:43366", + "right": "issue:44589", + "accept": false, + "reason": "Different problem classes: GGUF architecture support vs missing Float8 storage type." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 13, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36010", + "issue:38175", + "issue:42617", + "issue:42915", + "issue:43366", + "issue:43638", + "issue:43645", + "issue:43646", + "issue:43824", + "issue:43950", + "issue:43957", + "issue:43994", + "issue:44560", + "issue:44661", + "issue:44805", + "issue:44877", + "issue:45030", + "issue:45325" + ], + "soft_pairs": [ + "issue:43645|issue:43646", + "issue:43950|issue:45325", + "issue:43824|issue:43957", + "issue:42915|issue:43957", + "issue:42617|issue:43366", + "issue:38175|issue:43994", + "issue:44560|issue:44805", + "issue:43646|issue:43950", + "issue:36010|issue:42915", + "issue:43638|issue:44661", + "issue:44877|issue:45030" + ], + "prepared_review_unit_hash": "fad29bbafc79ebadd92068896db66da9b982d88e689f972a53b631b7169a8877", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13806, + "estimated_input_tokens": 3452, + "estimated_eval_tokens": 7160 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is heterogeneous, but there are two clear duplicate-like issue pairs: the Transformers 5.0 custom model initialization regression, and the Siglip2 bad-output regression. 
A third pair about stricter config validation also looks likely to share the same underlying loading regression. The rest look unrelated.", + "confidence": 0.78, + "canonical_issue_reason": "issue:43646 is the broadest and cleanest representative of the custom-model-initialization regression; issue:43645 is the narrower Jupyter-specific report.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue:43646 is the best overall representative among the cluster items because it states the core custom-model initialization breakage without extra environment-specific noise.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43645", + "right": "issue:43646", + "accept": true, + "reason": "Same underlying regression: Transformers 5.0 breaks custom model initialization; 43645 just adds the Jupyter notebook context." + }, + { + "left": "issue:43950", + "right": "issue:45325", + "accept": false, + "reason": "Both involve model-loading behavior, but the symptoms and code paths differ: non-persistent buffer corruption vs. Qwen2.5-VL rope/position-id scaling." + }, + { + "left": "issue:43824", + "right": "issue:43957", + "accept": false, + "reason": "Different failures: one is an import/export issue for a missing model class, the other is a meta-device loading regression for some models." + }, + { + "left": "issue:42915", + "right": "issue:43957", + "accept": false, + "reason": "Both are loading-related, but the concrete problems differ too much: FineGrainedFP8Config failure vs. meta-device initialization breakage." + }, + { + "left": "issue:42617", + "right": "issue:43366", + "accept": false, + "reason": "Unrelated topics: a 3d_parallel.py runtime failure versus GGUF support for gpt-oss architecture." 
+ }, + { + "left": "issue:38175", + "right": "issue:43994", + "accept": true, + "reason": "Same model-family regression with the same user-visible symptom class: Siglip2 produces invalid/garbled outputs, described as zero probabilities in one report and nonsensical results in the other." + }, + { + "left": "issue:44560", + "right": "issue:44805", + "accept": false, + "reason": "Both are runtime errors, but they point to different multimodal tensor-shape issues and do not clearly share the same bug." + }, + { + "left": "issue:43646", + "right": "issue:43950", + "accept": false, + "reason": "These are separate regressions: custom model initialization in Transformers 5.0 versus silent corruption of non-persistent buffers during from_pretrained()." + }, + { + "left": "issue:36010", + "right": "issue:42915", + "accept": false, + "reason": "Different areas and failures: a GenerationMixin import error versus a FineGrainedFP8Config model-load failure." + }, + { + "left": "issue:43638", + "right": "issue:44661", + "accept": false, + "reason": "Different bug classes: a zero-sized tensor/indexing failure under deepspeed zero3 versus add-new-model-like failing inside TOKENIZER_MAPPING_NAMES." + }, + { + "left": "issue:44877", + "right": "issue:45030", + "accept": true, + "reason": "Both report the same strict config-validation regression preventing model config loading; the specific model names differ, but the underlying change is the same." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 14, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:38175", + "issue:39692", + "issue:42491", + "issue:43317", + "issue:43596", + "issue:43644", + "issue:43708", + "issue:43720", + "issue:44075", + "issue:44451", + "issue:44488", + "issue:44534", + "issue:44661", + "issue:44756", + "issue:44960", + "issue:44977", + "issue:45313", + "issue:45357" + ], + "soft_pairs": [ + "issue:43596|issue:44661", + "issue:43317|issue:44756", + "issue:43644|issue:44534", + "issue:38175|issue:39692", + "issue:45313|issue:45357", + "issue:44960|issue:44977", + "issue:44451|issue:44488", + "issue:43708|issue:44075", + "issue:42491|issue:43720" + ], + "prepared_review_unit_hash": "8021c8db19b0c4d0f9358d4cc7ad563c29e3dc9bc13454a27b53ac6cb3ee1d2a", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13002, + "estimated_input_tokens": 3251, + "estimated_eval_tokens": 6758 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "One clear duplicate pair is the Transformers v5 non-persistent buffer regression; the other soft pairs are only loosely related by subsystem or model family and should stay separate.", + "confidence": 0.88, + "canonical_issue_reason": "issue:43644 is the better canonical issue for the duplicate 
pair: it is older and more specific about the regression ('v5.0.0 fills non-persistent buffers with junk'), while issue:44534 is a later rewording of the same bug.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43644 is the strongest issue candidate overall because it describes a concrete, reproducible regression and has a clear duplicate counterpart, making it the best anchor for triage.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43596", + "right": "issue:44661", + "accept": false, + "reason": "Different bugs: one is a DeepSpeed ZeRO-3/BertModel indexing failure, the other is an add-new-model-like tokenizer mapping failure." + }, + { + "left": "issue:43317", + "right": "issue:44756", + "accept": false, + "reason": "Both mention loading/offload pain, but the concrete problems differ: dequantized model gpu+cpu offload failure vs Strix Halo mmap OOM avoidance." + }, + { + "left": "issue:43644", + "right": "issue:44534", + "accept": true, + "reason": "Same underlying regression in Transformers v5 about non-persistent buffers being filled with junk; titles are near-paraphrases of the same bug." + }, + { + "left": "issue:38175", + "right": "issue:39692", + "accept": false, + "reason": "SigLIP2 model probability issue is not the same as a documentation-example mismatch and quantization failure; different concrete failures." + }, + { + "left": "issue:45313", + "right": "issue:45357", + "accept": false, + "reason": "Different code paths and symptoms: ZeRO-3 weight loading for language_model vs save_pretrained writing incorrect visual encoder keys." + }, + { + "left": "issue:44960", + "right": "issue:44977", + "accept": false, + "reason": "Different models and failures; no evidence they share the same concrete bug or change." 
+ }, + { + "left": "issue:44451", + "right": "issue:44488", + "accept": false, + "reason": "Both are BERT load failures, but they concern different models and there is no clear evidence of the same underlying defect." + }, + { + "left": "issue:43708", + "right": "issue:44075", + "accept": false, + "reason": "Unrelated training-resume step-count bug vs optimizer SGD arguments not being applied." + }, + { + "left": "issue:42491", + "right": "issue:43720", + "accept": false, + "reason": "Different regressions in different areas: Qwen3 MoE LoRA compatibility vs BitNet packed-weight loading." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 15, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:39692", + "issue:41720", + "issue:42491", + "issue:42915", + "issue:43854", + "issue:43856", + "issue:43873", + "issue:43931", + "issue:43994", + "issue:44387", + "issue:44488", + "issue:44661", + "issue:44683", + "issue:44756", + "issue:44991", + "issue:45276", + "issue:45362", + "issue:45406" + ], + "soft_pairs": [ + "issue:43856|issue:43873", + "issue:41720|issue:42915", + "issue:43854|issue:43931", + "issue:44488|issue:44991", + "issue:39692|issue:43994", + "issue:43856|issue:44387", + "issue:45362|issue:45406", + "issue:44661|issue:44683", + "issue:42491|issue:45276", + "issue:42915|issue:44756" + ], + "prepared_review_unit_hash": "9fe0264396228b40520412c4b14d56a288d1f28a4aa25642843bfa00e8167148", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, 
+ "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13311, + "estimated_input_tokens": 3328, + "estimated_eval_tokens": 6912 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the issues span unrelated models, loading paths, quantization/offloading, tokenizer registration, and UI/serve code. None of the soft pairs looks like the same underlying bug, so no duplicate group stands out.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue fits this cluster because the items are about different models and different failure modes, not one shared bug.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a strong global canonical candidate; the cluster is too mixed for one representative issue to cover it well.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43856", + "right": "issue:43873", + "accept": false, + "reason": "Both mention quantization/memory, but one is Qwen3 MoE training memory use and the other is offloading behavior during quantization; different bugs and code paths." + }, + { + "left": "issue:41720", + "right": "issue:42915", + "accept": false, + "reason": "Both involve Qwen3 family failures, but one is auto device mapping cudaErrorAssert and the other is FineGrainedFP8Config loading; not the same concrete issue." + }, + { + "left": "issue:43854", + "right": "issue:43931", + "accept": false, + "reason": "Both are model-loading problems, but they affect different models with different symptoms (unit-test load failure vs weight-shape mismatch)." + }, + { + "left": "issue:44488", + "right": "issue:44991", + "accept": false, + "reason": "Both are tokenizer-load failures, but the affected models differ and there is no clear evidence of one shared underlying tokenizer bug." 
+ }, + { + "left": "issue:39692", + "right": "issue:43994", + "accept": false, + "reason": "Both are SigLIP2-related, but one is a docs example with model/processor mismatch plus quantization failure, while the other reports nonsensical inference results in AutoModel/pipeline usage." + }, + { + "left": "issue:43856", + "right": "issue:44387", + "accept": false, + "reason": "Both discuss memory under quantization, but one is offloading behavior and the other is increased CUDA reserved memory causing OOM; not the same fix." + }, + { + "left": "issue:45362", + "right": "issue:45406", + "accept": false, + "reason": "Different models and different surfaces: Qwen3.5 chat crashes versus Gemma4Processor missing _tokenizer in serve." + }, + { + "left": "issue:44661", + "right": "issue:44683", + "accept": false, + "reason": "Completely unrelated: tokenizer-mapping registration bug versus compiled flex_attention failing on torch >= 2.9." + }, + { + "left": "issue:42491", + "right": "issue:45276", + "accept": false, + "reason": "Different models and unrelated behaviors: Qwen3 MoE LoRA compatibility across versions versus Gemma4 token embedding resize not propagating." + }, + { + "left": "issue:42915", + "right": "issue:44756", + "accept": false, + "reason": "Qwen3Moe FP8 loading failure and Strix Halo mmap OOM are unrelated issues with no shared code path." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 16, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36010", + "issue:39404", + "issue:42491", + "issue:43257", + "issue:43404", + "issue:43454", + "issue:43644", + "issue:43645", + "issue:43854", + "issue:43856", + "issue:43873", + "issue:43931", + "issue:44164", + "issue:44291", + "issue:44387", + "issue:45276", + "issue:45335" + ], + "soft_pairs": [ + "issue:43257|issue:43854", + "issue:43644|issue:43645", + "issue:42491|issue:43931", + "issue:45276|issue:45335", + "issue:42491|issue:43856", + "issue:43404|issue:43454", + "issue:36010|issue:39404", + "issue:44164|issue:44291", + "issue:43873|issue:44387" + ], + "prepared_review_unit_hash": "c3024abf2eaf56089aeb1469a11b0a75d084357997f67f4096630ea22ada8e0f", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12588, + "estimated_input_tokens": 3147, + "estimated_eval_tokens": 6550 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is mostly a bag of unrelated Transformers issues. 
None of the soft pairs look like clear duplicates with the same concrete bug, though the Gemma/t5Gemma resize-token-embeddings pair is the closest match.", + "confidence": 0.67, + "canonical_issue_reason": "No single canonical issue cleanly covers the whole set; the items span several unrelated bugs. If forced to pick the closest anchor, issue #45276 is the strongest representative of the most similar pair.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #45276 is the best global issue candidate because it sits in the strongest near-duplicate pair and has the broadest wording of the resize_token_embeddings regression.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43257", + "right": "issue:43854", + "accept": false, + "reason": "Both are Qwen/GLM model-loading problems, but they concern different models and different failure modes; not the same underlying bug." + }, + { + "left": "issue:43644", + "right": "issue:43645", + "accept": false, + "reason": "One is about non-persistent buffers getting junk values, the other about custom model initialization in Jupyter; related release regressions but distinct code paths." + }, + { + "left": "issue:42491", + "right": "issue:43931", + "accept": false, + "reason": "Different symptoms and targets: a LoRA compatibility break on hf5.x vs a weight-shape mismatch for a specific Qwen3-VL model." + }, + { + "left": "issue:45276", + "right": "issue:45335", + "accept": false, + "reason": "Both mention resize_token_embeddings, but they target different model architectures and different missing updates; not clearly one duplicate bug." + }, + { + "left": "issue:42491", + "right": "issue:43856", + "accept": false, + "reason": "Both involve Qwen3 MoE, but one is a cross-version LoRA usability issue and the other is a training memory-efficiency issue." 
+ }, + { + "left": "issue:43404", + "right": "issue:43454", + "accept": false, + "reason": "Both are about lm_head tying in multimodal models, but they are different model families and likely separate implementation fixes." + }, + { + "left": "issue:36010", + "right": "issue:39404", + "accept": false, + "reason": "An import path break for GenerationMixin is unrelated to Whisper pipeline return_language behavior." + }, + { + "left": "issue:44164", + "right": "issue:44291", + "accept": false, + "reason": "Both touch save/load initialization, but extra_state handling and unexpected _is_hf_initialized argument are different failures." + }, + { + "left": "issue:43873", + "right": "issue:44387", + "accept": false, + "reason": "Both concern quantization memory/offloading, but one is offloading malfunction and the other is increased reserved CUDA memory/OOM; different concrete bugs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 17, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:39692", + "issue:41720", + "issue:42947", + "issue:43381", + "issue:43425", + "issue:43854", + "issue:43957", + "issue:44155", + "issue:44291", + "issue:44292", + "issue:44387", + "issue:44402", + "issue:44488", + "issue:44756", + "issue:44912", + "issue:44960", + "issue:45216" + ], + "soft_pairs": [ + "issue:44402|issue:44488", + "issue:41720|issue:44155", + "issue:44387|issue:44756", + "issue:39692|issue:44960", + "issue:43425|issue:43957", + "issue:42947|issue:43381", + "issue:43854|issue:45216", + "issue:44292|issue:44912", + "issue:44291|issue:44756" + ], + "prepared_review_unit_hash": 
"7ece9605aa9003cc6f75ca36360af41156f01681905054d055e19675dc27bbbc", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12485, + "estimated_input_tokens": 3122, + "estimated_eval_tokens": 6500 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a mix of unrelated issues across loading, quantization, checkpointing, and model-specific regressions. None of the soft pairs look like the same underlying bug/change, so I would not merge any of them.", + "confidence": 0.96, + "canonical_issue_reason": "No single canonical issue: the items span distinct bugs and regressions, so there is no true duplicate anchor for the cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "If one standalone report had to be kept as the representative, issue 44387 is the strongest candidate because it is concrete and well-scoped; however, it does not subsume the rest of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44402", + "right": "issue:44488", + "accept": false, + "reason": "Both concern model loading failures, but one is specifically about tokenizer vocab inconsistencies while the other is a separate model-load report; the root cause is not clearly the same." + }, + { + "left": "issue:41720", + "right": "issue:44155", + "accept": false, + "reason": "Completely different problems: Qwen3 auto device mapping CUDA assert vs AudioFlamingo3 batched inference token/embedding leakage." 
+ }, + { + "left": "issue:44387", + "right": "issue:44756", + "accept": false, + "reason": "Int4 quantization OOM is unrelated to disabling mmap on Strix Halo; different failure modes and different triggers." + }, + { + "left": "issue:39692", + "right": "issue:44960", + "accept": false, + "reason": "SigLIP2 docs/model-processor mismatch and quantization failure is unrelated to a GLM5 issue." + }, + { + "left": "issue:43425", + "right": "issue:43957", + "accept": false, + "reason": "Both mention loading/compatibility, but one is a torch version incompatibility report and the other is a meta-device loading regression for specific models; not the same bug." + }, + { + "left": "issue:42947", + "right": "issue:43381", + "accept": false, + "reason": "Both involve gradient checkpointing, but one is about PEFT LoRA not enabling it effectively and the other is about checkpointing being invalid in eval mode; separate issues." + }, + { + "left": "issue:43854", + "right": "issue:45216", + "accept": false, + "reason": "Different models and different regressions: GLM-4.7-Flash unit-test loading vs Qwen3.5 save_pretrained checkpoint correctness." + }, + { + "left": "issue:44292", + "right": "issue:44912", + "accept": false, + "reason": "Both are quantization-related model-loading reports, but they affect different models and different quantization paths (NVFP4 vs MXFP4 fallback)." + }, + { + "left": "issue:44291", + "right": "issue:44756", + "accept": false, + "reason": "init_empty_weights argument mismatch is unrelated to mmap-induced OOM on Strix Halo; different code paths and symptoms." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 18, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43381", + "issue:43645", + "issue:43646", + "issue:43708", + "issue:43957", + "issue:44164", + "issue:44402", + "issue:44451", + "issue:44479", + "issue:44743", + "issue:44756", + "issue:44877", + "issue:44928", + "issue:44960", + "issue:45276", + "issue:45292", + "issue:45335" + ], + "soft_pairs": [ + "issue:45276|issue:45292", + "issue:44743|issue:44960", + "issue:43957|issue:44756", + "issue:43381|issue:44928", + "issue:44164|issue:44756", + "issue:43646|issue:43708", + "issue:44479|issue:44877", + "issue:45292|issue:45335", + "issue:43645|issue:44164", + "issue:44402|issue:44451" + ], + "prepared_review_unit_hash": "27f4dec53feec6e06ee90a3bb34d9afaef71d7a0e627c212fe91e110cec1ea4d", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12814, + "estimated_input_tokens": 3204, + "estimated_eval_tokens": 6664 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The only strong duplicate family here is the resize_token_embeddings/output_embeddings regression around Gemma/T5Gemma. 
The other soft pairs look like unrelated bugs in different subsystems and should stay separate.", + "confidence": 0.79, + "canonical_issue_reason": "issue 45292 is the broadest clean statement of the resize_token_embeddings/output_embeddings bug and best represents the related Gemma/T5Gemma reports.", + "canonical_pr_reason": null, + "best_issue_reason": "issue 45292 is the best overall representative because it captures the shared resizing bug without being too model-specific, while still covering the related variants.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45276", + "right": "issue:45292", + "accept": true, + "reason": "Same underlying resize_token_embeddings failure: both report output_embeddings not being updated, with 45276 just adding a Gemma4-specific manifestation." + }, + { + "left": "issue:44743", + "right": "issue:44960", + "accept": false, + "reason": "Different bugs: one is recurrent state reset with cache in modular_qwen3_5.py, the other is a GLM5 issue with no clear code-path overlap." + }, + { + "left": "issue:43957", + "right": "issue:44756", + "accept": false, + "reason": "Different problem domains: meta-device model loading failure versus mmap/OOM behavior on Strix Halo." + }, + { + "left": "issue:43381", + "right": "issue:44928", + "accept": false, + "reason": "Unrelated failures: gradient checkpointing in eval mode versus RLHF gradient explosion from 3D position_ids/SDPA fallback." + }, + { + "left": "issue:44164", + "right": "issue:44756", + "accept": false, + "reason": "save/from_pretrained extra_state handling is unrelated to disabling mmap to avoid OOM." + }, + { + "left": "issue:43646", + "right": "issue:43708", + "accept": false, + "reason": "Custom model initialization in notebooks is unrelated to Trainer resume_from_checkpoint max_steps calculation." 
+ }, + { + "left": "issue:44479", + "right": "issue:44877", + "accept": false, + "reason": "Different regressions in different areas: Qwen VL video input versus strict config loading for granite_speech." + }, + { + "left": "issue:45292", + "right": "issue:45335", + "accept": true, + "reason": "Same resize_token_embeddings bug family; 45335 is a model-specific manifestation where decoder.embed_tokens is not updated." + }, + { + "left": "issue:43645", + "right": "issue:44164", + "accept": false, + "reason": "Notebook custom-model initialization is not the same bug as extra_state save/load handling." + }, + { + "left": "issue:44402", + "right": "issue:44451", + "accept": false, + "reason": "Too little evidence they share the same concrete bug; both mention loading/tokenizer problems, but the affected models and failure modes are different." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 19, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:39404", + "issue:43257", + "issue:43425", + "issue:43611", + "issue:43645", + "issue:43716", + "issue:43828", + "issue:43906", + "issue:43931", + "issue:43957", + "issue:44265", + "issue:44530", + "issue:44589", + "issue:44756", + "issue:44863", + "issue:44898", + "issue:44912", + "issue:45084" + ], + "soft_pairs": [ + "issue:43425|issue:43828", + "issue:43257|issue:44863", + "issue:43828|issue:43957", + "issue:44589|issue:44756", + "issue:43906|issue:44530", + "issue:43425|issue:43716", + "issue:43931|issue:44863", + "issue:39404|issue:44912", + "issue:43611|issue:43645", + "issue:44530|issue:45084", + "issue:44265|issue:44898" + ], + 
"prepared_review_unit_hash": "ba265471f02343c7ba5146bb40111b0550faf23faf4cc672b01c06977ececb9f", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13664, + "estimated_input_tokens": 3416, + "estimated_eval_tokens": 7088 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the issues span unrelated model-loading, dtype/quantization, cache, and compilation bugs. None of the soft pairs look like true duplicates, so all soft edges are rejected.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43425", + "right": "issue:43828", + "accept": false, + "reason": "Different bugs: one is a broad Torch 2.10 compatibility report, the other is an autocast dtype-mismatch on Phi-tiny-MoE." + }, + { + "left": "issue:43257", + "right": "issue:44863", + "accept": false, + "reason": "Different model families and failure modes: Qwen3 MoE conversion/loading with accelerate+deepspeed vs NemotronH checkpoint loading." + }, + { + "left": "issue:43828", + "right": "issue:43957", + "accept": false, + "reason": "Both mention dtype/loading breakage, but the concrete paths differ: autocast on Phi-tiny-MoE vs meta-device model loading regressions." + }, + { + "left": "issue:44589", + "right": "issue:44756", + "accept": false, + "reason": "Unrelated symptoms: missing Float8 storage vs mmap/OOM on Strix Halo." 
+ }, + { + "left": "issue:43906", + "right": "issue:44530", + "accept": false, + "reason": "The first is an isolated repro of another issue; the second is a PagedAttentionCache linear_attention group-type crash on Qwen3.5. Different code paths and models." + }, + { + "left": "issue:43425", + "right": "issue:43716", + "accept": false, + "reason": "General Torch version incompatibility is not the same as a Mistral-3 image-preprocessor/model dtype mismatch." + }, + { + "left": "issue:43931", + "right": "issue:44863", + "accept": false, + "reason": "Different loading failures for different architectures: Qwen3-VL weight-shape mismatch vs NemotronH checkpoint incompatibility." + }, + { + "left": "issue:39404", + "right": "issue:44912", + "accept": false, + "reason": "Whisper pipeline return_language regression is unrelated to MXFP4 quantization fallback on git-oss-20b." + }, + { + "left": "issue:43611", + "right": "issue:43645", + "accept": false, + "reason": "Both are Transformers 5.0 regressions, but one concerns base_model_prefix loading and the other notebook-based custom model initialization; not the same concrete bug." + }, + { + "left": "issue:44530", + "right": "issue:45084", + "accept": false, + "reason": "PagedAttentionCache group-type crash and a template-node compilation error are different failures in different subsystems." + }, + { + "left": "issue:44265", + "right": "issue:44898", + "accept": false, + "reason": "torch.export.export failures with torch_compilable_check are unrelated to Perceiver interpolation failures at non-default resolution." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 20, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:40990", + "issue:42947", + "issue:43299", + "issue:43425", + "issue:43596", + "issue:43856", + "issue:43901", + "issue:43931", + "issue:43994", + "issue:44079", + "issue:44479", + "issue:44509", + "issue:44589", + "issue:44683", + "issue:44756", + "issue:44805", + "issue:44918", + "issue:45084" + ], + "soft_pairs": [ + "issue:43901|issue:44509", + "issue:43299|issue:43931", + "issue:44479|issue:44805", + "issue:44589|issue:45084", + "issue:43596|issue:44683", + "issue:43425|issue:44918", + "issue:43856|issue:44756", + "issue:40990|issue:42947", + "issue:43994|issue:44079" + ], + "prepared_review_unit_hash": "0a4a74facb6020f8a5f16d05512d5acc2a0db91a7e83abbb6292bc152a5ed388", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13037, + "estimated_input_tokens": 3260, + "estimated_eval_tokens": 6776 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster does not look like a duplicate set: the items cover unrelated bugs and docs regressions across loading, training, memory, storage, and compiler compatibility. 
None of the soft pairs appear to be the same underlying issue/change.", + "confidence": 0.88, + "canonical_issue_reason": "No clear canonical duplicate exists because the issues are heterogeneous. If a representative issue must be chosen, #43931 is the most concrete and reproducible model-loading report.", + "canonical_pr_reason": null, + "best_issue_reason": "#43931 is the strongest representative: it has a specific model name, a clear loading failure, and a well-scoped error signature, making it easier to triage than the broader or more ambiguous reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43901", + "right": "issue:44509", + "accept": false, + "reason": "Both are docs-related, but they refer to different pipeline API/docs regressions and do not describe the same change." + }, + { + "left": "issue:43299", + "right": "issue:43931", + "accept": false, + "reason": "Both involve Qwen3-VL loading, but one is a MoE-loading breakage and the other is a weight-shape mismatch on a different model variant; not enough to treat as the same bug." + }, + { + "left": "issue:44479", + "right": "issue:44805", + "accept": false, + "reason": "Both mention shape-related failures in multimodal inputs, but one is a video-input regression across several models and the other is a specific mask/tensor indexing mismatch." + }, + { + "left": "issue:44589", + "right": "issue:45084", + "accept": false, + "reason": "These are unrelated failure modes: missing Float8 storage vs compilation of non-template nodes." + }, + { + "left": "issue:43596", + "right": "issue:44683", + "accept": false, + "reason": "Different subsystems and error classes: ZeRO-3/Bert indexing vs flex_attention compilation on newer Torch." + }, + { + "left": "issue:43425", + "right": "issue:44918", + "accept": false, + "reason": "Torch version incompatibility is not the same bug as TRL SFT input-embedding unpacking failure." 
+ }, + { + "left": "issue:43856", + "right": "issue:44756", + "accept": false, + "reason": "Both mention memory/OOM, but they are different causes in different areas: Qwen3 MoE training memory inefficiency vs Strix Halo mmap OOM." + }, + { + "left": "issue:40990", + "right": "issue:42947", + "accept": false, + "reason": "Unrelated topics: perplexity on a model/eval setup vs gradient checkpointing ineffectiveness with PEFT LoRA." + }, + { + "left": "issue:43994", + "right": "issue:44079", + "accept": false, + "reason": "A SigLIP2 inference problem and a ModelOutput key-assignment bug are not the same concrete issue." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 21, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:32090", + "issue:36032", + "issue:39404", + "issue:40990", + "issue:43421", + "issue:43425", + "issue:43452", + "issue:43582", + "issue:43632", + "issue:43883", + "issue:43957", + "issue:44164", + "issue:44479", + "issue:44488", + "issue:44671", + "issue:44991", + "issue:45081", + "issue:45161" + ], + "soft_pairs": [ + "issue:44671|issue:44991", + "issue:39404|issue:44671", + "issue:36032|issue:43452", + "issue:44991|issue:45081", + "issue:43582|issue:43632", + "issue:32090|issue:39404", + "issue:44488|issue:45081", + "issue:43425|issue:45161", + "issue:43957|issue:44164", + "issue:43883|issue:44488", + "issue:40990|issue:43421", + "issue:44164|issue:44479" + ], + "prepared_review_unit_hash": "a9c5a5dbf39934a3c132c6cb044d5e0edd9d5df12813ea7d2aed8d48e185a5fa", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + 
"serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 12, + "serialized_chars": 14087, + "estimated_input_tokens": 3522, + "estimated_eval_tokens": 7300 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are broadly related to Transformers v5 regressions, mostly around tokenizers/model loading, but the paired issues describe different models, symptoms, and code paths. None of the soft edges look like true duplicates.", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "No single issue cleanly represents the cluster because it is heterogeneous; if forced, issue 44991 is the closest broad tokenizer-loading regression, but it is still too specific to serve as a real canonical.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44671", + "right": "issue:44991", + "accept": false, + "reason": "Both mention v5 model behavior, but one is CamemBERT MLM prediction correctness and the other is tokenizer loading for a specific model; different bugs." + }, + { + "left": "issue:39404", + "right": "issue:44671", + "accept": false, + "reason": "Whisper pipeline return_language regression is unrelated to CamemBERT masked-LM prediction errors." + }, + { + "left": "issue:36032", + "right": "issue:43452", + "accept": false, + "reason": "T5 tokenizer add_special_tokens conflict and gguf_file/from_pretrained breakage are different tokenizer-loading failures with different triggers." + }, + { + "left": "issue:44991", + "right": "issue:45081", + "accept": false, + "reason": "est-roberta tokenizer loading failure is not the same as the Mistral regex patch crash; different models and failure points." 
+ }, + { + "left": "issue:43582", + "right": "issue:43632", + "accept": false, + "reason": "Apple Silicon caching_allocator_warmup TypeError is a runtime/device-specific error, not the same as the _is_hf_initialized v5 flag regression." + }, + { + "left": "issue:32090", + "right": "issue:39404", + "accept": false, + "reason": "Trainer GPU broadcast NoneType error is unrelated to Whisper pipeline return_language behavior." + }, + { + "left": "issue:44488", + "right": "issue:45081", + "accept": false, + "reason": "Unable to load cjvt/sleng-bert is a tokenizer/model loading issue, but not the same as the Mistral backend_tokenizer AttributeError." + }, + { + "left": "issue:43425", + "right": "issue:45161", + "accept": false, + "reason": "Torch 2.10 incompatibility is an environment/version issue, not the same as GPT-OSS MoE tensor-parallel failure." + }, + { + "left": "issue:43957", + "right": "issue:44164", + "accept": false, + "reason": "meta-device loading breakage and save/from_pretrained extra_state handling involve different mechanisms and errors." + }, + { + "left": "issue:43883", + "right": "issue:44488", + "accept": false, + "reason": "Molmo missing all_tied_weights_keys and sleng-bert loading failure are distinct model-specific issues." + }, + { + "left": "issue:40990", + "right": "issue:43421", + "accept": false, + "reason": "High perplexity on gpt-oss-20b and runtime post-processor update behavior are unrelated problems." + }, + { + "left": "issue:44164", + "right": "issue:44479", + "accept": false, + "reason": "extra_state save/load handling and Qwen video input regression affect different subsystems and code paths." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 22, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:34567", + "issue:40444", + "issue:42947", + "issue:43541", + "issue:43611", + "issue:43643", + "issue:43644", + "issue:43646", + "issue:43716", + "issue:43883", + "issue:44206", + "issue:44220", + "issue:44488", + "issue:44530", + "issue:44756", + "issue:44987", + "issue:44991", + "issue:45084" + ], + "soft_pairs": [ + "issue:42947|issue:44756", + "issue:43716|issue:45084", + "issue:44488|issue:44987", + "issue:43611|issue:43644", + "issue:44206|issue:44220", + "issue:43883|issue:44991", + "issue:34567|issue:40444", + "issue:43541|issue:45084", + "issue:43644|issue:43646", + "issue:43643|issue:44530" + ], + "prepared_review_unit_hash": "8936f938395211e2636331c041a32e66c6f7e281d388e87a5dda36941f001b45", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13314, + "estimated_input_tokens": 3329, + "estimated_eval_tokens": 6914 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is mostly heterogeneous. Several pairs are only loosely related by subsystem or release version, but none clearly appear to be the same concrete bug/change. 
I rejected all soft edges; the closest one is the audio feature-extraction pair, but the evidence is still too weak.", + "confidence": 0.79, + "canonical_issue_reason": "Issue 43611 is the clearest, broadest standalone regression report in the set, centered on model loading in Transformers 5.0.0.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43611 is the most representative and well-scoped issue here if one must pick an anchor, though the overall set is not a true duplicate cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42947", + "right": "issue:44756", + "accept": false, + "reason": "Different bugs: one is about `num_input_tokens_seen`/training accounting, the other about mmap/OOM on Strix Halo." + }, + { + "left": "issue:43716", + "right": "issue:45084", + "accept": false, + "reason": "Different failure modes in different code paths: image preprocessor dtype mismatch vs template compilation error." + }, + { + "left": "issue:44488", + "right": "issue:44987", + "accept": false, + "reason": "Both are loading failures, but for different models and likely different causes; no shared concrete bug is evident." + }, + { + "left": "issue:43611", + "right": "issue:43644", + "accept": false, + "reason": "Both are Transformers 5.0.0 regressions, but one is about `base_model_prefix` loading and the other about non-persistent buffers being initialized incorrectly." + }, + { + "left": "issue:44206", + "right": "issue:44220", + "accept": false, + "reason": "Both involve audio feature extraction, but one is specifically about an unsupported `center` argument in `LasrFeatureExtractor`; the other is a broader `_torch_extract_fbank_features()` issue. Not enough to treat as the same bug." + }, + { + "left": "issue:43883", + "right": "issue:44991", + "accept": false, + "reason": "Unrelated model/tokenizer loading issues affecting different models and components." 
+ }, + { + "left": "issue:34567", + "right": "issue:40444", + "accept": false, + "reason": "Completely different areas: trainer token accounting vs multimodal IterableDataset failure." + }, + { + "left": "issue:43541", + "right": "issue:45084", + "accept": false, + "reason": "Different problems: torch dynamo tracing on Mixtral grouped_mm vs template compilation error." + }, + { + "left": "issue:43644", + "right": "issue:43646", + "accept": false, + "reason": "Related to Transformers 5.0.0 initialization/loading, but the symptoms and likely root causes differ." + }, + { + "left": "issue:43643", + "right": "issue:44530", + "accept": false, + "reason": "Different subsystems: AutoConfig fields with `trust_remote_code` vs PagedAttentionCache linear_attention failure." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 23, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36032", + "issue:39404", + "issue:42915", + "issue:43262", + "issue:43317", + "issue:43377", + "issue:43404", + "issue:43452", + "issue:43482", + "issue:43645", + "issue:43825", + "issue:44451", + "issue:44466", + "issue:44530", + "issue:44849", + "issue:44987", + "issue:44991", + "issue:45325" + ], + "soft_pairs": [ + "issue:44451|issue:44987", + "issue:43317|issue:43482", + "issue:36032|issue:42915", + "issue:43452|issue:43482", + "issue:43262|issue:43377", + "issue:39404|issue:43825", + "issue:42915|issue:44530", + "issue:44849|issue:45325", + "issue:36032|issue:43645", + "issue:44987|issue:44991", + "issue:43404|issue:44466" + ], + "prepared_review_unit_hash": 
"e6f31c14914534f0097182d5ed8395b6baa008044507d0a58515067da0e5e51e", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13897, + "estimated_input_tokens": 3475, + "estimated_eval_tokens": 7206 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a grab bag of unrelated Transformers regressions across tokenizers, Whisper/pipeline behavior, audio processors, GGUF loading, tied weights, Qwen variants, and custom-model notebook init. None of the soft pairs look like the same concrete bug or change, so all are rejected; there are no PRs here.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44451", + "right": "issue:44987", + "accept": false, + "reason": "Both are model-loading failures in v5, but they affect different checkpoints and likely different failure points; not enough evidence of the same underlying bug." + }, + { + "left": "issue:43317", + "right": "issue:43482", + "accept": false, + "reason": "One is a dequantized model offload/device_map issue, the other is a GGUF loading failure for Qwen2.5; different code paths and symptoms." + }, + { + "left": "issue:36032", + "right": "issue:42915", + "accept": false, + "reason": "T5 tokenizer import/attribute conflict is unrelated to Qwen3Moe + FineGrainedFP8Config loading failure." 
+ }, + { + "left": "issue:43452", + "right": "issue:43482", + "accept": false, + "reason": "Both mention GGUF, but one is a generic `gguf_file` API break in auto loaders while the other is a Qwen2.5-specific v5 loading failure; not the same concrete bug." + }, + { + "left": "issue:43262", + "right": "issue:43377", + "accept": false, + "reason": "Audio chat-template sampling-rate defaulting and MIMI batch-vs-single padding-mask mismatch are different bugs in different components." + }, + { + "left": "issue:39404", + "right": "issue:43825", + "accept": false, + "reason": "Whisper `return_language` regression and pipeline translation-task error messaging are distinct pipeline issues." + }, + { + "left": "issue:42915", + "right": "issue:44530", + "accept": false, + "reason": "Different models and different failures: FP8 config loading vs PagedAttentionCache linear_attention crash." + }, + { + "left": "issue:44849", + "right": "issue:45325", + "accept": false, + "reason": "Qwen3.5 hidden-state bug and Qwen2.5-VL rope-index scaling bug are unrelated model-specific defects." + }, + { + "left": "issue:36032", + "right": "issue:43645", + "accept": false, + "reason": "Tokenizer method conflict in T5 is unrelated to custom model definition/initialization breakage in Jupyter notebooks." + }, + { + "left": "issue:44987", + "right": "issue:44991", + "accept": false, + "reason": "Both are v5 loading regressions, but they involve different models and likely different tokenizer/model-loading failures; too broad to merge." + }, + { + "left": "issue:43404", + "right": "issue:44466", + "accept": false, + "reason": "One is a missing `lm_head` tie in Mistral3ForConditionalGeneration, the other is device-dependent serialization of tied weights; related theme, but not the same bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 24, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41553", + "issue:43322", + "issue:43377", + "issue:43540", + "issue:43582", + "issue:43645", + "issue:43646", + "issue:43819", + "issue:43994", + "issue:44220", + "issue:44403", + "issue:44451", + "issue:44534", + "issue:44610", + "issue:44683", + "issue:45081", + "issue:45216", + "issue:45357" + ], + "soft_pairs": [ + "issue:43646|issue:44534", + "issue:44403|issue:45081", + "issue:43994|issue:44610", + "issue:43645|issue:44534", + "issue:43377|issue:43819", + "issue:44451|issue:45081", + "issue:43322|issue:43540", + "issue:44220|issue:44683", + "issue:45216|issue:45357", + "issue:41553|issue:43582" + ], + "prepared_review_unit_hash": "2e87fbc4e1bda6df2a75db829113f4b637913be5178851cf5bb7596aa990c863", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13333, + "estimated_input_tokens": 3334, + "estimated_eval_tokens": 6924 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most pairs are clearly unrelated despite superficial theme overlap. 
The only likely duplicate-style match is the Qwen3.5 `save_pretrained` regression pair, where the second issue looks like a continuation of the same incorrect visual-encoder-key saving bug.", + "confidence": 0.89, + "canonical_issue_reason": "issue:45216 is the better canonical issue: it is the earlier and broader report of the Qwen3.5 `save_pretrained` regression, and issue:45357 reads like a follow-up continuation of the same serialization bug.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45216 is the strongest issue to anchor the cluster because it describes the core regression more generally and has follow-up references, while issue:45357 is narrower and version-specific.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43646", + "right": "issue:44534", + "accept": false, + "reason": "Different failure modes: custom model init in notebooks vs non-persistent buffers being corrupted. Same version surface, but not the same underlying bug." + }, + { + "left": "issue:44403", + "right": "issue:45081", + "accept": false, + "reason": "One is a loading noise/logging complaint, the other is an AttributeError crash in Mistral regex patching. Not the same code-path problem." + }, + { + "left": "issue:43994", + "right": "issue:44610", + "accept": false, + "reason": "Siglip2 inference correctness and OmDet-Turbo image sizing are unrelated model/processor bugs." + }, + { + "left": "issue:43645", + "right": "issue:44534", + "accept": false, + "reason": "Notebook custom-model initialization regression is distinct from the buffer initialization/junk data issue." + }, + { + "left": "issue:43377", + "right": "issue:43819", + "accept": false, + "reason": "MIMI padding-mask batching mismatch and DAC `from_latents`/STE mismatch concern different models and different invariants." 
+ }, + { + "left": "issue:44451", + "right": "issue:45081", + "accept": false, + "reason": "ScandiBERT loading failure and Mistral tokenizer regex patch crash are separate tokenizer/model issues." + }, + { + "left": "issue:43322", + "right": "issue:43540", + "accept": false, + "reason": "Llava-Next segfault and Qwen3OmniMoe video ValueError are different multimodal pipelines and different symptom classes." + }, + { + "left": "issue:44220", + "right": "issue:44683", + "accept": false, + "reason": "Audio feature extraction failure and compiled flex_attention torch-compatibility failure are unrelated subsystems." + }, + { + "left": "issue:45216", + "right": "issue:45357", + "accept": true, + "reason": "Both report the same Qwen3.5 `save_pretrained` serialization bug, specifically incorrect visual encoder keys; the second looks like a continuation of the same regression." + }, + { + "left": "issue:41553", + "right": "issue:43582", + "accept": false, + "reason": "AutoTokenizer Voxtral error messaging and Apple Silicon allocator warmup TypeError are unrelated issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 25, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:40990", + "issue:42915", + "issue:43296", + "issue:43404", + "issue:43531", + "issue:43632", + "issue:44062", + "issue:44265", + "issue:44291", + "issue:44479", + "issue:44610", + "issue:44756", + "issue:44811", + "issue:44987", + "issue:45081", + "issue:45084", + "issue:45127", + "issue:45325" + ], + "soft_pairs": [ + "issue:42915|issue:45084", + "issue:44265|issue:44610", + "issue:43632|issue:44756", + "issue:44987|issue:45081", + "issue:43296|issue:43531", + "issue:43404|issue:45127", + "issue:44062|issue:44987", + "issue:44479|issue:45325", + "issue:40990|issue:44811", + "issue:42915|issue:44291" + ], + "prepared_review_unit_hash": "24b857bc7f851cd6217d09835e10eddd7fc8153660660f264aa36681bb3ae3fe", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13579, + "estimated_input_tokens": 3395, + "estimated_eval_tokens": 7046 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly distinct regression reports across different models and code paths; no soft pair looks like a true duplicate, so all candidate merges are rejected.", + "confidence": 0.95, + "canonical_issue_reason": "Issue 44291 is the closest thing to a cluster 
anchor because it describes a broad v5 loading/init regression around `_is_hf_initialized`, which is the most general theme among the loading-related reports.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44291 is the best representative issue: broad, root-cause-like, and centered on a core transformers v5 loading failure rather than a model-specific symptom.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42915", + "right": "issue:45084", + "accept": false, + "reason": "Different failures and subsystems: Qwen3Moe FP8 config vs a template compilation TypeError." + }, + { + "left": "issue:44265", + "right": "issue:44610", + "accept": false, + "reason": "Unrelated bugs: torch.export with torch_compilable_check vs OmDet-Turbo image size mismatch." + }, + { + "left": "issue:43632", + "right": "issue:44756", + "accept": false, + "reason": "Different code paths and symptoms: `_is_hf_initialized` regression vs mmap/OOM behavior on Strix Halo." + }, + { + "left": "issue:44987", + "right": "issue:45081", + "accept": false, + "reason": "Both are loading-related, but one is a model load failure and the other is a Mistral tokenizer regex crash; not the same bug." + }, + { + "left": "issue:43296", + "right": "issue:43531", + "accept": false, + "reason": "Different models and defects: PaddleOCR-VL load failure vs Qwen3-MoE sliding window behavior." + }, + { + "left": "issue:43404", + "right": "issue:45127", + "accept": false, + "reason": "Both mention tied weights, but one is an untied lm_head bug and the other is LoRA merge collapse with extended vocab; not the same concrete issue." + }, + { + "left": "issue:44062", + "right": "issue:44987", + "accept": false, + "reason": "Generic loading regressions, but the concrete errors and affected code paths are different." 
+ }, + { + "left": "issue:44479", + "right": "issue:45325", + "accept": false, + "reason": "Same broad VL area, but one is a video-input regression and the other is rope-index scaling for still images." + }, + { + "left": "issue:40990", + "right": "issue:44811", + "accept": false, + "reason": "Completely unrelated: perplexity evaluation on gpt-oss-20b vs Whisper batch_decode special-token handling." + }, + { + "left": "issue:42915", + "right": "issue:44291", + "accept": false, + "reason": "Both are model-loading regressions, but the concrete failures differ: FP8 config vs unexpected `_is_hf_initialized` argument." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 26, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36032", + "issue:40444", + "issue:42915", + "issue:43317", + "issue:43531", + "issue:43611", + "issue:43632", + "issue:43742", + "issue:43856", + "issue:44534", + "issue:44589", + "issue:44683", + "issue:44821", + "issue:44877", + "issue:45020", + "issue:45084", + "issue:45325" + ], + "soft_pairs": [ + "issue:43317|issue:43856", + "issue:42915|issue:44589", + "issue:36032|issue:43742", + "issue:43632|issue:44534", + "issue:43611|issue:44534", + "issue:40444|issue:45325", + "issue:43531|issue:44877", + "issue:44821|issue:45084", + "issue:44683|issue:45020" + ], + "prepared_review_unit_hash": "e88a022b2baa47183d59438968480cc63a533008efee82749ba6464a0e156ef8", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + 
"node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12330, + "estimated_input_tokens": 3083, + "estimated_eval_tokens": 6422 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set: it mixes many unrelated Transformers regressions and model-specific bugs (tokenizer loading, Qwen/Qwen2.5-VL issues, v5 loading changes, config/loading edge cases).", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:45020 is the broadest umbrella item in the set and the most plausible representative if one issue must be chosen, but it still does not duplicate the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43317", + "right": "issue:43856", + "accept": false, + "reason": "Both involve Qwen3/MoE and memory/offload, but one is a dequantized model loading failure with device_map offload and the other is training memory usage; different bugs and code paths." + }, + { + "left": "issue:42915", + "right": "issue:44589", + "accept": false, + "reason": "Qwen3MoE + FP8 appears in both, but one is a FineGrainedFP8Config failure and the other is a missing Float8 storage object error; these are distinct failures." + }, + { + "left": "issue:36032", + "right": "issue:43742", + "accept": false, + "reason": "T5 tokenizer special-token conflict and MobileLLM-125M loading key error are unrelated model-loading problems." + }, + { + "left": "issue:43632", + "right": "issue:44534", + "accept": false, + "reason": "Both are Transformers v5 regressions, but one is about `_is_hf_initialized` and the other about non-persistent buffers being filled with junk; not the same bug." 
+ }, + { + "left": "issue:43611", + "right": "issue:44534", + "accept": false, + "reason": "Loading via `base_model_prefix` and non-persistent buffer initialization are different regression surfaces and not plausibly one merged fix." + }, + { + "left": "issue:40444", + "right": "issue:45325", + "accept": false, + "reason": "Both touch Qwen2.5-VL, but one is an IterableDataset/multi-image finetuning failure and the other is a rope index bug for still-image temporal positions; separate issues." + }, + { + "left": "issue:43531", + "right": "issue:44877", + "accept": false, + "reason": "Qwen3-MoE sliding-window behavior and granite_speech strict-config loading are unrelated." + }, + { + "left": "issue:44821", + "right": "issue:45084", + "accept": false, + "reason": "AutoImageProcessor URL loading and a non-template-node compile error are unrelated subsystems and failure modes." + }, + { + "left": "issue:44683", + "right": "issue:45020", + "accept": false, + "reason": "Compiled flex_attention on torch >= 2.9 and remote_code model breakage are different compatibility regressions with different code paths." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 27, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33666", + "issue:35707", + "issue:36032", + "issue:40444", + "issue:41720", + "issue:43377", + "issue:43421", + "issue:43425", + "issue:43792", + "issue:43818", + "issue:44403", + "issue:44568", + "issue:44589", + "issue:44829", + "issue:44991", + "issue:45357", + "issue:45479", + "issue:45491" + ], + "soft_pairs": [ + "issue:43421|issue:44568", + "issue:43818|issue:45357", + "issue:43425|issue:44403", + "issue:43792|issue:44589", + "issue:36032|issue:41720", + "issue:44403|issue:44991", + "issue:44829|issue:45479", + "issue:43377|issue:45491", + "issue:33666|issue:40444", + "issue:35707|issue:40444" + ], + "prepared_review_unit_hash": "a02d8785ed8eeb2c864a56a7047101c6837882a40e030a72a922cccff9a783f6", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13618, + "estimated_input_tokens": 3405, + "estimated_eval_tokens": 7066 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items do not form a duplicate cluster; they span unrelated bugs/features across tokenizers, vision models, training, loading, and runtime compatibility. 
All soft-similarity pairs look like false positives.", + "confidence": 0.94, + "canonical_issue_reason": "No single canonical issue: the issues cover distinct subsystems and failure modes, so none is a clear duplicate anchor for the group.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44403 is the broadest/general loading-related report, but only as a loose representative; it is not a true canonical duplicate target.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43421", + "right": "issue:44568", + "accept": false, + "reason": "Both are tokenizer-related, but one is about runtime post-processor updates and the other is a specific BOS/EOS regression in mdeberta-v3-base; different bugs and fixes." + }, + { + "left": "issue:43818", + "right": "issue:45357", + "accept": false, + "reason": "Different Qwen/VL problems: missing temporal attention and weight sharing vs incorrect visual encoder keys on save_pretrained." + }, + { + "left": "issue:43425", + "right": "issue:44403", + "accept": false, + "reason": "Torch 2.10 incompatibility is an environment/version issue, while 44403 is about noisy loading logs; not the same bug." + }, + { + "left": "issue:43792", + "right": "issue:44589", + "accept": false, + "reason": "Whisper failing to run and missing Float8 storage are unrelated failure modes with different code paths." + }, + { + "left": "issue:36032", + "right": "issue:41720", + "accept": false, + "reason": "T5Tokenizer constructor conflict and Qwen3 auto device-map cudaErrorAssert are unrelated issues." + }, + { + "left": "issue:44403", + "right": "issue:44991", + "accept": false, + "reason": "Both touch loading, but one is benign log noise and the other is a concrete tokenizer loading failure for a specific model." 
+ }, + { + "left": "issue:44829", + "right": "issue:45479", + "accept": false, + "reason": "Both concern degenerate sequence-classification behavior, but the triggers differ: flash_attention_3 vs problem_type/num_labels loss configuration." + }, + { + "left": "issue:43377", + "right": "issue:45491", + "accept": false, + "reason": "Both involve batched/masked sequence behavior, but they affect different models and mechanisms: MIMI padding mask vs Gemma3 sliding-window all-padding windows." + }, + { + "left": "issue:33666", + "right": "issue:40444", + "accept": false, + "reason": "Both are Qwen2-VL fine-tuning reports, but one is multi-GPU training and the other is iterable-dataset/multiple-images failure; not the same bug." + }, + { + "left": "issue:35707", + "right": "issue:40444", + "accept": false, + "reason": "Progressive generation with inputs_embeds/past_key_values is unrelated to Qwen2.5-VL iterable-dataset multi-image failures." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 28, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41720", + "issue:42175", + "issue:43296", + "issue:43482", + "issue:43525", + "issue:43531", + "issue:43632", + "issue:43644", + "issue:43749", + "issue:43881", + "issue:44079", + "issue:44265", + "issue:44291", + "issue:44488", + "issue:44534", + "issue:44589", + "issue:45072", + "issue:45081" + ], + "soft_pairs": [ + "issue:44079|issue:44534", + "issue:44291|issue:44589", + "issue:43482|issue:43525", + "issue:43749|issue:44079", + "issue:43881|issue:44488", + "issue:43644|issue:44079", + "issue:44079|issue:45072", + "issue:43632|issue:45072", + 
"issue:43531|issue:44291", + "issue:42175|issue:43296", + "issue:44265|issue:45072", + "issue:41720|issue:45081" + ], + "prepared_review_unit_hash": "4c1f5f1c3305431cc0d0ec5977a470568ea0cde6db2e1d557525e1ce04be6d15", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 12, + "serialized_chars": 13941, + "estimated_input_tokens": 3486, + "estimated_eval_tokens": 7228 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of unrelated Transformers/vLLM bug reports. The only clear duplicate-looking pair is the non-persistent buffers regression (issues 43644 and 44534), while the provided soft-edge candidates are all different bugs or different model-specific failures and should not be merged.", + "confidence": 0.86, + "canonical_issue_reason": "Issue 43644 is the strongest canonical issue: it is the original, clearer report of the non-persistent buffers regression and has more discussion/inbound attention than the later reworded duplicate 44534.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43644 is the best representative issue for the cluster because it captures the clearest duplicated bug and has the most established thread.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44079", + "right": "issue:44534", + "accept": false, + "reason": "Both are Transformers v5 regressions, but they describe different failures: ModelOutput key assignment vs non-persistent buffer initialization. Different code paths." 
+ }, + { + "left": "issue:44291", + "right": "issue:44589", + "accept": false, + "reason": "Both mention loading/type errors under v5, but one is about the unexpected _is_hf_initialized argument in init_empty_weights and the other about missing Float8 storage support. Not the same bug." + }, + { + "left": "issue:43482", + "right": "issue:43525", + "accept": false, + "reason": "Different model-specific loading problems: Qwen2.5-GGUF with Transformers v5 versus Llama4Config missing pad_token_id. Same area, different defects." + }, + { + "left": "issue:43749", + "right": "issue:44079", + "accept": false, + "reason": "FSDP CPU-efficient loading failure and ModelOutput key assignment failure are unrelated regressions." + }, + { + "left": "issue:43881", + "right": "issue:44488", + "accept": false, + "reason": "Different models and symptoms; these are separate loading failures, not the same underlying bug." + }, + { + "left": "issue:43644", + "right": "issue:44079", + "accept": false, + "reason": "Non-persistent buffer junk initialization is unrelated to ModelOutput key handling." + }, + { + "left": "issue:44079", + "right": "issue:45072", + "accept": false, + "reason": "ModelOutput key assignment and bfloat16 dtype mismatch in inference are different issues." + }, + { + "left": "issue:43632", + "right": "issue:45072", + "accept": false, + "reason": "_is_hf_initialized flag breakage is a different Transformers v5 issue from the dtype mismatch in inference." + }, + { + "left": "issue:43531", + "right": "issue:44291", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior and init_empty_weights/_is_hf_initialized argument handling are unrelated." + }, + { + "left": "issue:42175", + "right": "issue:43296", + "accept": false, + "reason": "Backend-install packaging issue for TensorFlow vs model loading failure for PaddleOCR-VL; different problem domains." 
+ }, + { + "left": "issue:44265", + "right": "issue:45072", + "accept": false, + "reason": "torch.export/torch_compilable_check failure is a different code path from the bfloat16 dtype mismatch bug." + }, + { + "left": "issue:41720", + "right": "issue:45081", + "accept": false, + "reason": "A800 cudaErrorAssert during Qwen3 auto device mapping is unrelated to the Mistral tokenizer backend_tokenizer AttributeError." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 29, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:40444", + "issue:42915", + "issue:43454", + "issue:43482", + "issue:43611", + "issue:43646", + "issue:43792", + "issue:43828", + "issue:43957", + "issue:43994", + "issue:44387", + "issue:44466", + "issue:44617", + "issue:44683", + "issue:44898", + "issue:45325", + "issue:45381" + ], + "soft_pairs": [ + "issue:40444|issue:45381", + "issue:44387|issue:44617", + "issue:43828|issue:44683", + "issue:45325|issue:45381", + "issue:43611|issue:43646", + "issue:43994|issue:44898", + "issue:42915|issue:43482", + "issue:43454|issue:44466", + "issue:43792|issue:43957" + ], + "prepared_review_unit_hash": "2cada510c6043fe0b2fe95e33ccd81afdc95ed6feeec753751cbc55027d99033", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12543, + "estimated_input_tokens": 3136, + "estimated_eval_tokens": 6528 + }, + "overflow_policy": "truncate_then_skip", 
+ "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No clear duplicates here: the soft-similarity pairs are mostly broad subsystem overlaps, not the same concrete bug/change. I would reject all edges and treat this as a heterogeneous set of unrelated issues.", + "confidence": 0.83, + "canonical_issue_reason": "No single canonical issue for the cluster; the items span unrelated regressions and only share broad themes (Qwen2.5-VL position ids, tied weights, loading/init regressions).", + "canonical_pr_reason": null, + "best_issue_reason": "If forced to pick the closest anchor, 45325 is the most code-path-specific report (Qwen2.5-VL get_rope_index / position_ids), but it still does not subsume the other issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:40444", + "right": "issue:45381", + "accept": false, + "reason": "Both are Qwen2.5-VL, but one is finetuning with multiple images and the other is video-input position ids; related area, not the same bug." + }, + { + "left": "issue:44387", + "right": "issue:44617", + "accept": false, + "reason": "Different problems: a Transformers 5.x int4 memory regression vs a model-specific Sam3Video OOM." + }, + { + "left": "issue:43828", + "right": "issue:44683", + "accept": false, + "reason": "Different failure modes and code paths: autocast dtype mismatch vs compiled flex_attention on torch>=2.9." + }, + { + "left": "issue:45325", + "right": "issue:45381", + "accept": false, + "reason": "Same model and helper area, but one is still-image temporal scaling and the other is video vision_position_ids; too different to merge as one duplicate." + }, + { + "left": "issue:43611", + "right": "issue:43646", + "accept": false, + "reason": "Both involve Transformers 5 loading/init regressions, but one is base_model_prefix handling and the other is custom model initialization; not clearly the same bug." 
+ }, + { + "left": "issue:43994", + "right": "issue:44898", + "accept": false, + "reason": "Different vision-model regressions with different symptoms and models; only broad similarity in classification/image handling." + }, + { + "left": "issue:42915", + "right": "issue:43482", + "accept": false, + "reason": "Unrelated models and failures: Qwen3Moe FP8 config vs Qwen2.5 GGUF loading on Transformers v5." + }, + { + "left": "issue:43454", + "right": "issue:44466", + "accept": false, + "reason": "Both mention lm_head/tied weights, but one is a model config failing to tie weights and the other is device-dependent serialization behavior; related family, not clearly the same bug." + }, + { + "left": "issue:43792", + "right": "issue:43957", + "accept": false, + "reason": "Different reports: whisper-large-v2 runtime failure vs meta-device loading regression affecting some models on Transformers 5+." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 30, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:34634", + "issue:35707", + "issue:43262", + "issue:43317", + "issue:43526", + "issue:43606", + "issue:43701", + "issue:43746", + "issue:43873", + "issue:43881", + "issue:43883", + "issue:44186", + "issue:44265", + "issue:44291", + "issue:44877", + "issue:44991", + "issue:45325" + ], + "soft_pairs": [ + "issue:43526|issue:43883", + "issue:43701|issue:45325", + "issue:43881|issue:44991", + "issue:43317|issue:43873", + "issue:43262|issue:43746", + "issue:44291|issue:44877", + "issue:43606|issue:44265", + "issue:34634|issue:35707", + "issue:44186|issue:44265" + ], + "prepared_review_unit_hash": 
"098dc4a96059c07005fd222b1dc563eea43affe3dad045442866a546d8778603", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12620, + "estimated_input_tokens": 3155, + "estimated_eval_tokens": 6566 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set does not form a tight duplicate cluster: the issues span unrelated areas like audio processors, tokenizer loading, model offloading, export, and vision-language internals. No PRs are present, and none of the soft pairs look like the same concrete bug.", + "confidence": 0.27, + "canonical_issue_reason": "If forced to choose, issue #43873 is the broadest and most reusable representative: it concerns offloading/quantization, is still open, and has inbound references. That said, the cluster is too heterogeneous for a strong canonical issue.", + "canonical_pr_reason": null, + "best_issue_reason": "#43873 is the best single issue candidate because it is the only open, externally referenced, broadly scoped bug here and could subsume nearby offloading-related reports better than the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43526", + "right": "issue:43883", + "accept": false, + "reason": "Different components and failure modes: BeitImageProcessorFast label reduction vs Molmo weight-tying attribute error." + }, + { + "left": "issue:43701", + "right": "issue:45325", + "accept": false, + "reason": "Unrelated topics: checkpoint resume key mismatch vs Qwen2.5-VL rope/position-id scaling." 
+ }, + { + "left": "issue:43881", + "right": "issue:44991", + "accept": false, + "reason": "Both are loading problems, but one is glm-4v-9b model load failure and the other is tokenizer loading for a different model; no shared bug path." + }, + { + "left": "issue:43317", + "right": "issue:43873", + "accept": false, + "reason": "Both mention offload/quantization, but the concrete problems differ: dequantized model loading with device_map=auto vs general offloading behavior." + }, + { + "left": "issue:43262", + "right": "issue:43746", + "accept": false, + "reason": "Audio sampling-rate defaulting and PEFT checkpoint loading are unrelated." + }, + { + "left": "issue:44291", + "right": "issue:44877", + "accept": false, + "reason": "Both touch loading in recent transformers, but one is an init_empty_weights argument regression and the other is strict config handling; different code paths." + }, + { + "left": "issue:43606", + "right": "issue:44265", + "accept": false, + "reason": "Bark CPU-offload device mismatch and torch.export/torch_compilable_check export failures are distinct issues." + }, + { + "left": "issue:34634", + "right": "issue:35707", + "accept": false, + "reason": "Voice preset handling in BarkProcessor is unrelated to progressive generation with inputs_embeds/past_key_values." + }, + { + "left": "issue:44186", + "right": "issue:44265", + "accept": false, + "reason": "LayoutLMv2 tokenizer crashes and torch.export failures come from different subsystems and different failure mechanisms." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 31, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:35707", + "issue:39692", + "issue:40444", + "issue:41720", + "issue:43296", + "issue:43377", + "issue:43454", + "issue:43550", + "issue:43606", + "issue:43632", + "issue:43819", + "issue:44164", + "issue:44610", + "issue:44877", + "issue:44898", + "issue:44991", + "issue:45072", + "issue:45325" + ], + "soft_pairs": [ + "issue:43632|issue:44164", + "issue:43606|issue:43819", + "issue:35707|issue:41720", + "issue:44610|issue:45325", + "issue:43377|issue:44898", + "issue:43377|issue:44610", + "issue:43454|issue:43550", + "issue:43454|issue:45072", + "issue:43296|issue:44991", + "issue:43296|issue:44877", + "issue:39692|issue:40444" + ], + "prepared_review_unit_hash": "8f12b2e686ce8b4ed53a3c43e37b1522324e623e2b609a8fb44fb4ab80765f97", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13778, + "estimated_input_tokens": 3445, + "estimated_eval_tokens": 7146 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is over-broad: the soft pairs mostly share only a general subsystem or regression theme, not the same concrete bug. 
I would not merge any of the proposed issue pairs, and there are no PRs in the cluster.", + "confidence": 0.88, + "canonical_issue_reason": "Issue #43632 is the broadest, most regression-like report in the set and has the clearest cross-cutting impact, so it is the best anchor issue if one must be chosen, though the cluster itself does not look like true duplicates.", + "canonical_pr_reason": null, + "best_issue_reason": "#43632 is the strongest representative issue because it describes a clear framework-level regression, is well-scoped, and is more central than the other items that are mostly model-specific or documentation/example failures.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43632", + "right": "issue:44164", + "accept": false, + "reason": "Both are loader/state-related regressions, but one is about `_is_hf_initialized` in Transformers v5 and the other is about `extra_state` handling in save/from_pretrained; different failure modes and fixes." + }, + { + "left": "issue:43606", + "right": "issue:43819", + "accept": false, + "reason": "Device mismatch with CPU offload and DAC `from_latents`/forward mismatch are unrelated bugs in different model paths." + }, + { + "left": "issue:35707", + "right": "issue:41720", + "accept": false, + "reason": "Progressive generation with `inputs_embeds`/`past_key_values` is unrelated to Qwen3 auto device mapping CUDA asserts." + }, + { + "left": "issue:44610", + "right": "issue:45325", + "accept": false, + "reason": "Both mention Qwen2.5-VL, but one is a processor input-size mismatch and the other is a rope/indexing bug; different code paths." + }, + { + "left": "issue:43377", + "right": "issue:44898", + "accept": false, + "reason": "Missing padding mask support in MIMI encoder is not the same as Perceiver non-default-resolution handling." 
+ }, + { + "left": "issue:43377", + "right": "issue:44610", + "accept": false, + "reason": "These concern different models and different preprocessing/model-consistency bugs." + }, + { + "left": "issue:43454", + "right": "issue:43550", + "accept": false, + "reason": "Tying `lm_head` weights and torch.compile SDPA failures are separate model implementation issues." + }, + { + "left": "issue:43454", + "right": "issue:45072", + "accept": false, + "reason": "Garbage text generation from untied weights is unrelated to dtype mismatches in bfloat16 inference." + }, + { + "left": "issue:43296", + "right": "issue:44991", + "accept": false, + "reason": "Both are load-time compatibility problems, but they affect different models and different breakpoints; not the same bug." + }, + { + "left": "issue:43296", + "right": "issue:44877", + "accept": false, + "reason": "PaddleOCR-VL loading in vLLM and strict config rejection for `granite_speech` are separate configuration/load failures." + }, + { + "left": "issue:39692", + "right": "issue:40444", + "accept": false, + "reason": "A broken SigLIP2 documentation example and Qwen2.5-VL finetuning with multi-image iterable data are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 32, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41720", + "issue:43322", + "issue:43404", + "issue:43454", + "issue:43526", + "issue:43720", + "issue:43749", + "issue:43792", + "issue:43881", + "issue:44186", + "issue:44423", + "issue:44466", + "issue:44534", + "issue:44683", + "issue:44977", + "issue:45072", + "issue:45357", + "issue:45406" + ], + "soft_pairs": [ + "issue:43792|issue:44683", + "issue:43749|issue:44534", + "issue:44466|issue:45357", + "issue:41720|issue:43322", + "issue:43454|issue:43720", + "issue:41720|issue:44977", + "issue:43526|issue:43881", + "issue:43404|issue:45357", + "issue:44423|issue:45406", + "issue:44186|issue:45072" + ], + "prepared_review_unit_hash": "d4e5fbb0da0ce86544c663320a2d6d66f7a79d47c8bf30a696ffdae0c69a5cf0", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13455, + "estimated_input_tokens": 3364, + "estimated_eval_tokens": 6984 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is heterogeneous: most items are distinct bugs across different models/features, and none of the soft pairs look like the same underlying defect closely enough to merge. 
The strongest single issue report is the FSDP loading regression, but it is not a duplicate of the others.", + "confidence": 0.41, + "canonical_issue_reason": "Issue 43749 has the most discussion and inbound references, and it describes a concrete, high-signal regression with a clear reproduction area (FSDP_CPU_RAM_EFFICIENT_LOADING).", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43749 is the best standalone issue in the set because it is the most developed report and the clearest actionable regression; the rest are mostly unrelated model-specific failures.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43792", + "right": "issue:44683", + "accept": false, + "reason": "Different bugs: Whisper model runtime failure vs. compiled flex_attention breakage on torch>=2.9." + }, + { + "left": "issue:43749", + "right": "issue:44534", + "accept": false, + "reason": "One is FSDP CPU-efficient loading regression; the other is non-persistent buffers being filled with junk in v5. Different failure modes." + }, + { + "left": "issue:44466", + "right": "issue:45357", + "accept": false, + "reason": "Both mention model serialization, but one is tied lm_head serialization and the other is incorrect visual encoder keys in Qwen3.5; not the same code-path." + }, + { + "left": "issue:41720", + "right": "issue:43322", + "accept": false, + "reason": "Qwen3 auto device map cudaErrorAssert on A800 is unrelated to Llava Next segmentation fault during loading." + }, + { + "left": "issue:43454", + "right": "issue:43720", + "accept": false, + "reason": "AyaVision lm_head tying bug is unrelated to BitNet packed-weight unpacking during accelerate loading." + }, + { + "left": "issue:41720", + "right": "issue:44977", + "accept": false, + "reason": "Both involve Qwen3-family models, but one is a device-mapping cuda assert and the other is a flash-attention generation bug." 
+ }, + { + "left": "issue:43526", + "right": "issue:43881", + "accept": false, + "reason": "BeitImageProcessorFast reduce_labels bug is unrelated to glm-4v-9b loading failure." + }, + { + "left": "issue:43404", + "right": "issue:45357", + "accept": false, + "reason": "lm_head weight tying in Mistral3 is not the same as Qwen3.5 visual encoder key serialization." + }, + { + "left": "issue:44423", + "right": "issue:45406", + "accept": false, + "reason": "Both crash in serve multimodal paths, but they are different AttributeErrors in different processor/model setups." + }, + { + "left": "issue:44186", + "right": "issue:45072", + "accept": false, + "reason": "LayoutLMv2 tokenizer crashes on NER/padding are unrelated to dtype mismatches in SwitchTransformers/TimmWrapperModel bfloat16 inference." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 33, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:34567", + "issue:40990", + "issue:42617", + "issue:42915", + "issue:43550", + "issue:43577", + "issue:43708", + "issue:43720", + "issue:43873", + "issue:44186", + "issue:44479", + "issue:44610", + "issue:44756", + "issue:44877", + "issue:44898", + "issue:44918", + "issue:44991" + ], + "soft_pairs": [ + "issue:34567|issue:40990", + "issue:43873|issue:44756", + "issue:43577|issue:43708", + "issue:42617|issue:42915", + "issue:44877|issue:44991", + "issue:44479|issue:44918", + "issue:44610|issue:44898", + "issue:43550|issue:43720", + "issue:44186|issue:44610" + ], + "prepared_review_unit_hash": "5a2363bcfd33f5a4d52fd7ce879d73ccda0d37628ba3f0cd4bed3e8eca3e30ba", + "component_budget": { + 
"node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12519, + "estimated_input_tokens": 3130, + "estimated_eval_tokens": 6516 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are a heterogeneous mix of unrelated bugs across Trainer state, perplexity, model loading, quantization, tokenization, vision preprocessing, and config handling. The soft-similarity links are topical, but none look like the same underlying defect or change.", + "confidence": 0.96, + "canonical_issue_reason": "No single issue is a good canonical representative; the candidates span unrelated subsystems and failure modes.", + "canonical_pr_reason": "No pull requests were provided.", + "best_issue_reason": "None of the issues is a strong global representative for the cluster. If forced, the broadest is 43873, but it is still not representative of the rest.", + "best_pr_reason": "No pull requests were provided.", + "soft_edge_verdicts": [ + { + "left": "issue:34567", + "right": "issue:40990", + "accept": false, + "reason": "Trainer token accounting and perplexity on gpt-oss/WikiText are unrelated problems." + }, + { + "left": "issue:43873", + "right": "issue:44756", + "accept": false, + "reason": "Both mention offloading/memory behavior, but one is quantization-specific and the other is a Strix Halo mmap OOM issue; not the same bug." + }, + { + "left": "issue:43577", + "right": "issue:43708", + "accept": false, + "reason": "One is a Blip2 dtype loading bug; the other is checkpoint resume/max_steps logic. Different code paths and symptoms." 
+ }, + { + "left": "issue:42617", + "right": "issue:42915", + "accept": false, + "reason": "3d_parallel.py runtime failure and Qwen3Moe FineGrainedFP8Config failure are distinct model/config issues." + }, + { + "left": "issue:44877", + "right": "issue:44991", + "accept": false, + "reason": "Strict config loading for granite_speech and tokenizer loading for EMBEDDIA/est-roberta are unrelated loader regressions." + }, + { + "left": "issue:44479", + "right": "issue:44918", + "accept": false, + "reason": "Video-input regression for several Qwen VL variants and Qwen3.5 embedding unpacking with TRL are different failures." + }, + { + "left": "issue:44610", + "right": "issue:44898", + "accept": false, + "reason": "OmDet-Turbo processor output size mismatch and Perceiver non-default-resolution failure are separate preprocessing/model issues." + }, + { + "left": "issue:43550", + "right": "issue:43720", + "accept": false, + "reason": "torch.compile SDPA failure in Bamba-9B-v2 and packed-weights unpacking during accelerate loading are different bugs." + }, + { + "left": "issue:44186", + "right": "issue:44610", + "accept": false, + "reason": "LayoutLMv2Tokenizer crashing on NER/batched padding and OmDet-Turbo image-size mismatch are not the same underlying issue." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 34, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36331", + "issue:41720", + "issue:42617", + "issue:42915", + "issue:42994", + "issue:43316", + "issue:43317", + "issue:43482", + "issue:43531", + "issue:43550", + "issue:43572", + "issue:43606", + "issue:44292", + "issue:44479", + "issue:44671", + "issue:44743", + "issue:44913", + "issue:45161" + ], + "soft_pairs": [ + "issue:42994|issue:43482", + "issue:36331|issue:42915", + "issue:36331|issue:42617", + "issue:44671|issue:44743", + "issue:42915|issue:43317", + "issue:43316|issue:44913", + "issue:43550|issue:43606", + "issue:43531|issue:43572", + "issue:44292|issue:44479", + "issue:41720|issue:42617", + "issue:41720|issue:45161" + ], + "prepared_review_unit_hash": "a0bb96444379894fe38136daa1e693669b9ef5ffbef601d7563b0711929e2c63", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13431, + "estimated_input_tokens": 3358, + "estimated_eval_tokens": 6972 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is highly heterogeneous: the issues span unrelated trainer API errors, quantization/loading failures, model-specific regressions, config serialization, and device-mapping/offload problems. 
None of the soft pairs look like the same underlying bug or change.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:44479 is the clearest standalone regression report, but the set is too mixed to treat any issue as a true canonical representative of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42994", + "right": "issue:43482", + "accept": false, + "reason": "Saving a quantized model and loading a Qwen2.5-GGUF model under v5 are different failure points and likely different code paths." + }, + { + "left": "issue:36331", + "right": "issue:42915", + "accept": false, + "reason": "CustomTrainer compute_loss kwarg mismatch is unrelated to Qwen3Moe failing with FineGrainedFP8Config." + }, + { + "left": "issue:36331", + "right": "issue:42617", + "accept": false, + "reason": "Trainer compute_loss API break and failure to run 3d_parallel.py are different bugs with different surfaces." + }, + { + "left": "issue:44671", + "right": "issue:44743", + "accept": false, + "reason": "CamemBERT masked-LM prediction regression and Qwen3.5 recurrent-state reset are unrelated model-specific issues." + }, + { + "left": "issue:42915", + "right": "issue:43317", + "accept": false, + "reason": "Qwen3Moe FP8 config failure and dequantized model load/offload failure concern different mechanisms and symptoms." + }, + { + "left": "issue:43316", + "right": "issue:44913", + "accept": false, + "reason": "Gemma3TextConfig API discrepancy does not match GPTNeoX rotary_pct reverting on reload." + }, + { + "left": "issue:43550", + "right": "issue:43606", + "accept": false, + "reason": "torch.compile + SDPA failure in Bamba is a different bug from CPU offload device mismatch in bark-small." + }, + { + "left": "issue:43531", + "right": "issue:43572", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior and StableLmConfig pad_token_idx persistence are unrelated regressions." 
+ }, + { + "left": "issue:44292", + "right": "issue:44479", + "accept": false, + "reason": "Qwen-3-8B-NVFP4 runtime failure and a video-input regression across Qwen VL models are different failure modes." + }, + { + "left": "issue:41720", + "right": "issue:42617", + "accept": false, + "reason": "Auto device mapping CUDA assert on Qwen3 is not the same underlying issue as 3d_parallel.py failing to run." + }, + { + "left": "issue:41720", + "right": "issue:45161", + "accept": false, + "reason": "Qwen3 auto device mapping failure and GPT-OSS MoE tensor-parallel failure are separate model/runtime problems." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 35, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:30064", + "issue:34634", + "issue:36010", + "issue:41720", + "issue:42617", + "issue:42915", + "issue:43322", + "issue:43388", + "issue:43540", + "issue:43782", + "issue:43873", + "issue:44186", + "issue:44361", + "issue:44514", + "issue:44545", + "issue:45059", + "issue:45406", + "issue:45464" + ], + "soft_pairs": [ + "issue:43540|issue:43782", + "issue:43388|issue:45059", + "issue:44186|issue:44361", + "issue:41720|issue:43873", + "issue:42915|issue:43322", + "issue:45406|issue:45464", + "issue:36010|issue:42617", + "issue:44514|issue:44545", + "issue:30064|issue:34634" + ], + "prepared_review_unit_hash": "7b0bef9c6536ba9e9be3aee069722fcad9aed7f6234855d3fa60ec0fb024f5ff", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": 
{ + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 12988, + "estimated_input_tokens": 3247, + "estimated_eval_tokens": 6750 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most items are unrelated issue reports. The only clear duplicate pair is the two Qwen2_5_VLProcessor batched-input padding=False crashes; the rest are similar only at a subsystem level, not the same bug.", + "confidence": 0.94, + "canonical_issue_reason": "Issue 44545 is the cleanest canonical representative for the duplicated Qwen2_5_VLProcessor.apply_chat_template batched-input crash: it has the concise title, matches the same failure mode as 44514, and has slightly more reference activity.", + "canonical_pr_reason": null, + "best_issue_reason": "44545 is the best issue to keep as the representative duplicate target among this set because it states the bug clearly and appears to be the more central report for the exact same crash.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43540", + "right": "issue:43782", + "accept": false, + "reason": "Both are Qwen multimodal issues, but one is a video-input ValueError in Qwen3OmniMoe and the other is a weight_only load error in Qwen3VL; different code paths and failure modes." + }, + { + "left": "issue:43388", + "right": "issue:45059", + "accept": false, + "reason": "gather_for_metrics label truncation in evaluation is unrelated to SAM3 PCS odd behavior with text and bounding boxes; same broad area only." + }, + { + "left": "issue:44186", + "right": "issue:44361", + "accept": false, + "reason": "Both are tokenizer-related, but they affect different models and different bugs: LayoutLMv2 NER/batched padding crashes vs MLukeTokenizer AttributeError on tasks." 
+ }, + { + "left": "issue:41720", + "right": "issue:43873", + "accept": false, + "reason": "Qwen3 auto device mapping cudaErrorAssert and offloading-with-quantization not working are both memory/device-management topics, but not the same concrete bug." + }, + { + "left": "issue:42915", + "right": "issue:43322", + "accept": false, + "reason": "Qwen3Moe FineGrainedFP8Config failure and Llava Next segfault are unrelated model-specific loading/runtime issues." + }, + { + "left": "issue:45406", + "right": "issue:45464", + "accept": false, + "reason": "Gemma4Processor missing _tokenizer in serve and Qwen3.5 streaming inference failure are distinct processor/runtime problems." + }, + { + "left": "issue:36010", + "right": "issue:42617", + "accept": false, + "reason": "An ImportError for GenerationMixin and inability to run 3d_parallel.py do not describe the same bug or code path." + }, + { + "left": "issue:44514", + "right": "issue:44545", + "accept": true, + "reason": "These are the same reported crash on Qwen2_5_VLProcessor.apply_chat_template with batched input and padding=False; only wording differs." + }, + { + "left": "issue:30064", + "right": "issue:34634", + "accept": false, + "reason": "Image Processor void segmentation maps and BarkProcessor voice_preset are completely different processor bugs with no shared failure path." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 36, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33357", + "issue:35707", + "issue:41720", + "issue:43421", + "issue:43550", + "issue:43606", + "issue:43653", + "issue:43720", + "issue:43746", + "issue:43927", + "issue:44368", + "issue:44492", + "issue:44509", + "issue:44625", + "issue:44918", + "issue:44936", + "issue:45357", + "issue:45479" + ], + "soft_pairs": [ + "issue:33357|issue:35707", + "issue:43606|issue:43720", + "issue:41720|issue:43746", + "issue:43550|issue:43653", + "issue:44492|issue:44509", + "issue:44625|issue:45479", + "issue:44368|issue:45357", + "issue:43421|issue:43927", + "issue:43606|issue:43653", + "issue:44918|issue:44936" + ], + "prepared_review_unit_hash": "729f36681e778deb95363e7e60e1bcaeba67941a09c0b5286aa65dda3da19213", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13640, + "estimated_input_tokens": 3410, + "estimated_eval_tokens": 7076 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items do not form a single duplicate cluster; they cover unrelated bugs and docs issues across different models, tokenizers, and trainer paths. 
No canonical issue or PR stands out for the whole set.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:33357", + "right": "issue:35707", + "accept": false, + "reason": "Different failure modes and code paths: MacOS bus error with CLIP vs generation behavior with inputs_embeds/past_key_values." + }, + { + "left": "issue:43606", + "right": "issue:43720", + "accept": false, + "reason": "Both are runtime-loading problems, but one is CPU offload device mismatch and the other is packed-weight unpacking during accelerate loading; not the same bug." + }, + { + "left": "issue:41720", + "right": "issue:43746", + "accept": false, + "reason": "Unrelated subsystems and symptoms: auto device mapping cuda assert vs PEFT adapter loading from local checkpoints." + }, + { + "left": "issue:43550", + "right": "issue:43653", + "accept": false, + "reason": "One is torch.compile + SDPA failure in a model, the other is tokenizer special-token registration; no shared underlying defect." + }, + { + "left": "issue:44492", + "right": "issue:44509", + "accept": false, + "reason": "A typo in cache strategy docs/code is unrelated to documentation still mentioning removed pipeline tasks." + }, + { + "left": "issue:44625", + "right": "issue:45479", + "accept": false, + "reason": "Both mention num_labels, but one is config propagation for Qwen3.5 and the other is a broader zero-loss classification bug across models." + }, + { + "left": "issue:44368", + "right": "issue:45357", + "accept": false, + "reason": "Different model/config issues: tie_word_embeddings warning during LoRA fine-tuning vs incorrect visual encoder keys saved by save_pretrained." 
+ }, + { + "left": "issue:43421", + "right": "issue:43927", + "accept": false, + "reason": "TokenizerBackend post-processor refresh on runtime special-token edits is distinct from DiaConfig losing custom token IDs after save/load." + }, + { + "left": "issue:43606", + "right": "issue:43653", + "accept": false, + "reason": "Device mismatch under CPU offload is unrelated to BigBirdTokenizer mask token special-token registration and decode output." + }, + { + "left": "issue:44918", + "right": "issue:44936", + "accept": false, + "reason": "Both involve trainer usage, but one is unpacking Qwen3.5 input embeddings with TRL SFT and the other is evaluate() failing after train(); different bugs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 37, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:34567", + "issue:34634", + "issue:35707", + "issue:41720", + "issue:43381", + "issue:43421", + "issue:43531", + "issue:43653", + "issue:43782", + "issue:43844", + "issue:44062", + "issue:44451", + "issue:44589", + "issue:44898", + "issue:44991", + "issue:45072", + "issue:45381" + ], + "soft_pairs": [ + "issue:43782|issue:45381", + "issue:44451|issue:44991", + "issue:34634|issue:41720", + "issue:43381|issue:43844", + "issue:44898|issue:45072", + "issue:44062|issue:44451", + "issue:34567|issue:35707", + "issue:43421|issue:43653", + "issue:43531|issue:44589" + ], + "prepared_review_unit_hash": "5919d0a49ba3d03da0ce15a666fd7497addc4a014eb5787de71b19cd7af83edb", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + 
"estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12514, + "estimated_input_tokens": 3129, + "estimated_eval_tokens": 6514 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: it spans tokenizer special-token handling, model loading, generation, training, device mapping, quantization/storage, and architecture-specific bugs. None of the soft pairs look like the same concrete bug/change, so all soft edges should be rejected and there is no clear canonical issue/PR for the whole set.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43782", + "right": "issue:45381", + "accept": false, + "reason": "Both involve Qwen3-family vision/language models, but one is a from_pretrained weight-loading failure and the other is a video vision_position_ids bug. Different code paths and likely different fixes." + }, + { + "left": "issue:44451", + "right": "issue:44991", + "accept": false, + "reason": "Both are tokenizer-loading regressions for specific checkpoints, but they affect different models and report different failure modes. Too broad to be the same underlying bug." + }, + { + "left": "issue:34634", + "right": "issue:41720", + "accept": false, + "reason": "BarkProcessor voice_preset handling and Qwen3 auto device mapping cudaErrorAssert are unrelated subsystems and failures." + }, + { + "left": "issue:43381", + "right": "issue:43844", + "accept": false, + "reason": "Both touch training/gradient behavior, but one is about checkpointing in eval mode and the other about gradients exploding with DeepSpeed ZeRO-3. Not the same bug." 
+ }, + { + "left": "issue:44898", + "right": "issue:45072", + "accept": false, + "reason": "Different model families and different failure classes: Perceiver positional interpolation vs dtype mismatches in SwitchTransformers/TimmWrapperModel." + }, + { + "left": "issue:44062", + "right": "issue:44451", + "accept": false, + "reason": "Special-token/AddedToken construction issues are tokenizer-adjacent, but these are different concrete problems: duplicate special argument vs inability to load a specific tokenizer." + }, + { + "left": "issue:34567", + "right": "issue:35707", + "accept": false, + "reason": "TrainerState token counting and generation with inputs_embeds/past_key_values are separate code paths with different symptoms and fixes." + }, + { + "left": "issue:43421", + "right": "issue:43653", + "accept": false, + "reason": "Both concern special tokens, but one is a TokenizersBackend post-processor update issue and the other is BigBirdTokenizer mask-token registration/empty decode. Related area, not the same bug." + }, + { + "left": "issue:43531", + "right": "issue:44589", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior and Float8 storage lookup errors are unrelated; one is model config logic, the other is a storage/type support problem." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 38, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:39692", + "issue:42886", + "issue:43421", + "issue:43531", + "issue:43596", + "issue:43645", + "issue:43994", + "issue:44077", + "issue:44186", + "issue:44220", + "issue:44610", + "issue:44625", + "issue:44671", + "issue:44843", + "issue:44987", + "issue:45042", + "issue:45081", + "issue:45356" + ], + "soft_pairs": [ + "issue:42886|issue:44843", + "issue:44987|issue:45042", + "issue:44077|issue:44625", + "issue:43994|issue:44671", + "issue:43421|issue:44186", + "issue:39692|issue:44610", + "issue:43596|issue:44220", + "issue:43531|issue:43645", + "issue:45081|issue:45356" + ], + "prepared_review_unit_hash": "4f199d7f5faedf08f080f29a1d62dc42533d4efbcdb7a81f4a95580676fea0e8", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13285, + "estimated_input_tokens": 3322, + "estimated_eval_tokens": 6900 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous, with no clear duplicate group across the issues. 
The only near-match is the Mistral/fix_mistral_regex pair, but the reported symptoms still differ enough to keep them separate.", + "confidence": 0.89, + "canonical_issue_reason": "No single issue stands out as the canonical duplicate target; the set spans unrelated tokenizer, model, processor, and runtime problems.", + "canonical_pr_reason": "No PRs are present in this cluster.", + "best_issue_reason": "No best issue: the cluster is too diverse to use one issue as the representative canonical item.", + "best_pr_reason": "No PRs are present in this cluster.", + "soft_edge_verdicts": [ + { + "left": "issue:42886", + "right": "issue:44843", + "accept": false, + "reason": "Both involve offline tokenizer loading, but one is general cache/offline failure and the other is a specific _patch_mistral_regex model_info call path; not the same concrete bug." + }, + { + "left": "issue:44987", + "right": "issue:45042", + "accept": false, + "reason": "These are unrelated failures: loading a specific model breaks in one case, while PIL image processors incorrectly requiring torchvision is a separate image backend issue." + }, + { + "left": "issue:44077", + "right": "issue:44625", + "accept": false, + "reason": "Different components and symptoms: patchtsmixer post_init validation vs Qwen3.5 num_labels propagation." + }, + { + "left": "issue:43994", + "right": "issue:44671", + "accept": false, + "reason": "Both are model output quality problems, but they affect different models and code paths (SigLIP2 AutoModel/pipeline vs CamemBERT masked LM)." + }, + { + "left": "issue:43421", + "right": "issue:44186", + "accept": false, + "reason": "Both are tokenizer-related, but one is about runtime special-token/post-processor updates and the other is LayoutLMv2 crashes with NER padding/truncation; different bugs." 
+ }, + { + "left": "issue:39692", + "right": "issue:44610", + "accept": false, + "reason": "Both mention processor/model mismatch, but they concern different models and different failures (SigLIP2 doc example vs OmDet-Turbo image size expectations)." + }, + { + "left": "issue:43596", + "right": "issue:44220", + "accept": false, + "reason": "A deepspeed/zero3 BertModel initialization error is unrelated to a feature extraction failure in _torch_extract_fbank_features()." + }, + { + "left": "issue:43531", + "right": "issue:43645", + "accept": false, + "reason": "These are separate runtime/modeling issues: Qwen3-MoE sliding_window behavior vs custom model initialization in Jupyter notebooks." + }, + { + "left": "issue:45081", + "right": "issue:45356", + "accept": false, + "reason": "Closest pair, but still not clearly the same bug: 45081 is an AttributeError in fix_mistral_regex loading Mistral tokenizers, while 45356 is a Kimi-K2.5 regression about codec handling and a misleading warning." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 39, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:39692", + "issue:40444", + "issue:43540", + "issue:43618", + "issue:43697", + "issue:43716", + "issue:43720", + "issue:43746", + "issue:43819", + "issue:43873", + "issue:44008", + "issue:44220", + "issue:44661", + "issue:44843", + "issue:45081", + "issue:45183", + "issue:45381", + "issue:45479" + ], + "soft_pairs": [ + "issue:45183|issue:45479", + "issue:43540|issue:44008", + "issue:43746|issue:45381", + "issue:44220|issue:44661", + "issue:39692|issue:43873", + "issue:43746|issue:43873", + 
"issue:40444|issue:43746", + "issue:43716|issue:44008", + "issue:43618|issue:43697", + "issue:44843|issue:45081", + "issue:43720|issue:43819" + ], + "prepared_review_unit_hash": "66755ac57e607b85b6f5b2ec9049c979adf494e688c1dced04d1d6a8c6a07120", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 14014, + "estimated_input_tokens": 3504, + "estimated_eval_tokens": 7264 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: it groups distinct bugs across multimodal inference, tokenizer patching, loading/offloading, and model-specific forward-path issues. I do not see any true duplicates among the soft-edge pairs.", + "confidence": 0.91, + "canonical_issue_reason": "No single canonical issue fits the cluster: the reports target different models and different code paths, so they should not be merged into one issue.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43746 is the most self-contained and actionable report, but it is still not a duplicate of the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45183", + "right": "issue:45479", + "accept": false, + "reason": "Different bugs: XOR input validation/error messaging vs degenerate zero loss in single-label classification." + }, + { + "left": "issue:43540", + "right": "issue:44008", + "accept": false, + "reason": "Different models and failures: Qwen3OmniMoe video input processing vs Gemma3n audio-mask name collision." + }, + { + "left": "issue:43746", + "right": "issue:45381", + "accept": false, + "reason": "Unrelated code paths: PEFT local checkpoint loading vs Qwen2.5-VL video position_ids bug." 
+ }, + { + "left": "issue:44220", + "right": "issue:44661", + "accept": false, + "reason": "Audio feature extraction bug vs tokenizer-mapping add-new-model-like failure; no shared underlying defect." + }, + { + "left": "issue:39692", + "right": "issue:43873", + "accept": false, + "reason": "Both mention quantization, but one is a doc example/model-processor mismatch and the other is offloading behavior with quantization." + }, + { + "left": "issue:43746", + "right": "issue:43873", + "accept": false, + "reason": "Different failure modes: local checkpoint loading with adapters vs quantization/offloading mismatch." + }, + { + "left": "issue:40444", + "right": "issue:43746", + "accept": false, + "reason": "Multimodal finetuning with iterable datasets vs checkpoint loading; these are distinct bugs." + }, + { + "left": "issue:43716", + "right": "issue:44008", + "accept": false, + "reason": "Different model-specific issues: image preprocessor dtype mismatch vs variable collision in Gemma3n forward." + }, + { + "left": "issue:43618", + "right": "issue:43697", + "accept": false, + "reason": "Different problems in different models: missing CLIP attentions assignment vs RTDetrV2 output divergence." + }, + { + "left": "issue:44843", + "right": "issue:45081", + "accept": false, + "reason": "Same Mistral regex area, but one is offline-mode model_info access and the other is a backend_tokenizer attribute crash; separate bugs." + }, + { + "left": "issue:43720", + "right": "issue:43819", + "accept": false, + "reason": "Different underlying defects: packed-weight loading in BitNet vs DAC.from_latents mismatch with missing STE." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 40, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:30064", + "issue:36010", + "issue:43335", + "issue:43421", + "issue:43540", + "issue:43550", + "issue:43577", + "issue:43653", + "issue:43720", + "issue:43756", + "issue:43818", + "issue:44062", + "issue:44186", + "issue:44488", + "issue:44811", + "issue:44898", + "issue:45072", + "issue:45381" + ], + "soft_pairs": [ + "issue:43335|issue:43756", + "issue:43653|issue:43720", + "issue:44062|issue:44488", + "issue:30064|issue:36010", + "issue:43421|issue:44811", + "issue:44186|issue:44898", + "issue:43540|issue:45381", + "issue:43550|issue:45072", + "issue:43577|issue:43818" + ], + "prepared_review_unit_hash": "1e82bad5f347011117dbd3ccf7aa2f4d29ca320a95c4ae260eddc97cce8720ea", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13329, + "estimated_input_tokens": 3333, + "estimated_eval_tokens": 6922 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly unrelated bug reports across different models and subsystems. 
The soft pairs share broad areas like tokenizers or video inputs, but not the same concrete root cause, so none should be merged as duplicates.", + "confidence": 0.97, + "canonical_issue_reason": "No single issue clearly subsumes the rest; the cluster is heterogeneous rather than one duplicate bug.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43653 is the clearest stand-alone bug report with direct user/CI impact and a narrow, actionable tokenizer failure, so it is the best representative issue in this mixed set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43335", + "right": "issue:43756", + "accept": false, + "reason": "Different model/config bugs: sparse-layer construction vs RoPE layer dropping; same general architecture space, not the same defect." + }, + { + "left": "issue:43653", + "right": "issue:43720", + "accept": false, + "reason": "Unrelated failure modes: BigBird special-token registration vs BitNet accelerate loading/unpacking." + }, + { + "left": "issue:44062", + "right": "issue:44488", + "accept": false, + "reason": "Both are tokenizer/load errors, but the titles point to different concrete bugs and models; not enough to treat as the same root cause." + }, + { + "left": "issue:30064", + "right": "issue:36010", + "accept": false, + "reason": "Image processor segmentation-map handling is unrelated to the GenerationMixin import error." + }, + { + "left": "issue:43421", + "right": "issue:44811", + "accept": false, + "reason": "Both involve special tokens, but one is runtime post-processor updates and the other is Whisper batch_decode skip_special_tokens behavior; different code paths." + }, + { + "left": "issue:44186", + "right": "issue:44898", + "accept": false, + "reason": "LayoutLMv2 tokenizer crashes and Perceiver interpolation failures are separate model/input bugs." 
+ }, + { + "left": "issue:43540", + "right": "issue:45381", + "accept": false, + "reason": "Both concern video inputs, but they affect different Qwen model families and describe different positional/input bugs." + }, + { + "left": "issue:43550", + "right": "issue:45072", + "accept": false, + "reason": "torch.compile + SDPA failure in Bamba is unrelated to dtype mismatches in SwitchTransformers/TimmWrapperModel." + }, + { + "left": "issue:43577", + "right": "issue:43818", + "accept": false, + "reason": "BLIP2 dtype loading issue and Video-LLaVA temporal-attention/weight-sharing bug are distinct model-loading vs architecture problems." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 41, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43531", + "issue:43550", + "issue:43577", + "issue:43582", + "issue:43643", + "issue:43644", + "issue:43653", + "issue:43749", + "issue:43782", + "issue:43819", + "issue:43927", + "issue:44448", + "issue:44561", + "issue:44671", + "issue:44877", + "issue:44977", + "issue:45020" + ], + "soft_pairs": [ + "issue:43550|issue:43927", + "issue:43643|issue:44877", + "issue:43782|issue:44977", + "issue:43577|issue:43582", + "issue:44561|issue:45020", + "issue:44448|issue:44671", + "issue:43531|issue:43643", + "issue:43644|issue:43749", + "issue:43653|issue:43819" + ], + "prepared_review_unit_hash": "947c20dd0d9426dc8650995e1dd2dc8c1d239d72f977053ad0656fffc60f3474", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + 
}, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12469, + "estimated_input_tokens": 3118, + "estimated_eval_tokens": 6492 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is loosely related by broad regression themes (remote code/config loading, dtype/loading, model-specific generation), but the paired issues do not appear to describe the same concrete bug. No soft edge is a safe duplicate merge.", + "confidence": 0.29, + "canonical_issue_reason": "issue:45020 is the broadest umbrella and the only open issue; it best matches the cluster\u2019s general theme of recent Transformers versions breaking remote_code models.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45020 is the most suitable representative because it is the most general report and can absorb the narrower remote_code-related regressions better than the model-specific issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43550", + "right": "issue:43927", + "accept": false, + "reason": "Different models and different failure modes: torch.compile+SDPA on Bamba vs save/load token-ID loss on DiaConfig." + }, + { + "left": "issue:43643", + "right": "issue:44877", + "accept": false, + "reason": "Both involve config/remote_code loading, but one is missing fields from AutoConfig and the other is strict config blocking granite_speech; not the same bug." + }, + { + "left": "issue:43782", + "right": "issue:44977", + "accept": false, + "reason": "Different model families and symptoms: Qwen3VL weight_only load error vs Qwen3.5 flash-attention generation issue." + }, + { + "left": "issue:43577", + "right": "issue:43582", + "accept": false, + "reason": "Unrelated regressions: BLIP2 dtype propagation vs Apple Silicon allocator warmup TypeError." 
+ }, + { + "left": "issue:44561", + "right": "issue:45020", + "accept": false, + "reason": "45020 is an umbrella remote_code regression report, but 44561 is a specific missing-function breakage; too broad to treat as the same underlying bug." + }, + { + "left": "issue:44448", + "right": "issue:44671", + "accept": false, + "reason": "Both are output-quality regressions in v5, but they concern different models and likely different code paths." + }, + { + "left": "issue:43531", + "right": "issue:43643", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior is unrelated to AutoConfig remote_code field loss." + }, + { + "left": "issue:43644", + "right": "issue:43749", + "accept": false, + "reason": "Different subsystems and symptoms: non-persistent buffer initialization junk vs FSDP CPU RAM efficient loading failure." + }, + { + "left": "issue:43653", + "right": "issue:43819", + "accept": false, + "reason": "Tokenizer special-token registration bug and DAC STE mismatch are separate model/component issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 42, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:30064", + "issue:36331", + "issue:41720", + "issue:42175", + "issue:43550", + "issue:43650", + "issue:43819", + "issue:43825", + "issue:43827", + "issue:43906", + "issue:43994", + "issue:44186", + "issue:44442", + "issue:44805", + "issue:44936", + "issue:44991", + "issue:45081", + "issue:45406" + ], + "soft_pairs": [ + "issue:43825|issue:43994", + "issue:44805|issue:44936", + "issue:43550|issue:43819", + "issue:44186|issue:44442", + "issue:41720|issue:44991", + "issue:43825|issue:43827", + "issue:42175|issue:45081", + "issue:43650|issue:43906", + "issue:30064|issue:36331", + "issue:45081|issue:45406" + ], + "prepared_review_unit_hash": "f4f5610d3cc2060055e7349bc63f617fde5c8676696e951e70f5e0afc6ba7dc7", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13489, + "estimated_input_tokens": 3373, + "estimated_eval_tokens": 7002 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is noisy: the items span unrelated bugs in tokenizers, trainers, vision, serving, docs, and model-specific runtime failures. 
I don\u2019t see any true duplicate pairs to merge; the most actionable standalone report is the Mistral tokenizer crash issue.", + "confidence": 0.34, + "canonical_issue_reason": "Issue 45081 is the most concrete and actionable report in the set: it has a clear traceback, a narrow failure mode, and direct references to the affected tokenizer-loading path.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45081 is the best representative issue because it describes a specific, reproducible code-path failure rather than a broad symptom or a documentation-only mismatch.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43825", + "right": "issue:43994", + "accept": false, + "reason": "Both mention pipeline usage, but one is an error-message regression and the other is a model output correctness issue; different bugs and code paths." + }, + { + "left": "issue:44805", + "right": "issue:44936", + "accept": false, + "reason": "Both are generic trainer failures, but the reported symptoms and likely root causes differ: mask shape mismatch vs post-train evaluate breakage." + }, + { + "left": "issue:43550", + "right": "issue:43819", + "accept": false, + "reason": "Different model-specific failures: torch.compile with SDPA on Bamba vs DAC latent reconstruction mismatch missing STE." + }, + { + "left": "issue:44186", + "right": "issue:44442", + "accept": false, + "reason": "Both are tokenizer-related, but they affect different tokenizers and failure modes: NER/batched padding crash vs AutoTokenizer loading failure." + }, + { + "left": "issue:41720", + "right": "issue:44991", + "accept": false, + "reason": "Unrelated issues: Qwen3 auto device mapping CUDA assert vs tokenizer loading regression for a different model family." + }, + { + "left": "issue:43825", + "right": "issue:43827", + "accept": false, + "reason": "Related to v5 pipeline removals, but one is runtime behavior and the other is stale documentation; not the same underlying bug." 
+ }, + { + "left": "issue:42175", + "right": "issue:45081", + "accept": false, + "reason": "Package/backend installation issue versus a Mistral tokenizer attribute error; different subsystems and failure causes." + }, + { + "left": "issue:43650", + "right": "issue:43906", + "accept": false, + "reason": "No evidence of the same bug: one is an empty/nonsensical issue title, the other is an isolated reproduction of a separate referenced issue." + }, + { + "left": "issue:30064", + "right": "issue:36331", + "accept": false, + "reason": "Vision image-processing segmentation-map failure and trainer compute_loss signature mismatch are unrelated." + }, + { + "left": "issue:45081", + "right": "issue:45406", + "accept": false, + "reason": "Both are AttributeError crashes in tokenizer/processor loading, but they hit different objects and code paths (Mistral regex patch vs Gemma4Processor serve path)." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 43, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:34634", + "issue:36010", + "issue:43262", + "issue:43582", + "issue:43606", + "issue:43653", + "issue:43720", + "issue:43782", + "issue:43819", + "issue:44060", + "issue:44368", + "issue:44485", + "issue:44509", + "issue:44610", + "issue:44671", + "issue:44857", + "issue:45072", + "issue:45325" + ], + "soft_pairs": [ + "issue:43606|issue:45072", + "issue:43782|issue:45325", + "issue:44610|issue:45072", + "issue:44485|issue:44509", + "issue:34634|issue:36010", + "issue:43720|issue:45072", + "issue:43653|issue:44671", + "issue:43582|issue:44857", + "issue:44060|issue:44368", + 
"issue:43262|issue:43819" + ], + "prepared_review_unit_hash": "48ed4151d2cecb7fd43373e4d07441a733da4f3bc2a4718a9e5037e047e39123", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13614, + "estimated_input_tokens": 3404, + "estimated_eval_tokens": 7064 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No soft pair appears to describe the same underlying bug or change; all candidate duplicates are distinct model- or subsystem-specific issues, so no merge-worthy canonical item stands out.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43606", + "right": "issue:45072", + "accept": false, + "reason": "Different bugs in different areas: BarkProcessor CPU offload/device mismatch vs general dtype mismatch in SwitchTransformers/TimmWrapperModel." + }, + { + "left": "issue:43782", + "right": "issue:45325", + "accept": false, + "reason": "Both are Qwen-VL related, but one is a from_pretrained weight_only loading error and the other is a rope/index scaling bug in get_rope_index; not the same code path." + }, + { + "left": "issue:44610", + "right": "issue:45072", + "accept": false, + "reason": "OmDet processor input-size mismatch is unrelated to bfloat16 inference dtype mismatches." + }, + { + "left": "issue:44485", + "right": "issue:44509", + "accept": false, + "reason": "One concerns GLM-5 RoPE implementation; the other is a docs cleanup for removed pipeline tasks." 
+ }, + { + "left": "issue:34634", + "right": "issue:36010", + "accept": false, + "reason": "BarkProcessor voice_preset and a transformers GenerationMixin import error are unrelated failures." + }, + { + "left": "issue:43720", + "right": "issue:45072", + "accept": false, + "reason": "BitNet packed-weight loading during accelerate is a different bug than dtype mismatches in inference." + }, + { + "left": "issue:43653", + "right": "issue:44671", + "accept": false, + "reason": "Tokenizer special-token registration vs CamemBERT masked-LM prediction correctness are separate issues." + }, + { + "left": "issue:43582", + "right": "issue:44857", + "accept": false, + "reason": "Apple Silicon warmup TypeError and CUDA float16 loss crash are different platform-specific failures." + }, + { + "left": "issue:44060", + "right": "issue:44368", + "accept": false, + "reason": "Both mention tied-weights warnings, but they target different Qwen variants and different warning behavior; not the same concrete bug." + }, + { + "left": "issue:43262", + "right": "issue:43819", + "accept": false, + "reason": "Audio processor chat-template sampling-rate defaulting and DAC latent reconstruction mismatch are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 44, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:34634", + "issue:36331", + "issue:43452", + "issue:43454", + "issue:43526", + "issue:43577", + "issue:43638", + "issue:43653", + "issue:43697", + "issue:43749", + "issue:44291", + "issue:44554", + "issue:44568", + "issue:44912", + "issue:44936", + "issue:44991", + "issue:45488" + ], + "soft_pairs": [ + "issue:43749|issue:44291", + "issue:43526|issue:43697", + "issue:43526|issue:44554", + "issue:43638|issue:44936", + "issue:43653|issue:44912", + "issue:44568|issue:45488", + "issue:43454|issue:43577", + "issue:43452|issue:44991", + "issue:34634|issue:36331" + ], + "prepared_review_unit_hash": "8dee1c188b77feea4e287257927951851dbf052cce79880b29900d462d0688f8", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12795, + "estimated_input_tokens": 3199, + "estimated_eval_tokens": 6654 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is a grab bag of unrelated bugs across tokenizers, trainers, model loading, quantization, and vision models. 
None of the soft pairs look like the same underlying issue or a mergeable duplicate PR pair, so all soft edges should be rejected.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43749", + "right": "issue:44291", + "accept": false, + "reason": "Both are loading-related, but one is about FSDP_CPU_RAM_EFFICIENT_LOADING and the other about init_empty_weights/_is_hf_initialized; different code paths and failure modes." + }, + { + "left": "issue:43526", + "right": "issue:43697", + "accept": false, + "reason": "BeitImageProcessorFast label reduction and RTDetrV2 output differences are separate vision bugs in different components with no shared concrete defect." + }, + { + "left": "issue:43526", + "right": "issue:44554", + "accept": false, + "reason": "A preprocessing label bug and an MPS attention correctness issue are unrelated subsystems and cannot be the same bug." + }, + { + "left": "issue:43638", + "right": "issue:44936", + "accept": false, + "reason": "DeepSpeed ZeRO-3 index error during training with a non-pretrained Bert model is unrelated to trainer.evaluate() failing after trainer.train()." + }, + { + "left": "issue:43653", + "right": "issue:44912", + "accept": false, + "reason": "BigBirdTokenizer special-token registration/empty decode and MXFP4 quantization fallback to bf16 are different defects in different layers." + }, + { + "left": "issue:44568", + "right": "issue:45488", + "accept": false, + "reason": "Both concern tokenizers, but one is mdeberta-v3 add_special_tokens behavior and the other is LlamaTokenizer overriding tokenizer.json pre-tokenizer; not the same concrete bug." + }, + { + "left": "issue:43454", + "right": "issue:43577", + "accept": false, + "reason": "AyaVision weight-tying failure and BLIP2 dtype propagation are distinct model-loading/model-setup issues." 
+ }, + { + "left": "issue:43452", + "right": "issue:44991", + "accept": false, + "reason": "gguf_file breakage for AutoTokenizer/AutoModel loading and EMBEDDIA/est-roberta tokenizer loading in v5 are separate compatibility problems, not one duplicate fix." + }, + { + "left": "issue:34634", + "right": "issue:36331", + "accept": false, + "reason": "BarkProcessor voice_preset failure and CustomTrainer.compute_loss unexpected kwarg are entirely unrelated bugs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 45, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36010", + "issue:38175", + "issue:39692", + "issue:43257", + "issue:43329", + "issue:43645", + "issue:43696", + "issue:43746", + "issue:43854", + "issue:44568", + "issue:44589", + "issue:44610", + "issue:44811", + "issue:44912", + "issue:44977", + "issue:45042", + "issue:45356" + ], + "soft_pairs": [ + "issue:43746|issue:44811", + "issue:44589|issue:45042", + "issue:43257|issue:43329", + "issue:43854|issue:44977", + "issue:38175|issue:44610", + "issue:36010|issue:43645", + "issue:43257|issue:43696", + "issue:44568|issue:45356", + "issue:39692|issue:44912" + ], + "prepared_review_unit_hash": "5faa610dc76784bb629b324746edd0f4395d72bee462453215ffe8c29a361fa0", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12712, + "estimated_input_tokens": 3178, + "estimated_eval_tokens": 6612 + }, 
+ "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous and does not look like a true duplicate set. The soft-edge pairs are all distinct bugs or feature regressions, so they should be rejected.", + "confidence": 0.96, + "canonical_issue_reason": "No single canonical issue fits this cluster: it mixes unrelated reports across imports, tokenizers, model loading, quantization, processors, and documentation examples.", + "canonical_pr_reason": null, + "best_issue_reason": "No global best issue exists here. If forced, issue 45042 is the broadest/highest-impact processor bug, but it still does not represent the rest of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43746", + "right": "issue:44811", + "accept": false, + "reason": "Different code paths and symptoms: GraniteSpeech local checkpoint loading with PEFT adapters vs Whisper processor.batch_decode skip_special_tokens handling." + }, + { + "left": "issue:44589", + "right": "issue:45042", + "accept": false, + "reason": "Unrelated failures: Float8 storage lookup error vs PIL backend image processors incorrectly requiring torchvision." + }, + { + "left": "issue:43257", + "right": "issue:43329", + "accept": false, + "reason": "Different underlying bugs: Qwen3 MOE weight conversion under accelerate/deepspeed vs undefined variables in multimodal video token counting." + }, + { + "left": "issue:43854", + "right": "issue:44977", + "accept": false, + "reason": "Both involve model loading/inference, but they concern different models and different failure modes: unit-test loading vs flash-attention generation breakage." + }, + { + "left": "issue:38175", + "right": "issue:44610", + "accept": false, + "reason": "Both are image-model issues, but one is zero probabilities on SigLIP2 and the other is a processor/model input-size mismatch for OmDet-Turbo." 
+ }, + { + "left": "issue:36010", + "right": "issue:43645", + "accept": false, + "reason": "Distinct regressions: missing GenerationMixin import vs custom-model initialization in Jupyter notebooks on Transformers 5.0." + }, + { + "left": "issue:43257", + "right": "issue:43696", + "accept": false, + "reason": "Accelerate/deepspeed weight conversion bug is unrelated to an OOM when running GPT-oss-20b." + }, + { + "left": "issue:44568", + "right": "issue:45356", + "accept": false, + "reason": "Both are tokenizer-related, but they affect different models and different issues: missing BOS/EOS addition vs codec handling and warning behavior." + }, + { + "left": "issue:39692", + "right": "issue:44912", + "accept": false, + "reason": "Different models and different problems: SigLIP2 docs/model-processor mismatch and quantization failure vs gpt-oss-20b MXFP4 quantization fallback to bf16." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 46, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36010", + "issue:36331", + "issue:42175", + "issue:43262", + "issue:43329", + "issue:43531", + "issue:43550", + "issue:43720", + "issue:43749", + "issue:43819", + "issue:43927", + "issue:44479", + "issue:44610", + "issue:44756", + "issue:44811", + "issue:44912", + "issue:44938", + "issue:45042" + ], + "soft_pairs": [ + "issue:43819|issue:43927", + "issue:42175|issue:44938", + "issue:43720|issue:44610", + "issue:43550|issue:44912", + "issue:43262|issue:44811", + "issue:43749|issue:44756", + "issue:43531|issue:45042", + "issue:43329|issue:44479", + "issue:36010|issue:36331" + ], + 
"prepared_review_unit_hash": "b0bad7f846b1ff7788b65051c7f8e198c2b3d181680883052dcc8275b32beae3", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13199, + "estimated_input_tokens": 3300, + "estimated_eval_tokens": 6856 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These look like a bundle of unrelated Hugging Face/transformers issues rather than duplicates. The soft pairs share broad themes at most (audio, video, loading, quantization), but each reports a different concrete symptom, code path, or model-specific failure.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43819", + "right": "issue:43927", + "accept": false, + "reason": "Different bugs: DAC.from_latents forward mismatch vs DiaConfig custom token IDs being dropped on save/load and causing generation IndexError." + }, + { + "left": "issue:42175", + "right": "issue:44938", + "accept": false, + "reason": "One is missing TensorFlow in a torch extra install; the other is a Python 3.14 load/import failure. Different environments and failure modes." + }, + { + "left": "issue:43720", + "right": "issue:44610", + "accept": false, + "reason": "BitNet packed-weight loading under accelerate is unrelated to OmDet-Turbo processor output size mismatch (640px vs 224px)." + }, + { + "left": "issue:43550", + "right": "issue:44912", + "accept": false, + "reason": "Both are model/runtime inference problems, but they affect different models and different mechanisms: torch.compile+SDPA vs MXFP4 quantization fallback." 
+ }, + { + "left": "issue:43262", + "right": "issue:44811", + "accept": false, + "reason": "Both involve audio processors, but the concrete bugs differ: chat template sampling-rate default vs batch_decode ignoring skip_special_tokens." + }, + { + "left": "issue:43749", + "right": "issue:44756", + "accept": false, + "reason": "FSDP CPU RAM-efficient loading is a training/loading pipeline bug; disabling mmap on Strix Halo is a hardware-specific OOM workaround. Not the same issue." + }, + { + "left": "issue:43531", + "right": "issue:45042", + "accept": false, + "reason": "Completely different subsystems and symptoms: Qwen3-MoE sliding window behavior vs PIL image processors incorrectly requiring torchvision." + }, + { + "left": "issue:43329", + "right": "issue:44479", + "accept": false, + "reason": "Both mention video/VLM paths, but one is an undefined-function bug in multimodal token counting, while the other is a broader video-input regression for several models." + }, + { + "left": "issue:36010", + "right": "issue:36331", + "accept": false, + "reason": "Unrelated API regressions: GenerationMixin import path vs CustomTrainer.compute_loss signature change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 47, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41720", + "issue:43257", + "issue:43329", + "issue:43450", + "issue:43452", + "issue:43540", + "issue:43577", + "issue:43582", + "issue:43697", + "issue:43756", + "issue:43931", + "issue:44291", + "issue:44479", + "issue:45042", + "issue:45072", + "issue:45357", + "issue:45381" + ], + "soft_pairs": [ + "issue:43257|issue:43756", + "issue:43257|issue:43931", + "issue:43577|issue:45072", + "issue:41720|issue:43540", + "issue:43452|issue:43582", + "issue:43329|issue:43450", + "issue:43697|issue:45357", + "issue:44291|issue:45042", + "issue:44479|issue:45381" + ], + "prepared_review_unit_hash": "af5bd09ab418c408e4fc09d4b9d62de9e2b054e6876f0300e1139addc464a816", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12769, + "estimated_input_tokens": 3193, + "estimated_eval_tokens": 6642 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly a heterogeneous set of unrelated model-loading and multimodal bugs. 
The only strong duplicate-like relationship is the Qwen video regression pair: issue 44479 and issue 45381 both describe the same video-input / vision_position_ids regression in the Qwen VL family.", + "confidence": 0.89, + "canonical_issue_reason": "issue 44479 is the broadest and most representative issue in the only apparent duplicate family, covering the Qwen VL video-input regression across multiple affected models.", + "canonical_pr_reason": null, + "best_issue_reason": "issue 44479 is the best overall issue candidate because it describes the widest impact, the clearest regression, and subsumes the more specific Qwen2.5-VL video bug report.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43257", + "right": "issue:43756", + "accept": false, + "reason": "Different failures and code paths: Qwen3 MOE weight conversion under accelerate/deepspeed vs Smollm3 RoPE-layer dropping." + }, + { + "left": "issue:43257", + "right": "issue:43931", + "accept": false, + "reason": "Both involve Qwen3 model loading, but one is MOE weight conversion and the other is visual weight-shape mismatch; not the same bug." + }, + { + "left": "issue:43577", + "right": "issue:45072", + "accept": false, + "reason": "Both mention dtype mismatches, but they affect different architectures and likely different conversion/inference paths." + }, + { + "left": "issue:41720", + "right": "issue:43540", + "accept": false, + "reason": "Qwen3 auto device mapping CUDA assert is unrelated to the Qwen3OmniMoe video-input ValueError." + }, + { + "left": "issue:43452", + "right": "issue:43582", + "accept": false, + "reason": "Completely different subsystems: GGUF tokenizer/model loading vs Apple Silicon caching allocator warmup." + }, + { + "left": "issue:43329", + "right": "issue:43450", + "accept": false, + "reason": "Both are video-related, but one is a missing helper/merge-size bug in token counting and the other is a batched-shape bug in processors." 
+ }, + { + "left": "issue:43697", + "right": "issue:45357", + "accept": false, + "reason": "RTDetrV2 output drift in v5 is unrelated to Qwen3.5 save_pretrained visual encoder key serialization." + }, + { + "left": "issue:44291", + "right": "issue:45042", + "accept": false, + "reason": "init_empty_weights/_is_hf_initialized TypeError is unrelated to PIL backend processors requiring torchvision." + }, + { + "left": "issue:44479", + "right": "issue:45381", + "accept": true, + "reason": "Both describe the same Qwen VL video-input regression, centered on incorrect vision_position_ids for video processing; 45381 looks like a specific instance of the broader 44479 bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 48, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33357", + "issue:34567", + "issue:42175", + "issue:42915", + "issue:43526", + "issue:43540", + "issue:43742", + "issue:43824", + "issue:43927", + "issue:44062", + "issue:44479", + "issue:44488", + "issue:44560", + "issue:44821", + "issue:44912", + "issue:44991", + "issue:45479" + ], + "soft_pairs": [ + "issue:42175|issue:43824", + "issue:44062|issue:44991", + "issue:42915|issue:44821", + "issue:43540|issue:44560", + "issue:42175|issue:44488", + "issue:42915|issue:43742", + "issue:43927|issue:44912", + "issue:43540|issue:44479", + "issue:43526|issue:45479", + "issue:33357|issue:34567" + ], + "prepared_review_unit_hash": "bfc89bc5a278abd3ef614f331fbd46ae2e254388acddf4b7456faa65453b90f0", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + 
"estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12829, + "estimated_input_tokens": 3208, + "estimated_eval_tokens": 6672 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No convincing duplicate cluster here: the issues span unrelated subsystems (tokenizers, multimodal/video, quantization, training state, model loading). All soft pairs should be rejected.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:43824 is the most central user-facing report in this set by discussion and inbound references, but it is not a duplicate anchor for the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42175", + "right": "issue:43824", + "accept": false, + "reason": "Different failures: missing TensorFlow backend on pip install vs an import error for a specific Qwen2.5-VL class." + }, + { + "left": "issue:44062", + "right": "issue:44991", + "accept": false, + "reason": "Different root causes: AddedToken special-arg conflict vs tokenizer loading failure for a specific model." + }, + { + "left": "issue:42915", + "right": "issue:44821", + "accept": false, + "reason": "Unrelated areas: FP8 quantization failure for Qwen3Moe vs loading AutoImageProcessor from a URL." + }, + { + "left": "issue:43540", + "right": "issue:44560", + "accept": false, + "reason": "Both are video-related, but they concern different Qwen model families and different concrete errors; not enough to treat as the same bug." + }, + { + "left": "issue:42175", + "right": "issue:44488", + "accept": false, + "reason": "Different issue classes: backend dependency installation vs loading a particular model checkpoint." 
+ }, + { + "left": "issue:42915", + "right": "issue:43742", + "accept": false, + "reason": "Different code paths: quantization config handling vs key error while loading MobileLLM-125M." + }, + { + "left": "issue:43927", + "right": "issue:44912", + "accept": false, + "reason": "Config save/load custom-token-ID corruption is unrelated to MXFP4 quantization fallback behavior." + }, + { + "left": "issue:43540", + "right": "issue:44479", + "accept": false, + "reason": "Both mention video input, but one is a Qwen3OmniMoe processing error while the other is a broader regression across several Qwen VL variants; not the same concrete bug." + }, + { + "left": "issue:43526", + "right": "issue:45479", + "accept": false, + "reason": "Different semantics: BeitImageProcessorFast label reduction bug vs single-label classification with num_labels=1 causing zero loss." + }, + { + "left": "issue:33357", + "right": "issue:34567", + "accept": false, + "reason": "Completely unrelated: MacOS bus error with CLIP model vs TrainerState token counter not updating." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 49, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:28282", + "issue:34689", + "issue:36010", + "issue:38175", + "issue:42175", + "issue:43582", + "issue:43646", + "issue:43742", + "issue:43749", + "issue:43782", + "issue:43906", + "issue:44291", + "issue:44484", + "issue:44704", + "issue:44898", + "issue:45137", + "issue:45381", + "issue:45405" + ], + "soft_pairs": [ + "issue:43906|issue:45381", + "issue:45137|issue:45405", + "issue:44484|issue:44704", + "issue:36010|issue:42175", + "issue:38175|issue:44898", + "issue:43582|issue:43782", + "issue:43742|issue:44291", + "issue:43646|issue:43749", + "issue:28282|issue:34689" + ], + "prepared_review_unit_hash": "3d0a310e920f6902682fa43dc12d21df5e7df93fc199e6ca05c11cb79cf59fc0", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13089, + "estimated_input_tokens": 3273, + "estimated_eval_tokens": 6802 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set: the issues span unrelated bugs/regressions in loading, generation, vision models, backend packaging, and distributed/runtime paths. 
None of the soft pairs look like the same underlying defect or change.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:43749 is the strongest standalone report: detailed repro, active discussion, inbound references, and a concrete broken code path (FSDP CPU RAM efficient loading).", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43906", + "right": "issue:45381", + "accept": false, + "reason": "Different problems: one is an isolated reproduction of an earlier issue, the other is a qwen2.5-vl video vision_position_ids bug. No shared code path or concrete fix overlap is evident." + }, + { + "left": "issue:45137", + "right": "issue:45405", + "accept": false, + "reason": "Unrelated: DeepSpeed ZeRO3 deque IndexError versus a PEFT version bump/package release issue. Different layers and remedies." + }, + { + "left": "issue:44484", + "right": "issue:44704", + "accept": false, + "reason": "Both mention model-loading utilities, but one is about max_shard_size behavior in save_pretrained while the other is cached_file kwargs propagation in AutoProcessor.from_pretrained. Not the same bug." + }, + { + "left": "issue:36010", + "right": "issue:42175", + "accept": false, + "reason": "One is a GenerationMixin import error; the other is missing TensorFlow support in a torch-only install. Different failure modes and code paths." + }, + { + "left": "issue:38175", + "right": "issue:44898", + "accept": false, + "reason": "Both affect vision models, but the symptoms differ: zero probabilities in SigLIP2 versus Perceiver interpolation failure at non-default resolution. Not the same underlying defect." + }, + { + "left": "issue:43582", + "right": "issue:43782", + "accept": false, + "reason": "Apple Silicon caching_allocator_warmup TypeError is a runtime/backend issue; Qwen3VL weight_only=True loading error is a model-loading problem. No plausible single fix." 
+ }, + { + "left": "issue:43742", + "right": "issue:44291", + "accept": false, + "reason": "Both are loading-time errors, but one is a KeyError for facebook/MobileLLM-125M and the other is an unexpected _is_hf_initialized argument with init_empty_weights in 5.0.0rc0. Different root causes." + }, + { + "left": "issue:43646", + "right": "issue:43749", + "accept": false, + "reason": "Custom model initialization breakage in 5.0.0 is distinct from FSDP_CPU_RAM_EFFICIENT_LOADING regression. They affect different initialization/loading mechanisms." + }, + { + "left": "issue:28282", + "right": "issue:34689", + "accept": false, + "reason": "Missing PyTorch ImportError and Llama 3.2 Vision loading regression are separate issues with different environments and code paths." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 50, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:28282", + "issue:29127", + "issue:34567", + "issue:36331", + "issue:43452", + "issue:43493", + "issue:43643", + "issue:43688", + "issue:43749", + "issue:43756", + "issue:43994", + "issue:44062", + "issue:44291", + "issue:44488", + "issue:44821", + "issue:45081", + "issue:45325", + "issue:45375" + ], + "soft_pairs": [ + "issue:43643|issue:45375", + "issue:43756|issue:45325", + "issue:44291|issue:44821", + "issue:43493|issue:43994", + "issue:44062|issue:45081", + "issue:28282|issue:29127", + "issue:43452|issue:44488", + "issue:34567|issue:36331", + "issue:43688|issue:43749" + ], + "prepared_review_unit_hash": "1f62a6241e8dd9ad18b6ec9291599447695c0248ec88da37c507f0bf9d68de29", + "component_budget": { + "node_count": 359, + 
"item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13364, + "estimated_input_tokens": 3341, + "estimated_eval_tokens": 6938 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most items are unrelated standalone issues. The only likely duplicate pair is the SigLIP2 implementation mismatch reports; the rest differ by model, code path, or failure mode.", + "confidence": 0.74, + "canonical_issue_reason": "Issue #43493 is the best canonical issue: it describes the underlying SigLIP2 HF-vs-JAX implementation discrepancy, which also explains the user-facing bad outputs reported in #43994.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #43493 is the strongest representative for the cluster because it states the concrete root-cause-level bug rather than only the downstream symptom.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43643", + "right": "issue:45375", + "accept": false, + "reason": "Both involve missing config fields, but they affect different models and code paths; not the same concrete bug." + }, + { + "left": "issue:43756", + "right": "issue:45325", + "accept": false, + "reason": "Both are RoPE-related, but they describe different model-specific logic errors and are not the same fix." + }, + { + "left": "issue:44291", + "right": "issue:44821", + "accept": false, + "reason": "One is an unexpected argument during model loading, the other is an AutoImageProcessor URL loading issue; different failures." 
+ }, + { + "left": "issue:43493", + "right": "issue:43994", + "accept": true, + "reason": "Both point to the same SigLIP2 implementation problem causing incorrect outputs; #43994 looks like the user-visible symptom of the discrepancy in #43493." + }, + { + "left": "issue:44062", + "right": "issue:45081", + "accept": false, + "reason": "These are unrelated tokenizer/parser failures with different exceptions and code paths." + }, + { + "left": "issue:28282", + "right": "issue:29127", + "accept": false, + "reason": "Different subsystems and symptoms: missing PyTorch import versus LayoutLMv3 error handling." + }, + { + "left": "issue:43452", + "right": "issue:44488", + "accept": false, + "reason": "Both are loading-related, but the titles indicate different model/load paths and no clear shared concrete bug." + }, + { + "left": "issue:34567", + "right": "issue:36331", + "accept": false, + "reason": "TrainerState token accounting and custom compute_loss signature are separate trainer bugs." + }, + { + "left": "issue:43688", + "right": "issue:43749", + "accept": false, + "reason": "Auxiliary-loss normalization and FSDP CPU RAM efficient loading are unrelated issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 51, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43452", + "issue:43531", + "issue:43577", + "issue:43643", + "issue:43697", + "issue:43742", + "issue:44514", + "issue:44561", + "issue:44589", + "issue:44792", + "issue:44821", + "issue:44964", + "issue:45042", + "issue:45081", + "issue:45216", + "issue:45290", + "issue:45381" + ], + "soft_pairs": [ + "issue:44792|issue:44964", + "issue:43742|issue:44589", + "issue:43531|issue:44821", + "issue:44514|issue:45290", + "issue:45042|issue:45081", + "issue:43452|issue:43577", + "issue:43643|issue:44561", + "issue:44514|issue:45381", + "issue:43697|issue:45216" + ], + "prepared_review_unit_hash": "85cb7bcd886d44375c2f18f0ffd66dbde6ddab6879f4dc8ab0799e283368155b", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12662, + "estimated_input_tokens": 3166, + "estimated_eval_tokens": 6588 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set: the issues cover unrelated regressions across loading, tokenization, chat templating, vision/image processing, model dtype, and backend compatibility. 
No soft pair looks like the same underlying bug or change.", + "confidence": 0.95, + "canonical_issue_reason": "No canonical issue fits well because the cluster is heterogeneous; the reported bugs do not share one underlying root cause.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44821 is the broadest, most user-facing loading problem and is the least misleading representative, but it is still not a true cluster canonical.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44792", + "right": "issue:44964", + "accept": false, + "reason": "Different problems: a janus test failure versus a Phi-4 multimodal loading failure; same general area, not the same bug." + }, + { + "left": "issue:43742", + "right": "issue:44589", + "accept": false, + "reason": "Both are loading/runtime errors, but one is a key error for a specific model and the other is a Float8 storage lookup failure; different failure modes and causes." + }, + { + "left": "issue:43531", + "right": "issue:44821", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior is unrelated to AutoImageProcessor-from-URL loading." + }, + { + "left": "issue:44514", + "right": "issue:45290", + "accept": false, + "reason": "Both involve chat/template processing, but one crashes on batched padding=False input and the other on assistant tool-call messages with no content; distinct code paths." + }, + { + "left": "issue:45042", + "right": "issue:45081", + "accept": false, + "reason": "Image processor torchvision dependency issue versus Mistral tokenizer regex patch crash; different components and bugs." + }, + { + "left": "issue:43452", + "right": "issue:43577", + "accept": false, + "reason": "GGUF AutoTokenizer/AutoModel loading breakage is unrelated to BLIP2 dtype propagation remaining float32." 
+ }, + { + "left": "issue:43643", + "right": "issue:44561", + "accept": false, + "reason": "Different trust_remote_code-related regressions: missing fields in AutoConfig return object versus removal of is_torch_fx_available breaking remote-code models." + }, + { + "left": "issue:44514", + "right": "issue:45381", + "accept": false, + "reason": "Both touch Qwen2.5-VL chat/vision flows, but one is a processor chat-template crash and the other is incorrect vision_position_ids in video inputs; not the same bug." + }, + { + "left": "issue:43697", + "right": "issue:45216", + "accept": false, + "reason": "Different regressions: RTDetrV2 output divergence in v5 versus Qwen3.5 save_pretrained checkpoint correctness." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 52, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41762", + "issue:43295", + "issue:43452", + "issue:43653", + "issue:43688", + "issue:43720", + "issue:43819", + "issue:44464", + "issue:44568", + "issue:44625", + "issue:44743", + "issue:44811", + "issue:44843", + "issue:44877", + "issue:44898", + "issue:44936", + "issue:45356", + "issue:45440" + ], + "soft_pairs": [ + "issue:41762|issue:44936", + "issue:43452|issue:44843", + "issue:43688|issue:45440", + "issue:43653|issue:44568", + "issue:43720|issue:44898", + "issue:43819|issue:44811", + "issue:44625|issue:44877", + "issue:43295|issue:45356", + "issue:44464|issue:44743" + ], + "prepared_review_unit_hash": "2621b45f84bcfba5a272f59411c2c3a32885d398320be74ce4a9b0eeb047b388", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + 
"serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13368, + "estimated_input_tokens": 3342, + "estimated_eval_tokens": 6940 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set: the issues span unrelated bugs across tokenizers, configs, loading, generation, and model-specific behavior. No PRs are present.", + "confidence": 0.93, + "canonical_issue_reason": "No single canonical issue stands out; the items are largely unrelated, so picking one as the duplicate anchor would be arbitrary.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44843 is the best representative by triage value: it is open, narrowly scoped, and has a clear failure path (`AutoTokenizer.from_pretrained` unconditionally calling `model_info()` in `_patch_mistral_regex`, breaking offline mode).", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41762", + "right": "issue:44936", + "accept": false, + "reason": "Different failures: Gemma3 + DeepSpeed ZeRO-3 load-time IndexError vs trainer.evaluate() failing after training. No shared code path or concrete duplicate symptom." + }, + { + "left": "issue:43452", + "right": "issue:44843", + "accept": false, + "reason": "Both involve tokenizer/loading, but the bugs differ: gguf_file breaks AutoTokenizer/AutoModel loading, while 44843 is an offline-mode regression from an unconditional `model_info()` call." + }, + { + "left": "issue:43688", + "right": "issue:45440", + "accept": false, + "reason": "Unrelated model bugs: auxiliary-loss normalization in OLMoE/GPT Oss vs native DeepseekV3MoE diverging from remote implementation." 
+ }, + { + "left": "issue:43653", + "right": "issue:44568", + "accept": false, + "reason": "Both are tokenizer/special-token regressions, but the concrete bugs differ: BigBird mask token decode issue vs mdeberta-v3 BOS/EOS not added with `add_special_tokens=True`." + }, + { + "left": "issue:43720", + "right": "issue:44898", + "accept": false, + "reason": "Different subsystems and symptoms: BitNet packed-weight loading/unpacking vs Perceiver image classification failing at non-default resolution." + }, + { + "left": "issue:43819", + "right": "issue:44811", + "accept": false, + "reason": "Distinct model behaviors: DAC `from_latents` mismatch due to missing STE vs Whisper processor `batch_decode()` ignoring `skip_special_tokens`." + }, + { + "left": "issue:44625", + "right": "issue:44877", + "accept": false, + "reason": "Both concern config handling, but one is Qwen3.5 `num_labels` propagation and the other is strict config blocking `granite_speech` loading; not the same bug." + }, + { + "left": "issue:43295", + "right": "issue:45356", + "accept": false, + "reason": "Both are regressions, but they affect different tokenizer/processor paths: processor.tokenizer image handling vs Kimi-K2.5 codec handling and a misleading warning." + }, + { + "left": "issue:44464", + "right": "issue:44743", + "accept": false, + "reason": "Both touch generation/cache behavior, but one is chunked generation with compiled forward while the other is recurrent states resetting in modular_qwen3_5.py; not mergeable as one fix." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 53, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:24643", + "issue:29127", + "issue:38175", + "issue:40444", + "issue:41720", + "issue:43295", + "issue:43525", + "issue:43577", + "issue:43643", + "issue:43697", + "issue:43906", + "issue:44361", + "issue:44442", + "issue:44466", + "issue:44811", + "issue:44843", + "issue:44991", + "issue:45005" + ], + "soft_pairs": [ + "issue:40444|issue:44811", + "issue:44466|issue:45005", + "issue:41720|issue:43906", + "issue:24643|issue:29127", + "issue:43525|issue:43643", + "issue:44843|issue:44991", + "issue:44361|issue:44442", + "issue:43577|issue:43697", + "issue:38175|issue:43295" + ], + "prepared_review_unit_hash": "506103b25e88b2900291cb390e0194966a7a5a6e8820aca9a797a0f33a6dbde1", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13285, + "estimated_input_tokens": 3322, + "estimated_eval_tokens": 6900 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No convincing duplicate cluster emerged: the paired issues mostly share broad vocabulary (tokenizers, tied weights, v5 regressions, model-specific failures) but describe different code paths and symptoms.", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + 
"best_issue_reason": "Issue 45005 is the best umbrella-style issue here because it is the broadest current report around tied-weight regressions in v5 and could plausibly absorb related follow-ups, even though the other issues are not confirmed duplicates of it.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:40444", + "right": "issue:44811", + "accept": false, + "reason": "Different models and failures: Qwen2.5-VL multi-image iterable dataset handling vs Whisper processor.batch_decode skip_special_tokens behavior." + }, + { + "left": "issue:44466", + "right": "issue:45005", + "accept": false, + "reason": "Related theme but not the same bug: 44466 is device-dependent lm_head.weight serialization, while 45005 is a broader tied-weights issue on translation models." + }, + { + "left": "issue:41720", + "right": "issue:43906", + "accept": false, + "reason": "Qwen3 auto device-mapping cudaErrorAssert on A800 is not the same as an isolated reproduction of issue 38071; no concrete shared bug is evident." + }, + { + "left": "issue:24643", + "right": "issue:29127", + "accept": false, + "reason": "DeepSpeed 2-D weight runtime error and LayoutLMv3 box/clarity error are unrelated symptoms in different workflows." + }, + { + "left": "issue:43525", + "right": "issue:43643", + "accept": false, + "reason": "Missing pad_token_id on Llama4Config is a specific model/config bug, not the same as generic missing fields from trust_remote_code AutoConfig loading." + }, + { + "left": "issue:44843", + "right": "issue:44991", + "accept": false, + "reason": "HF_HUB_OFFLINE breakage from unconditional model_info() is a different failure mode than tokenizer loading failure for EMBEDDIA/est-roberta." + }, + { + "left": "issue:44361", + "right": "issue:44442", + "accept": false, + "reason": "MLukeTokenizer AttributeError and FastSpeech2ConformerTokenizer load failure involve different tokenizers and code paths." 
+ }, + { + "left": "issue:43577", + "right": "issue:43697", + "accept": false, + "reason": "BLIP2 dtype propagation remaining float32 is unrelated to RTDetrV2 output differences in Transformers v5." + }, + { + "left": "issue:38175", + "right": "issue:43295", + "accept": false, + "reason": "Siglip2 zero probabilities and processor.tokenizer regression are different model families and different symptoms; not the same underlying bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 54, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:28282", + "issue:30064", + "issue:32090", + "issue:33357", + "issue:33666", + "issue:35141", + "issue:43122", + "issue:43531", + "issue:43653", + "issue:43742", + "issue:43906", + "issue:44361", + "issue:44464", + "issue:44479", + "issue:44936", + "issue:44977", + "issue:45356" + ], + "soft_pairs": [ + "issue:43653|issue:44361", + "issue:44479|issue:44936", + "issue:30064|issue:33666", + "issue:44977|issue:45356", + "issue:43122|issue:43906", + "issue:28282|issue:33357", + "issue:28282|issue:32090", + "issue:28282|issue:35141", + "issue:44464|issue:44977", + "issue:43531|issue:43742" + ], + "prepared_review_unit_hash": "6fcb2d3c069c00b551f0f309dd8f854953d4e8bd36c6234c1999b245472cdd24", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12812, + "estimated_input_tokens": 3203, + "estimated_eval_tokens": 6662 
+ }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are mostly unrelated and span different bugs in tokenizers, training, generation, vision preprocessing, and model loading. I do not see a true duplicate cluster here, so all soft-edge pairs are rejected.", + "confidence": 0.97, + "canonical_issue_reason": "No single canonical issue stands out because the set is heterogeneous and the soft-similarity links do not point to the same underlying bug.", + "canonical_pr_reason": null, + "best_issue_reason": "No global best issue for duplicate-canonicalization: each item describes a distinct problem in a different code path or model family.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43653", + "right": "issue:44361", + "accept": false, + "reason": "Both are tokenizer-related, but one is a BigBird special-token registration/decode bug and the other is an MLuke task-time AttributeError; different failures and code paths." + }, + { + "left": "issue:44479", + "right": "issue:44936", + "accept": false, + "reason": "Video-input regression for specific Qwen models is unrelated to trainer.evaluate failing after train; one is a model/input bug, the other is a trainer lifecycle issue." + }, + { + "left": "issue:30064", + "right": "issue:33666", + "accept": false, + "reason": "Image processor void-segmentation handling and Qwen2-VL multi-GPU training are different subsystems with different symptoms." + }, + { + "left": "issue:44977", + "right": "issue:45356", + "accept": false, + "reason": "Flash-attention generation failure for Qwen3.5 is not the same as a Kimi-K2.5 tokenizer codec regression and warning mismatch." 
+ }, + { + "left": "issue:43122", + "right": "issue:43906", + "accept": false, + "reason": "The first reports tokenizer output changing across versions; the second is only an isolated reproduction of another issue, with no clear evidence it is the same bug." + }, + { + "left": "issue:28282", + "right": "issue:33357", + "accept": false, + "reason": "PyTorch-missing ImportError and a MacOS bus error on a CLIP model are unrelated failure modes." + }, + { + "left": "issue:28282", + "right": "issue:32090", + "accept": false, + "reason": "Missing PyTorch dependency vs. NoneType broadcast error in Trainer are distinct problems." + }, + { + "left": "issue:28282", + "right": "issue:35141", + "accept": false, + "reason": "ImportError on AutoModel has nothing to do with token embedding resizing reinitializing output embeddings." + }, + { + "left": "issue:44464", + "right": "issue:44977", + "accept": false, + "reason": "Chunked-generation inconsistency with compiled forward is a different generation-path bug than Qwen3.5 flash-attention output issues." + }, + { + "left": "issue:43531", + "right": "issue:43742", + "accept": false, + "reason": "Sliding-window behavior in Qwen3-MoE is unrelated to a key error when loading MobileLLM-125M." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 55, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41720", + "issue:43012", + "issue:43329", + "issue:43452", + "issue:43504", + "issue:43550", + "issue:43577", + "issue:43653", + "issue:43720", + "issue:43927", + "issue:44361", + "issue:44451", + "issue:44464", + "issue:44479", + "issue:44554", + "issue:44625", + "issue:45325" + ], + "soft_pairs": [ + "issue:43577|issue:44554", + "issue:43653|issue:43927", + "issue:43329|issue:45325", + "issue:43452|issue:44451", + "issue:43012|issue:43577", + "issue:41720|issue:44464", + "issue:44479|issue:44625", + "issue:43504|issue:43720", + "issue:43550|issue:44361" + ], + "prepared_review_unit_hash": "99ff388930faffba5b69383ee24aab16e54c2f358bdbdb8941d7b0a78bdf3bfa", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12750, + "estimated_input_tokens": 3188, + "estimated_eval_tokens": 6632 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the issues span unrelated bugs in Qwen/VL, tokenizers, config loading, dtype handling, compilation, and specific model loaders. 
I do not see a valid duplicate set or a single canonical issue/PR.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43577", + "right": "issue:44554", + "accept": false, + "reason": "Both mention dtype/precision, but one is a BLIP2 loading mismatch and the other is an MPS attention correctness bug; different models, code paths, and failure modes." + }, + { + "left": "issue:43653", + "right": "issue:43927", + "accept": false, + "reason": "Tokenizer special-token decoding vs DiaConfig save/load token-ID loss; both are token-related but the concrete bugs are unrelated." + }, + { + "left": "issue:43329", + "right": "issue:45325", + "accept": false, + "reason": "Both are multimodal/VL issues, but one is an undefined-variable bug in multimodal token counting and the other is a rope-index scaling regression; different functions and symptoms." + }, + { + "left": "issue:43452", + "right": "issue:44451", + "accept": false, + "reason": "Both are loading problems, but gguf_file/pretrained-loading breakage is unrelated to ScandiBERT model loading failure." + }, + { + "left": "issue:43012", + "right": "issue:43577", + "accept": false, + "reason": "These are both dtype-adjacent, but one is a PyTorch compile warning and the other is a model-loading bug leaving weights in float32; not the same underlying issue." + }, + { + "left": "issue:41720", + "right": "issue:44464", + "accept": false, + "reason": "Qwen3 auto device mapping CUDA assert vs chunked generation inconsistency under compiled forward are distinct runtime paths and bug classes." + }, + { + "left": "issue:44479", + "right": "issue:44625", + "accept": false, + "reason": "Both involve Qwen3.5 naming, but one is a video-input regression and the other is num_labels propagation in config; separate features and failures." 
+ }, + { + "left": "issue:43504", + "right": "issue:43720", + "accept": false, + "reason": "Legacy-field BEiT loading issue vs BitNet packed-weight unpacking during accelerate loading are different model-loading defects." + }, + { + "left": "issue:43550", + "right": "issue:44361", + "accept": false, + "reason": "torch.compile/SDPA failure in Bamba-9B-v2 and MLukeTokenizer AttributeError are unrelated subsystems and symptoms." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 56, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:32090", + "issue:33666", + "issue:34567", + "issue:43334", + "issue:43504", + "issue:43525", + "issue:43540", + "issue:43550", + "issue:43572", + "issue:43716", + "issue:43742", + "issue:44610", + "issue:44821", + "issue:44849", + "issue:45200", + "issue:45229", + "issue:45357" + ], + "soft_pairs": [ + "issue:44849|issue:45357", + "issue:43504|issue:43550", + "issue:45200|issue:45229", + "issue:43334|issue:43572", + "issue:43742|issue:44821", + "issue:43540|issue:43716", + "issue:32090|issue:34567", + "issue:32090|issue:33666", + "issue:43504|issue:44610", + "issue:43525|issue:43572" + ], + "prepared_review_unit_hash": "edf989bad2c3ef8633653c096c871df34a1cd2b8c4b93cabc31f15c3a8ad9260", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12728, + "estimated_input_tokens": 3182, + "estimated_eval_tokens": 6620 + }, + 
"overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items do not form a true duplicate cluster; they span unrelated model/config/processor/training bugs. All soft edges are superficial thematic matches and should be rejected.", + "confidence": 0.97, + "canonical_issue_reason": "No single issue is a convincing canonical duplicate target because the reports cover different models and different failure modes.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a strong global representative here; the set is too heterogeneous for one issue to stand in as a canonical duplicate.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44849", + "right": "issue:45357", + "accept": false, + "reason": "Both are Qwen3.5-related, but one is about output_hidden_states behavior and the other is about save_pretrained visual encoder keys; different bugs and different code paths." + }, + { + "left": "issue:43504", + "right": "issue:43550", + "accept": false, + "reason": "Unrelated problems: BEiT pretrained loading with a legacy field versus Bamba torch.compile/SDPA failure. Same broad ML area only." + }, + { + "left": "issue:45200", + "right": "issue:45229", + "accept": false, + "reason": "Gemma 4 text-only fine-tuning with missing mm_token_type_ids is unrelated to a multi-GPU inference CUDA OOM." + }, + { + "left": "issue:43334", + "right": "issue:43572", + "accept": false, + "reason": "Both mention missing pad-token config attributes, but they affect different models and fields (pad_token_id vs pad_token_idx) with no evidence of the same underlying bug." + }, + { + "left": "issue:43742", + "right": "issue:44821", + "accept": false, + "reason": "A MobileLLM load key error and an AutoImageProcessor-from-URL loading issue are different failure modes in different loading paths." 
+ }, + { + "left": "issue:43540", + "right": "issue:43716", + "accept": false, + "reason": "Different models and modalities: Qwen3OmniMoe video processing versus Mistral-3 image preprocessor dtype mismatch." + }, + { + "left": "issue:32090", + "right": "issue:34567", + "accept": false, + "reason": "Trainer broadcast TypeError and TrainerState num_input_tokens_seen not updating are separate trainer issues with different symptoms and fixes." + }, + { + "left": "issue:32090", + "right": "issue:33666", + "accept": false, + "reason": "One is a specific Trainer _gpu_broadcast_one TypeError; the other is a broad Qwen2-VL multi-GPU training request/problem statement, not the same bug." + }, + { + "left": "issue:43504", + "right": "issue:44610", + "accept": false, + "reason": "Both involve model/preprocessor loading, but the concrete bugs differ: legacy field handling in BEiT versus an input-size mismatch in OmDet-Turbo." + }, + { + "left": "issue:43525", + "right": "issue:43572", + "accept": false, + "reason": "Similar pad-token attribute theme, but different models and different missing fields; not enough to conclude the same bug or a mergeable shared fix." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 57, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33666", + "issue:36010", + "issue:43262", + "issue:43278", + "issue:43493", + "issue:43606", + "issue:43688", + "issue:43720", + "issue:43756", + "issue:43825", + "issue:43844", + "issue:43873", + "issue:43927", + "issue:44488", + "issue:44843", + "issue:44913", + "issue:45084" + ], + "soft_pairs": [ + "issue:43278|issue:43688", + "issue:43262|issue:45084", + "issue:43756|issue:44913", + "issue:43493|issue:43825", + "issue:33666|issue:36010", + "issue:43688|issue:43873", + "issue:43720|issue:43927", + "issue:43606|issue:43927", + "issue:44488|issue:44843", + "issue:43278|issue:43844" + ], + "prepared_review_unit_hash": "deba43ccfa4c370efd4a68724a734b58b4610211405642a76baf195d5e2191a0", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12901, + "estimated_input_tokens": 3226, + "estimated_eval_tokens": 6708 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are mostly unrelated bug reports that share only broad subsystem vocabulary (training, loading, offload, configs). 
I do not see any true duplicate pairs among the soft candidates.", + "confidence": 0.91, + "canonical_issue_reason": "No single canonical issue fits this set: the reports cover different bugs, different code paths, and different products/models.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a strong global representative because the cluster is not a duplicate set; at best, one could pick a broad open report, but it would not summarize the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43278", + "right": "issue:43688", + "accept": false, + "reason": "Both mention training behavior and numerics, but one is a BF16/FP32 dtype mismatch in evaluation and the other is auxiliary-loss normalization in MoE models; different bugs and code paths." + }, + { + "left": "issue:43262", + "right": "issue:45084", + "accept": false, + "reason": "One is an audio processor sampling-rate default issue, the other is a compile-time template-node TypeError; no shared underlying bug." + }, + { + "left": "issue:43756", + "right": "issue:44913", + "accept": false, + "reason": "Both involve rotary-related configuration, but one is about Smollm3 dropping RoPE layers and the other is config reload losing rotary_pct; distinct failure modes." + }, + { + "left": "issue:43493", + "right": "issue:43825", + "accept": false, + "reason": "SigLIP2 implementation discrepancy and pipeline translation-task messaging are unrelated; same broad quality umbrella only." + }, + { + "left": "issue:33666", + "right": "issue:36010", + "accept": false, + "reason": "Multi-GPU training for Qwen2-VL and a GenerationMixin import error are completely separate issues." + }, + { + "left": "issue:43688", + "right": "issue:43873", + "accept": false, + "reason": "Both involve offloading/quantization-adjacent behavior, but one is about auxiliary-loss normalization and the other about CPU offload with quantization; not the same bug." 
+ }, + { + "left": "issue:43720", + "right": "issue:43927", + "accept": false, + "reason": "BitNet accelerate-loading with packed weights and DiaConfig custom token IDs loss are different model/config-loading problems." + }, + { + "left": "issue:43606", + "right": "issue:43927", + "accept": false, + "reason": "CPU-offload device mismatch for bark-small and DiaConfig token-ID persistence are unrelated failures." + }, + { + "left": "issue:44488", + "right": "issue:44843", + "accept": false, + "reason": "Both concern model/tokenizer loading, but one is a specific model failing to load and the other is HF_HUB_OFFLINE being broken by an unconditional model_info call; different root causes." + }, + { + "left": "issue:43278", + "right": "issue:43844", + "accept": false, + "reason": "Both reference training instability, but one is a dtype change between train/eval and the other is gradient growth with ZeRO-3; not the same concrete defect." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 58, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33666", + "issue:36331", + "issue:39692", + "issue:43257", + "issue:43299", + "issue:43329", + "issue:43493", + "issue:43540", + "issue:43606", + "issue:43720", + "issue:43756", + "issue:43844", + "issue:44060", + "issue:44361", + "issue:44964", + "issue:44977", + "issue:45405" + ], + "soft_pairs": [ + "issue:43540|issue:44977", + "issue:43720|issue:44361", + "issue:43257|issue:43299", + "issue:43329|issue:43606", + "issue:39692|issue:43493", + "issue:33666|issue:36331", + "issue:43756|issue:44060", + "issue:44964|issue:45405", + 
"issue:43493|issue:43844" + ], + "prepared_review_unit_hash": "c85d51fb7c7a97f58c370989ed6c436b60985277c26234d0b872595bcf64614a", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12666, + "estimated_input_tokens": 3167, + "estimated_eval_tokens": 6590 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No soft pair looks like the same underlying bug/change; most are broad-family false positives with different models, code paths, or scopes. There are no PRs in the set. I picked the clearest concrete regression issue as the best anchor issue.", + "confidence": 0.66, + "canonical_issue_reason": "Issue 43299 is the clearest, most actionable regression report: it names a specific broken version boundary (5.0.0.dev0) and a concrete loading failure path for Qwen3VL MoE models.", + "canonical_pr_reason": null, + "best_issue_reason": "43299 is the strongest standalone issue because it is precise, reproducible, and regression-oriented; among these unrelated reports it is the best representative anchor.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43540", + "right": "issue:44977", + "accept": false, + "reason": "Both are Qwen-related, but one is a video-input ValueError in Qwen3OmniMoe and the other is a flash-attention generation bug in Qwen3.5; different models and code paths." + }, + { + "left": "issue:43720", + "right": "issue:44361", + "accept": false, + "reason": "Packed-weight loading in BitNet/accelerate is unrelated to a tokenizer AttributeError in MLukeTokenizer." 
+ }, + { + "left": "issue:43257", + "right": "issue:43299", + "accept": false, + "reason": "Both mention Qwen3 MoE loading, but one is an accelerate+deepspeed conversion issue and the other is a transformers 5.0.0.dev0 regression for Qwen3VL MoE; different failure modes." + }, + { + "left": "issue:43329", + "right": "issue:43606", + "accept": false, + "reason": "A multimodal token-counting video-branch bug is unrelated to a Bark CPU offload device-mismatch CI failure." + }, + { + "left": "issue:39692", + "right": "issue:43493", + "accept": false, + "reason": "Same SigLIP2 family, but one is documentation example errors and the other is an implementation discrepancy versus JAX; not the same bug." + }, + { + "left": "issue:33666", + "right": "issue:36331", + "accept": false, + "reason": "Multi-GPU training support for Qwen2-VL is unrelated to a CustomTrainer compute_loss signature incompatibility." + }, + { + "left": "issue:43756", + "right": "issue:44060", + "accept": false, + "reason": "Smollm3 RoPE-layer behavior and Qwen3-Next tied-weights warnings affect different models and different mechanisms." + }, + { + "left": "issue:44964", + "right": "issue:45405", + "accept": false, + "reason": "A Phi-4 multimodal load failure is unrelated to a PEFT dependency-version bump issue." + }, + { + "left": "issue:43493", + "right": "issue:43844", + "accept": false, + "reason": "SigLIP2 model discrepancy and ZeRO-3 gradient inflation are different bugs with different triggers and subsystems." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 59, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33357", + "issue:33666", + "issue:43504", + "issue:43572", + "issue:43606", + "issue:43646", + "issue:43688", + "issue:43746", + "issue:43824", + "issue:43994", + "issue:44291", + "issue:44423", + "issue:44466", + "issue:44493", + "issue:44521", + "issue:44589", + "issue:44734", + "issue:45290" + ], + "soft_pairs": [ + "issue:43824|issue:44589", + "issue:44423|issue:44734", + "issue:43688|issue:43746", + "issue:43572|issue:44291", + "issue:43646|issue:44466", + "issue:44521|issue:45290", + "issue:43994|issue:44493", + "issue:43504|issue:43606", + "issue:33357|issue:33666" + ], + "prepared_review_unit_hash": "8d0473d85cff5b240d60235378dc81e09965b98b814549cd952ceff4cfc9a72f", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13317, + "estimated_input_tokens": 3330, + "estimated_eval_tokens": 6916 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items do not form a duplicate cluster; they cover unrelated bugs, model families, and code paths. 
All soft similarity pairs should be rejected.", + "confidence": 0.94, + "canonical_issue_reason": "No single canonical issue: the issues span distinct failures (model loading, serving, serialization, training, chat templating, and config regressions) rather than one underlying bug.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a clear representative duplicate target because there is no coherent duplicate set to canonicalize.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43824", + "right": "issue:44589", + "accept": false, + "reason": "Different failures: Qwen2.5-VL import/name availability vs Float8 storage object lookup. No shared code-path bug." + }, + { + "left": "issue:44423", + "right": "issue:44734", + "accept": false, + "reason": "Both hit serving, but one is a multimodal `str.to` crash and the other is KV-cache continuation indexing. Different concrete bugs." + }, + { + "left": "issue:43688", + "right": "issue:43746", + "accept": false, + "reason": "Aux-loss normalization in MoE models vs GraniteSpeech PEFT/local checkpoint loading. Unrelated functionality." + }, + { + "left": "issue:43572", + "right": "issue:44291", + "accept": false, + "reason": "StableLmConfig missing field after v5 update vs `init_empty_weights` unexpected argument. Different initialization regressions." + }, + { + "left": "issue:43646", + "right": "issue:44466", + "accept": false, + "reason": "Custom model init breaks after v5 vs device-dependent `lm_head.weight` serialization. Not the same underlying code path." + }, + { + "left": "issue:44521", + "right": "issue:45290", + "accept": false, + "reason": "Both involve `apply_chat_template`, but one is all-zero assistant masks for multimodal inputs and the other crashes on tool-call messages without content. Different bugs." 
+ }, + { + "left": "issue:43994", + "right": "issue:44493", + "accept": false, + "reason": "Siglip2 AutoModel/pipeline wrong results vs widespread unexpected `position_id` keys. Separate issues." + }, + { + "left": "issue:43504", + "right": "issue:43606", + "accept": false, + "reason": "BEiT preset load failure due to legacy field vs bark-small CPU-offload device mismatch. No common fixable bug." + }, + { + "left": "issue:33357", + "right": "issue:33666", + "accept": false, + "reason": "MacOS bus error with pretrained CLIP model vs Qwen2-VL multi-GPU training. Completely different scenarios." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 60, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:15354", + "issue:38175", + "issue:39692", + "issue:43257", + "issue:43278", + "issue:43295", + "issue:43504", + "issue:43540", + "issue:43575", + "issue:43606", + "issue:43653", + "issue:43701", + "issue:43927", + "issue:44112", + "issue:44361", + "issue:44792", + "issue:45200", + "issue:45405" + ], + "soft_pairs": [ + "issue:39692|issue:43295", + "issue:43504|issue:43653", + "issue:43927|issue:44361", + "issue:43575|issue:45405", + "issue:43278|issue:43701", + "issue:43257|issue:45405", + "issue:43606|issue:44112", + "issue:44792|issue:45405", + "issue:15354|issue:38175", + "issue:43540|issue:45200" + ], + "prepared_review_unit_hash": "d8e574b85a8bfd0381af75907bfda2764460906aae1338dd50f01a308fdf1edb", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 
345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13465, + "estimated_input_tokens": 3367, + "estimated_eval_tokens": 6990 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The items are a collection of unrelated model-, tokenizer-, config-, and CI-specific issues; none of the proposed soft pairs appear to be the same underlying bug, so all soft edges are rejected.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue: the reports cover different subsystems and failure modes rather than one duplicated problem.", + "canonical_pr_reason": null, + "best_issue_reason": "No global best issue for deduping; there is no shared underlying defect strong enough to make one issue representative of the rest.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:39692", + "right": "issue:43295", + "accept": false, + "reason": "SigLIP2 docs/example errors with model/processor mismatch and quantization vs a v4.57.5 regression in processor.tokenizer/image handling; different bug classes." + }, + { + "left": "issue:43504", + "right": "issue:43653", + "accept": false, + "reason": "Beit preset load failure due to a legacy field vs BigBirdTokenizer mask token special-token registration/empty decode; unrelated components and symptoms." + }, + { + "left": "issue:43927", + "right": "issue:44361", + "accept": false, + "reason": "DiaConfig losing custom token IDs on save/load is a config serialization bug, while MLukeTokenizer failing on tasks is an AttributeError in task handling." + }, + { + "left": "issue:43575", + "right": "issue:45405", + "accept": false, + "reason": "Qwen2 TP loading OOM is a model-loading/memory issue; MIN_PEFT_VERSION being bumped to an unreleased version is a dependency/versioning issue." 
+ }, + { + "left": "issue:43278", + "right": "issue:43701", + "accept": false, + "reason": "Embedding dtype changing BF16->FP32 during evaluate is unrelated to resume_from_checkpoint key mismatch; different code paths and failure modes." + }, + { + "left": "issue:43257", + "right": "issue:45405", + "accept": false, + "reason": "Qwen3 MOE weights not converted with accelerate+deepspeed is a loading/conversion bug, not the PEFT version bump/release issue." + }, + { + "left": "issue:43606", + "right": "issue:44112", + "accept": false, + "reason": "Bark-small CPU offload device mismatch affects model execution, while the GraniteSpeech report is a stale CI test override; not the same underlying defect." + }, + { + "left": "issue:44792", + "right": "issue:45405", + "accept": false, + "reason": "janus image-generation test failure is a model-specific runtime/test issue, whereas 45405 is about an unreleased PEFT dependency version." + }, + { + "left": "issue:15354", + "right": "issue:38175", + "accept": false, + "reason": "torch.jit.script GeneratorExp export limitation for ViT is unrelated to zero probabilities in SigLIP2; different models and failure mechanisms." + }, + { + "left": "issue:43540", + "right": "issue:45200", + "accept": false, + "reason": "Qwen3OmniMoe video-processing ValueError is a multimodal input parsing bug, while Gemma 4 mm_token_type_ids defaulting is a training/fine-tuning configuration issue." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 61, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:37428", + "issue:39401", + "issue:43232", + "issue:43334", + "issue:43525", + "issue:43582", + "issue:43844", + "issue:43957", + "issue:44315", + "issue:44554", + "issue:44568", + "issue:44849", + "issue:44964", + "issue:44977", + "issue:45072", + "issue:45198", + "issue:45356", + "issue:45446" + ], + "soft_pairs": [ + "issue:44554|issue:45446", + "issue:43232|issue:44849", + "issue:43582|issue:44977", + "issue:39401|issue:44568", + "issue:45072|issue:45198", + "issue:37428|issue:43957", + "issue:43844|issue:44964", + "issue:44315|issue:44849", + "issue:43334|issue:43525", + "issue:44554|issue:45356" + ], + "prepared_review_unit_hash": "78facb9e88dcb7b3727d339beff85d7412c069cca5148dc2d5ab875cd50ce5bf", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13573, + "estimated_input_tokens": 3394, + "estimated_eval_tokens": 7044 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous. 
Most soft pairs are only superficially similar and do not look like the same underlying bug; the only near-match is the pair of missing `pad_token_id` config issues, but even that is model-specific and not clearly one fix.", + "confidence": 0.91, + "canonical_issue_reason": "No single issue cleanly subsumes the cluster; the items span unrelated tokenizer, loading, training, allocator, and attention bugs.", + "canonical_pr_reason": null, + "best_issue_reason": "No strong global representative issue exists. If forced, the closest thematic match is one of the `pad_token_id` config errors, but the cluster is too mixed for a meaningful canonical issue.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44554", + "right": "issue:45446", + "accept": false, + "reason": "Different bugs: an MPS attention correctness issue vs a flex_attention PyTorch version-check/import problem." + }, + { + "left": "issue:43232", + "right": "issue:44849", + "accept": false, + "reason": "Different code paths and symptoms: generation kwargs update after `sync_gpus` vs Qwen3.5 `output_hidden_states` behavior." + }, + { + "left": "issue:43582", + "right": "issue:44977", + "accept": false, + "reason": "Apple Silicon allocator warmup `TypeError` and Qwen3.5 flash-attention generation are unrelated failures." + }, + { + "left": "issue:39401", + "right": "issue:44568", + "accept": false, + "reason": "Both are tokenizer-related, but one is wrong `offset_mapping` and the other is missing BOS/EOS insertion for a different tokenizer." + }, + { + "left": "issue:45072", + "right": "issue:45198", + "accept": false, + "reason": "Different subsystems and failures: bfloat16 dtype mismatch in inference vs save/tokenization failure in Wav2Vec2." + }, + { + "left": "issue:37428", + "right": "issue:43957", + "accept": false, + "reason": "Both concern model loading, but one is a missing flash-attention symbol and the other is a `meta`-device loading regression." 
+ }, + { + "left": "issue:43844", + "right": "issue:44964", + "accept": false, + "reason": "Training instability under ZeRO-3 and inability to load Phi-4-multimodal-instruct are different issues." + }, + { + "left": "issue:44315", + "right": "issue:44849", + "accept": false, + "reason": "Different problems: Liger Kernel not applied with `model_init` vs Qwen3.5 `output_hidden_states` bug." + }, + { + "left": "issue:43334", + "right": "issue:43525", + "accept": false, + "reason": "Same broad symptom (`pad_token_id` missing), but on different model configs and not clearly the same concrete bug or fix." + }, + { + "left": "issue:44554", + "right": "issue:45356", + "accept": false, + "reason": "An MPS attention correctness bug and a Kimi-K2.5 tokenizer regression are unrelated." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 62, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:37428", + "issue:40990", + "issue:43295", + "issue:43388", + "issue:43493", + "issue:43526", + "issue:43701", + "issue:43824", + "issue:43873", + "issue:44464", + "issue:44466", + "issue:44610", + "issue:44625", + "issue:44898", + "issue:45127", + "issue:45200", + "issue:45325", + "issue:45412" + ], + "soft_pairs": [ + "issue:44898|issue:45412", + "issue:43295|issue:44610", + "issue:45200|issue:45325", + "issue:37428|issue:43824", + "issue:44466|issue:45127", + "issue:43493|issue:43701", + "issue:43873|issue:44464", + "issue:40990|issue:44625", + "issue:43388|issue:43526" + ], + "prepared_review_unit_hash": "3a29b6c169831f85f70d7e512df66e971d01fcb7c1825c95d3e428707759f627", + "component_budget": { + 
"node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13391, + "estimated_input_tokens": 3348, + "estimated_eval_tokens": 6952 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set: the items cover unrelated bugs across imports, vision processors, training metrics, generation, quantization, and model-specific regressions. None of the soft pairs look like the same underlying bug or change.", + "confidence": 0.97, + "canonical_issue_reason": "No true canonical issue: the cluster is heterogeneous. If one anchor is needed, issue 43824 is the strongest because it is a concrete, high-engagement regression report with a clear import failure, but it does not represent the rest of the set.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43824 is the best single representative only in the sense of being the most concrete and widely discussed bug report in the set. It is not a duplicate target for the other issues, but it is the strongest anchor if one issue must be chosen.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44898", + "right": "issue:45412", + "accept": false, + "reason": "Different models and failure modes: Perceiver image resolution/interpolation bug vs RT-DETR memory not being released on deletion." + }, + { + "left": "issue:43295", + "right": "issue:44610", + "accept": false, + "reason": "Both involve model/processor regressions, but the concrete bugs differ: processor.tokenizer/image-input handling vs OmDet-Turbo preprocessing size mismatch." 
+ }, + { + "left": "issue:45200", + "right": "issue:45325", + "accept": false, + "reason": "Unrelated code paths: Gemma 4 multimodal token-type defaults vs Qwen2.5-VL rope-index scaling for still images." + }, + { + "left": "issue:37428", + "right": "issue:43824", + "accept": false, + "reason": "Both are import errors, but for different symbols and different model areas; not the same underlying missing-export bug." + }, + { + "left": "issue:44466", + "right": "issue:45127", + "accept": false, + "reason": "Generation inconsistency with compiled forward is unrelated to LoRA merge/extended-vocab collapse and tied-weight serialization behavior." + }, + { + "left": "issue:43493", + "right": "issue:43701", + "accept": false, + "reason": "Different bugs: SigLIP2 implementation discrepancy vs checkpoint resume key mismatch." + }, + { + "left": "issue:43873", + "right": "issue:44464", + "accept": false, + "reason": "Quantization/offloading behavior is a different problem from chunked generation inconsistencies under compiled forward." + }, + { + "left": "issue:40990", + "right": "issue:44625", + "accept": false, + "reason": "Perplexity quality regression on a model is unrelated to num_labels propagation between configs." + }, + { + "left": "issue:43388", + "right": "issue:43526", + "accept": false, + "reason": "Both touch labels, but one is gather_for_metrics dropping tuple elements in the last batch while the other is BeitImageProcessorFast reduce_labels returning a single label; different layers and fixes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 63, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:38175", + "issue:42915", + "issue:43295", + "issue:43572", + "issue:43575", + "issue:43824", + "issue:43844", + "issue:43873", + "issue:43975", + "issue:44442", + "issue:44464", + "issue:44514", + "issue:44521", + "issue:44589", + "issue:44936", + "issue:44987", + "issue:45198", + "issue:45412" + ], + "soft_pairs": [ + "issue:43572|issue:44589", + "issue:43572|issue:44987", + "issue:44514|issue:44521", + "issue:43844|issue:44936", + "issue:43824|issue:43975", + "issue:43575|issue:44464", + "issue:44442|issue:45198", + "issue:38175|issue:45412", + "issue:42915|issue:43824", + "issue:43295|issue:43873" + ], + "prepared_review_unit_hash": "36c774ed1723220a40faed3e4c0c8d9bd05be0604d6aa717677374c21fd1b14c", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13306, + "estimated_input_tokens": 3327, + "estimated_eval_tokens": 6910 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "All proposed soft pairs look like false positives: the issues span unrelated models, tokenizer/processor regressions, training/eval bugs, quantization/offload failures, and memory-management problems. 
There is no coherent duplicate cluster here.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43572", + "right": "issue:44589", + "accept": false, + "reason": "Different failure classes: missing config field vs missing Float8 storage type; no shared code-path bug." + }, + { + "left": "issue:43572", + "right": "issue:44987", + "accept": false, + "reason": "Unrelated symptoms and models; one is a StableLm config regression, the other a model-load failure on a specific repo." + }, + { + "left": "issue:44514", + "right": "issue:44521", + "accept": false, + "reason": "Both involve Qwen2.5 VL chat/template code, but one is a batched-input crash and the other is incorrect zero assistant masks; not the same concrete bug." + }, + { + "left": "issue:43844", + "right": "issue:44936", + "accept": false, + "reason": "Training instability with ZeRO-3 is unrelated to evaluate() failing after train(); different lifecycle and failure mode." + }, + { + "left": "issue:43824", + "right": "issue:43975", + "accept": false, + "reason": "Different models and bugs: import/export of Qwen2.5 VL class vs DeepSeek detokenization regression." + }, + { + "left": "issue:43575", + "right": "issue:44464", + "accept": false, + "reason": "TP OOM on loading a Qwen model is unrelated to chunked generation inconsistencies under compiled forward." + }, + { + "left": "issue:44442", + "right": "issue:45198", + "accept": false, + "reason": "Both are tokenizer-related but for different speech models and different problems (load failure vs save_pretrained/tokenization failure)." + }, + { + "left": "issue:38175", + "right": "issue:45412", + "accept": false, + "reason": "Siglip zero-probabilities and RT-DETR memory not released are unrelated model/runtime issues." 
+ }, + { + "left": "issue:42915", + "right": "issue:43824", + "accept": false, + "reason": "Different subsystems and root causes: FineGrainedFP8Config failure vs missing Qwen2.5 VL import." + }, + { + "left": "issue:43295", + "right": "issue:43873", + "accept": false, + "reason": "A processor/tokenizer regression is not the same as quantization offloading behavior; no shared concrete bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 64, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:38175", + "issue:43278", + "issue:43295", + "issue:43450", + "issue:43493", + "issue:43653", + "issue:43827", + "issue:43856", + "issue:43901", + "issue:43994", + "issue:44112", + "issue:44360", + "issue:44561", + "issue:44568", + "issue:44849", + "issue:44991", + "issue:45003" + ], + "soft_pairs": [ + "issue:43295|issue:43450", + "issue:43278|issue:43856", + "issue:44849|issue:45003", + "issue:44360|issue:44561", + "issue:43295|issue:43994", + "issue:43827|issue:43901", + "issue:38175|issue:43493", + "issue:43653|issue:44112", + "issue:44568|issue:44991" + ], + "prepared_review_unit_hash": "1a4a6bbb4b72a18858430099ffabdc734cfa055a8a1fb625ba4bf541a853c049", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12540, + "estimated_input_tokens": 3135, + "estimated_eval_tokens": 6526 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + 
"aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "One likely duplicate pair stands out in the SigLIP2 area: the zero-probabilities report and the HF-vs-JAX discrepancy report. The remaining soft pairs look related only at a broad subsystem level or are plainly different bugs.", + "confidence": 0.78, + "canonical_issue_reason": "issue:43493 is the better canonical issue for the SigLIP2 problem because it states the broader root discrepancy against the JAX implementation, while issue:38175 reads like a downstream symptom (unexpected zero probabilities).", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43493 is the strongest overall issue here: it is specific, root-cause oriented, and likely subsumes the narrower user symptom reported in issue:38175.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43295", + "right": "issue:43450", + "accept": false, + "reason": "Different bugs: one is a processor/tokenizer regression, the other is a batched video processor shape issue." + }, + { + "left": "issue:43278", + "right": "issue:43856", + "accept": false, + "reason": "Unrelated symptoms and code paths: dtype mismatch in evaluation vs MoE training memory usage." + }, + { + "left": "issue:44849", + "right": "issue:45003", + "accept": false, + "reason": "Distinct problems: Qwen3.5 hidden-state behavior vs unsafe sys.modules access in modeling_utils." + }, + { + "left": "issue:44360", + "right": "issue:44561", + "accept": false, + "reason": "No concrete overlap: a DSA indexer ReLU discussion is unrelated to v5 trust_remote_code breakage." + }, + { + "left": "issue:43295", + "right": "issue:43994", + "accept": false, + "reason": "Both mention SigLIP2/processor usage, but the reports describe different failure modes and are not clearly the same underlying bug." 
+ }, + { + "left": "issue:43827", + "right": "issue:43901", + "accept": false, + "reason": "Both are docs regressions, but they cover different API removals and are not the same change request." + }, + { + "left": "issue:38175", + "right": "issue:43493", + "accept": true, + "reason": "Same underlying SigLIP2 model discrepancy: zero probabilities is a likely symptom of the HF-vs-JAX implementation mismatch." + }, + { + "left": "issue:43653", + "right": "issue:44112", + "accept": false, + "reason": "Completely different areas: BigBird tokenizer special-token registration vs a GraniteSpeech CI device test." + }, + { + "left": "issue:44568", + "right": "issue:44991", + "accept": false, + "reason": "Both are tokenizer-related v5 regressions, but they concern different models and different breakages." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 65, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:37428", + "issue:43425", + "issue:43450", + "issue:43493", + "issue:43575", + "issue:43819", + "issue:44112", + "issue:44351", + "issue:44462", + "issue:44561", + "issue:44610", + "issue:44779", + "issue:44857", + "issue:45083", + "issue:45127", + "issue:45245", + "issue:45290", + "issue:45381" + ], + "soft_pairs": [ + "issue:45127|issue:45245", + "issue:43450|issue:43493", + "issue:37428|issue:43425", + "issue:44462|issue:44561", + "issue:43819|issue:45083", + "issue:45290|issue:45381", + "issue:44561|issue:44779", + "issue:43450|issue:43575", + "issue:44610|issue:44857", + "issue:37428|issue:44351", + "issue:43819|issue:44112" + ], + "prepared_review_unit_hash": 
"95b8bcdf55636a4f8150dd37ac6eb6b35e45a2b2731af3e1831a87070c1f3e7a", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13638, + "estimated_input_tokens": 3410, + "estimated_eval_tokens": 7076 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is mostly heterogeneous; none of the suggested soft pairs look like true duplicates or the same fix. The strongest standalone issue is the v5 trust_remote_code regression, but there is no evidence for merging any pair.", + "confidence": 0.83, + "canonical_issue_reason": "issue:44561 is the best representative issue: it describes a broad, user-facing regression tied to a specific version change, has inbound references, and is more central than the narrower bug reports.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44561 has the clearest impact and widest relevance among the listed issues; it is a concrete compatibility break with a clear upgrade-related root cause.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45127", + "right": "issue:45245", + "accept": false, + "reason": "Different failures: LoRA merge collapse with extended vocab vs a category-cardinality RuntimeError. Same general model-ops area, but not the same bug." + }, + { + "left": "issue:43450", + "right": "issue:43493", + "accept": false, + "reason": "Both involve vision processing, but one is batched video processor shape handling and the other is a SigLIP2 HF-vs-JAX discrepancy. Distinct code paths and symptoms." 
+ }, + { + "left": "issue:37428", + "right": "issue:43425", + "accept": false, + "reason": "Both are compatibility/import problems, but one is a missing flash-attention symbol import and the other is Torch 2.10 incompatibility. Not the same underlying issue." + }, + { + "left": "issue:44462", + "right": "issue:44561", + "accept": false, + "reason": "Both are version-related regressions, but one is AutoTokenizer ignoring tokenizer.json and the other is trust_remote_code breaking after is_torch_fx_available removal. Different mechanisms." + }, + { + "left": "issue:43819", + "right": "issue:45083", + "accept": false, + "reason": "Different models and different bugs: DAC.from_latents mismatch vs qwen3_omni_moe feature-length helper behavior." + }, + { + "left": "issue:45290", + "right": "issue:45381", + "accept": false, + "reason": "Chat-template crash on tool-call assistant messages is unrelated to qwen2.5-vl video vision_position_ids being wrong." + }, + { + "left": "issue:44561", + "right": "issue:44779", + "accept": false, + "reason": "Both are v5 regressions, but they affect different tokenizer/model paths and have different root causes." + }, + { + "left": "issue:43450", + "right": "issue:43575", + "accept": false, + "reason": "Video processor shape mismatch is unrelated to tensor-parallel OOM when loading Qwen2-57B-A14B-Instruct." + }, + { + "left": "issue:44610", + "right": "issue:44857", + "accept": false, + "reason": "OmDet-Turbo processor output size mismatch and LwDetrImageLoss float16 CUDA crash are different bugs in different components." + }, + { + "left": "issue:37428", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but they concern different missing symbols and likely different compatibility breakages." + }, + { + "left": "issue:43819", + "right": "issue:44112", + "accept": false, + "reason": "DAC latent conversion mismatch and a stale CI test failure in GraniteSpeech are unrelated issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 66, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:29942", + "issue:32090", + "issue:39692", + "issue:43262", + "issue:43278", + "issue:43450", + "issue:43526", + "issue:43606", + "issue:43611", + "issue:43819", + "issue:43825", + "issue:43931", + "issue:44448", + "issue:44493", + "issue:44743", + "issue:45083", + "issue:45310", + "issue:45478" + ], + "soft_pairs": [ + "issue:44448|issue:44743", + "issue:43262|issue:43450", + "issue:43606|issue:45083", + "issue:45310|issue:45478", + "issue:43819|issue:44493", + "issue:43278|issue:43825", + "issue:29942|issue:32090", + "issue:43611|issue:43931", + "issue:39692|issue:43526" + ], + "prepared_review_unit_hash": "6a70ba71d7a14731f59fd45ebc8e58eb2835ea79d1b553ecb6c189a10d0583d8", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13182, + "estimated_input_tokens": 3296, + "estimated_eval_tokens": 6848 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most items are unrelated issue reports. The only clear duplicate pair is the Qwen3.5 MoE `from_pretrained` error: issues 45310 and 45478 describe the same Transformers>=5.4.0 failure. 
No PRs are present.", + "confidence": 0.95, + "canonical_issue_reason": "Issue 45310 is the better canonical issue because it is the earlier report of the same Qwen3.5 MoE `from_pretrained` failure and already has inbound reference activity, while 45478 is an open duplicate with the same title.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45310 is the strongest cluster representative: it matches 45478 exactly on the underlying bug and is the more established ticket. The rest of the items are separate bug reports in different areas.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44448", + "right": "issue:44743", + "accept": false, + "reason": "Different model families and failures: Pegasus v4/v5 output drift vs Qwen3 recurrent-state reset in cache handling." + }, + { + "left": "issue:43262", + "right": "issue:43450", + "accept": false, + "reason": "Different subsystems and symptoms: audio chat-template sample-rate defaulting vs batched video processor shape errors." + }, + { + "left": "issue:43606", + "right": "issue:45083", + "accept": false, + "reason": "Unrelated bugs: bark-small CPU offload device mismatch vs qwen3_omni_moe feature-length helper behavior." + }, + { + "left": "issue:45310", + "right": "issue:45478", + "accept": true, + "reason": "Same underlying bug and same error class: Transformers>=5.4.0 `from_pretrained` failure for Qwen3.5 MoE." + }, + { + "left": "issue:43819", + "right": "issue:44493", + "accept": false, + "reason": "Different code paths: DAC latent reconstruction mismatch vs broad position-id warning/key issue across many models." + }, + { + "left": "issue:43278", + "right": "issue:43825", + "accept": false, + "reason": "Different problems: dtype regression in evaluation vs an incorrect pipeline translation-support error message." 
+ }, + { + "left": "issue:29942", + "right": "issue:32090", + "accept": false, + "reason": "Unrelated failures: Flash Attention 2 test breakage vs Trainer `_gpu_broadcast_one` NoneType error." + }, + { + "left": "issue:43611", + "right": "issue:43931", + "accept": false, + "reason": "Different model-loading failures: `base_model_prefix` regression vs a specific Qwen3-VL weight-shape mismatch." + }, + { + "left": "issue:39692", + "right": "issue:43526", + "accept": false, + "reason": "Different topics: SigLIP2 documentation example issues vs BEiT fast image processor label reduction bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 67, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:38175", + "issue:39692", + "issue:43335", + "issue:43404", + "issue:43450", + "issue:43526", + "issue:43550", + "issue:43653", + "issue:44448", + "issue:44493", + "issue:44779", + "issue:44855", + "issue:44977", + "issue:45083", + "issue:45200", + "issue:45229", + "issue:45381" + ], + "soft_pairs": [ + "issue:45200|issue:45381", + "issue:43335|issue:43550", + "issue:44855|issue:45083", + "issue:43404|issue:45200", + "issue:39692|issue:44493", + "issue:43450|issue:43526", + "issue:38175|issue:44493", + "issue:44977|issue:45229", + "issue:44448|issue:44779", + "issue:43335|issue:43653" + ], + "prepared_review_unit_hash": "a3a07f6ce4ca7d59063892585a16a1d060c45d819319d3529d7be7c5bf118573", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + 
"node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12961, + "estimated_input_tokens": 3241, + "estimated_eval_tokens": 6738 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a loose collection of unrelated closed issues across different models, tokenizers, processors, configs, and version regressions. I do not see a single duplicate root cause or a viable canonical artifact for the whole cluster.", + "confidence": 0.97, + "canonical_issue_reason": "No single canonical issue fits: the items span distinct models and subsystems with different failure modes, so none can represent the cluster as a duplicate target.", + "canonical_pr_reason": null, + "best_issue_reason": "No global best issue: there is no dominant or umbrella bug here, just several unrelated reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45200", + "right": "issue:45381", + "accept": false, + "reason": "Different models and symptoms: Gemma 4 text-only fine-tuning needing mm_token_type_ids defaults is unrelated to Qwen2.5-VL video vision_position_ids being wrong." + }, + { + "left": "issue:43335", + "right": "issue:43550", + "accept": false, + "reason": "Unrelated bugs: SwitchTransformers config incorrectly creating a sparse layer vs Bamba-9B-v2 torch.compile + SDPA failure." + }, + { + "left": "issue:44855", + "right": "issue:45083", + "accept": false, + "reason": "Different code paths and failure types: Python 3.13 JIT parsing/IndentationError in DebertaV2 vs qwen3_omni_moe feature-length helper behavior." + }, + { + "left": "issue:43404", + "right": "issue:45200", + "accept": false, + "reason": "Different model families and issues: tied lm_head weights in Mistral3 vs missing/default mm_token_type_ids in Gemma 4." 
+ }, + { + "left": "issue:39692", + "right": "issue:44493", + "accept": false, + "reason": "Doc-example/model-processor mismatch and quantization failure in SigLIP2 is not the same as a broad unexpected position-id key issue." + }, + { + "left": "issue:43450", + "right": "issue:43526", + "accept": false, + "reason": "Both are processor bugs, but they affect different processors and different outputs: batched video shape vs BeitImageProcessorFast label reduction." + }, + { + "left": "issue:38175", + "right": "issue:44493", + "accept": false, + "reason": "Different models and symptoms: SigLIP2 zero probabilities vs unexpected position-id key handling across models." + }, + { + "left": "issue:44977", + "right": "issue:45229", + "accept": false, + "reason": "Different runtime problems: Qwen3.5 flash-attention generation bug vs Gemma4 multi-GPU CUDA OOM." + }, + { + "left": "issue:44448", + "right": "issue:44779", + "accept": false, + "reason": "Version-related regressions in different subsystems, but not the same bug: Pegasus output drift vs Deepseek tokenizer behavior change." + }, + { + "left": "issue:43335", + "right": "issue:43653", + "accept": false, + "reason": "No shared root cause: SwitchTransformers sparse-layer construction is unrelated to BigBirdTokenizer special-token registration." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 68, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:29127", + "issue:32090", + "issue:40444", + "issue:43335", + "issue:43504", + "issue:43526", + "issue:43550", + "issue:43720", + "issue:43723", + "issue:44112", + "issue:44448", + "issue:44554", + "issue:44568", + "issue:44977", + "issue:45072", + "issue:45083", + "issue:45491" + ], + "soft_pairs": [ + "issue:29127|issue:32090", + "issue:43335|issue:45083", + "issue:43335|issue:43504", + "issue:43550|issue:44112", + "issue:44568|issue:45491", + "issue:44448|issue:44977", + "issue:40444|issue:43526", + "issue:43335|issue:45072", + "issue:43723|issue:44554", + "issue:43720|issue:44112" + ], + "prepared_review_unit_hash": "99f175079323c5e2a8ae63d577345f1ee983c33a0a4b4067c6d7f89860c8e6b5", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 13088, + "estimated_input_tokens": 3272, + "estimated_eval_tokens": 6800 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are not duplicates of one another; they span unrelated bugs across different models, tokenizers, training paths, and CI failures. 
All soft edges should be rejected.", + "confidence": 0.97, + "canonical_issue_reason": "No single canonical issue: the cluster is heterogeneous and the candidate pairs do not describe the same underlying bug.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a good global representative because there is no shared root cause across the set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:29127", + "right": "issue:32090", + "accept": false, + "reason": "Different subsystems and failure modes: LayoutLMv3 box validation messaging vs Trainer/_gpu_broadcast_one TypeError." + }, + { + "left": "issue:43335", + "right": "issue:45083", + "accept": false, + "reason": "Both are model-internal bugs, but they concern different architectures and code paths (SwitchTransformers config vs qwen3_omni_moe length helper)." + }, + { + "left": "issue:43335", + "right": "issue:43504", + "accept": false, + "reason": "Unrelated BEIT pretrained-loading/legacy-field issue versus SwitchTransformers sparse-layer construction bug." + }, + { + "left": "issue:43550", + "right": "issue:44112", + "accept": false, + "reason": "Different models and bugs: Bamba torch.compile/SDPA failure vs GraniteSpeech CI test flakiness." + }, + { + "left": "issue:44568", + "right": "issue:45491", + "accept": false, + "reason": "Tokenizer special-token regression is unrelated to Gemma3 NaN embeddings from sliding-window attention on GPU." + }, + { + "left": "issue:44448", + "right": "issue:44977", + "accept": false, + "reason": "Different model families and failure modes: Pegasus v4/v5 output drift vs Qwen3.5 flash-attention generation bug." + }, + { + "left": "issue:40444", + "right": "issue:43526", + "accept": false, + "reason": "Both involve vision preprocessing, but one is Qwen2.5-VL multi-image dataset handling and the other is BEiT Fast processor label reduction." 
+ }, + { + "left": "issue:43335", + "right": "issue:45072", + "accept": false, + "reason": "Different bugs in different models: SwitchTransformers sparse-layer config versus dtype mismatch in TimmWrapperModel/CI inference." + }, + { + "left": "issue:43723", + "right": "issue:44554", + "accept": false, + "reason": "Tokenizer loading in v5 is unrelated to an MPS attention correctness issue with mismatched value/query head dimensions." + }, + { + "left": "issue:43720", + "right": "issue:44112", + "accept": false, + "reason": "Different causes and areas: BitNet packed-weight loading under accelerate vs a stale GraniteSpeech device-override CI test." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 69, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:34689", + "issue:35141", + "issue:37428", + "issue:43066", + "issue:43278", + "issue:43329", + "issue:43335", + "issue:43606", + "issue:43873", + "issue:44112", + "issue:44462", + "issue:44493", + "issue:44857", + "issue:44987", + "issue:45072", + "issue:45229", + "issue:45372", + "issue:45406" + ], + "soft_pairs": [ + "issue:45372|issue:45406", + "issue:43066|issue:44462", + "issue:43335|issue:44112", + "issue:37428|issue:44987", + "issue:43606|issue:44857", + "issue:43873|issue:45229", + "issue:44857|issue:45072", + "issue:34689|issue:35141", + "issue:43873|issue:44493", + "issue:43278|issue:43329" + ], + "prepared_review_unit_hash": "aa1148ea0420ca8ff86d2da450a24b61853d297944d213bb6dbfa181c2c097c2", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + 
"estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13564, + "estimated_input_tokens": 3391, + "estimated_eval_tokens": 7038 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of unrelated bug reports across different subsystems. None of the soft pairs look like the same underlying defect, so all soft edges are rejected. If forced to choose an anchor, issue #43873 is the best representative because it is the broadest active report with the most inbound references, but there is no true canonical duplicate here.", + "confidence": 0.84, + "canonical_issue_reason": "Issue #43873 is the most central/active item in the set (open, 4 inbound references) and has the broadest framing around offloading/quantization, so it is the best anchor if one must be chosen.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #43873 is the strongest general representative for the cluster, but only as an anchor; the cluster does not contain a clear duplicate set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45372", + "right": "issue:45406", + "accept": false, + "reason": "Both involve Gemma 4 processor loading, but one is an import error from mistral_common while the other is a missing _tokenizer attribute in serve; different failure modes and code paths." + }, + { + "left": "issue:43066", + "right": "issue:44462", + "accept": false, + "reason": "Both mention tokenizer behavior, but one is about decoder type in Transformers v5 and the other about AutoTokenizer ignoring tokenizer.json; not the same bug." 
+ }, + { + "left": "issue:43335", + "right": "issue:44112", + "accept": false, + "reason": "Sparse-layer construction in SwitchTransformers config is unrelated to a stale device-override CI failure in GraniteSpeech." + }, + { + "left": "issue:37428", + "right": "issue:44987", + "accept": false, + "reason": "The first is an internal import error for flash attention utilities; the second is a model-loading failure for a specific repository. Same broad area, different issue." + }, + { + "left": "issue:43606", + "right": "issue:44857", + "accept": false, + "reason": "One is a Bark CPU offload device mismatch, the other is an AMP/CUDA crash in LwDetrImageLoss; different models and different root causes." + }, + { + "left": "issue:43873", + "right": "issue:45229", + "accept": false, + "reason": "Offloading/quantization problems are not the same as multi-GPU CUDA OOM during Gemma4 inference." + }, + { + "left": "issue:44857", + "right": "issue:45072", + "accept": false, + "reason": "Both involve dtype issues, but one is a loss crash under AMP and the other is a bfloat16 inference mismatch in different models/components." + }, + { + "left": "issue:34689", + "right": "issue:35141", + "accept": false, + "reason": "Model loading for Llama 3.2 Vision and embedding reinitialization after resize are distinct bugs with different triggers." + }, + { + "left": "issue:43873", + "right": "issue:44493", + "accept": false, + "reason": "Offloading with quantization and unexpected position-id keys are unrelated symptoms in different code paths." + }, + { + "left": "issue:43278", + "right": "issue:43329", + "accept": false, + "reason": "Embedding dtype drift in evaluate is unrelated to an undefined-variable bug in multimodal video token counting." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 70, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:29942", + "issue:32090", + "issue:33357", + "issue:35141", + "issue:41720", + "issue:43065", + "issue:43550", + "issue:43701", + "issue:43976", + "issue:43994", + "issue:44448", + "issue:44466", + "issue:45083", + "issue:45229", + "issue:45372", + "issue:45412", + "issue:45440", + "issue:45459" + ], + "soft_pairs": [ + "issue:43701|issue:45083", + "issue:41720|issue:45229", + "issue:45372|issue:45459", + "issue:29942|issue:35141", + "issue:43550|issue:45083", + "issue:43976|issue:45440", + "issue:32090|issue:33357", + "issue:44448|issue:44466", + "issue:43065|issue:45412", + "issue:43065|issue:43994" + ], + "prepared_review_unit_hash": "930a4b679a1d3f73102763ce3cd8ea47a956c05a70835cf0a929b9f7ee1a671d", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13459, + "estimated_input_tokens": 3365, + "estimated_eval_tokens": 6986 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is not a duplicate set; the items cover unrelated bugs across training, model inference, serialization, tokenizer/import handling, and platform-specific failures. 
All soft-similarity pairs should be rejected.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43701", + "right": "issue:45083", + "accept": false, + "reason": "Different failure modes and components: checkpoint key mismatch vs a Qwen3 Omni MoE helper-length bug." + }, + { + "left": "issue:41720", + "right": "issue:45229", + "accept": false, + "reason": "Both are GPU inference problems, but one is a cudaErrorAssert in Qwen3 auto device mapping and the other is an OOM on Gemma4 multi-GPU; not the same bug." + }, + { + "left": "issue:45372", + "right": "issue:45459", + "accept": false, + "reason": "Different root causes: Gemma 4 processor import dependency breakage vs tokenizer error masking when protobuf is missing." + }, + { + "left": "issue:29942", + "right": "issue:35141", + "accept": false, + "reason": "Flash Attention test failures and embedding reinitialization on resize are unrelated code paths." + }, + { + "left": "issue:43550", + "right": "issue:45083", + "accept": false, + "reason": "torch.compile/SDPA on Bamba-9B-v2 is unrelated to the qwen3_omni_moe feature-length helper issue." + }, + { + "left": "issue:43976", + "right": "issue:45440", + "accept": false, + "reason": "Python version compatibility and DeepseekV3MoE divergence are distinct issues with no shared concrete bug." + }, + { + "left": "issue:32090", + "right": "issue:33357", + "accept": false, + "reason": "Trainer NoneType broadcast error and MacOS bus error for a CLIP model are different failures in different environments." + }, + { + "left": "issue:44448", + "right": "issue:44466", + "accept": false, + "reason": "Both concern v5 behavior changes, but one is output parity for Pegasus while the other is serialization of lm_head.weight; not the same underlying defect." 
+ }, + { + "left": "issue:43065", + "right": "issue:45412", + "accept": false, + "reason": "A dummy Conv2d in Sam3PixelDecoder is unrelated to RT-DETR memory not being released." + }, + { + "left": "issue:43065", + "right": "issue:43994", + "accept": false, + "reason": "Dummy layer implementation in Sam3PixelDecoder is unrelated to SIGLIP2 producing nonsensical results." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 71, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:25251", + "issue:32090", + "issue:42915", + "issue:42994", + "issue:43257", + "issue:43278", + "issue:43381", + "issue:43421", + "issue:43653", + "issue:43994", + "issue:44186", + "issue:44265", + "issue:44361", + "issue:44743", + "issue:45198", + "issue:45412", + "issue:45491" + ], + "soft_pairs": [ + "issue:44186|issue:45198", + "issue:43653|issue:44743", + "issue:43994|issue:45412", + "issue:25251|issue:32090", + "issue:44361|issue:45198", + "issue:44265|issue:45198", + "issue:43278|issue:43381", + "issue:42915|issue:42994", + "issue:43257|issue:43278", + "issue:43421|issue:45491" + ], + "prepared_review_unit_hash": "63cd821b68f1bee09e8bcdb47b1aae118ffbd62d21056d9902a65dbd3d945434", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12882, + "estimated_input_tokens": 3221, + "estimated_eval_tokens": 6698 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": 
false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: it mixes unrelated tokenizer, training, quantization, export, evaluation, and memory-management bugs. None of the soft pairs look like the same underlying issue/change, so I would not merge them as duplicates.", + "confidence": 0.91, + "canonical_issue_reason": "No strong canonical issue stands out because the cluster is not a true duplicate set; the reports cover different models, different code paths, and different failure modes.", + "canonical_pr_reason": null, + "best_issue_reason": "If one issue must be kept as the most representative standalone report, issue 45491 is the most recent and technically specific, but it does not meaningfully cover the rest of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44186", + "right": "issue:45198", + "accept": false, + "reason": "Both are tokenizer-related failures, but they affect different models and different behaviors (NER/padding crash vs save_pretrained/tokenization failure)." + }, + { + "left": "issue:43653", + "right": "issue:44743", + "accept": false, + "reason": "These describe unrelated problems: BigBirdTokenizer special-token registration vs Qwen3 recurrent-state reset in cached generation." + }, + { + "left": "issue:43994", + "right": "issue:45412", + "accept": false, + "reason": "Different subsystems and symptoms: SigLIP2 wrong inference outputs vs RT-DETR memory not released on deletion." + }, + { + "left": "issue:25251", + "right": "issue:32090", + "accept": false, + "reason": "Pipeline top_k nesting bug and Trainer GPU broadcast NoneType error are unrelated code paths and failure modes." + }, + { + "left": "issue:44361", + "right": "issue:45198", + "accept": false, + "reason": "Both involve tokenizers, but the concrete bugs differ: MLukeTokenizer task AttributeError vs Wav2Vec2 save/tokenization failure." 
+ }, + { + "left": "issue:44265", + "right": "issue:45198", + "accept": false, + "reason": "torch.export with torch_compilable_check is a model export problem, not the same as Wav2Vec2 tokenization/save_pretrained failure." + }, + { + "left": "issue:43278", + "right": "issue:43381", + "accept": false, + "reason": "Eval-mode behavior is the only overlap; one is an embedding dtype regression, the other is gradient checkpointing being unsupported in eval mode." + }, + { + "left": "issue:42915", + "right": "issue:42994", + "accept": false, + "reason": "Both touch quantization, but one is FineGrainedFP8Config failure on Qwen3Moe and the other is quantized model saving failure." + }, + { + "left": "issue:43257", + "right": "issue:43278", + "accept": false, + "reason": "These are different Qwen3MOE loading/conversion and embedding dtype issues; no shared concrete bug path." + }, + { + "left": "issue:43421", + "right": "issue:45491", + "accept": false, + "reason": "TokenizersBackend runtime special-token updates and Gemma3 NaN embeddings with mixed-length sliding windows are unrelated failures." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 72, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:29942", + "issue:30064", + "issue:43504", + "issue:43756", + "issue:43824", + "issue:44206", + "issue:44279", + "issue:44479", + "issue:44610", + "issue:44857", + "issue:44871", + "issue:44977", + "issue:45072", + "issue:45103", + "issue:45229", + "issue:45372", + "issue:45412", + "issue:45440" + ], + "soft_pairs": [ + "issue:44871|issue:44977", + "issue:44206|issue:44479", + "issue:43756|issue:45440", + "issue:44610|issue:45412", + "issue:45103|issue:45372", + "issue:29942|issue:30064", + "issue:43504|issue:44857", + "issue:43824|issue:44279", + "issue:45072|issue:45229" + ], + "prepared_review_unit_hash": "f096b0a1e6f185d7d94715c412b72000a6efadcfe83e40407c7d8b9ad66017e0", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13134, + "estimated_input_tokens": 3284, + "estimated_eval_tokens": 6824 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is mostly a loose similarity cluster rather than true duplicates. 
All soft edges should be rejected: they pair different models, different failure modes, or different subsystems.", + "confidence": 0.88, + "canonical_issue_reason": "issue:44279 is the most umbrella-like of the set (\u201cDependency issue with transformers\u201d), so it is the closest thing to a cluster anchor, though it is still too generic to be a true duplicate target for the more specific bugs here.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44279 is the broadest and most reusable issue title in the group, making it the best representative if one must be chosen.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44871", + "right": "issue:44977", + "accept": false, + "reason": "Different model families and different bugs: Gemma-3 EOS-token config mismatch vs Qwen3.5 flash-attention generation failure." + }, + { + "left": "issue:44206", + "right": "issue:44479", + "accept": false, + "reason": "Both are regressions, but they affect different models and code paths: a feature extractor arg crash vs video-input regression in Qwen VLMs." + }, + { + "left": "issue:43756", + "right": "issue:45440", + "accept": false, + "reason": "Both mention model-implementation divergence, but they concern different architectures and distinct defects, so they are not the same underlying bug." + }, + { + "left": "issue:44610", + "right": "issue:45412", + "accept": false, + "reason": "Processor input-size mismatch and model memory-release leak are unrelated failure modes." + }, + { + "left": "issue:45103", + "right": "issue:45372", + "accept": false, + "reason": "Both are loading/import problems, but one is a docstring-annotation crash and the other is a missing dependency symbol during Gemma 4 processor loading." + }, + { + "left": "issue:29942", + "right": "issue:30064", + "accept": false, + "reason": "Flash Attention test failures and void segmentation-map processing are unrelated issues." 
+ }, + { + "left": "issue:43504", + "right": "issue:44857", + "accept": false, + "reason": "A pretrained preset legacy-field loading bug is not the same as an AMP/CUDA loss crash." + }, + { + "left": "issue:43824", + "right": "issue:44279", + "accept": false, + "reason": "A specific missing import for Qwen2_5_VL is not the same as a vague general dependency issue." + }, + { + "left": "issue:45072", + "right": "issue:45229", + "accept": false, + "reason": "Bfloat16 dtype mismatches and multi-GPU CUDA OOM are different concrete problems." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 73, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:29127", + "issue:32090", + "issue:33290", + "issue:33357", + "issue:34567", + "issue:39692", + "issue:43262", + "issue:43334", + "issue:43526", + "issue:43531", + "issue:43653", + "issue:44186", + "issue:44206", + "issue:44442", + "issue:44464", + "issue:44977", + "issue:45245", + "issue:45491" + ], + "soft_pairs": [ + "issue:33357|issue:39692", + "issue:29127|issue:34567", + "issue:32090|issue:33290", + "issue:43334|issue:43531", + "issue:43653|issue:44442", + "issue:44977|issue:45245", + "issue:43526|issue:44186", + "issue:44464|issue:45491", + "issue:43262|issue:44206" + ], + "prepared_review_unit_hash": "9954cd716c46eee52c4c5dfd20d1941c64897272011e068323d43d4c6d9de9fe", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + 
"serialized_chars": 13241, + "estimated_input_tokens": 3311, + "estimated_eval_tokens": 6878 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster appears to be a loose thematic grouping of unrelated Hugging Face issues; none of the soft pairs look like the same underlying bug or change, so no duplicates should be merged.", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:33357", + "right": "issue:39692", + "accept": false, + "reason": "Both are multimodal model issues, but one is a MacOS bus error for CLIP and the other is a SigLIP2 docs/model-processor mismatch with quantization errors; different failure modes and code paths." + }, + { + "left": "issue:29127", + "right": "issue:34567", + "accept": false, + "reason": "LayoutLMv3 error-message clarity and TrainerState token-count tracking are unrelated areas with different symptoms and fixes." + }, + { + "left": "issue:32090", + "right": "issue:33290", + "accept": false, + "reason": "Trainer NoneType broadcast failure and DeepSpeed Adafactor OOM are distinct training/runtime problems; not the same bug." + }, + { + "left": "issue:43334", + "right": "issue:43531", + "accept": false, + "reason": "Qwen3-VL config loading pad_token_id issue and Qwen3-MoE sliding_window behavior are separate model-specific bugs." + }, + { + "left": "issue:43653", + "right": "issue:44442", + "accept": false, + "reason": "BigBirdTokenizer special-token registration/empty decode and AutoTokenizer failing to load FastSpeech2ConformerTokenizer are different tokenizer-loading defects." + }, + { + "left": "issue:44977", + "right": "issue:45245", + "accept": false, + "reason": "Flash-attention generation bug for Qwen3.5 and a category-cardinality runtime error are unrelated." 
+ }, + { + "left": "issue:43526", + "right": "issue:44186", + "accept": false, + "reason": "BEiT image processor reduce_labels behavior and LayoutLMv2 NER/batching crashes involve different preprocessing components and failures." + }, + { + "left": "issue:44464", + "right": "issue:45491", + "accept": false, + "reason": "Compiled-forward chunked generation inconsistency and Gemma3 mixed-length sliding-window NaNs are both generation-related but arise from different code paths and symptoms." + }, + { + "left": "issue:43262", + "right": "issue:44206", + "accept": false, + "reason": "Audio chat-template sampling-rate default mismatch and LasrFeatureExtractor unsupported center arg are separate audio-processing bugs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 74, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:29127", + "issue:29942", + "issue:33357", + "issue:35141", + "issue:36010", + "issue:42915", + "issue:43232", + "issue:43381", + "issue:43388", + "issue:43526", + "issue:43701", + "issue:43761", + "issue:43931", + "issue:44079", + "issue:44442", + "issue:44857", + "issue:45072" + ], + "soft_pairs": [ + "issue:43381|issue:43701", + "issue:43761|issue:44079", + "issue:29942|issue:36010", + "issue:29127|issue:33357", + "issue:43232|issue:43388", + "issue:29127|issue:35141", + "issue:43526|issue:44857", + "issue:44442|issue:45072", + "issue:42915|issue:43931" + ], + "prepared_review_unit_hash": "6a02a9e666c3ac078836337cdea0cf2585a162a2f47db93c02290901581df315", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 
689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12553, + "estimated_input_tokens": 3139, + "estimated_eval_tokens": 6534 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are a bundle of unrelated closed issues that only match superficially at the text-similarity level; none of the soft pairs look like the same underlying bug or fix.", + "confidence": 0.91, + "canonical_issue_reason": "No true duplicate cluster is evident. If one issue has to serve as the anchor, issue #43761 is the most concrete, self-contained regression report with a clearly stated model behavior mismatch.", + "canonical_pr_reason": null, + "best_issue_reason": "#43761 is the strongest standalone issue report: specific symptom, clear expected/actual behavior, and a narrow code path. The others are similarly valid but not better representatives of a duplicate set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43381", + "right": "issue:43701", + "accept": false, + "reason": "Eval-mode gradient checkpointing and resume_from_checkpoint key mismatch are different failure modes in different parts of training workflow." + }, + { + "left": "issue:43761", + "right": "issue:44079", + "accept": false, + "reason": "One is a CLIPVisionModel hidden-states regression; the other is ModelOutput key assignment logic. Related area, but not the same bug." + }, + { + "left": "issue:29942", + "right": "issue:36010", + "accept": false, + "reason": "Flash Attention 2 test failures and a GenerationMixin import error are unrelated subsystems and symptoms." + }, + { + "left": "issue:29127", + "right": "issue:33357", + "accept": false, + "reason": "Error-message clarity for LayoutLMv3 boxes vs. a MacOS bus error in CLIP are completely different issues." 
+ }, + { + "left": "issue:43232", + "right": "issue:43388", + "accept": false, + "reason": "Generation kwargs sync logic and gather_for_metrics label truncation are separate code paths and defects." + }, + { + "left": "issue:29127", + "right": "issue:35141", + "accept": false, + "reason": "LayoutLMv3 validation messaging and token-embedding reinitialization are unrelated bugs." + }, + { + "left": "issue:43526", + "right": "issue:44857", + "accept": false, + "reason": "BeitImageProcessorFast label reduction and LwDetrImageLoss AMP/CUDA crashing are different model/component failures." + }, + { + "left": "issue:44442", + "right": "issue:45072", + "accept": false, + "reason": "Tokenizer loading failure and bfloat16 dtype mismatches in inference are not the same underlying change or bug." + }, + { + "left": "issue:42915", + "right": "issue:43931", + "accept": false, + "reason": "Qwen3Moe FineGrainedFP8Config failure and Qwen3-VL weight-shape mismatch are distinct model-loading/runtime issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 75, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43316", + "issue:43525", + "issue:43526", + "issue:43723", + "issue:43761", + "issue:43931", + "issue:43994", + "issue:44188", + "issue:44206", + "issue:44479", + "issue:44743", + "issue:44811", + "issue:45003", + "issue:45216", + "issue:45290", + "issue:45440", + "issue:45479" + ], + "soft_pairs": [ + "issue:43723|issue:45479", + "issue:43526|issue:43994", + "issue:45003|issue:45440", + "issue:44743|issue:44811", + "issue:44479|issue:45290", + "issue:43316|issue:43525", + "issue:43761|issue:44188", + "issue:43994|issue:44206", + "issue:43931|issue:45216" + ], + "prepared_review_unit_hash": "ba868f1870e437298f38de5ded15d6fd76bc44ec7c0f174814e5299b1d456d70", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12706, + "estimated_input_tokens": 3177, + "estimated_eval_tokens": 6610 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the soft-similarity pairs are all false positives, covering unrelated regressions in configs, tokenizers, vision/audio processors, model loading/saving, and attention behavior. 
No duplicate/canonical issue emerges.", + "confidence": 0.97, + "canonical_issue_reason": "No canonical issue: the items do not describe the same underlying bug, and the strongest-looking pairs still differ by model/component and failure mode.", + "canonical_pr_reason": "No PRs are present in this cluster.", + "best_issue_reason": "No single issue is a good representative; the cluster spans several unrelated topics, so selecting one would be arbitrary.", + "best_pr_reason": "No PRs are present in this cluster.", + "soft_edge_verdicts": [ + { + "left": "issue:43723", + "right": "issue:45479", + "accept": false, + "reason": "Tokenizer loading in v5 vs. degenerate classification loss; unrelated components and symptoms." + }, + { + "left": "issue:43526", + "right": "issue:43994", + "accept": false, + "reason": "BeitImageProcessorFast label reduction bug vs. SigLIP2 model inference issue; different code paths." + }, + { + "left": "issue:45003", + "right": "issue:45440", + "accept": false, + "reason": "Unsafe sys.modules access vs. DeepSeekV3MoE implementation divergence; not the same bug." + }, + { + "left": "issue:44743", + "right": "issue:44811", + "accept": false, + "reason": "Recurrent cache-state reset bug vs. Whisper batch_decode skip_special_tokens handling; unrelated functionality." + }, + { + "left": "issue:44479", + "right": "issue:45290", + "accept": false, + "reason": "Video-input regression in Qwen VL models vs. chat-template crash on tool-call messages; different failure modes." + }, + { + "left": "issue:43316", + "right": "issue:43525", + "accept": false, + "reason": "Both are config/API issues, but for different models and missing fields; not a shared underlying defect." + }, + { + "left": "issue:43761", + "right": "issue:44188", + "accept": false, + "reason": "CLIPVision hidden_states regression vs. attention-kernel divergence under torch.compile; distinct code paths." 
+ }, + { + "left": "issue:43994", + "right": "issue:44206", + "accept": false, + "reason": "SigLIP2 nonsensical outputs vs. LasrFeatureExtractor center-arg crash; different subsystems and symptoms." + }, + { + "left": "issue:43931", + "right": "issue:45216", + "accept": false, + "reason": "Model weight-shape mismatch on load vs. incorrect checkpoint save_pretrained regression; loading and saving are different problems." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 76, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:29942", + "issue:33290", + "issue:33666", + "issue:34567", + "issue:39692", + "issue:43575", + "issue:43646", + "issue:43673", + "issue:43701", + "issue:44077", + "issue:44387", + "issue:44442", + "issue:44743", + "issue:44898", + "issue:44964", + "issue:45440", + "issue:45491" + ], + "soft_pairs": [ + "issue:43575|issue:43701", + "issue:43646|issue:44077", + "issue:44964|issue:45440", + "issue:39692|issue:45491", + "issue:43575|issue:44387", + "issue:43673|issue:44743", + "issue:29942|issue:33666", + "issue:44442|issue:44898", + "issue:33290|issue:34567" + ], + "prepared_review_unit_hash": "a57ca131894aa5529f488f07d2d0b7fba399a89722dc101d14b1297088eb674d", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12408, + "estimated_input_tokens": 3102, + "estimated_eval_tokens": 6460 + }, + "overflow_policy": 
"truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are mostly unrelated; several share broad themes like OOMs, cache/state handling, or Transformers v5 regressions, but none look like the same underlying bug/change closely enough to merge.", + "confidence": 0.93, + "canonical_issue_reason": "No pair looks like a true duplicate cluster. If one issue must serve as the representative triage anchor, issue 45440 is the most active and concretely described open bug report, with explicit inbound references and a clear model-specific divergence.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45440 is the strongest standalone issue to keep as the lead reference: it is open, has inbound references, and describes a specific reproducible correctness gap in a native model implementation.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43575", + "right": "issue:43701", + "accept": false, + "reason": "Both mention failure during loading/resume, but one is a tensor-parallel OOM for a large Qwen2 model and the other is a checkpoint key mismatch; different failure modes and code paths." + }, + { + "left": "issue:43646", + "right": "issue:44077", + "accept": false, + "reason": "Both relate to initialization changes in Transformers 5.0.0, but one is a broad custom-model init regression and the other is a specific patchtsmixer post_init policy issue; not the same bug." + }, + { + "left": "issue:44964", + "right": "issue:45440", + "accept": false, + "reason": "Different models and different problems: Phi-4 multimodal loading failure versus DeepSeekV3 implementation divergence." + }, + { + "left": "issue:39692", + "right": "issue:45491", + "accept": false, + "reason": "One is a documentation example error for SigLIP2; the other is a runtime NaN bug in Gemma3 sliding-window attention. No shared underlying change." 
+ }, + { + "left": "issue:43575", + "right": "issue:44387", + "accept": false, + "reason": "Both are OOM reports, but one is caused by tensor-parallel loading of a specific model and the other by increased reserved memory under int4 quantization in Transformers 5.x; too different to be duplicates." + }, + { + "left": "issue:43673", + "right": "issue:44743", + "accept": false, + "reason": "Both involve cache/state semantics, but one is a missing GenerationMixin cache in chunked prefill and the other is recurrent states resetting in modular_qwen3_5; different implementations and symptoms." + }, + { + "left": "issue:29942", + "right": "issue:33666", + "accept": false, + "reason": "Flash Attention 2 test failures and Qwen2-VL multi-GPU training are unrelated beyond both touching training/inference performance areas." + }, + { + "left": "issue:44442", + "right": "issue:44898", + "accept": false, + "reason": "Tokenizer loading failure for FastSpeech2ConformerTokenizer is unrelated to Perceiver interpolation failure on non-default image sizes." + }, + { + "left": "issue:33290", + "right": "issue:34567", + "accept": false, + "reason": "AdaFactor/DeepSpeed OOM and TrainerState token-count tracking are completely different issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 77, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33290", + "issue:34689", + "issue:35141", + "issue:39692", + "issue:41628", + "issue:43381", + "issue:43493", + "issue:43611", + "issue:43749", + "issue:43827", + "issue:43873", + "issue:43931", + "issue:44077", + "issue:44206", + "issue:44479", + "issue:44871", + "issue:45372" + ], + "soft_pairs": [ + "issue:41628|issue:45372", + "issue:34689|issue:43611", + "issue:43493|issue:43827", + "issue:43381|issue:43873", + "issue:44077|issue:44479", + "issue:39692|issue:44206", + "issue:33290|issue:35141", + "issue:33290|issue:34689", + "issue:43873|issue:44871", + "issue:43749|issue:43931" + ], + "prepared_review_unit_hash": "6ff20feea06782d44303a48c2c251a07c0ba81bbc85ebfd0546a8393ede63363", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12916, + "estimated_input_tokens": 3229, + "estimated_eval_tokens": 6714 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a grab bag of unrelated Transformers issues: model-loading regressions, docs breakage, quantization/offloading, and a few model-specific bugs. 
None of the proposed soft pairs look like true duplicates of the same underlying change or bug.", + "confidence": 0.93, + "canonical_issue_reason": "issue:43873 is the broadest and still-open item, with inbound references and a general quantization/offloading failure description, so it is the best single representative if one must be chosen.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43873 is the most central representative of the cluster\u2019s only broad infrastructure-style problem; the others are narrower, model- or version-specific regressions and should not be collapsed into it.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41628", + "right": "issue:45372", + "accept": false, + "reason": "Both are import errors, but they concern different symbols, different upstream causes, and different model flows; not the same bug." + }, + { + "left": "issue:34689", + "right": "issue:43611", + "accept": false, + "reason": "Both are model-loading regressions, but one is Llama 3.2 Vision-specific and the other is a base_model_prefix loading change in v5; too different to merge." + }, + { + "left": "issue:43493", + "right": "issue:43827", + "accept": false, + "reason": "SigLIP2 implementation mismatch and docs still using pipeline() after v5 removals are unrelated problem types." + }, + { + "left": "issue:43381", + "right": "issue:43873", + "accept": false, + "reason": "Gradient checkpointing in eval mode and quantization/offloading failures affect different code paths and fixes." + }, + { + "left": "issue:44077", + "right": "issue:44479", + "accept": false, + "reason": "A patchtsmixer post_init restriction is unrelated to the video-input regression in Qwen multimodal models." + }, + { + "left": "issue:39692", + "right": "issue:44206", + "accept": false, + "reason": "SigLIP2 docs/example errors are not the same bug as the LasrFeatureExtractor center-argument regression." 
+ }, + { + "left": "issue:33290", + "right": "issue:35141", + "accept": false, + "reason": "Adafactor OOM in DeepSpeed and token embedding reinitialization in post_init are different mechanisms and code paths." + }, + { + "left": "issue:33290", + "right": "issue:34689", + "accept": false, + "reason": "These share only a broad theme of model loading/runtime failure; the concrete bugs and affected features are unrelated." + }, + { + "left": "issue:43873", + "right": "issue:44871", + "accept": false, + "reason": "Quantization/offloading behavior and Gemma eos_token_id inconsistency are separate configuration issues." + }, + { + "left": "issue:43749", + "right": "issue:43931", + "accept": false, + "reason": "FSDP CPU RAM efficient loading and Qwen3-VL shape mismatch are different loading failures with different root causes." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 78, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33290", + "issue:36331", + "issue:42175", + "issue:43316", + "issue:43381", + "issue:43540", + "issue:43643", + "issue:43856", + "issue:44162", + "issue:44190", + "issue:44496", + "issue:44514", + "issue:44792", + "issue:44855", + "issue:44877", + "issue:44913", + "issue:45137" + ], + "soft_pairs": [ + "issue:44190|issue:44496", + "issue:43643|issue:44913", + "issue:43540|issue:44514", + "issue:43316|issue:44877", + "issue:42175|issue:43316", + "issue:44162|issue:44792", + "issue:43381|issue:43856", + "issue:44855|issue:45137", + "issue:33290|issue:36331" + ], + "prepared_review_unit_hash": "8d454e6be231daa2dfc949a1f9402eb4d9f3b91153afa8c7aef214af00e4f27a", + 
"component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12510, + "estimated_input_tokens": 3128, + "estimated_eval_tokens": 6512 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are largely unrelated single issues across different subsystems; none of the soft pairs look like true duplicates. The clearest standalone bug report is the `compute_loss()` keyword regression.", + "confidence": 0.18, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "Issue 36331 is the most concrete and actionable: it has a precise traceback and a clear behavioral regression to fix.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44190", + "right": "issue:44496", + "accept": false, + "reason": "Different bugs: one is a local dataset loading failure in an example script, the other is an unrecognized model/config loading error." + }, + { + "left": "issue:43643", + "right": "issue:44913", + "accept": false, + "reason": "Both involve config loading, but the failures differ: missing fields from `trust_remote_code=True` vs. a `rotary_pct` value not persisting on reload." + }, + { + "left": "issue:43540", + "right": "issue:44514", + "accept": false, + "reason": "Both are Qwen multimodal issues, but they hit different code paths and failure modes: video-input processing vs. batched chat template padding." + }, + { + "left": "issue:43316", + "right": "issue:44877", + "accept": false, + "reason": "Both are config/API-related, but they concern different models and different problems (`Gemma3TextConfig` API mismatch vs. strict config blocking `granite_speech`)." 
+ }, + { + "left": "issue:42175", + "right": "issue:43316", + "accept": false, + "reason": "Unrelated: packaging/backend dependency omission versus a config API discrepancy." + }, + { + "left": "issue:44162", + "right": "issue:44792", + "accept": false, + "reason": "Different model/test failures with no shared concrete code-path or bug." + }, + { + "left": "issue:43381", + "right": "issue:43856", + "accept": false, + "reason": "One is about gradient checkpointing in eval mode; the other is MoE training memory inefficiency. Related themes, but not the same bug." + }, + { + "left": "issue:44855", + "right": "issue:45137", + "accept": false, + "reason": "Different failures: Python 3.13 parsing/`@torch.jit.script` indentation issue versus DeepSpeed ZeRO3 deque underflow." + }, + { + "left": "issue:33290", + "right": "issue:36331", + "accept": false, + "reason": "Completely different problems: DeepSpeed/Adafactor OOM versus a `compute_loss()` signature mismatch." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 79, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:16998", + "issue:22355", + "issue:25251", + "issue:28282", + "issue:34689", + "issue:36010", + "issue:41950", + "issue:43065", + "issue:43316", + "issue:43388", + "issue:43404", + "issue:43441", + "issue:43475", + "issue:43688", + "issue:43716", + "issue:44617", + "issue:45127", + "issue:45200" + ], + "soft_pairs": [ + "issue:43388|issue:43688", + "issue:34689|issue:36010", + "issue:43316|issue:43716", + "issue:43404|issue:43441", + "issue:43065|issue:44617", + "issue:41950|issue:43475", + "issue:45127|issue:45200", + 
"issue:16998|issue:22355", + "issue:25251|issue:28282" + ], + "prepared_review_unit_hash": "aa35ac623b81766c52209284a77d0d9034b44e2a312610d40b3e1ffe42f3b597", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13052, + "estimated_input_tokens": 3263, + "estimated_eval_tokens": 6782 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the items cover unrelated transformers bugs, import errors, model-specific regressions, and pipeline quirks. None of the soft pairs look like the same underlying issue, so there is no good canonical item for the cluster.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43388", + "right": "issue:43688", + "accept": false, + "reason": "Both mention metrics/loss, but one is a dataloader/metric-gathering bug with tuple labels and the other is an auxiliary-loss normalization issue in different models. Different code paths and fixes." + }, + { + "left": "issue:34689", + "right": "issue:36010", + "accept": false, + "reason": "Both are import/load failures, but one is a Transformers 4.46.2 model-loading regression for Llama 3.2 Vision and the other is a missing `transformers.generation.GenerationMixin` import. Not the same bug." + }, + { + "left": "issue:43316", + "right": "issue:43716", + "accept": false, + "reason": "Different model families and failures: Gemma3TextConfig API inconsistency versus Mistral-3 image-preprocessor dtype mismatch. No shared underlying code-path problem." 
+ }, + { + "left": "issue:43404", + "right": "issue:43441", + "accept": false, + "reason": "Both concern Mistral-family models, but one is about tied `lm_head` weights in `Mistral3ForConditionalGeneration` and the other is a FlashAttention failure in Ministral-3. Distinct issues." + }, + { + "left": "issue:43065", + "right": "issue:44617", + "accept": false, + "reason": "Both are about Sam3Video, but one reports a dummy `nn.Conv2d` in `Sam3PixelDecoder` while the other is CUDA OOM. Different root causes." + }, + { + "left": "issue:41950", + "right": "issue:43475", + "accept": false, + "reason": "Both touch video/SAM3 code, but one is a pipeline selecting image processors and the other is a missing `fpn_position_embeddings` attribute on an output object. Separate bugs." + }, + { + "left": "issue:45127", + "right": "issue:45200", + "accept": false, + "reason": "Both involve token/embedding-related behavior, but one is LoRA merging with extended vocabulary causing collapse and the other is `mm_token_type_ids` defaulting for Gemma 4 text-only fine-tuning. Not the same change." + }, + { + "left": "issue:16998", + "right": "issue:22355", + "accept": false, + "reason": "Completely unrelated: a `model_max_length` question for DeBERTa-V3 versus a missing `transformers.onnx` module import." + }, + { + "left": "issue:25251", + "right": "issue:28282", + "accept": false, + "reason": "Different subsystems and symptoms: pipeline `top_k` output nesting versus `AutoModel` failing because PyTorch is absent. No shared underlying bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 80, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43295", + "issue:43452", + "issue:43550", + "issue:43606", + "issue:43646", + "issue:43701", + "issue:43761", + "issue:43825", + "issue:44077", + "issue:44242", + "issue:44479", + "issue:44496", + "issue:44534", + "issue:44734", + "issue:44871", + "issue:44913", + "issue:45406" + ], + "soft_pairs": [ + "issue:43761|issue:44479", + "issue:43295|issue:43825", + "issue:44734|issue:45406", + "issue:43295|issue:44871", + "issue:44077|issue:44534", + "issue:43550|issue:43701", + "issue:43452|issue:44496", + "issue:43606|issue:43701", + "issue:43646|issue:44913", + "issue:43761|issue:44242" + ], + "prepared_review_unit_hash": "d11b8e71f3b074bfa5d6c3da796298ff2480343b7b7734606707897a77536596", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12957, + "estimated_input_tokens": 3240, + "estimated_eval_tokens": 6736 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are not a duplicate cluster; they span unrelated regressions in processor/tokenizer handling, model loading, serving, config reload, training, and video/model-specific failures. 
None of the soft edges look like the same underlying bug or change.", + "confidence": 0.97, + "canonical_issue_reason": "No single canonical issue fits: the reports describe distinct failures in different code paths and subsystems.", + "canonical_pr_reason": null, + "best_issue_reason": "If forced to pick a representative, issue 43646 is the broadest user-facing regression, but it still does not unify the rest of the set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43761", + "right": "issue:44479", + "accept": false, + "reason": "Both are regressions, but one is CLIPVision hidden_states output and the other is video-input handling for Qwen VL models; different models and failure modes." + }, + { + "left": "issue:43295", + "right": "issue:43825", + "accept": false, + "reason": "Processor/tokenizer access regression vs pipeline task-support error message; unrelated symptoms and code paths." + }, + { + "left": "issue:44734", + "right": "issue:45406", + "accept": false, + "reason": "Both involve transformers serve, but one is KV-cache continuation indexing and the other is Gemma4Processor missing _tokenizer; different bugs." + }, + { + "left": "issue:43295", + "right": "issue:44871", + "accept": false, + "reason": "processor.tokenizer breakage vs Gemma-3 eos_token_id mismatch; separate model/config issues." + }, + { + "left": "issue:44077", + "right": "issue:44534", + "accept": false, + "reason": "Optional post_init validation for patchtsmixer is not the same as non-persistent buffer initialization junk in v5." + }, + { + "left": "issue:43550", + "right": "issue:43701", + "accept": false, + "reason": "torch.compile + SDPA failure for Bamba is unrelated to resume_from_checkpoint key mismatch." 
+ }, + { + "left": "issue:43452", + "right": "issue:44496", + "accept": false, + "reason": "Both are loading failures, but one is gguf_file support breaking from_pretrained paths and the other is an unrecognized model config lacking model_type; different root causes." + }, + { + "left": "issue:43606", + "right": "issue:43701", + "accept": false, + "reason": "CPU offload device mismatch for Bark is unrelated to checkpoint resume key mapping." + }, + { + "left": "issue:43646", + "right": "issue:44913", + "accept": false, + "reason": "Custom model initialization breakage in v5 is not the same as GPTNeoXConfig rotary_pct not persisting on reload." + }, + { + "left": "issue:43761", + "right": "issue:44242", + "accept": false, + "reason": "Hidden_states regression in CLIPVisionModel is unrelated to load-balancing loss omission when output_router_logits=False." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 81, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42617", + "issue:43065", + "issue:43525", + "issue:43531", + "issue:43575", + "issue:43742", + "issue:43873", + "issue:43906", + "issue:43931", + "issue:44060", + "issue:44479", + "issue:44521", + "issue:44610", + "issue:44734", + "issue:44792", + "issue:44977", + "issue:45290", + "issue:45381" + ], + "soft_pairs": [ + "issue:43906|issue:45290", + "issue:43931|issue:44060", + "issue:42617|issue:43575", + "issue:43575|issue:43742", + "issue:44521|issue:45381", + "issue:43525|issue:43531", + "issue:44479|issue:44734", + "issue:44792|issue:44977", + "issue:43065|issue:44610", + "issue:43575|issue:43873" + ], + 
"prepared_review_unit_hash": "bb5280220eadea6a0f7118bb43d958b1e926fad19f54fb0202f0d309c682bf5d", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13274, + "estimated_input_tokens": 3319, + "estimated_eval_tokens": 6894 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The items are mostly unrelated issue reports spanning different models, errors, and subsystems. None of the proposed soft pairs look like true duplicates or the same fix target, so all soft-edge merges should be rejected.", + "confidence": 0.93, + "canonical_issue_reason": "issue:44521 is the strongest representative by activity and breadth: it is open, has the most inbound references/discussion, and describes a concrete multimodal/template bug with clear reproduction and impact.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44521 is the best issue candidate because it is active, well-scoped, and central enough to represent the cluster better than the many one-off model-specific reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43906", + "right": "issue:45290", + "accept": false, + "reason": "Both involve chat/template behavior, but one is a reproduction of an older issue and the other is a tokenizer crash on assistant tool-call messages. Different failure modes and likely different code paths." + }, + { + "left": "issue:43931", + "right": "issue:44060", + "accept": false, + "reason": "Different models and different bugs: weight-shape mismatch in Qwen3-VL vs a tied-weights warning in Qwen3-Next. Not the same underlying defect." 
+ }, + { + "left": "issue:42617", + "right": "issue:43575", + "accept": false, + "reason": "A 3d_parallel.py runtime problem is unrelated to Qwen2-57B tensor-parallel OOM. Same broad runtime area, but not the same bug." + }, + { + "left": "issue:43575", + "right": "issue:43742", + "accept": false, + "reason": "Both are model-loading issues, but one is OOM during TP loading of Qwen2-57B and the other is a key error for MobileLLM-125M. Different symptoms, models, and fixes." + }, + { + "left": "issue:44521", + "right": "issue:45381", + "accept": false, + "reason": "Both concern multimodal/video preprocessing, but one is all-zero assistant masks from apply_chat_template and the other is wrong vision_position_ids. Similar area, not the same concrete bug." + }, + { + "left": "issue:43525", + "right": "issue:43531", + "accept": false, + "reason": "Different models and unrelated problems: missing pad_token_id on Llama4Config versus a sliding_window issue on Qwen3-MoE." + }, + { + "left": "issue:44479", + "right": "issue:44734", + "accept": false, + "reason": "Video-input regression in several Qwen VL variants versus a serve-side KV-cache continuation crash from wrong tensor indexing. Distinct code paths and failure modes." + }, + { + "left": "issue:44792", + "right": "issue:44977", + "accept": false, + "reason": "Janus image-generation test failure is not the same issue as Qwen3.5 flash-attention generation breakage. Different models and symptoms." + }, + { + "left": "issue:43065", + "right": "issue:44610", + "accept": false, + "reason": "Both touch vision models, but one is a dummy Conv2d in Sam3PixelDecoder and the other is an OmDet-Turbo processor/model input-size mismatch. Not the same defect." + }, + { + "left": "issue:43575", + "right": "issue:43873", + "accept": false, + "reason": "OOM from tensor-parallel model loading is not the same as offloading failing under quantization. Related resource-management theme, but different underlying bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 82, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:32090", + "issue:35141", + "issue:41720", + "issue:43388", + "issue:43582", + "issue:43650", + "issue:43827", + "issue:43994", + "issue:44190", + "issue:44220", + "issue:44464", + "issue:44534", + "issue:44871", + "issue:44913", + "issue:44933", + "issue:44964", + "issue:45245", + "issue:45362" + ], + "soft_pairs": [ + "issue:44933|issue:45362", + "issue:44464|issue:44871", + "issue:32090|issue:35141", + "issue:41720|issue:45245", + "issue:43827|issue:43994", + "issue:44534|issue:44913", + "issue:43388|issue:43582", + "issue:43650|issue:44220", + "issue:44190|issue:44964" + ], + "prepared_review_unit_hash": "50ff33f9c7c06ce96e196fa703ef6252fa0cb070e0bc5e311ac9de7ca6c0b645", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13089, + "estimated_input_tokens": 3273, + "estimated_eval_tokens": 6802 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the issues cover unrelated areas (trainer/distributed, embedding resize, device mapping, docs, model loading, configs, generation, etc.). 
None of the soft pairs appear to be the same underlying bug or change, so all candidate duplicate edges should be rejected.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44933", + "right": "issue:45362", + "accept": false, + "reason": "Different problems: an import/API issue in image_utils vs a Qwen3.5 chat crash." + }, + { + "left": "issue:44464", + "right": "issue:44871", + "accept": false, + "reason": "Different failure modes: compiled-forward chunked generation mismatch vs Gemma-3 eos_token_id config inconsistency." + }, + { + "left": "issue:32090", + "right": "issue:35141", + "accept": false, + "reason": "Trainer broadcast TypeError from NoneType is unrelated to embedding reinitialization after resize." + }, + { + "left": "issue:41720", + "right": "issue:45245", + "accept": false, + "reason": "Qwen3 auto device-mapping cuda assert is unrelated to the 2^24 category-count limit error." + }, + { + "left": "issue:43827", + "right": "issue:43994", + "accept": false, + "reason": "One is a docs update for removed pipeline() examples; the other is a model/pipeline runtime correctness bug." + }, + { + "left": "issue:44534", + "right": "issue:44913", + "accept": false, + "reason": "Non-persistent buffer junk on v5 is not the same as GPTNeoXConfig rotary_pct not persisting on reload." + }, + { + "left": "issue:43388", + "right": "issue:43582", + "accept": false, + "reason": "Metric-gather label truncation and Apple Silicon allocator warmup TypeError are unrelated." + }, + { + "left": "issue:43650", + "right": "issue:44220", + "accept": false, + "reason": "The placeholder 'ADD THE DATA' issue is unrelated to the fbank feature extraction bug." + }, + { + "left": "issue:44190", + "right": "issue:44964", + "accept": false, + "reason": "Both are loading-related, but they concern different scripts/models and different root causes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 83, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43335", + "issue:43540", + "issue:43550", + "issue:43644", + "issue:43761", + "issue:43976", + "issue:44077", + "issue:44190", + "issue:44355", + "issue:44496", + "issue:44734", + "issue:44743", + "issue:44855", + "issue:44877", + "issue:44913", + "issue:45127", + "issue:45357", + "issue:45372" + ], + "soft_pairs": [ + "issue:43761|issue:44743", + "issue:45127|issue:45357", + "issue:44355|issue:44855", + "issue:43540|issue:44734", + "issue:44496|issue:45372", + "issue:43976|issue:44855", + "issue:43644|issue:44913", + "issue:43335|issue:44855", + "issue:44077|issue:44877", + "issue:43550|issue:44190" + ], + "prepared_review_unit_hash": "ea394f716f1284436f1ba8c5811653863838379df5916da9f907ce7ae7258dbf", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13623, + "estimated_input_tokens": 3406, + "estimated_eval_tokens": 7068 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These reports cover unrelated bugs across different models, configs, loaders, and runtime paths. 
None of the soft pairs look like the same underlying defect, so they should remain separate.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "If one issue must be highlighted, issue 45372 is the broadest runtime regression with active references, but it is not a duplicate anchor for the rest of the set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43761", + "right": "issue:44743", + "accept": false, + "reason": "Different model families and failure modes: CLIPVisionModel hidden_states regression vs Qwen3 recurrent state reset in modular code." + }, + { + "left": "issue:45127", + "right": "issue:45357", + "accept": false, + "reason": "Both involve model save/load behavior, but one is LoRA merging with tied embeddings and the other is incorrect visual encoder keys for Qwen3.5; not the same bug." + }, + { + "left": "issue:44355", + "right": "issue:44855", + "accept": false, + "reason": "Unrelated import/runtime errors: compiled Python files vs a Python 3.13 TorchScript decorator parsing problem in DebertaV2Model." + }, + { + "left": "issue:43540", + "right": "issue:44734", + "accept": false, + "reason": "Different subsystems and symptoms: Qwen3OmniMoe video input ValueError vs transformers serve KV-cache continuation indexing crash." + }, + { + "left": "issue:44496", + "right": "issue:45372", + "accept": false, + "reason": "Separate loading failures: unrecognized model/config.json handling vs a mistral_common ReasoningEffort import break in Gemma 4 processor loading." + }, + { + "left": "issue:43976", + "right": "issue:44855", + "accept": false, + "reason": "Version/platform incompatibility in Transformers 5.1.0 vs a specific Python 3.13 TorchScript parsing regression; distinct causes." 
+ }, + { + "left": "issue:43644", + "right": "issue:44913", + "accept": false, + "reason": "Different configuration persistence bugs: non-persistent buffer initialization junk vs GPTNeoX rotary_pct not preserved on reload." + }, + { + "left": "issue:43335", + "right": "issue:44855", + "accept": false, + "reason": "SwitchTransformers sparse layer creation on single-layer models is unrelated to the DebertaV2 Python 3.13 import/parsing issue." + }, + { + "left": "issue:44077", + "right": "issue:44877", + "accept": false, + "reason": "PatchTSMixer post_init acceptance and granite_speech strict config loading are separate config-validation issues with different targets." + }, + { + "left": "issue:43550", + "right": "issue:44190", + "accept": false, + "reason": "Bamba-9B-v2 torch.compile/SDPA failure is unrelated to local dataset loading in the image classification no-trainer script." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 84, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36246", + "issue:38175", + "issue:38617", + "issue:39692", + "issue:41628", + "issue:41950", + "issue:43065", + "issue:43116", + "issue:43295", + "issue:43404", + "issue:44016", + "issue:44186", + "issue:44190", + "issue:44734", + "issue:44871", + "issue:45216", + "issue:45245", + "issue:45276" + ], + "soft_pairs": [ + "issue:43116|issue:44190", + "issue:43295|issue:45276", + "issue:41950|issue:44016", + "issue:36246|issue:41628", + "issue:43404|issue:45216", + "issue:36246|issue:38617", + "issue:39692|issue:44190", + "issue:44186|issue:44734", + "issue:44871|issue:45245", + "issue:38175|issue:43065" + 
], + "prepared_review_unit_hash": "62aa38d2d7efa0020b46243abc095e253fd2fa9c61b2004e2810663418bbc1c1", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13648, + "estimated_input_tokens": 3412, + "estimated_eval_tokens": 7080 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous and does not form a true duplicate set. The soft pairs are only superficial theme matches (mostly around import errors or example failures), so none should be merged as duplicates and no single canonical issue stands out.", + "confidence": 0.93, + "canonical_issue_reason": "No clear canonical issue: the items span unrelated bugs in imports, docs/examples, tokenizer behavior, model save/load, and runtime errors.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a strong cluster-wide representative because there is no single underlying bug or change tying these items together.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43116", + "right": "issue:44190", + "accept": false, + "reason": "Different example-script failures: one is multi-label classification output handling, the other is local dataset loading for image classification." + }, + { + "left": "issue:43295", + "right": "issue:45276", + "accept": false, + "reason": "Different concrete bugs in different model paths: processor/tokenizer regression vs. resize_token_embeddings not updating Gemma4 embeddings." + }, + { + "left": "issue:41950", + "right": "issue:44016", + "accept": false, + "reason": "Unrelated failures: a video-classification pipeline bug vs. a notebook syntax error." 
+ }, + { + "left": "issue:36246", + "right": "issue:41628", + "accept": false, + "reason": "Both are import errors, but for different symbols and modules; they do not look like the same underlying code-path bug." + }, + { + "left": "issue:43404", + "right": "issue:45216", + "accept": false, + "reason": "Different model-specific issues: Mistral3 weight tying vs. Qwen3.5 save_pretrained checkpoint corruption." + }, + { + "left": "issue:36246", + "right": "issue:38617", + "accept": false, + "reason": "Distinct import failures for different names in different modules, not one shared missing export bug." + }, + { + "left": "issue:39692", + "right": "issue:44190", + "accept": false, + "reason": "Different problems in different examples: SigLIP2 docs/model mismatch and quantization failure vs. local dataset loading." + }, + { + "left": "issue:44186", + "right": "issue:44734", + "accept": false, + "reason": "Different subsystems and symptoms: tokenizer padding/NER crashes vs. server KV-cache indexing during responses." + }, + { + "left": "issue:44871", + "right": "issue:45245", + "accept": false, + "reason": "Completely different bugs: Gemma-3 eos token configuration vs. a category cardinality runtime limit." + }, + { + "left": "issue:38175", + "right": "issue:43065", + "accept": false, + "reason": "No shared underlying bug: SigLIP2 zero probabilities vs. a dummy Conv2d in Sam3PixelDecoder." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 85, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42175", + "issue:43295", + "issue:43479", + "issue:43854", + "issue:44016", + "issue:44112", + "issue:44220", + "issue:44246", + "issue:44279", + "issue:44466", + "issue:44683", + "issue:44792", + "issue:44913", + "issue:45003", + "issue:45335", + "issue:45375", + "issue:45446" + ], + "soft_pairs": [ + "issue:44016|issue:44220", + "issue:43479|issue:45375", + "issue:44246|issue:45003", + "issue:42175|issue:44279", + "issue:44683|issue:45446", + "issue:44466|issue:44913", + "issue:44112|issue:44792", + "issue:43854|issue:44792", + "issue:43295|issue:45335" + ], + "prepared_review_unit_hash": "ac0f8879a40388954a0f36df5b9bc35030042d950f5d133a3fb6cca404189c19", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12456, + "estimated_input_tokens": 3114, + "estimated_eval_tokens": 6484 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set; the items span unrelated bugs across configs, CI, notebooks, dependency issues, multimodal models, and attention/version checks. 
No single canonical issue or PR emerges.", + "confidence": 0.96, + "canonical_issue_reason": "No canonical issue: the issues describe distinct failures in different subsystems and code paths, with no strong evidence they are the same underlying bug.", + "canonical_pr_reason": null, + "best_issue_reason": "No best issue: none is a good representative of the whole set because there is no coherent duplicate cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44016", + "right": "issue:44220", + "accept": false, + "reason": "Notebook syntax error and `_torch_extract_fbank_features()` are unrelated failure modes in different areas; not the same bug." + }, + { + "left": "issue:43479", + "right": "issue:45375", + "accept": false, + "reason": "Both involve config handling, but they concern different model families and different fields/default-init behavior; too different to merge." + }, + { + "left": "issue:44246", + "right": "issue:45003", + "accept": false, + "reason": "`import transformers` slowness is a performance/import-path issue, while `sys.modules[]` access is a correctness/safety issue; not the same underlying problem." + }, + { + "left": "issue:42175", + "right": "issue:44279", + "accept": false, + "reason": "Both mention dependencies, but one is specifically missing TensorFlow in an extra install and the other is a generic dependency issue; no same code-path bug." + }, + { + "left": "issue:44683", + "right": "issue:45446", + "accept": false, + "reason": "Both touch flex_attention/version checks, but one is a torch>=2.9 compile failure and the other is an AuxRequest import guard problem; different fixes." + }, + { + "left": "issue:44466", + "right": "issue:44913", + "accept": false, + "reason": "Inconsistent `lm_head.weight` serialization and `rotary_pct` reload behavior are separate configuration/serialization bugs in different model code." 
+ }, + { + "left": "issue:44112", + "right": "issue:44792", + "accept": false, + "reason": "These are different CI test failures in different models; no shared concrete bug or mergeable fix path." + }, + { + "left": "issue:43854", + "right": "issue:44792", + "accept": false, + "reason": "GLM-4.7-Flash loading in tests and Janus image generation test failure are distinct model/test issues." + }, + { + "left": "issue:43295", + "right": "issue:45335", + "accept": false, + "reason": "Regression in `processor.tokenizer`/image passing and T5Gemma embedding resize behavior are different multimodal/modeling bugs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 86, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42907", + "issue:43299", + "issue:43441", + "issue:43643", + "issue:43673", + "issue:43824", + "issue:43976", + "issue:44016", + "issue:44062", + "issue:44075", + "issue:44112", + "issue:44336", + "issue:44368", + "issue:44496", + "issue:44933", + "issue:44964", + "issue:44987", + "issue:45216" + ], + "soft_pairs": [ + "issue:42907|issue:43441", + "issue:43299|issue:45216", + "issue:44016|issue:44062", + "issue:43643|issue:44496", + "issue:44112|issue:44964", + "issue:43673|issue:44336", + "issue:43976|issue:44987", + "issue:44075|issue:44368", + "issue:43824|issue:44933" + ], + "prepared_review_unit_hash": "1f18f01515425d291642756af22e7de7f31cd92a5e16711e0e4d15825a0d0f6d", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + 
"budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13174, + "estimated_input_tokens": 3294, + "estimated_eval_tokens": 6844 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "All candidate pairs look like separate regressions or unrelated bugs; none appear to be the same underlying issue/change. The closest pairs are only superficially similar by model family or import/config context, but the code paths differ.", + "confidence": 0.94, + "canonical_issue_reason": "issue:43824 is the strongest issue candidate overall: it is concrete, well-scoped, and has the most obvious regression signal and community traction among the set.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43824 is the best representative issue in this set because it states a specific failure mode and is easier to triage than the broader or more ambiguous reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42907", + "right": "issue:43441", + "accept": false, + "reason": "Both mention Ministral/Ministral-3, but one is about saving dequantized models and the other is a FlashAttention load failure. Different failure paths and likely different fixes." + }, + { + "left": "issue:43299", + "right": "issue:45216", + "accept": false, + "reason": "Qwen3VL MoE loading and Qwen3.5 save_pretrained regression are different model families and opposite operations (load vs save). Not the same bug." + }, + { + "left": "issue:44016", + "right": "issue:44062", + "accept": false, + "reason": "A notebook syntax error and an AddedToken argument TypeError are unrelated issues with no shared code path." 
+ }, + { + "left": "issue:43643", + "right": "issue:44496", + "accept": false, + "reason": "Both involve config/model loading, but one is trust_remote_code returning incomplete objects and the other is an unrecognized model lacking model_type. Related area, but not the same concrete bug." + }, + { + "left": "issue:44112", + "right": "issue:44964", + "accept": false, + "reason": "A stale GraniteSpeech CI test failure is unrelated to a Phi-4-multimodal model loading failure." + }, + { + "left": "issue:43673", + "right": "issue:44336", + "accept": false, + "reason": "Chunked_prefill GenerationMixin cache regression and ANSI color codes in loading_report are unrelated subsystems and symptoms." + }, + { + "left": "issue:43976", + "right": "issue:44987", + "accept": false, + "reason": "A Python version compatibility regression and a specific model-loading failure in physical-intelligence/fast are not the same underlying issue." + }, + { + "left": "issue:44075", + "right": "issue:44368", + "accept": false, + "reason": "SGD optimizer args being ignored and a tie_word_embeddings warning during Qwen3.5 LoRA fine-tuning are unrelated." + }, + { + "left": "issue:43824", + "right": "issue:44933", + "accept": false, + "reason": "Both are import errors, but they reference different missing symbols in different modules; too generic to treat as the same bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 87, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43299", + "issue:43526", + "issue:43577", + "issue:43854", + "issue:43901", + "issue:43976", + "issue:44242", + "issue:44279", + "issue:44351", + "issue:44496", + "issue:44521", + "issue:44964", + "issue:45042", + "issue:45200", + "issue:45216", + "issue:45278", + "issue:45325", + "issue:45357" + ], + "soft_pairs": [ + "issue:44496|issue:45042", + "issue:43976|issue:44279", + "issue:43299|issue:45357", + "issue:43526|issue:43901", + "issue:44242|issue:45200", + "issue:44521|issue:45325", + "issue:43577|issue:45216", + "issue:43901|issue:44496", + "issue:43854|issue:44964", + "issue:44351|issue:45278" + ], + "prepared_review_unit_hash": "57349bcb717d7a461a203672805cca492d6577ed2b41dfc521e0e14b020293e9", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13475, + "estimated_input_tokens": 3369, + "estimated_eval_tokens": 6994 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These look like a collection of distinct Transformers regression reports rather than one duplicate cluster. 
The soft-linked pairs share only broad topical similarity (model loading, import errors, multimodal behavior, or version regressions), not the same concrete bug or change.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:44521 is the most detailed and actively referenced report, so it is the best representative issue in this set, but it is still a distinct multimodal chat-template bug rather than a true canonical match for the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44496", + "right": "issue:45042", + "accept": false, + "reason": "Different problems: unrecognized model loading for Olmo-Hybrid vs PIL image processors incorrectly requiring torchvision." + }, + { + "left": "issue:43976", + "right": "issue:44279", + "accept": false, + "reason": "One is a Python-version compatibility regression; the other is a vague dependency issue with no clear shared code path." + }, + { + "left": "issue:43299", + "right": "issue:45357", + "accept": false, + "reason": "Different Qwen regressions: loading Qwen3VL MoE models vs incorrect save_pretrained visual encoder keys for Qwen3.5." + }, + { + "left": "issue:43526", + "right": "issue:43901", + "accept": false, + "reason": "BEiT image processor label reduction bug is unrelated to TextClassificationPipeline documentation/behavior mismatch." + }, + { + "left": "issue:44242", + "right": "issue:45200", + "accept": false, + "reason": "Load-balancing loss omission in MoE routing is unrelated to Gemma 4 mm_token_type_ids defaulting for fine-tuning." + }, + { + "left": "issue:44521", + "right": "issue:45325", + "accept": false, + "reason": "apply_chat_template assistant mask failure is unrelated to Qwen2.5-VL rope-index scaling for still-image positions." 
+ }, + { + "left": "issue:43577", + "right": "issue:45216", + "accept": false, + "reason": "BLIP2 dtype loading issue is unrelated to the Qwen3.5 save_pretrained checkpoint regression." + }, + { + "left": "issue:43901", + "right": "issue:44496", + "accept": false, + "reason": "Docs/behavior mismatch for return_all_scores is not the same bug as failing to load an unrecognized model." + }, + { + "left": "issue:43854", + "right": "issue:44964", + "accept": false, + "reason": "Both are model-loading failures, but they involve different model families and likely different root causes." + }, + { + "left": "issue:44351", + "right": "issue:45278", + "accept": false, + "reason": "Missing HybridCache import is a specific symbol regression; the other is a broad import-error report and not the same concrete issue." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 88, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:38175", + "issue:41950", + "issue:43479", + "issue:43525", + "issue:43650", + "issue:43976", + "issue:44279", + "issue:44297", + "issue:44462", + "issue:44464", + "issue:44496", + "issue:44617", + "issue:44779", + "issue:44877", + "issue:45020", + "issue:45081", + "issue:45245", + "issue:45405" + ], + "soft_pairs": [ + "issue:44464|issue:45245", + "issue:44462|issue:45081", + "issue:41950|issue:45020", + "issue:41950|issue:43650", + "issue:43976|issue:45405", + "issue:44297|issue:44462", + "issue:44279|issue:45405", + "issue:43479|issue:43525", + "issue:44496|issue:44877", + "issue:44462|issue:44779", + "issue:38175|issue:44617" + ], + "prepared_review_unit_hash": 
"924cc7077fe6decc1f88e15eebd1b6c553a4b14223e2bcdef62c05a9649b2354", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13621, + "estimated_input_tokens": 3406, + "estimated_eval_tokens": 7068 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is highly heterogeneous: the soft-similarity pairs share broad themes (tokenizers, configs, regressions) but not the same concrete bug or code path. I do not see any true duplicate pairs. If forced to pick a representative issue, #45020 is the broadest and most substantive report, but it is not a real canonical duplicate anchor.", + "confidence": 0.91, + "canonical_issue_reason": "No single issue cleanly represents a duplicate set here; the items span unrelated bugs.", + "canonical_pr_reason": null, + "best_issue_reason": "#45020 is the most broadly scoped and actionable issue in the group, though the cluster is too mixed for a true canonical duplicate.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44464", + "right": "issue:45245", + "accept": false, + "reason": "Different bugs: chunked generation/compiled forward versus a categories-cardinality runtime error." + }, + { + "left": "issue:44462", + "right": "issue:45081", + "accept": false, + "reason": "Both involve tokenizer loading, but one is about repository tokenizer.json selection and the other is a Mistral regex patch crash." + }, + { + "left": "issue:41950", + "right": "issue:45020", + "accept": false, + "reason": "Different subsystems and failures: video-classification processor lookup versus remote_code model loading regressions." 
+ }, + { + "left": "issue:41950", + "right": "issue:43650", + "accept": false, + "reason": "No shared bug path; pipeline processor selection is unrelated to the vague data issue." + }, + { + "left": "issue:43976", + "right": "issue:45405", + "accept": false, + "reason": "A Python version compatibility problem is unrelated to a PEFT minimum-version bump." + }, + { + "left": "issue:44297", + "right": "issue:44462", + "accept": false, + "reason": "Both are tokenizer-related, but one is save_pretrained metadata mismatch and the other is AutoTokenizer ignoring tokenizer.json." + }, + { + "left": "issue:44279", + "right": "issue:45405", + "accept": false, + "reason": "General dependency trouble does not match the specific unreleased MIN_PEFT_VERSION issue." + }, + { + "left": "issue:43479", + "right": "issue:43525", + "accept": false, + "reason": "Different config classes and failures: default multimodal config initialization versus missing pad_token_id on Llama4Config." + }, + { + "left": "issue:44496", + "right": "issue:44877", + "accept": false, + "reason": "Both are config-loading problems, but they stem from different validation paths and model families." + }, + { + "left": "issue:44462", + "right": "issue:44779", + "accept": false, + "reason": "Both mention tokenizers, but the symptoms and likely code paths differ: repo tokenizer selection versus Deepseek v5 behavior regression." + }, + { + "left": "issue:38175", + "right": "issue:44617", + "accept": false, + "reason": "Unrelated model inference issues: zero probabilities on SigLIP2 versus CUDA OOM in Sam3Video." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 89, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:39692", + "issue:42994", + "issue:43295", + "issue:43299", + "issue:43317", + "issue:43441", + "issue:43450", + "issue:43550", + "issue:43723", + "issue:43854", + "issue:43901", + "issue:44016", + "issue:44162", + "issue:44279", + "issue:44291", + "issue:44683", + "issue:44779", + "issue:44964" + ], + "soft_pairs": [ + "issue:43550|issue:44964", + "issue:43299|issue:43854", + "issue:43295|issue:44779", + "issue:39692|issue:43901", + "issue:43441|issue:44683", + "issue:43450|issue:44279", + "issue:44016|issue:44291", + "issue:42994|issue:43317", + "issue:43723|issue:44162" + ], + "prepared_review_unit_hash": "f0616d117b8a08c5b08c9dd1525b99108154a429e02d0e6a5e03a6ebf01e15ba", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13028, + "estimated_input_tokens": 3257, + "estimated_eval_tokens": 6770 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are not duplicates; they span separate regressions and unrelated subsystems (model loading, tokenizer behavior, attention kernels, quantization, docs, and notebook syntax). 
The cluster should be split rather than merged.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue: the set covers distinct bugs across different models and code paths, so none can represent the whole cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "No global best issue for deduplication; each item is a separate problem with no clear superseding report.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43550", + "right": "issue:44964", + "accept": false, + "reason": "Both are model-loading failures, but they involve different models and different failure modes (torch.compile+SDPA vs latest-transformers loadability)." + }, + { + "left": "issue:43299", + "right": "issue:43854", + "accept": false, + "reason": "These are separate model-loading bugs for different architectures and likely different code paths; similarity is only at the broad 'load fails' level." + }, + { + "left": "issue:43295", + "right": "issue:44779", + "accept": false, + "reason": "One is a processor/tokenizer API regression, the other is incorrect tokenizer output for Deepseek; related area, but not the same bug." + }, + { + "left": "issue:39692", + "right": "issue:43901", + "accept": false, + "reason": "Both mention docs, but one is a SigLIP2 example with model/processor and quantization errors, while the other is a pipeline docs mismatch about return_all_scores." + }, + { + "left": "issue:43441", + "right": "issue:44683", + "accept": false, + "reason": "Different attention implementations and different failure conditions; not the same concrete code-path problem." + }, + { + "left": "issue:43450", + "right": "issue:44279", + "accept": false, + "reason": "Video processor batching shape bug is unrelated to a generic dependency issue." + }, + { + "left": "issue:44016", + "right": "issue:44291", + "accept": false, + "reason": "A notebook syntax error and an init_empty_weights TypeError are unrelated defects." 
+ }, + { + "left": "issue:42994", + "right": "issue:43317", + "accept": false, + "reason": "Quantized model saving failure and dequantized model loading with device_map=auto are different quantization lifecycle problems." + }, + { + "left": "issue:43723", + "right": "issue:44162", + "accept": false, + "reason": "Tokenizer loading regression and ESM2 model breakage are different issues; no shared concrete bug is evident." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 90, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43065", + "issue:43122", + "issue:43262", + "issue:43650", + "issue:43673", + "issue:43761", + "issue:44261", + "issue:44263", + "issue:44466", + "issue:44521", + "issue:44623", + "issue:44671", + "issue:44749", + "issue:44779", + "issue:44898", + "issue:45276", + "issue:45290", + "issue:45397" + ], + "soft_pairs": [ + "issue:43650|issue:45290", + "issue:43065|issue:44898", + "issue:44623|issue:45397", + "issue:44261|issue:44263", + "issue:44466|issue:45276", + "issue:43262|issue:44521", + "issue:44623|issue:44749", + "issue:43122|issue:43650", + "issue:44671|issue:44779", + "issue:43673|issue:43761" + ], + "prepared_review_unit_hash": "815fdafaa49d424efe92dec924cffafd4552143132586b4f13d93a010ceb30f1", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13452, + "estimated_input_tokens": 3363, + "estimated_eval_tokens": 
6982 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No convincing duplicate pairs here: the soft links mostly share broad keywords (e.g. apply_chat_template, v5 regressions, serialization) but point to different bugs and code paths. I\u2019d anchor the issue side on the most concrete, active bug report.", + "confidence": 0.86, + "canonical_issue_reason": "Issue 44521 is the strongest representative: it is a specific, reproducible bug in apply_chat_template for multimodal inputs, is still open, and has the most cluster activity/inbound references among the candidates.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44521 looks best suited as the canonical issue because it is the most concrete and well-scoped report with active discussion and references; the other issues are either closed, narrower, or clearly unrelated.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43650", + "right": "issue:45290", + "accept": false, + "reason": "Completely different problems: a vague data request vs a crash in apply_chat_template with tool-call assistant messages. No shared code path." + }, + { + "left": "issue:43065", + "right": "issue:44898", + "accept": false, + "reason": "Different models and failures: dummy Conv2d in Sam3PixelDecoder vs Perceiver image classification at non-default resolution." + }, + { + "left": "issue:44623", + "right": "issue:45397", + "accept": false, + "reason": "processor.save_pretrained missing files is unrelated to gemma-4 Zero3 from_pretrained behavior." + }, + { + "left": "issue:44261", + "right": "issue:44263", + "accept": false, + "reason": "MLA q_a_layernorm epsilon handling and GlmMoeDsaIndexer torch.split return values are unrelated bugs." 
+ }, + { + "left": "issue:44466", + "right": "issue:45276", + "accept": false, + "reason": "One is lm_head.weight serialization/tied weights behavior; the other is resize_token_embeddings not updating Gemma4 embeddings. Different fixes." + }, + { + "left": "issue:43262", + "right": "issue:44521", + "accept": false, + "reason": "Both involve apply_chat_template, but one is about audio sampling-rate defaults and the other about assistant_masks for multimodal inputs. Not the same bug." + }, + { + "left": "issue:44623", + "right": "issue:44749", + "accept": false, + "reason": "save_pretrained file output vs tokenizer/filtering performance regression are unrelated." + }, + { + "left": "issue:43122", + "right": "issue:43650", + "accept": false, + "reason": "Different tokenizer regression vs a vague request to add data; no shared underlying issue." + }, + { + "left": "issue:44671", + "right": "issue:44779", + "accept": false, + "reason": "CamemBERT masked-LM prediction regression and Deepseek tokenizer incorrect results are separate model/tokenizer bugs." + }, + { + "left": "issue:43673", + "right": "issue:43761", + "accept": false, + "reason": "GenerationMixin chunked_prefill cache handling and CLIPVisionModel hidden_states output are unrelated code paths and behaviors." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 91, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42175", + "issue:43295", + "issue:43352", + "issue:43475", + "issue:43525", + "issue:43824", + "issue:43881", + "issue:43901", + "issue:43976", + "issue:43994", + "issue:44016", + "issue:44246", + "issue:44297", + "issue:44589", + "issue:44617", + "issue:45250", + "issue:45362", + "issue:45397" + ], + "soft_pairs": [ + "issue:44297|issue:45397", + "issue:43352|issue:45250", + "issue:42175|issue:43976", + "issue:43901|issue:43994", + "issue:43295|issue:44617", + "issue:43475|issue:44016", + "issue:43824|issue:43976", + "issue:44246|issue:44617", + "issue:43525|issue:43881", + "issue:44589|issue:45362", + "issue:43901|issue:44246" + ], + "prepared_review_unit_hash": "928221e4dec0d3644de1140fc7ce213208583deed3fc79e7f4d3a86e80627bdc", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13481, + "estimated_input_tokens": 3371, + "estimated_eval_tokens": 6998 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The items are mostly unrelated individual bugs/regressions that only share broad Transformer/model-loading vocabulary. None of the soft-edge pairs looks like the same concrete underlying issue, so all should be rejected. 
If a representative issue is needed, issue 44246 is the broadest/highest-engagement one, but this is not a strong duplicate cluster.", + "confidence": 0.92, + "canonical_issue_reason": "Issue 44246 is the most general and highest-engagement item in the set, so it is the closest thing to a cluster anchor, though the cluster itself is heterogeneous.", + "canonical_pr_reason": null, + "best_issue_reason": "44246 is the best single representative by breadth and discussion activity, but it does not genuinely subsume the other issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44297", + "right": "issue:45397", + "accept": false, + "reason": "Tokenizer save_pretrained metadata mismatch and Gemma-4 zero3 from_pretrained are unrelated failure modes." + }, + { + "left": "issue:43352", + "right": "issue:45250", + "accept": false, + "reason": "Both mention Flash Attention 2.0, but one is a specific model-not-supported error and the other is a generic FA2 issue; not the same bug." + }, + { + "left": "issue:42175", + "right": "issue:43976", + "accept": false, + "reason": "Backend dependency installation for TensorFlow and Python version compatibility are different problems." + }, + { + "left": "issue:43901", + "right": "issue:43994", + "accept": false, + "reason": "Docs mismatch for return_all_scores and incorrect SigLIP2 inference behavior are distinct pipeline issues." + }, + { + "left": "issue:43295", + "right": "issue:44617", + "accept": false, + "reason": "Processor/tokenizer regression and CUDA out-of-memory in Sam3Video are unrelated." + }, + { + "left": "issue:43475", + "right": "issue:44016", + "accept": false, + "reason": "Missing SAM3 encoder attribute and notebook syntax error are clearly different issues." + }, + { + "left": "issue:43824", + "right": "issue:43976", + "accept": false, + "reason": "Missing Qwen2.5-VL import and Python version incompatibility do not share the same underlying fix." 
+ }, + { + "left": "issue:44246", + "right": "issue:44617", + "accept": false, + "reason": "Slow import performance and video model OOM are not the same code-path problem." + }, + { + "left": "issue:43525", + "right": "issue:43881", + "accept": false, + "reason": "Llama4Config missing pad_token_id and glm-4v-9b loading failure are separate model/config errors." + }, + { + "left": "issue:44589", + "right": "issue:45362", + "accept": false, + "reason": "Float8 storage deserialization error and Qwen3.5 chat crash are unrelated runtime failures." + }, + { + "left": "issue:43901", + "right": "issue:44246", + "accept": false, + "reason": "Documentation mismatch for pipeline output and slow package import are different concerns." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 92, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36246", + "issue:41628", + "issue:41950", + "issue:42491", + "issue:43299", + "issue:43976", + "issue:44188", + "issue:44220", + "issue:44246", + "issue:44295", + "issue:44336", + "issue:44351", + "issue:44373", + "issue:44462", + "issue:44496", + "issue:45356", + "issue:45397" + ], + "soft_pairs": [ + "issue:41950|issue:44220", + "issue:41628|issue:43976", + "issue:44188|issue:44336", + "issue:44295|issue:45397", + "issue:44462|issue:45356", + "issue:42491|issue:43299", + "issue:44336|issue:44373", + "issue:44246|issue:44496", + "issue:36246|issue:44351" + ], + "prepared_review_unit_hash": "7eda0fbcdb3cf428ddf84c786185af19a06ab171e1ab7bd86e9258d0a5111c36", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + 
"serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12539, + "estimated_input_tokens": 3135, + "estimated_eval_tokens": 6526 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the soft pairs mostly share only broad themes (imports, tokenizers, model loading, docs) but not the same concrete bug. I would not merge any of the suggested pairs as duplicates.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue fits the cluster; the items cover unrelated failures across import/export, tokenizer behavior, model loading, attention kernels, and documentation.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a good cluster-wide representative because there is no clear duplicate set here.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41950", + "right": "issue:44220", + "accept": false, + "reason": "Different failures: video-classification pipeline image-processor lookup vs an error in _torch_extract_fbank_features(). Same general library area, but not the same bug." + }, + { + "left": "issue:41628", + "right": "issue:43976", + "accept": false, + "reason": "ImportError for AutoImageProcessor is unrelated to the Python 3.9/3.10 compatibility break." + }, + { + "left": "issue:44188", + "right": "issue:44336", + "accept": false, + "reason": "Attention-kernel divergence under torch.compile is unrelated to ANSI codes emitted by loading_report." + }, + { + "left": "issue:44295", + "right": "issue:45397", + "accept": false, + "reason": "Position_ids buffer handling and gemma-4 zero3 from_pretrained are different model-loading problems." 
+ }, + { + "left": "issue:44462", + "right": "issue:45356", + "accept": false, + "reason": "Both involve tokenizers, but one is about ignoring tokenizer.json and the other about a specific Kimi-K2.5 codec/regression; not the same underlying change." + }, + { + "left": "issue:42491", + "right": "issue:43299", + "accept": false, + "reason": "Both mention Qwen/MoE loading, but one is a LoRA compatibility issue across branches and the other is a Qwen3VL MoE loading regression; too different to treat as one bug." + }, + { + "left": "issue:44336", + "right": "issue:44373", + "accept": false, + "reason": "Loading_report ANSI output and a wrong docstring for position_ids are unrelated." + }, + { + "left": "issue:44246", + "right": "issue:44496", + "accept": false, + "reason": "Slow import timing is unrelated to the Olmo-Hybrid model config/model_type ValueError." + }, + { + "left": "issue:36246", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but for different exported symbols and likely different causes; not the same concrete failure." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 93, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36246", + "issue:43526", + "issue:43531", + "issue:43596", + "issue:43673", + "issue:43994", + "issue:44162", + "issue:44242", + "issue:44295", + "issue:44336", + "issue:44485", + "issue:44561", + "issue:44749", + "issue:44857", + "issue:45003", + "issue:45200", + "issue:45276", + "issue:45362" + ], + "soft_pairs": [ + "issue:43531|issue:45362", + "issue:43526|issue:45003", + "issue:43994|issue:45276", + "issue:43673|issue:44561", + "issue:44242|issue:44857", + "issue:44336|issue:44485", + "issue:36246|issue:43596", + "issue:44162|issue:45200", + "issue:44295|issue:44749" + ], + "prepared_review_unit_hash": "252a0dd444b87ca5868ca436884bb6de676ac84ca413e79c586e5423d41f78ad", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13195, + "estimated_input_tokens": 3299, + "estimated_eval_tokens": 6854 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster does not look like a true duplicate set: the issues span unrelated models, subsystems, and failure modes. 
None of the soft pairs appear to share the same underlying bug or change.", + "confidence": 0.97, + "canonical_issue_reason": "issue:45003 is the best single representative if one must be chosen: it is open, broadly applicable, and describes a concrete framework-level bug in modeling_utils rather than a narrow model-specific report.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45003 is the strongest issue to anchor the cluster because it is actionable and systemic, but the cluster overall is too heterogeneous to justify a real duplicate canonical.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43531", + "right": "issue:45362", + "accept": false, + "reason": "Both mention Qwen, but one is a sliding_window behavior report for Qwen3-MoE and the other is a chat crash for Qwen3.5-35B; different symptom and likely different code paths." + }, + { + "left": "issue:43526", + "right": "issue:45003", + "accept": false, + "reason": "BEiT image label reduction and unsafe sys.modules access in modeling_utils are unrelated bugs in different components." + }, + { + "left": "issue:43994", + "right": "issue:45276", + "accept": false, + "reason": "SigLIP2 AutoModel/pipeline bad outputs and Gemma 4 embedding resize behavior are different model families and different failure modes." + }, + { + "left": "issue:43673", + "right": "issue:44561", + "accept": false, + "reason": "Chunked_prefill cache handling and removal of is_torch_fx_available causing trust_remote_code breakage are distinct issues with no shared concrete root cause." + }, + { + "left": "issue:44242", + "right": "issue:44857", + "accept": false, + "reason": "MoE load-balancing loss omission and LwDetrImageLoss AMP/CUDA crash are unrelated training/runtime bugs." + }, + { + "left": "issue:44336", + "right": "issue:44485", + "accept": false, + "reason": "ANSI output in loading_report and GLM-5 RoPE implementation are different subsystems and different bug classes." 
+ }, + { + "left": "issue:36246", + "right": "issue:43596", + "accept": false, + "reason": "Qwen2.5-VL import failure and BertModel deepspeed zero3 IndexError are unrelated; one is an import/export problem, the other a distributed-init runtime error." + }, + { + "left": "issue:44162", + "right": "issue:45200", + "accept": false, + "reason": "ESM2 broken workflows and Gemma 4 mm_token_type_ids defaults are separate model-specific issues with no shared code path." + }, + { + "left": "issue:44295", + "right": "issue:44749", + "accept": false, + "reason": "position_ids buffer access error and a 10x slowdown after upgrade are different regressions affecting different mechanics." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 94, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41950", + "issue:42886", + "issue:43066", + "issue:43441", + "issue:43540", + "issue:43596", + "issue:44246", + "issue:44779", + "issue:44871", + "issue:44877", + "issue:44987", + "issue:44995", + "issue:45200", + "issue:45229", + "issue:45292", + "issue:45375", + "issue:45459" + ], + "soft_pairs": [ + "issue:44987|issue:45459", + "issue:44877|issue:45375", + "issue:44246|issue:44871", + "issue:44779|issue:45292", + "issue:41950|issue:43596", + "issue:43066|issue:44779", + "issue:43441|issue:43540", + "issue:44995|issue:45200", + "issue:42886|issue:45229" + ], + "prepared_review_unit_hash": "0fb544a8d16362a2568146bd4a3a4708caaea49846965693c93ec33b28241771", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 
172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12417, + "estimated_input_tokens": 3105, + "estimated_eval_tokens": 6466 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the issues cover unrelated transformer bugs in tokenizers, configs, vision/video models, FlashAttention, cache/offline loading, embedding resizing, and OOM behavior. None of the soft pairs look like the same underlying bug/change, so no duplicates are accepted.", + "confidence": 0.97, + "canonical_issue_reason": "No clear canonical issue: the set spans unrelated bugs and code paths, so there is no single issue that reasonably represents the cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "No strong best-issue anchor either; none of the reports subsumes the others. If forced, 44779 is a concrete regression report, but it is not representative of the whole set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44987", + "right": "issue:45459", + "accept": false, + "reason": "Different failures: one is a loading failure for a specific model, the other is tokenizer error masking when protobuf is missing. Same broad area, but not the same bug." + }, + { + "left": "issue:44877", + "right": "issue:45375", + "accept": false, + "reason": "Both mention strict config handling, but they concern different models and different missing fields. Not enough evidence of one shared underlying bug." + }, + { + "left": "issue:44246", + "right": "issue:44871", + "accept": false, + "reason": "Import slowdown is unrelated to Gemma eos_token_id inconsistency; different symptom, path, and fix scope." 
+ }, + { + "left": "issue:44779", + "right": "issue:45292", + "accept": false, + "reason": "Tokenizer regression in v5 and resize_token_embeddings not updating output embeddings are separate code paths and bugs." + }, + { + "left": "issue:41950", + "right": "issue:43596", + "accept": false, + "reason": "Video-classification processor lookup and deepspeed zero3/BertModel index error are unrelated issues." + }, + { + "left": "issue:43066", + "right": "issue:44779", + "accept": false, + "reason": "Both are tokenizer-related v5 regressions, but one is a decoder-type mismatch and the other is Deepseek output corruption; not clearly the same bug." + }, + { + "left": "issue:43441", + "right": "issue:43540", + "accept": false, + "reason": "Ministral FlashAttention failure and Qwen3OmniMoe video ValueError affect different models and code paths." + }, + { + "left": "issue:44995", + "right": "issue:45200", + "accept": false, + "reason": "Stale indexer cache on second forward pass is unrelated to Gemma 4 mm_token_type_ids defaults for text-only fine-tuning." + }, + { + "left": "issue:42886", + "right": "issue:45229", + "accept": false, + "reason": "Offline tokenizer cache loading and Gemma4 multi-GPU CUDA OOM are different classes of failure with no shared underlying bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 95, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43065", + "issue:43232", + "issue:43257", + "issue:43262", + "issue:43295", + "issue:43408", + "issue:43475", + "issue:43540", + "issue:43653", + "issue:43901", + "issue:44162", + "issue:44246", + "issue:44336", + "issue:44496", + "issue:45003", + "issue:45137", + "issue:45292" + ], + "soft_pairs": [ + "issue:43901|issue:45003", + "issue:43232|issue:44336", + "issue:43065|issue:43408", + "issue:43257|issue:45137", + "issue:43262|issue:44246", + "issue:44496|issue:45003", + "issue:43295|issue:45292", + "issue:43475|issue:43540", + "issue:43653|issue:44162" + ], + "prepared_review_unit_hash": "34c847b83244fa2f7658d320dd1044aea2cb05fc9d603a27b27ac40049897f36", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12552, + "estimated_input_tokens": 3138, + "estimated_eval_tokens": 6532 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are largely unrelated and should not be deduplicated. 
The apparent overlaps are only at the subsystem level (SAM3, DeepSpeed, tokenizer/modeling internals), not the same concrete bug.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43901", + "right": "issue:45003", + "accept": false, + "reason": "Docs mismatch for TextClassificationPipeline vs unsafe sys.modules access in modeling_utils; different code paths and fixes." + }, + { + "left": "issue:43232", + "right": "issue:44336", + "accept": false, + "reason": "Generation kwarg update after sync_gpus is unrelated to ANSI output in loading_report." + }, + { + "left": "issue:43065", + "right": "issue:43408", + "accept": false, + "reason": "Both mention SAM3, but one is about a dummy Conv2d in the pixel decoder and the other about a model-type mismatch warning; not the same bug." + }, + { + "left": "issue:43257", + "right": "issue:45137", + "accept": false, + "reason": "Both involve DeepSpeed, but one is about Qwen3 MOE weight conversion and the other an empty-deque IndexError in ZeRO3; different failures." + }, + { + "left": "issue:43262", + "right": "issue:44246", + "accept": false, + "reason": "Audio sampling-rate defaulting in apply_chat_template is unrelated to intermittent slow import time." + }, + { + "left": "issue:44496", + "right": "issue:45003", + "accept": false, + "reason": "Unrecognized model/model_type handling is a distinct issue from unsafe sys.modules access." + }, + { + "left": "issue:43295", + "right": "issue:45292", + "accept": false, + "reason": "Processor tokenizer regression with images is unrelated to resize_token_embeddings not updating output embeddings." + }, + { + "left": "issue:43475", + "right": "issue:43540", + "accept": false, + "reason": "SAM3 vision encoder missing attribute and Qwen3OmniMoe video processing error are different models and different failure modes." 
+ }, + { + "left": "issue:43653", + "right": "issue:44162", + "accept": false, + "reason": "BigBirdTokenizer special-token registration bug is unrelated to ESM2 breakage." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 96, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43232", + "issue:43441", + "issue:43606", + "issue:43673", + "issue:43931", + "issue:44060", + "issue:44077", + "issue:44297", + "issue:44368", + "issue:44466", + "issue:44623", + "issue:44734", + "issue:44869", + "issue:44933", + "issue:44977", + "issue:45335", + "issue:45356", + "issue:45459" + ], + "soft_pairs": [ + "issue:44060|issue:44077", + "issue:44466|issue:45335", + "issue:44933|issue:45459", + "issue:44368|issue:44977", + "issue:43673|issue:44734", + "issue:43441|issue:43606", + "issue:44869|issue:45356", + "issue:43232|issue:43673", + "issue:43931|issue:44368", + "issue:44297|issue:44623" + ], + "prepared_review_unit_hash": "c97f423679e41ab7ab934f76ee4cb3d70d37205fcf4c5b12c31a5fdf092d4004", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13604, + "estimated_input_tokens": 3401, + "estimated_eval_tokens": 7058 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No soft pair looks like the same underlying bug/change; the issues span unrelated models, 
subsystems, and failure modes. I\u2019d avoid merging them into a single duplicate cluster.", + "confidence": 0.84, + "canonical_issue_reason": "No single issue cleanly covers the whole set. If one must be chosen as an anchor, issue 44466 is the broadest, most clearly scoped v5 regression report with a concrete serialization symptom and version context.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44466 is the best representative issue overall because it is specific, reproducible, and framed as a regression, whereas the others are narrower model- or edge-case reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44060", + "right": "issue:44077", + "accept": false, + "reason": "Different models and different bugs: one is a tied-weights warning on Qwen3-Next, the other is an invalid optional post_init allowance for patchtsmixer." + }, + { + "left": "issue:44466", + "right": "issue:45335", + "accept": false, + "reason": "Both involve weight tying/serialization, but the concrete problems differ: inconsistent lm_head serialization by device vs resize_token_embeddings not updating decoder.embed_tokens." + }, + { + "left": "issue:44933", + "right": "issue:45459", + "accept": false, + "reason": "Both are tokenizer-related, but one is a nonexistent import from image_utils while the other is swallowed protobuf decode errors; different failure paths." + }, + { + "left": "issue:44368", + "right": "issue:44977", + "accept": false, + "reason": "Same model family and flash-attention context, but one is a tied-embeddings warning during LoRA fine-tuning and the other is a generation failure; not the same bug." + }, + { + "left": "issue:43673", + "right": "issue:44734", + "accept": false, + "reason": "Both mention generation cache, but one reports missing cache in chunked_prefill while the other is a KV-cache continuation crash from wrong tensor indexing in serve." 
+ }, + { + "left": "issue:43441", + "right": "issue:43606", + "accept": false, + "reason": "Both are model-specific runtime failures, but the concrete causes are unrelated: FlashAttention failure on Ministral-3 vs CPU offload device mismatch on bark-small." + }, + { + "left": "issue:44869", + "right": "issue:45356", + "accept": false, + "reason": "Both touch tokenizer/decoding behavior, but one is a Whisper timestamp decode crash on a trailing replacement character and the other is a Kimi-K2.5 codec regression with a misleading warning." + }, + { + "left": "issue:43232", + "right": "issue:43673", + "accept": false, + "reason": "Both are generation-related, but the reported bugs differ: after sync_gpus model kwargs update vs missing cache during chunked_prefill." + }, + { + "left": "issue:43931", + "right": "issue:44368", + "accept": false, + "reason": "Different symptoms and code paths: Qwen3-VL weight-shape mismatch on load versus a tie_word_embeddings warning during Qwen3.5 LoRA fine-tuning." + }, + { + "left": "issue:44297", + "right": "issue:44623", + "accept": false, + "reason": "Both concern save_pretrained, but tokenizer_class mismatch in tokenizer_config.json is not the same as processor.save_pretrained omitting files." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 97, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:28282", + "issue:30990", + "issue:42673", + "issue:43381", + "issue:43646", + "issue:43673", + "issue:43704", + "issue:44038", + "issue:44162", + "issue:44361", + "issue:44623", + "issue:44749", + "issue:45230", + "issue:45278", + "issue:45292", + "issue:45310", + "issue:45335", + "issue:45397" + ], + "soft_pairs": [ + "issue:43673|issue:45278", + "issue:43646|issue:44162", + "issue:43381|issue:45292", + "issue:42673|issue:43704", + "issue:44361|issue:44749", + "issue:43381|issue:45335", + "issue:28282|issue:30990", + "issue:45230|issue:45397", + "issue:44749|issue:45310", + "issue:44038|issue:44623" + ], + "prepared_review_unit_hash": "6d99a3b5e15591286041229acfd807ce70e83945549fdee3444261ec0817ad85", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13091, + "estimated_input_tokens": 3273, + "estimated_eval_tokens": 6802 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Only one soft duplicate pair is convincing: the two Qwen3ForCausalLM VRAM leak reports are the same bug with the same title. 
The rest are different failure modes, models, or performance/regression reports and should stay separate.", + "confidence": 0.93, + "canonical_issue_reason": "issue:42673 is the better canonical issue because it is earlier and has the same specific title as its duplicate, making it the cleanest representative of that bug.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:42673 is the strongest issue choice overall: it is specific, clearly describes the underlying bug, and has a direct duplicate with matching wording.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43673", + "right": "issue:45278", + "accept": false, + "reason": "Both mention v5-era breakage, but one is a chunked_prefill cache issue and the other is broad import errors after upgrade; not the same bug." + }, + { + "left": "issue:43646", + "right": "issue:44162", + "accept": false, + "reason": "Custom model initialization regression and ESM2 breakage are different problem reports; no shared concrete failure path is evident." + }, + { + "left": "issue:43381", + "right": "issue:45292", + "accept": false, + "reason": "Gradient checkpointing in eval mode is unrelated to resize_token_embeddings not updating output embeddings." + }, + { + "left": "issue:42673", + "right": "issue:43704", + "accept": true, + "reason": "Exact same title and same VRAM leak scenario for Qwen3ForCausalLM in multiple dataloader threads; these are duplicates." + }, + { + "left": "issue:44361", + "right": "issue:44749", + "accept": false, + "reason": "MLukeTokenizer AttributeError and a transformer upgrade performance slowdown are unrelated issues." + }, + { + "left": "issue:43381", + "right": "issue:45335", + "accept": false, + "reason": "Eval-mode gradient checkpointing limitation is a different bug from t5gemma embedding resize propagation." 
+ }, + { + "left": "issue:28282", + "right": "issue:30990", + "accept": false, + "reason": "PyTorch import-missing error and Sentence Transformers loading hang are distinct runtime problems." + }, + { + "left": "issue:45230", + "right": "issue:45397", + "accept": false, + "reason": "A generic bug report and a gemma-4 zero3 from_pretrained issue do not indicate the same concrete bug." + }, + { + "left": "issue:44749", + "right": "issue:45310", + "accept": false, + "reason": "Data filtering slowdown and Qwen3.5-Moe from_pretrained error are different symptoms and code paths." + }, + { + "left": "issue:44038", + "right": "issue:44623", + "accept": false, + "reason": "Qwen3-VL-Moe/transformers 5.0 compatibility and processor.save_pretrained missing files are separate issues." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 98, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36246", + "issue:41628", + "issue:42371", + "issue:42994", + "issue:43475", + "issue:43525", + "issue:43643", + "issue:43701", + "issue:43824", + "issue:44077", + "issue:44749", + "issue:44933", + "issue:45003", + "issue:45081", + "issue:45092", + "issue:45103", + "issue:45381" + ], + "soft_pairs": [ + "issue:36246|issue:43824", + "issue:44077|issue:45003", + "issue:42371|issue:45092", + "issue:43475|issue:45381", + "issue:43701|issue:44749", + "issue:45081|issue:45103", + "issue:41628|issue:44933", + "issue:43475|issue:43643", + "issue:42994|issue:43525" + ], + "prepared_review_unit_hash": "9fcb8a7887327f74bc5c2f6eb0717c7c86ea31bdd364339b7650628cf56850f9", + "component_budget": { + "node_count": 359, + 
"item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12731, + "estimated_input_tokens": 3183, + "estimated_eval_tokens": 6622 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is mostly heterogeneous; the only clear duplicate-like pair is the two Qwen2.5-VL import/export errors. The rest appear to be unrelated bugs across different subsystems.", + "confidence": 0.68, + "canonical_issue_reason": "No single canonical issue cleanly covers the whole set. Among the related subset, issue 43824 is the strongest representative because it is concrete, widely referenced, and matches the same Qwen2.5-VL import/export failure family as 36246.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43824 is the best overall issue candidate: it has the most inbound references, a specific failure mode, and the clearest duplicate-family match within the set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:36246", + "right": "issue:43824", + "accept": true, + "reason": "Both report import errors for Qwen2.5-VL symbols from the same transformers module family, suggesting the same underlying export/re-export bug." + }, + { + "left": "issue:44077", + "right": "issue:45003", + "accept": false, + "reason": "Different problems: one is a patchtsmixer API/post_init validation issue, the other is unsafe sys.modules access in modeling_utils." + }, + { + "left": "issue:42371", + "right": "issue:45092", + "accept": false, + "reason": "Unrelated: TF32 settings behavior vs remote-code checkpoint/meta-init compatibility." 
+ }, + { + "left": "issue:43475", + "right": "issue:45381", + "accept": false, + "reason": "Different model-specific bugs: missing fpn_position_embeddings in SAM 3 video vs wrong vision_position_ids in qwen2.5-vl video input." + }, + { + "left": "issue:43701", + "right": "issue:44749", + "accept": false, + "reason": "Unrelated: resume_from_checkpoint key mismatch vs performance regression in filtering after a Transformers upgrade." + }, + { + "left": "issue:45081", + "right": "issue:45103", + "accept": false, + "reason": "Different failure modes: tokenizer backend_tokenizer crash vs auto_docstring annotation handling crash." + }, + { + "left": "issue:41628", + "right": "issue:44933", + "accept": false, + "reason": "Both are import-related, but the titles point to different symbols and likely different root causes; not enough evidence they are the same bug." + }, + { + "left": "issue:43475", + "right": "issue:43643", + "accept": false, + "reason": "Different issues: missing SAM 3 video output attribute vs trust_remote_code returning incomplete config fields." + }, + { + "left": "issue:42994", + "right": "issue:43525", + "accept": false, + "reason": "Different bugs: quantized model saving failure vs Llama4Config missing pad_token_id." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 99, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41628", + "issue:42222", + "issue:42831", + "issue:43232", + "issue:43475", + "issue:43873", + "issue:44188", + "issue:44222", + "issue:44351", + "issue:44661", + "issue:44945", + "issue:45200", + "issue:45310", + "issue:45362", + "issue:45375", + "issue:45397", + "issue:45478" + ], + "soft_pairs": [ + "issue:42222|issue:45200", + "issue:45375|issue:45478", + "issue:45310|issue:45375", + "issue:43873|issue:44945", + "issue:43475|issue:44661", + "issue:45310|issue:45397", + "issue:42831|issue:44222", + "issue:41628|issue:44351", + "issue:43232|issue:44188", + "issue:45362|issue:45375", + "issue:44222|issue:45310" + ], + "prepared_review_unit_hash": "9a01b7fbed3d8145b11e8c07415a230ebf72c7a9fab604ea7e694f5f54ae2d06", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 11, + "serialized_chars": 12890, + "estimated_input_tokens": 3223, + "estimated_eval_tokens": 6702 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly unrelated issue titles; the only clear duplicate-like thread is the Qwen3.5 MoE from_pretrained error, with 45478 the best current tracker. 
The rest are separate bugs in different subsystems and should not be merged by title similarity alone.", + "confidence": 0.69, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:45478 is the freshest open report for the Qwen3.5 MoE from_pretrained regression and matches the exact title of the recurring bug thread better than the closed duplicate-style report.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42222", + "right": "issue:45200", + "accept": false, + "reason": "Different models and failure modes: ViTPose import breakage vs Gemma 4 mm_token_type_ids defaulting. Same broad area only at a very high level." + }, + { + "left": "issue:45375", + "right": "issue:45478", + "accept": false, + "reason": "Both mention Qwen3.5, but one is a missing vision-config field under strict parsing and the other is a generic from_pretrained error; not enough evidence they are the same bug." + }, + { + "left": "issue:45310", + "right": "issue:45375", + "accept": false, + "reason": "Possible relation through Qwen3.5/MoE config handling, but the reports describe different concrete failures and code paths." + }, + { + "left": "issue:43873", + "right": "issue:44945", + "accept": false, + "reason": "Offloading/quantization behavior and pipeline-parallel output divergence are different runtime issues." + }, + { + "left": "issue:43475", + "right": "issue:44661", + "accept": false, + "reason": "SAM 3 Vision encoder attribute error and add-new-model-like tokenizer mapping failure are unrelated code paths." + }, + { + "left": "issue:45310", + "right": "issue:45397", + "accept": false, + "reason": "Same general family of Transformers model loading errors, but Qwen3.5 MoE and Gemma 4 zero3 are distinct bugs." + }, + { + "left": "issue:42831", + "right": "issue:44222", + "accept": false, + "reason": "Both involve FP8, but one is an accuracy regression and the other is a save_pretrained bug; different concrete problems." 
+ }, + { + "left": "issue:41628", + "right": "issue:44351", + "accept": false, + "reason": "Both are import-name errors from transformers, but they concern different symbols and likely different missing exports." + }, + { + "left": "issue:43232", + "right": "issue:44188", + "accept": false, + "reason": "Generation kwargs update after sync_gpus and attention-kernel divergence under torch.compile are separate issues." + }, + { + "left": "issue:45362", + "right": "issue:45375", + "accept": false, + "reason": "Qwen3.5 chat crash and Qwen3.5MoEVisionConfig strict-field handling do not describe the same failure." + }, + { + "left": "issue:44222", + "right": "issue:45310", + "accept": false, + "reason": "FP8 MoE save_pretrained and Qwen3.5 MoE from_pretrained are different operations with different failure surfaces." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 100, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:30990", + "issue:35141", + "issue:36010", + "issue:41628", + "issue:42994", + "issue:43232", + "issue:43377", + "issue:43479", + "issue:43673", + "issue:43824", + "issue:44038", + "issue:44079", + "issue:44188", + "issue:44242", + "issue:44792", + "issue:44869", + "issue:45310" + ], + "soft_pairs": [ + "issue:42994|issue:43824", + "issue:44038|issue:45310", + "issue:36010|issue:41628", + "issue:43377|issue:44242", + "issue:43232|issue:44792", + "issue:30990|issue:35141", + "issue:43673|issue:44188", + "issue:43479|issue:44079", + "issue:43479|issue:44869" + ], + "prepared_review_unit_hash": "28753463ae417617de59f57dbbbe132ac52de8d54a224897a4e8fab8929cb185", + 
"component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12482, + "estimated_input_tokens": 3121, + "estimated_eval_tokens": 6498 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No soft duplicate edges are convincing here; the items are mostly unrelated issues across different subsystems and failure modes. The cluster should not be merged into a single canonical bug.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:43824 is the clearest standalone report: a specific import failure with a concrete symbol and module path, making it the strongest representative issue in this otherwise heterogeneous set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42994", + "right": "issue:43824", + "accept": false, + "reason": "Both are import/save-related regressions, but one is about quantized model saving and the other is an ImportError for a missing model class; different code paths and fixes." + }, + { + "left": "issue:44038", + "right": "issue:45310", + "accept": false, + "reason": "Both mention Qwen MoE and from_pretrained/import problems, but they target different model variants and failure modes; not the same concrete bug." + }, + { + "left": "issue:36010", + "right": "issue:41628", + "accept": false, + "reason": "Both are import errors, but for different symbols (`GenerationMixin` vs `AutoImageProcessor`) and different modules; too broad to be duplicates." 
+ }, + { + "left": "issue:43377", + "right": "issue:44242", + "accept": false, + "reason": "One is a MIMI encoder batching/padding-mask correctness bug; the other is an MoE load-balancing loss omission. Different functionality and symptoms." + }, + { + "left": "issue:43232", + "right": "issue:44792", + "accept": false, + "reason": "Generation cache/update behavior and a Janus image-generation test failure are not the same underlying defect." + }, + { + "left": "issue:30990", + "right": "issue:35141", + "accept": false, + "reason": "Sentence-transformers loading hang is unrelated to embedding resize/post-init reinitialization." + }, + { + "left": "issue:43673", + "right": "issue:44188", + "accept": false, + "reason": "Both touch generation behavior, but one is about missing cache support in chunked prefill and the other is a torch.compile attention-kernel branching divergence; different bugs." + }, + { + "left": "issue:43479", + "right": "issue:44079", + "accept": false, + "reason": "A config initialization bug for multimodal defaults is unrelated to ModelOutput key assignment behavior." + }, + { + "left": "issue:43479", + "right": "issue:44869", + "accept": false, + "reason": "Multimodal config defaulting and Whisper timestamp decoding crash are different components with no shared concrete failure path." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 101, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42175", + "issue:42898", + "issue:43278", + "issue:43784", + "issue:43824", + "issue:43976", + "issue:44060", + "issue:44188", + "issue:44373", + "issue:44568", + "issue:44623", + "issue:44734", + "issue:44933", + "issue:44964", + "issue:45103", + "issue:45125", + "issue:45137", + "issue:45310" + ], + "soft_pairs": [ + "issue:42898|issue:44568", + "issue:42175|issue:44933", + "issue:43784|issue:43824", + "issue:43976|issue:44933", + "issue:44964|issue:45137", + "issue:44188|issue:44373", + "issue:44623|issue:45310", + "issue:43278|issue:45137", + "issue:44734|issue:45103", + "issue:44060|issue:45125" + ], + "prepared_review_unit_hash": "a22d403a284a88c297969817e132ff9ffea506ad68b945dc57753057001be682", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13423, + "estimated_input_tokens": 3356, + "estimated_eval_tokens": 6968 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a true duplicate set: it mixes unrelated tokenizer, packaging, import, model-loading, serving, training, docs, and tensor-parallelism issues. 
All soft-similarity pairs are superficial and should be rejected.", + "confidence": 0.93, + "canonical_issue_reason": "No canonical issue: the items describe different bugs and feature regressions across unrelated code paths, so there is no single representative issue for the cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "No clear best issue either; at most some items are thematically similar (e.g. tokenizer/model-version regressions), but none is a safe global representative for the whole cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42898", + "right": "issue:44568", + "accept": false, + "reason": "Both involve tokenizer behavior in v5, but one is about clean_up_tokenization_spaces while the other is about add_special_tokens not adding BOS/EOS; different code paths and symptoms." + }, + { + "left": "issue:42175", + "right": "issue:44933", + "accept": false, + "reason": "Packaging/backend dependency selection vs a missing image_utils import; no shared underlying bug." + }, + { + "left": "issue:43784", + "right": "issue:43824", + "accept": false, + "reason": "Both are import failures, but for different packages/classes and different causes; not the same defect." + }, + { + "left": "issue:43976", + "right": "issue:44933", + "accept": false, + "reason": "Python version compatibility in transformers 5.1.0 is unrelated to a missing image_utils import." + }, + { + "left": "issue:44964", + "right": "issue:45137", + "accept": false, + "reason": "Different failure modes in different subsystems: multimodal model loading vs DeepSpeed ZeRO3 deque handling." + }, + { + "left": "issue:44188", + "right": "issue:44373", + "accept": false, + "reason": "Runtime kernel divergence under torch.compile is unrelated to a wrong docstring." 
+ }, + { + "left": "issue:44623", + "right": "issue:45310", + "accept": false, + "reason": "Missing files on processor save and Qwen3.5 MoE from_pretrained errors involve different save/load paths and are not the same bug." + }, + { + "left": "issue:43278", + "right": "issue:45137", + "accept": false, + "reason": "Embedding dtype drift during evaluate and a ZeRO3 deque IndexError are unrelated training/runtime issues." + }, + { + "left": "issue:44734", + "right": "issue:45103", + "accept": false, + "reason": "Serving KV-cache tensor indexing crash and auto-docstring AttributeError with future annotations are different subsystems and different root causes." + }, + { + "left": "issue:44060", + "right": "issue:45125", + "accept": false, + "reason": "Both concern Qwen models, but one is a tied-weights warning bug and the other is missing _tp_plan for tensor parallelism; they are distinct changes/fixes." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 102, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41628", + "issue:42898", + "issue:43295", + "issue:43723", + "issue:43874", + "issue:44077", + "issue:44222", + "issue:44242", + "issue:44297", + "issue:44442", + "issue:44448", + "issue:44568", + "issue:44779", + "issue:45092", + "issue:45137", + "issue:45276", + "issue:45356", + "issue:45459" + ], + "soft_pairs": [ + "issue:42898|issue:45356", + "issue:43723|issue:44568", + "issue:43874|issue:45092", + "issue:42898|issue:44779", + "issue:45137|issue:45276", + "issue:44077|issue:44242", + "issue:42898|issue:44448", + "issue:41628|issue:45459", + "issue:44222|issue:44297", + 
"issue:44442|issue:45459", + "issue:42898|issue:43295" + ], + "prepared_review_unit_hash": "4e68d080e1ff610fb60cb2e16d619595ddc5661fcab5d15ba564ad274f18fd18", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13825, + "estimated_input_tokens": 3457, + "estimated_eval_tokens": 7170 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly distinct tokenizer/processor regressions in v5 rather than true duplicates. The cluster is thematically related, but the concrete failure modes and affected code paths differ too much to merge any soft pair.", + "confidence": 0.86, + "canonical_issue_reason": "Issue 42898 is the broadest, most umbrella-like tokenizer-v5 regression report in the set, so it best serves as the cluster\u2019s anchor issue.", + "canonical_pr_reason": null, + "best_issue_reason": "42898 is the most representative issue for the general v5 tokenizer behavior-change theme; the others are narrower model-specific or API-specific regressions.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42898", + "right": "issue:45356", + "accept": false, + "reason": "Both concern tokenizer regressions in v5, but 42898 is about clean_up_tokenization_spaces behavior while 45356 is Kimi-K2.5 codec handling and a misleading warning; different bugs and fixes." + }, + { + "left": "issue:43723", + "right": "issue:44568", + "accept": false, + "reason": "43723 is an AutoTokenizer loading failure for a custom tokenizer, whereas 44568 is about add_special_tokens not adding BOS/EOS for a specific tokenizer; not the same code-path problem." 
+ }, + { + "left": "issue:43874", + "right": "issue:45092", + "accept": false, + "reason": "43874 is a missing image-processor method causing multimodal token counting failure, while 45092 is remote-code checkpoint incompatibility with meta initialization; separate issues." + }, + { + "left": "issue:42898", + "right": "issue:44779", + "accept": false, + "reason": "Both are v5 tokenizer regressions, but 42898 targets tokenization-space cleanup and 44779 targets Deepseek tokenizer output correctness; different underlying defects." + }, + { + "left": "issue:45137", + "right": "issue:45276", + "accept": false, + "reason": "45137 is a DeepSpeed ZeRO3 deque error, while 45276 is resize_token_embeddings not propagating to gemma4 embeddings; unrelated failures." + }, + { + "left": "issue:44077", + "right": "issue:44242", + "accept": false, + "reason": "44077 concerns a model config/post_init allowance, whereas 44242 is about load-balancing loss when output_router_logits=False; not the same bug." + }, + { + "left": "issue:42898", + "right": "issue:44448", + "accept": false, + "reason": "44448 reports Pegasus output differences between v4 and v5, but it is a model-specific regression and not clearly the same issue as clean_up_tokenization_spaces behavior in 42898." + }, + { + "left": "issue:41628", + "right": "issue:45459", + "accept": false, + "reason": "41628 is an AutoImageProcessor import error; 45459 is tokenizer error masking when protobuf is absent. Different subsystems and symptoms." + }, + { + "left": "issue:44222", + "right": "issue:44297", + "accept": false, + "reason": "44222 is an FP8 save_pretrained/moe issue, while 44297 is tokenizer_class mismatch in tokenizer_config.json; both involve saving but not the same concrete bug." 
+ }, + { + "left": "issue:44442", + "right": "issue:45459", + "accept": false, + "reason": "44442 is AutoTokenizer failing to load a specific tokenizer class; 45459 is exception handling that hides tokenizer errors when protobuf is missing. Distinct problems." + }, + { + "left": "issue:42898", + "right": "issue:43295", + "accept": false, + "reason": "43295 is about processor.tokenizer access and passing images to tokenizer in a regression, not the same behavior change as clean_up_tokenization_spaces in 42898." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 103, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:22355", + "issue:28282", + "issue:42831", + "issue:43479", + "issue:43644", + "issue:43723", + "issue:44206", + "issue:44222", + "issue:44297", + "issue:44483", + "issue:44484", + "issue:44514", + "issue:44623", + "issue:44977", + "issue:45103", + "issue:45310", + "issue:45406" + ], + "soft_pairs": [ + "issue:43479|issue:43644", + "issue:44222|issue:44623", + "issue:44206|issue:45103", + "issue:42831|issue:44623", + "issue:44297|issue:45310", + "issue:22355|issue:28282", + "issue:43723|issue:44977", + "issue:44483|issue:45406", + "issue:44222|issue:44484", + "issue:44514|issue:45103" + ], + "prepared_review_unit_hash": "f20840d91cce7b28fae42997363cabd1eedfc1eed4d54ed236e816c94e3049fa", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + 
"serialized_chars": 12771, + "estimated_input_tokens": 3193, + "estimated_eval_tokens": 6642 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is heterogeneous: the items cover unrelated bugs and regressions across config loading, save_pretrained, tokenizers, chat serving, FP8, and docstring tooling. None of the soft pairs look like the same underlying issue, so all should be kept separate.", + "confidence": 0.88, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43644 is the best anchor only in a loose sense: it describes a concrete, high-impact regression with a clear symptom and broader relevance, but it is not a duplicate of the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43479", + "right": "issue:43644", + "accept": false, + "reason": "Different bugs: Phi4MultimodalConfig defaulting logic vs non-persistent buffer initialization in v5. No shared code path or fix." + }, + { + "left": "issue:44222", + "right": "issue:44623", + "accept": false, + "reason": "Both mention save_pretrained, but one is about FP8 MoE saving and the other about processor files missing. Different artifact types and failure modes." + }, + { + "left": "issue:44206", + "right": "issue:45103", + "accept": false, + "reason": "LasrFeatureExtractor center-arg crash is unrelated to auto_docstring failing on postponed annotations." + }, + { + "left": "issue:42831", + "right": "issue:44623", + "accept": false, + "reason": "FineGrainedFP8 accuracy regression is unrelated to processor.save_pretrained missing files." + }, + { + "left": "issue:44297", + "right": "issue:45310", + "accept": false, + "reason": "Tokenizer config mismatch on save is a different problem from Qwen3.5 MoE from_pretrained failing." 
+ }, + { + "left": "issue:22355", + "right": "issue:28282", + "accept": false, + "reason": "Missing transformers.onnx module and missing PyTorch are distinct import/dependency errors." + }, + { + "left": "issue:43723", + "right": "issue:44977", + "accept": false, + "reason": "Tokenizer loading in v5 and Qwen3.5 flash-attention generation failure affect different paths and symptoms." + }, + { + "left": "issue:44483", + "right": "issue:45406", + "accept": false, + "reason": "Chat/completions request rejection is not the same as Gemma4Processor missing _tokenizer in serve." + }, + { + "left": "issue:44222", + "right": "issue:44484", + "accept": false, + "reason": "FP8 MoE save_pretrained bug is unrelated to the max_shard_size default question." + }, + { + "left": "issue:44514", + "right": "issue:45103", + "accept": false, + "reason": "Qwen2_5_VLProcessor batched chat template crash is unrelated to auto_docstring's AttributeError on future annotations." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 104, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:30990", + "issue:38175", + "issue:43479", + "issue:43723", + "issue:44261", + "issue:44263", + "issue:44295", + "issue:44360", + "issue:44373", + "issue:44442", + "issue:44485", + "issue:44623", + "issue:44871", + "issue:44913", + "issue:44991", + "issue:45092", + "issue:45357" + ], + "soft_pairs": [ + "issue:44263|issue:44360", + "issue:44263|issue:45092", + "issue:44295|issue:44623", + "issue:43479|issue:44913", + "issue:44261|issue:45092", + "issue:43723|issue:44871", + "issue:43723|issue:44442", + "issue:43723|issue:44991", + 
"issue:44373|issue:44485", + "issue:30990|issue:38175", + "issue:45092|issue:45357" + ], + "prepared_review_unit_hash": "cf73c0537e666975ac270ef35faf12e14cba39464d095c060e72675000db28c9", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 11, + "serialized_chars": 13019, + "estimated_input_tokens": 3255, + "estimated_eval_tokens": 6766 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: most items are unrelated bug reports that only share broad wording like loading, tokenizer, config, or save_pretrained. I do not see any soft pair that is the same underlying bug/change, so all soft edges should be rejected.", + "confidence": 0.91, + "canonical_issue_reason": "Issue 43723 is the broadest representative of the cluster\u2019s recurring tokenizer/loading theme, so it is the least-bad canonical issue choice even though the cluster is not a true duplicate set.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43723 is the best overall representative because it has the clearest, most general failure mode among the issues here; it is also the closest to several superficially related tokenizer-loading reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44263", + "right": "issue:44360", + "accept": false, + "reason": "Both mention DSA/indexer internals, but one is about torch.split return values and the other about a missing ReLU; different code-path problems." + }, + { + "left": "issue:44263", + "right": "issue:45092", + "accept": false, + "reason": "Completely different subsystems: GLM/DSA indexing versus multimodal meta-initialization compatibility." 
+ }, + { + "left": "issue:44295", + "right": "issue:44623", + "accept": false, + "reason": "Position_ids buffer access and processor.save_pretrained missing files are unrelated bugs." + }, + { + "left": "issue:43479", + "right": "issue:44913", + "accept": false, + "reason": "Phi4Multimodal default vision/audio config initialization is unrelated to GPTNeoX rotary_pct persistence on reload." + }, + { + "left": "issue:44261", + "right": "issue:45092", + "accept": false, + "reason": "Different failure modes and targets: missing rms_norm_eps in MLA q_a_layernorm versus old InternVL2 meta-init incompatibility." + }, + { + "left": "issue:43723", + "right": "issue:44871", + "accept": false, + "reason": "Both involve model loading, but one is AutoTokenizer loading failure and the other is an eos_token_id config mismatch in Gemma-3; not the same bug." + }, + { + "left": "issue:43723", + "right": "issue:44442", + "accept": false, + "reason": "Both are tokenizer-loading complaints, but they concern different tokenizers and likely different root causes." + }, + { + "left": "issue:43723", + "right": "issue:44991", + "accept": false, + "reason": "Different model/tokenizer targets and different loading failures; too broad to treat as one bug." + }, + { + "left": "issue:44373", + "right": "issue:44485", + "accept": false, + "reason": "Wrong docstring for position_ids is documentation-only; GLM-5 RoPE implementation is a code semantics issue." + }, + { + "left": "issue:30990", + "right": "issue:38175", + "accept": false, + "reason": "Sentence Transformers loading hang and zero probabilities in siglip2-base-patch16-224 are unrelated model-specific issues." + }, + { + "left": "issue:45092", + "right": "issue:45357", + "accept": false, + "reason": "Both touch multimodal checkpoints, but one is a load-time meta-init incompatibility and the other is a save_pretrained regression with visual encoder keys; different code paths." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 105, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41628", + "issue:42831", + "issue:42886", + "issue:42994", + "issue:43066", + "issue:43122", + "issue:43576", + "issue:43784", + "issue:43824", + "issue:44038", + "issue:44117", + "issue:44263", + "issue:44373", + "issue:44514", + "issue:44521", + "issue:44704", + "issue:45310" + ], + "soft_pairs": [ + "issue:41628|issue:43824", + "issue:42886|issue:42994", + "issue:42831|issue:45310", + "issue:43576|issue:43784", + "issue:42831|issue:44038", + "issue:43122|issue:44521", + "issue:44263|issue:44373", + "issue:42886|issue:43066", + "issue:44117|issue:44704", + "issue:43122|issue:44514" + ], + "prepared_review_unit_hash": "bdc0891a5d0a5436b8977f0bca29cf610535f04e84bf0cee672d6d22b39e1135", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12604, + "estimated_input_tokens": 3151, + "estimated_eval_tokens": 6558 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is mostly heterogeneous; the soft-similarity pairs share broad themes (tokenizers, imports, multimodal processors) but not the same concrete bug or change, so none should be merged as duplicates.", + "confidence": 0.86, + "canonical_issue_reason": "If one issue must anchor 
the cluster, #44521 is the clearest standalone bug report: a specific, reproducible failure in `apply_chat_template` for multimodal inputs with active discussion.", + "canonical_pr_reason": null, + "best_issue_reason": "#44521 is the strongest issue to keep as the representative because it has a concrete symptom, clear scope, and ongoing user impact.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41628", + "right": "issue:43824", + "accept": false, + "reason": "Both are import errors, but they concern different missing symbols (`AutoImageProcessor` vs `Qwen2_5_VLForConditionalGeneration`) and likely separate export/regression paths." + }, + { + "left": "issue:42886", + "right": "issue:42994", + "accept": false, + "reason": "Tokenizer offline cache loading and quantized model saving are unrelated failure modes in different code paths." + }, + { + "left": "issue:42831", + "right": "issue:45310", + "accept": false, + "reason": "One is a FineGrainedFP8 accuracy regression; the other is a Qwen3.5-MoE `from_pretrained` loading error. Same broad area only." + }, + { + "left": "issue:43576", + "right": "issue:43784", + "accept": false, + "reason": "A broken `transformers env` CLI command is unrelated to a `NameError` during sentence-transformers import." + }, + { + "left": "issue:42831", + "right": "issue:44038", + "accept": false, + "reason": "These are different model-specific problems: FP8 accuracy vs Qwen3-VL-MoE behavior in v5." + }, + { + "left": "issue:43122", + "right": "issue:44521", + "accept": false, + "reason": "Both involve tokenization/chat templating, but one reports version-to-version tokenization differences while the other is an all-zero assistant-mask bug for multimodal inputs." + }, + { + "left": "issue:44263", + "right": "issue:44373", + "accept": false, + "reason": "A `torch.split()` behavior issue and a wrong docstring for `position_ids` are not the same bug; one is code, the other documentation." 
+ }, + { + "left": "issue:42886", + "right": "issue:43066", + "accept": false, + "reason": "Offline tokenizer cache loading and wrong tokenizer decoder type in v5 are distinct defects affecting different mechanisms." + }, + { + "left": "issue:44117", + "right": "issue:44704", + "accept": false, + "reason": "`TOKENIZER_MAPPING_NAMES` returning `None` is a mapping/assumption bug; `AutoProcessor.from_pretrained` not forwarding kwargs to `cached_file` is a separate API plumbing issue." + }, + { + "left": "issue:43122", + "right": "issue:44514", + "accept": false, + "reason": "Both are tokenizer/processor related, but one is a general tokenization change across versions and the other is a batched `apply_chat_template` crash with `padding=False`." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 106, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:30990", + "issue:33357", + "issue:39401", + "issue:42371", + "issue:42907", + "issue:43232", + "issue:43937", + "issue:43976", + "issue:44263", + "issue:44295", + "issue:44297", + "issue:44488", + "issue:44704", + "issue:44743", + "issue:44945", + "issue:45245", + "issue:45290" + ], + "soft_pairs": [ + "issue:30990|issue:33357", + "issue:42371|issue:44263", + "issue:44704|issue:45290", + "issue:43232|issue:45245", + "issue:42907|issue:44945", + "issue:44295|issue:44297", + "issue:43232|issue:44743", + "issue:43976|issue:44945", + "issue:39401|issue:44488", + "issue:42907|issue:43937" + ], + "prepared_review_unit_hash": "fcf496f54045955e1554132065d050ad00aa65ffa65f52e01c3db89ba9c9028d", + "component_budget": { + "node_count": 359, + 
"item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12620, + "estimated_input_tokens": 3155, + "estimated_eval_tokens": 6566 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is heterogeneous: the issues cover unrelated bugs in loading, tokenizers, generation, serialization, TF32 settings, and model-specific behavior. None of the soft pairs look like the same underlying defect, so they should remain separate.", + "confidence": 0.97, + "canonical_issue_reason": "No canonical issue stands out; the items are not duplicates and do not share a single underlying bug or code path.", + "canonical_pr_reason": null, + "best_issue_reason": "No single issue is a strong global dedupe representative for this cluster; the closest matches are still different problems.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:30990", + "right": "issue:33357", + "accept": false, + "reason": "Different symptoms and models: Sentence Transformers loading hangs vs a MacOS bus error with a community CLIP model." + }, + { + "left": "issue:42371", + "right": "issue:44263", + "accept": false, + "reason": "Unrelated topics: TF32 behavior settings vs a GlmMoeDsaIndexer torch.split return-value issue." + }, + { + "left": "issue:44704", + "right": "issue:45290", + "accept": false, + "reason": "Different code paths: AutoProcessor kwargs passthrough to cached_file vs apply_chat_template crashing on tool-call assistant messages." + }, + { + "left": "issue:43232", + "right": "issue:45245", + "accept": false, + "reason": "Different failures: generation kwargs handling after sync_gpus vs a categorical cardinality limit error." 
+ }, + { + "left": "issue:42907", + "right": "issue:44945", + "accept": false, + "reason": "Serialization of dequantized Ministral/Devstral models is unrelated to incorrect outputs under pipeline parallelism." + }, + { + "left": "issue:44295", + "right": "issue:44297", + "accept": false, + "reason": "Separate tokenizer/model bugs: reading position_ids after buffer registration vs tokenizer_class mismatch in tokenizer_config.json." + }, + { + "left": "issue:43232", + "right": "issue:44743", + "accept": false, + "reason": "Both involve generation, but one is sync_gpus kwargs handling and the other is recurrent-state reset in modular_qwen3_5; not the same bug." + }, + { + "left": "issue:43976", + "right": "issue:44945", + "accept": false, + "reason": "Unrelated: Python version compatibility for Transformers 5.1.0 vs incorrect LLM output with pipeline parallelism." + }, + { + "left": "issue:39401", + "right": "issue:44488", + "accept": false, + "reason": "Both are tokenizer-related, but one is wrong offset_mapping for Qwen3 and the other is failing to load a specific community model." + }, + { + "left": "issue:42907", + "right": "issue:43937", + "accept": false, + "reason": "Different GLM-related issues: saving dequantized Ministral/Devstral vs invalid GenerationConfig for GLM-5." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 107, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:29127", + "issue:30990", + "issue:41628", + "issue:43116", + "issue:43122", + "issue:43408", + "issue:43475", + "issue:43992", + "issue:44360", + "issue:44485", + "issue:44625", + "issue:44704", + "issue:44779", + "issue:44829", + "issue:44987", + "issue:45200", + "issue:45278" + ], + "soft_pairs": [ + "issue:41628|issue:45278", + "issue:43116|issue:45200", + "issue:43408|issue:43475", + "issue:44625|issue:44829", + "issue:29127|issue:30990", + "issue:44360|issue:44485", + "issue:43992|issue:44704", + "issue:41628|issue:44987", + "issue:43122|issue:44779" + ], + "prepared_review_unit_hash": "da8812b3e1a2af01fce835fc0c248f8f95bf3c181899a5b4f6950fd4b8d4fc02", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12496, + "estimated_input_tokens": 3124, + "estimated_eval_tokens": 6504 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is a heterogeneous mix of unrelated transformer bugs, regressions, and feature issues; the soft links look like broad topical similarity rather than true duplicates. 
I would not collapse the cluster into a single canonical item.", + "confidence": 0.92, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41628", + "right": "issue:45278", + "accept": false, + "reason": "Both mention import problems, but one is a specific AutoImageProcessor import failure and the other is a broad upgrade-to-5.5.0 import breakage report; not the same concrete bug." + }, + { + "left": "issue:43116", + "right": "issue:45200", + "accept": false, + "reason": "Different symptoms and code paths: empty multi-label classification results in an example script versus Gemma 4 mm_token_type_ids defaulting/propagation." + }, + { + "left": "issue:43408", + "right": "issue:43475", + "accept": false, + "reason": "Both are SAM 3 video-adjacent, but one is a model-type mismatch warning and the other is a missing attribute error in vision encoder output; separate failures." + }, + { + "left": "issue:44625", + "right": "issue:44829", + "accept": false, + "reason": "Qwen3.5 num_labels propagation and flash_attention_3 degenerate training are unrelated bugs in different parts of the stack." + }, + { + "left": "issue:29127", + "right": "issue:30990", + "accept": false, + "reason": "LayoutLMv3 error-message clarity and Sentence Transformers loading hang are entirely different issues." + }, + { + "left": "issue:44360", + "right": "issue:44485", + "accept": false, + "reason": "DSA indexer ReLU discussion and GLM-5 RoPE implementation are different model/component concerns, not one underlying defect." + }, + { + "left": "issue:43992", + "right": "issue:44704", + "accept": false, + "reason": "UMT5Encoder missing embed_tokens.weight and AutoProcessor kwargs not forwarded to cached_file are separate loading bugs." 
+ }, + { + "left": "issue:41628", + "right": "issue:44987", + "accept": false, + "reason": "An AutoImageProcessor import error is not the same as failing to load a specific model repo in transformers>=5.1.0." + }, + { + "left": "issue:43122", + "right": "issue:44779", + "accept": false, + "reason": "Both are tokenizer regressions, but they affect different models and likely different tokenizer code paths; too broad to treat as the same bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 108, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36296", + "issue:39401", + "issue:42371", + "issue:42831", + "issue:42907", + "issue:43116", + "issue:43122", + "issue:43452", + "issue:43824", + "issue:44038", + "issue:44222", + "issue:44261", + "issue:44295", + "issue:44297", + "issue:44351", + "issue:44373", + "issue:44521", + "issue:44977" + ], + "soft_pairs": [ + "issue:44038|issue:44297", + "issue:39401|issue:44521", + "issue:44038|issue:44295", + "issue:44261|issue:44373", + "issue:42371|issue:42831", + "issue:36296|issue:43116", + "issue:42907|issue:43452", + "issue:44038|issue:44222", + "issue:43824|issue:44351", + "issue:43122|issue:44977" + ], + "prepared_review_unit_hash": "758d62a24e2d8766fc27355e76b5ee7183fbac16906d353610170a7f08f90296", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13152, + "estimated_input_tokens": 3288, + 
"estimated_eval_tokens": 6832 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly unrelated issues across tokenization, model import/export, FP8/save_pretrained, training, and docs/API behavior. The soft-similarity pairs share only broad theme or model family, not the same concrete bug, so none should be merged as duplicates.", + "confidence": 0.94, + "canonical_issue_reason": "If a single representative is needed, issue 44038 is the broadest user-facing regression in the Qwen3-VL-Moe / transformers 5.0 area. But the set is not a true duplicate cluster, so this is only a loose anchor, not a strong canonical.", + "canonical_pr_reason": null, + "best_issue_reason": "44038 is the most central of the listed issues because it describes a concrete runtime regression and is thematically closest to the few Qwen-related reports. Still, the cluster is heterogeneous enough that no issue is a clean global canonical.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44038", + "right": "issue:44297", + "accept": false, + "reason": "Both involve Qwen/transformers model handling, but one is a Qwen3-VL-Moe bug and the other is tokenizer_config save_pretrained metadata mismatch; different code paths and symptoms." + }, + { + "left": "issue:39401", + "right": "issue:44521", + "accept": false, + "reason": "Both mention Qwen/tokenization, but offset_mapping errors and all-zero assistant masks come from different preprocessing stages and likely different fixes." + }, + { + "left": "issue:44038", + "right": "issue:44295", + "accept": false, + "reason": "Qwen3-VL-Moe regression and position_ids buffer reading error are unrelated bugs; no shared failure mode is evident." 
+ }, + { + "left": "issue:44261", + "right": "issue:44373", + "accept": false, + "reason": "An MLA q_a_layernorm precision issue is not the same as a wrong docstring for position_ids; one is a functional bug, the other is documentation." + }, + { + "left": "issue:42371", + "right": "issue:42831", + "accept": false, + "reason": "TF32 API guidance and FineGrainedFP8 accuracy problems are different numerical/performance topics with different fixes." + }, + { + "left": "issue:36296", + "right": "issue:43116", + "accept": false, + "reason": "Tensor parallel training failure and multi-label classification returning empty results are distinct feature paths with no concrete overlap." + }, + { + "left": "issue:42907", + "right": "issue:43452", + "accept": false, + "reason": "Saving dequantized Ministral/Devstral models and gguf_file loader failures affect different model serialization/loading paths." + }, + { + "left": "issue:44038", + "right": "issue:44222", + "accept": false, + "reason": "Both touch Qwen3-VL-Moe/FP8, but one is a general transformers 5.0 bug and the other is specifically FP8 save_pretrained behavior; not the same underlying defect." + }, + { + "left": "issue:43824", + "right": "issue:44351", + "accept": false, + "reason": "These are separate import errors for different symbols: Qwen2_5_VLForConditionalGeneration versus HybridCache." + }, + { + "left": "issue:43122", + "right": "issue:44977", + "accept": false, + "reason": "Tokenizer-version drift and flash-attention generation failure are different regressions; same broad Qwen3.5 area is not enough to duplicate them." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 109, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:38617", + "issue:42222", + "issue:42673", + "issue:42907", + "issue:43064", + "issue:43278", + "issue:43441", + "issue:43673", + "issue:43784", + "issue:44222", + "issue:44912", + "issue:44933", + "issue:44945", + "issue:45072", + "issue:45216", + "issue:45278", + "issue:45446", + "issue:45478" + ], + "soft_pairs": [ + "issue:43278|issue:44945", + "issue:45216|issue:45478", + "issue:42907|issue:44222", + "issue:43784|issue:45446", + "issue:42222|issue:43441", + "issue:42673|issue:45072", + "issue:42907|issue:44912", + "issue:38617|issue:45278", + "issue:43784|issue:44933", + "issue:43064|issue:43673" + ], + "prepared_review_unit_hash": "675bb7060e4418a39c4a647e0764f857a7a968e41788264bdf8ca502889968ec", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13308, + "estimated_input_tokens": 3327, + "estimated_eval_tokens": 6910 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are mostly unrelated bug reports; none of the soft pairs look like the same underlying issue, so I rejected all duplicate edges. 
The broadest representative issue in the set is the import-regression report (#45278), but this cluster is not a true duplicate set.", + "confidence": 0.86, + "canonical_issue_reason": "#45278 is the broadest report here (multiple import errors after upgrading to 5.5.0) and is the closest thing to a cluster representative, though the set is heterogeneous.", + "canonical_pr_reason": null, + "best_issue_reason": "#45278 is the most general and central issue among these reports, making it the best representative if one must be chosen.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43278", + "right": "issue:44945", + "accept": false, + "reason": "Different bugs: one is a dtype mismatch between train/eval, the other is incorrect output under pipeline parallelism." + }, + { + "left": "issue:45216", + "right": "issue:45478", + "accept": false, + "reason": "Both involve Qwen3.5, but one is a save_pretrained regression and the other is a from_pretrained/MoE loading error; not the same concrete fix." + }, + { + "left": "issue:42907", + "right": "issue:44222", + "accept": false, + "reason": "Both are save/load related, but they concern different quantization paths and model families (dequantized Ministral/Devstral vs FP8 MoE)." + }, + { + "left": "issue:43784", + "right": "issue:45446", + "accept": false, + "reason": "Both are import-related, but they are separate missing/guarded imports in different modules with different root causes." + }, + { + "left": "issue:42222", + "right": "issue:43441", + "accept": false, + "reason": "Completely different model/runtime failures: vitpose import breakage vs Ministral-3 FlashAttention failure." + }, + { + "left": "issue:42673", + "right": "issue:45072", + "accept": false, + "reason": "Different symptoms and code paths: VRAM leak in threaded Qwen3ForCausalLM use vs bfloat16 dtype mismatches in CI." 
+ }, + { + "left": "issue:42907", + "right": "issue:44912", + "accept": false, + "reason": "Different quantization/save-load problems: saving dequantized weights is not the same as MXFP4 load fallback to bf16." + }, + { + "left": "issue:38617", + "right": "issue:45278", + "accept": false, + "reason": "#45278 is a broad import-regression report, but #38617 is a specific missing symbol import; not the same bug." + }, + { + "left": "issue:43784", + "right": "issue:44933", + "accept": false, + "reason": "Both mention imports, but they involve different missing symbols/modules and different failures." + }, + { + "left": "issue:43064", + "right": "issue:43673", + "accept": false, + "reason": "Different training/distributed-state bug versus GenerationMixin cache issue; no shared concrete code path." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 110, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:22355", + "issue:29127", + "issue:42371", + "issue:43116", + "issue:43493", + "issue:43550", + "issue:43784", + "issue:43825", + "issue:43874", + "issue:43937", + "issue:43976", + "issue:44038", + "issue:44075", + "issue:44222", + "issue:44485", + "issue:44936", + "issue:45446", + "issue:45478" + ], + "soft_pairs": [ + "issue:43550|issue:43784", + "issue:43937|issue:44485", + "issue:44075|issue:44936", + "issue:22355|issue:29127", + "issue:42371|issue:43874", + "issue:43116|issue:43493", + "issue:43116|issue:43825", + "issue:44222|issue:45478", + "issue:43976|issue:45446", + "issue:44038|issue:45478" + ], + "prepared_review_unit_hash": 
"b05b955e3d30a08f17646b8ce2cff75707bedd259d4629d2bb4d3137b7c91913", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13207, + "estimated_input_tokens": 3302, + "estimated_eval_tokens": 6860 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are a loose thematic cluster of unrelated Transformer issues, not a duplicate set. All soft pairs are false positives based on broad keyword overlap only.", + "confidence": 0.97, + "canonical_issue_reason": "No single canonical issue stands out because the items cover distinct bugs across different models, features, and code paths rather than one duplicated report.", + "canonical_pr_reason": "No PRs are present in this input, so there is no canonical PR candidate.", + "best_issue_reason": "No issue is a clear global representative for duplication triage; the closest title matches still describe different failures or different subsystems.", + "best_pr_reason": "No PRs are present in this input, so there is no best PR candidate.", + "soft_edge_verdicts": [ + { + "left": "issue:43550", + "right": "issue:43784", + "accept": false, + "reason": "Different bugs: Bamba torch.compile/SDPA failure vs sentence-transformers import NameError. Same broad library, not the same code-path." + }, + { + "left": "issue:43937", + "right": "issue:44485", + "accept": false, + "reason": "Both mention GLM-5, but one is a GenerationConfig validation error and the other is a RoPE implementation discussion. Different underlying problems." 
+ }, + { + "left": "issue:44075", + "right": "issue:44936", + "accept": false, + "reason": "SGD optimizer arguments being ignored is unrelated to trainer.evaluate() failing after trainer.train(). Different components and symptoms." + }, + { + "left": "issue:22355", + "right": "issue:29127", + "accept": false, + "reason": "No module named transformers.onnx is an import/package issue; layoutlmv3 box clarity is an error-message/validation issue." + }, + { + "left": "issue:42371", + "right": "issue:43874", + "accept": false, + "reason": "TF32 API/settings guidance is unrelated to a missing get_number_of_image_patches method in an image processor." + }, + { + "left": "issue:43116", + "right": "issue:43493", + "accept": false, + "reason": "Multi-label classification returning empty results is not the same as a SigLIP2 HF-vs-JAX discrepancy. Different model/task and failure mode." + }, + { + "left": "issue:43116", + "right": "issue:43825", + "accept": false, + "reason": "Empty multi-label classification results and a pipeline() translation-task error message are unrelated issues." + }, + { + "left": "issue:44222", + "right": "issue:45478", + "accept": false, + "reason": "Both involve MoE/pretrained wording, but one is FP8 save_pretrained and the other is from_pretrained loading. Not the same concrete bug." + }, + { + "left": "issue:43976", + "right": "issue:45446", + "accept": false, + "reason": "Python version compatibility for Transformers 5.1.0 is unrelated to an incorrect PyTorch version check in flex_attention." + }, + { + "left": "issue:44038", + "right": "issue:45478", + "accept": false, + "reason": "Qwen3-VL-Moe on transformers 5.0 and Qwen3.5 MoE from_pretrained on transformers>=5.4.0 are different model/version bugs, not one merged fix." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 111, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33453", + "issue:39401", + "issue:42673", + "issue:43421", + "issue:43450", + "issue:43827", + "issue:43906", + "issue:43931", + "issue:43937", + "issue:43986", + "issue:44291", + "issue:44779", + "issue:44945", + "issue:44977", + "issue:45071", + "issue:45083", + "issue:45405", + "issue:45446" + ], + "soft_pairs": [ + "issue:39401|issue:44779", + "issue:33453|issue:39401", + "issue:43931|issue:44945", + "issue:45083|issue:45446", + "issue:43937|issue:44977", + "issue:42673|issue:43906", + "issue:43827|issue:45446", + "issue:43450|issue:43986", + "issue:45405|issue:45446", + "issue:44291|issue:45071", + "issue:39401|issue:43421" + ], + "prepared_review_unit_hash": "202bd1dacc6e86cd8be6f02995a2425ba899d1180b98a4194f7ed99fc5479b3a", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13617, + "estimated_input_tokens": 3405, + "estimated_eval_tokens": 7066 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This is a noisy, heterogeneous cluster: the soft pairs share broad subsystem terms, but they are not the same concrete bug or change, so none should be merged as duplicates.", + "confidence": 0.82, + "canonical_issue_reason": "No 
single canonical issue cleanly covers the set; the items span unrelated tokenizer, model-loading, video, docs, and version-check problems.", + "canonical_pr_reason": null, + "best_issue_reason": "If one issue must anchor the cluster, 39401 is the most concrete and self-contained tokenizer report, but it still only fits a narrow subset of the items.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:39401", + "right": "issue:44779", + "accept": false, + "reason": "Both are tokenizer-related, but one is a Qwen3 offset_mapping bug and the other is a broader Deepseek v5 regression in outputs; not the same defect." + }, + { + "left": "issue:33453", + "right": "issue:39401", + "accept": false, + "reason": "Tokenizer loading regression vs wrong offset_mapping; different symptoms and likely different code paths." + }, + { + "left": "issue:43931", + "right": "issue:44945", + "accept": false, + "reason": "Model weight-shape loading failure vs incorrect LLM output under pipeline parallelism; these are distinct bugs." + }, + { + "left": "issue:45083", + "right": "issue:45446", + "accept": false, + "reason": "Qwen3 Omni feature helper behavior is unrelated to the flex_attention PyTorch version check." + }, + { + "left": "issue:43937", + "right": "issue:44977", + "accept": false, + "reason": "GenerationConfig validation failure and flash-attention generation breakage are different issues." + }, + { + "left": "issue:42673", + "right": "issue:43906", + "accept": false, + "reason": "VRAM leak in multi-threaded Qwen3ForCausalLM is not the same as an isolated reproduction of another issue." + }, + { + "left": "issue:43827", + "right": "issue:45446", + "accept": false, + "reason": "Docs still referencing removed pipeline() is unrelated to a flex_attention import/version-check bug." 
+ }, + { + "left": "issue:43450", + "right": "issue:43986", + "accept": false, + "reason": "Batched video processor shape bug vs AutoProcessor crash without torchvision; related area, but not the same bug." + }, + { + "left": "issue:45405", + "right": "issue:45446", + "accept": false, + "reason": "PEFT minimum-version bump issue is unrelated to the flex_attention PyTorch version check." + }, + { + "left": "issue:44291", + "right": "issue:45071", + "accept": false, + "reason": "Unexpected _is_hf_initialized argument during init_empty_weights is a different failure mode from PretrainedConfig type checking." + }, + { + "left": "issue:39401", + "right": "issue:43421", + "accept": false, + "reason": "Offset_mapping regression and runtime post-processor update for special tokens are separate tokenizer-backend problems." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 112, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:38617", + "issue:41628", + "issue:42175", + "issue:42222", + "issue:42673", + "issue:43408", + "issue:43577", + "issue:43582", + "issue:43742", + "issue:44117", + "issue:44220", + "issue:44410", + "issue:44484", + "issue:44829", + "issue:45081", + "issue:45216", + "issue:45310" + ], + "soft_pairs": [ + "issue:38617|issue:41628", + "issue:44484|issue:45310", + "issue:44829|issue:45216", + "issue:42673|issue:43577", + "issue:44117|issue:45081", + "issue:42673|issue:43582", + "issue:38617|issue:44220", + "issue:42175|issue:42222", + "issue:44410|issue:45310", + "issue:43408|issue:43742" + ], + "prepared_review_unit_hash": 
"455512cbb1a82e031fb6bb52fe45225e8ef20b817d4254aa7e7f0ddcc99ae1c5", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12966, + "estimated_input_tokens": 3242, + "estimated_eval_tokens": 6740 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous and does not look like a single duplicate set. The only plausible duplicate pair is the two Qwen3.5 regression reports around save/load behavior in transformers>=5.4.0; the rest point to distinct subsystems and failure modes.", + "confidence": 0.74, + "canonical_issue_reason": "issue:45216 is the best issue anchor because it is the more specific regression report, names the version boundary, and directly describes the save_pretrained checkpoint corruption that the follow-up from_pretrained failure appears to stem from.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45216 is the strongest representative of the only apparent duplicate bug in the cluster and is more actionable than the broader follow-up report.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:38617", + "right": "issue:41628", + "accept": false, + "reason": "Both are import errors, but they involve different missing symbols (`layer_type_validation` vs `AutoImageProcessor`) and different code paths." + }, + { + "left": "issue:44484", + "right": "issue:45310", + "accept": false, + "reason": "These are unrelated: one asks about shard-size defaults in `save_pretrained`, the other is a Qwen3.5 MoE load regression." 
+ }, + { + "left": "issue:44829", + "right": "issue:45216", + "accept": false, + "reason": "Different bugs in different areas: flash-attention training behavior vs Qwen3.5 checkpoint serialization." + }, + { + "left": "issue:42673", + "right": "issue:43577", + "accept": false, + "reason": "One is a VRAM leak in Qwen3ForCausalLM with dataloader threads; the other is a dtype propagation issue in BLIP2 loading." + }, + { + "left": "issue:44117", + "right": "issue:45081", + "accept": false, + "reason": "`TOKENIZER_MAPPING_NAMES` returning None and a Mistral regex patch crash are separate tokenizer-loading failures." + }, + { + "left": "issue:42673", + "right": "issue:43582", + "accept": false, + "reason": "Different runtime problems: a Qwen3 VRAM leak vs an Apple Silicon TypeError in `caching_allocator_warmup`." + }, + { + "left": "issue:38617", + "right": "issue:44220", + "accept": false, + "reason": "ImportError for a missing config symbol is not the same underlying problem as `_torch_extract_fbank_features()` behavior." + }, + { + "left": "issue:42175", + "right": "issue:42222", + "accept": false, + "reason": "Package/backend dependency omission and broken VitPose model files are unrelated." + }, + { + "left": "issue:44410", + "right": "issue:45310", + "accept": false, + "reason": "A missing projection layer in qwen3next is a model architecture bug, not the Qwen3.5 save/load regression." + }, + { + "left": "issue:43408", + "right": "issue:43742", + "accept": false, + "reason": "Model-type warning for sam3_tracker/sam3_video and a key error loading MobileLLM-125M are distinct model-loading issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 113, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42222", + "issue:42371", + "issue:42617", + "issue:43066", + "issue:43122", + "issue:43673", + "issue:43937", + "issue:44261", + "issue:44483", + "issue:44484", + "issue:44496", + "issue:44933", + "issue:44964", + "issue:45278", + "issue:45341", + "issue:45459", + "issue:45464", + "issue:45478" + ], + "soft_pairs": [ + "issue:42371|issue:44261", + "issue:43937|issue:44496", + "issue:44933|issue:45278", + "issue:44483|issue:45464", + "issue:42617|issue:45341", + "issue:43066|issue:43122", + "issue:44484|issue:45478", + "issue:44964|issue:45278", + "issue:42222|issue:45459", + "issue:43673|issue:45341" + ], + "prepared_review_unit_hash": "0cf0c9ed82cd7e60ca5b685be14b6a40d06d702ee95c25247bc8bc59d70bafe4", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13184, + "estimated_input_tokens": 3296, + "estimated_eval_tokens": 6848 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of separate Transformers v5 regression reports, not true duplicates. 
A few items are loosely similar within import/tokenization/chat-completions breakage, but the exact failure modes and code paths differ enough that none of the soft pairs should be merged.", + "confidence": 0.63, + "canonical_issue_reason": "No single canonical issue cleanly covers the whole cluster. If forced to pick a representative, issue 45278 is the broadest umbrella for the import-regression subset, but the cluster spans multiple unrelated bugs and breakages.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45278 is the best overall anchor because it is open, broad, and directly describes a widespread post-upgrade import problem. It is still not a true duplicate target for the rest of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42371", + "right": "issue:44261", + "accept": false, + "reason": "TF32 settings guidance vs. MLA q_a_layernorm precision bug; different subsystems and root causes." + }, + { + "left": "issue:43937", + "right": "issue:44496", + "accept": false, + "reason": "Both involve v5 config/model loading, but one is invalid GenerationConfig and the other is an unrecognized model config; not the same bug." + }, + { + "left": "issue:44933", + "right": "issue:45278", + "accept": false, + "reason": "45278 is a broad import-error report, but 44933 is a specific missing image_utils import; related theme, not the same underlying issue." + }, + { + "left": "issue:44483", + "right": "issue:45464", + "accept": false, + "reason": "Both mention chat/completions, but one is a general v5.3 request rejection and the other is a streaming-specific Qwen3.5 failure." + }, + { + "left": "issue:42617", + "right": "issue:45341", + "accept": false, + "reason": "A runtime script failure and a testing_utils bug are unrelated code paths." 
+ }, + { + "left": "issue:43066", + "right": "issue:43122", + "accept": false, + "reason": "Both are tokenizer-related v5 regressions, but one is decoder-type resolution and the other is changed tokenization output; not the same defect." + }, + { + "left": "issue:44484", + "right": "issue:45478", + "accept": false, + "reason": "save_pretrained shard-size behavior and Qwen3.5 MoE from_pretrained errors are unrelated." + }, + { + "left": "issue:44964", + "right": "issue:45278", + "accept": false, + "reason": "Phi-4 multimodal loading failure is model-specific; 45278 is a generic import-error report, so they are not duplicates." + }, + { + "left": "issue:42222", + "right": "issue:45459", + "accept": false, + "reason": "VitPose model breakage and protobuf-related tokenizer error masking are different failure modes." + }, + { + "left": "issue:43673", + "right": "issue:45341", + "accept": false, + "reason": "GenerationMixin cache missing during chunked_prefill is unrelated to a testing_utils bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 114, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41628", + "issue:42222", + "issue:42994", + "issue:43012", + "issue:43097", + "issue:43408", + "issue:43441", + "issue:43874", + "issue:43986", + "issue:44261", + "issue:44263", + "issue:44410", + "issue:44484", + "issue:44485", + "issue:44493", + "issue:44829", + "issue:45071", + "issue:45478" + ], + "soft_pairs": [ + "issue:43097|issue:44493", + "issue:43874|issue:44261", + "issue:43986|issue:44484", + "issue:43441|issue:44829", + "issue:44410|issue:45478", + "issue:42222|issue:42994", + "issue:43012|issue:43408", + "issue:44263|issue:44485", + "issue:41628|issue:45071" + ], + "prepared_review_unit_hash": "f23fb8f8981b45537b4cfc123e4026de63d19701e7cb35be2383452f3a16de6a", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13036, + "estimated_input_tokens": 3259, + "estimated_eval_tokens": 6774 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are thematically similar in being Transformers bug reports, but they do not describe the same underlying defect. 
They span unrelated areas like imports, model-specific architecture bugs, quantization/saving, attention backends, config regressions, and warning changes, so none of the soft pairs should be merged as duplicates.", + "confidence": 0.92, + "canonical_issue_reason": "No single issue is a good canonical representative: the cluster is too heterogeneous and the soft pairs only share broad subsystem similarity, not the same concrete bug.", + "canonical_pr_reason": null, + "best_issue_reason": "If one had to pick a broad representative, issue 43097 is the most regression-like and general, but it still does not subsume the other reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43097", + "right": "issue:44493", + "accept": false, + "reason": "Different failures: one is about `tie_embeddings_and_encoder_decoder` removal in v5, the other about unexpected `position_id` keys across many models." + }, + { + "left": "issue:43874", + "right": "issue:44261", + "accept": false, + "reason": "Both are model-specific errors, but they involve different models and different root causes: missing image-processor method vs missing `rms_norm_eps` config." + }, + { + "left": "issue:43986", + "right": "issue:44484", + "accept": false, + "reason": "Unrelated problems: video AutoProcessor loading with missing torchvision vs `save_pretrained()` shard-size default behavior." + }, + { + "left": "issue:43441", + "right": "issue:44829", + "accept": false, + "reason": "Both mention FlashAttention 3, but the concrete bugs differ: one is a Ministral-3 inference failure, the other is degenerate training in sequence classification." + }, + { + "left": "issue:44410", + "right": "issue:45478", + "accept": false, + "reason": "Same model family, but different code paths and symptoms: missing projections in layer 0 vs a `from_pretrained` failure for Qwen3.5 MoE." 
+ }, + { + "left": "issue:42222", + "right": "issue:42994", + "accept": false, + "reason": "Completely different issues: broken ViT-Pose models vs quantized model saving failure." + }, + { + "left": "issue:43012", + "right": "issue:43408", + "accept": false, + "reason": "Different concerns: a PyTorch warning during bfloat16 compilation vs a model-type mismatch warning for sam3_video/sam3_tracker." + }, + { + "left": "issue:44263", + "right": "issue:44485", + "accept": false, + "reason": "Same broad GLM area, but different bugs: `torch.split()` handling in MoE indexing vs RoPE implementation issues." + }, + { + "left": "issue:41628", + "right": "issue:45071", + "accept": false, + "reason": "Different API regressions: missing `AutoImageProcessor` import vs `PretrainedConfig` type-checking breakage." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 115, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43097", + "issue:43335", + "issue:43408", + "issue:43452", + "issue:43874", + "issue:43931", + "issue:43986", + "issue:44038", + "issue:44117", + "issue:44263", + "issue:44368", + "issue:44410", + "issue:44987", + "issue:45003", + "issue:45071", + "issue:45341", + "issue:45372" + ], + "soft_pairs": [ + "issue:45003|issue:45341", + "issue:43097|issue:43452", + "issue:44117|issue:44987", + "issue:43986|issue:45341", + "issue:43931|issue:44410", + "issue:43874|issue:44263", + "issue:43335|issue:45071", + "issue:43408|issue:44368", + "issue:43931|issue:44038", + "issue:43986|issue:45372" + ], + "prepared_review_unit_hash": 
"12a8d8451cd326f89e5531a029fd4e01018210e7a32b924a1f0f1372e63973fc", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12928, + "estimated_input_tokens": 3232, + "estimated_eval_tokens": 6720 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly unrelated bug reports across different models/subsystems. I do not see any soft pair that clearly describes the same underlying defect, so all soft edges should be rejected.", + "confidence": 0.44, + "canonical_issue_reason": "issue:45003 is the broadest core-library bug report here, centered on a generic modeling_utils failure path and likely to be the most reusable anchor for duplicates.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45003 is the best global representative because it targets a central code path, is broadly phrased, and has active discussion.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45003", + "right": "issue:45341", + "accept": false, + "reason": "Different subsystems and failures: sys.modules access in modeling_utils versus a testing_utils bug." + }, + { + "left": "issue:43097", + "right": "issue:43452", + "accept": false, + "reason": "One is about a removed config option; the other is a gguf_file loading regression in tokenizer/model loading." + }, + { + "left": "issue:44117", + "right": "issue:44987", + "accept": false, + "reason": "Both involve from_pretrained-style loading, but the concrete bugs differ: tokenizer mapping None-handling vs a specific model load failure for physical-intelligence/fast." 
+ }, + { + "left": "issue:43986", + "right": "issue:45341", + "accept": false, + "reason": "Video AutoProcessor without torchvision is a model-loading dependency issue, not the same as a testing utility bug." + }, + { + "left": "issue:43931", + "right": "issue:44410", + "accept": false, + "reason": "Different Qwen3-VL/Qwen3next model-specific architecture problems with no clear shared code-path defect." + }, + { + "left": "issue:43874", + "right": "issue:44263", + "accept": false, + "reason": "An image-processor missing method causing multimodal token counting failure is unrelated to a torch.split return-value issue." + }, + { + "left": "issue:43335", + "right": "issue:45071", + "accept": false, + "reason": "One is a SwitchTransformers sparse-layer config bug; the other is a PretrainedConfig type-checking regression." + }, + { + "left": "issue:43408", + "right": "issue:44368", + "accept": false, + "reason": "A sam3_video/sam3_tracker config warning and a Qwen3.5 tie_word_embeddings warning are different model/config problems." + }, + { + "left": "issue:43931", + "right": "issue:44038", + "accept": false, + "reason": "Both mention Qwen3-VL, but one is a weight-shape mismatch and the other is a transformers 5.0 loading issue for Qwen3-VL-Moe." + }, + { + "left": "issue:43986", + "right": "issue:45372", + "accept": false, + "reason": "Video AutoProcessor torchvision dependency crash is unrelated to a mistral_common ReasoningEffort import error." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 116, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:38617", + "issue:43064", + "issue:43531", + "issue:43976", + "issue:43986", + "issue:44062", + "issue:44117", + "issue:44261", + "issue:44315", + "issue:44351", + "issue:44464", + "issue:44485", + "issue:44704", + "issue:44829", + "issue:45071", + "issue:45103", + "issue:45245", + "issue:45341" + ], + "soft_pairs": [ + "issue:44829|issue:45245", + "issue:43064|issue:44315", + "issue:43976|issue:45071", + "issue:44464|issue:45071", + "issue:43531|issue:45341", + "issue:44704|issue:45103", + "issue:44261|issue:44485", + "issue:38617|issue:44351", + "issue:44062|issue:44117", + "issue:43986|issue:44117" + ], + "prepared_review_unit_hash": "3432e3bf5cf93f7ffc9d47c8cbaba0a39cf2cea97a57fc672a48af92c3395263", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13455, + "estimated_input_tokens": 3364, + "estimated_eval_tokens": 6984 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is noisy: these issues cover unrelated bugs across imports, training, processors, tokenizers, configs, and model-specific behavior. 
None of the soft pairs look like true duplicates or the same underlying change.", + "confidence": 0.97, + "canonical_issue_reason": "issue:43064 is the most concrete and actionable report in the set, with a specific multi-component training failure mode and clear rank-dependent behavior.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43064 is the strongest standalone issue to keep: it is detailed, reproducible, and describes a high-impact training correctness bug.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44829", + "right": "issue:45245", + "accept": false, + "reason": "Different bugs: flash_attention_3 training degeneration vs a category-count runtime limit. No shared code-path or fix." + }, + { + "left": "issue:43064", + "right": "issue:44315", + "accept": false, + "reason": "Both involve training, but one is optimizer-state corruption under FSDP2/PEFT and the other is Liger Kernel not being applied with model_init. Different mechanisms and fixes." + }, + { + "left": "issue:43976", + "right": "issue:45071", + "accept": false, + "reason": "Separate compatibility issues: Python 3.9/v5.1.0 failure vs v5.4.0 PretrainedConfig type-checking breakage. Not the same underlying bug." + }, + { + "left": "issue:44464", + "right": "issue:45071", + "accept": false, + "reason": "Chunked generation with compiled forward is unrelated to PretrainedConfig type checking. Different subsystem and failure mode." + }, + { + "left": "issue:43531", + "right": "issue:45341", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior is unrelated to a testing_utils bug." + }, + { + "left": "issue:44704", + "right": "issue:45103", + "accept": false, + "reason": "AutoProcessor kwargs forwarding to cached_file is a processor loading bug; the other is an auto_docstring crash with future annotations. Distinct code paths." 
+ }, + { + "left": "issue:44261", + "right": "issue:44485", + "accept": false, + "reason": "Different model-specific architecture concerns: missing rms_norm_eps in MLA q_a_layernorm vs GLM-5 RoPE implementation. Not mergeable as one fix." + }, + { + "left": "issue:38617", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but for different missing exports ('layer_type_validation' vs 'HybridCache') and likely different release regressions." + }, + { + "left": "issue:44062", + "right": "issue:44117", + "accept": false, + "reason": "One is AddedToken construction with duplicate 'special' kwarg; the other is tokenizer mapping returning None during from_pretrained. Different root causes." + }, + { + "left": "issue:43986", + "right": "issue:44117", + "accept": false, + "reason": "AutoProcessor video loading without torchvision and tokenizer mapping None are unrelated loading paths with different fixes." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 117, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:38617", + "issue:43066", + "issue:43097", + "issue:43502", + "issue:43582", + "issue:43824", + "issue:43873", + "issue:43906", + "issue:43957", + "issue:43986", + "issue:43992", + "issue:44038", + "issue:44351", + "issue:44410", + "issue:44484", + "issue:45341", + "issue:45375" + ], + "soft_pairs": [ + "issue:44410|issue:45375", + "issue:43502|issue:43992", + "issue:43582|issue:45341", + "issue:38617|issue:43097", + "issue:44038|issue:44410", + "issue:43957|issue:44351", + "issue:43992|issue:44484", + "issue:43824|issue:43986", + 
"issue:43066|issue:43906", + "issue:43097|issue:43873" + ], + "prepared_review_unit_hash": "552f1152506ec9c20b864327721e37b816e9752c2b57b867ab48cad21ada4d5d", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12709, + "estimated_input_tokens": 3178, + "estimated_eval_tokens": 6612 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is heterogeneous: the soft pairs share broad Transformers/v5 or Qwen-themed wording, but each describes a different concrete failure mode or code path. None look like safe duplicate merges.", + "confidence": 0.93, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44410", + "right": "issue:45375", + "accept": false, + "reason": "Both are Qwen3-related, but one reports missing attention projections in layer 0 while the other reports a missing config field dropped by strict parsing. Different underlying bugs." + }, + { + "left": "issue:43502", + "right": "issue:43992", + "accept": false, + "reason": "One is about unwanted network requests with local_files_only=True; the other is missing embed_tokens.weight when loading UMT5Encoder. Different load-time failures." + }, + { + "left": "issue:43582", + "right": "issue:45341", + "accept": false, + "reason": "Apple Silicon TypeError in caching_allocator_warmup is unrelated to a testing_utils bug. No shared code path." 
+ }, + { + "left": "issue:38617", + "right": "issue:43097", + "accept": false, + "reason": "Both mention Transformers v5 API breakage, but the missing import of layer_type_validation and removal of tie_embeddings_and_encoder_decoder are distinct API changes." + }, + { + "left": "issue:44038", + "right": "issue:44410", + "accept": false, + "reason": "Both involve Qwen model support, but the reported symptoms differ: a general Qwen3-VL-Moe bug versus missing Qwen3Next projections. Not the same concrete fix." + }, + { + "left": "issue:43957", + "right": "issue:44351", + "accept": false, + "reason": "Meta-device loading failures and missing HybridCache import are unrelated failures." + }, + { + "left": "issue:43992", + "right": "issue:44484", + "accept": false, + "reason": "Loading missing weights versus save_pretrained shard-size behavior are different mechanisms and different code paths." + }, + { + "left": "issue:43824", + "right": "issue:43986", + "accept": false, + "reason": "An import error for Qwen2_5_VLForConditionalGeneration and a crash in AutoProcessor without torchvision are separate issues." + }, + { + "left": "issue:43066", + "right": "issue:43906", + "accept": false, + "reason": "The titles suggest different bugs; one is a tokenizer decoder type regression, the other is a reproduction of another issue with no clear shared failure mode." + }, + { + "left": "issue:43097", + "right": "issue:43873", + "accept": false, + "reason": "Removal of tie_embeddings_and_encoder_decoder and offloading problems with quantization are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 118, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42886", + "issue:42907", + "issue:43012", + "issue:43097", + "issue:43208", + "issue:43352", + "issue:43381", + "issue:43525", + "issue:43550", + "issue:43825", + "issue:43901", + "issue:43937", + "issue:44117", + "issue:44368", + "issue:44380", + "issue:44655", + "issue:44792", + "issue:45479" + ], + "soft_pairs": [ + "issue:43352|issue:44380", + "issue:43825|issue:44655", + "issue:43097|issue:43901", + "issue:43012|issue:43550", + "issue:43012|issue:44368", + "issue:42886|issue:43381", + "issue:43208|issue:43550", + "issue:43937|issue:44792", + "issue:44380|issue:45479", + "issue:42907|issue:44655", + "issue:43525|issue:44117" + ], + "prepared_review_unit_hash": "0dddcd192daadb0bd172fd4cc1d1cef74af2a82156cdffaacbb66e1c3425cd45", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13716, + "estimated_input_tokens": 3429, + "estimated_eval_tokens": 7114 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are not duplicates of one another; they span unrelated tokenizer, pipeline, config, attention, saving, and model-specific bugs. 
None of the soft pairs looks like the same underlying change or fix.", + "confidence": 0.97, + "canonical_issue_reason": "No suitable canonical issue: the cluster is heterogeneous and does not represent one underlying bug or feature request.", + "canonical_pr_reason": null, + "best_issue_reason": "No clear global best issue: the items are too unrelated for one issue to serve as a representative duplicate target.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43352", + "right": "issue:44380", + "accept": false, + "reason": "Both mention attention backends, but one is a model-specific FlashAttention support error and the other is GPT-2 attention scaling behavior; different code paths and fixes." + }, + { + "left": "issue:43825", + "right": "issue:44655", + "accept": false, + "reason": "One is about a misleading pipeline() error message, the other about save_pretrained for Pipeline objects; unrelated behavior." + }, + { + "left": "issue:43097", + "right": "issue:43901", + "accept": false, + "reason": "Both touch docs/API messaging, but they concern different deprecated behaviors in different pipelines and are not the same bug." + }, + { + "left": "issue:43012", + "right": "issue:43550", + "accept": false, + "reason": "A PyTorch warning during bfloat16 compilation is not the same as a torch.compile failure on Bamba with SDPA." + }, + { + "left": "issue:43012", + "right": "issue:44368", + "accept": false, + "reason": "These are distinct warning reports affecting different models and triggers; not the same underlying issue." + }, + { + "left": "issue:42886", + "right": "issue:43381", + "accept": false, + "reason": "Tokenizer offline cache loading and gradient checkpointing in eval mode are unrelated failures." + }, + { + "left": "issue:43208", + "right": "issue:43550", + "accept": false, + "reason": "xLSTM training bugs and Bamba torch.compile/SDPA failure are separate model-specific problems." 
+ }, + { + "left": "issue:43937", + "right": "issue:44792", + "accept": false, + "reason": "Invalid GenerationConfig for GLM-5 and a Janus image-generation test failure are different subsystems and symptoms." + }, + { + "left": "issue:44380", + "right": "issue:45479", + "accept": false, + "reason": "GPT-2 attention scaling under SDPA/FlashAttention is unrelated to the sequence-classification zero-loss bug." + }, + { + "left": "issue:42907", + "right": "issue:44655", + "accept": false, + "reason": "Saving dequantized models and saving Pipeline objects both mention save behavior, but they are different save paths with different fixes." + }, + { + "left": "issue:43525", + "right": "issue:44117", + "accept": false, + "reason": "A missing Llama4Config attribute and a tokenizer mapping None-assumption bug are different initialization issues." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 119, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36683", + "issue:38617", + "issue:43012", + "issue:43064", + "issue:43208", + "issue:43352", + "issue:43502", + "issue:43526", + "issue:43582", + "issue:43618", + "issue:43761", + "issue:43976", + "issue:44380", + "issue:44483", + "issue:44514", + "issue:44683", + "issue:45290" + ], + "soft_pairs": [ + "issue:43352|issue:43976", + "issue:43064|issue:44380", + "issue:43208|issue:43526", + "issue:43618|issue:44380", + "issue:44483|issue:44514", + "issue:36683|issue:43582", + "issue:43618|issue:43761", + "issue:43012|issue:44683", + "issue:38617|issue:43502", + "issue:44483|issue:45290" + ], + "prepared_review_unit_hash": 
"1db2210f17bc0fd987faeeb5ab386a6df94c877ff9f25a56c33dcb59fb254f4b", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12884, + "estimated_input_tokens": 3221, + "estimated_eval_tokens": 6698 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the issues span unrelated models, APIs, runtime errors, and backend-specific regressions. I do not see a duplicate set or a single canonical representative.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43352", + "right": "issue:43976", + "accept": false, + "reason": "Different problems: Flash Attention support for Nemotron vs a Python/version import-compatibility issue." + }, + { + "left": "issue:43064", + "right": "issue:44380", + "accept": false, + "reason": "Distinct code paths: FSDP2/PEFT optimizer-state corruption vs GPT-2 attention scaling being ignored in SDPA/FlashAttention." + }, + { + "left": "issue:43208", + "right": "issue:43526", + "accept": false, + "reason": "Unrelated components and symptoms: xLSTM training bugs vs BeitImageProcessorFast label reduction behavior." + }, + { + "left": "issue:43618", + "right": "issue:44380", + "accept": false, + "reason": "Both mention attention, but one is CLIP output fields missing and the other is GPT-2 backend-specific attention scaling; not the same bug." 
+ }, + { + "left": "issue:44483", + "right": "issue:44514", + "accept": false, + "reason": "Both are chat/template-related, but one is an API request rejection and the other is a processor crash on batched input with padding disabled." + }, + { + "left": "issue:36683", + "right": "issue:43582", + "accept": false, + "reason": "Completely different areas: Gemma3 config attribute error vs Apple Silicon allocator warmup TypeError." + }, + { + "left": "issue:43618", + "right": "issue:43761", + "accept": false, + "reason": "Related CLIP family, but one bug is missing attentions and the other is hidden_states not returned; these are separate output regressions." + }, + { + "left": "issue:43012", + "right": "issue:44683", + "accept": false, + "reason": "Both involve attention/precision context, but one is a PyTorch warning from bfloat16 compilation and the other is a compiled flex_attention failure on newer torch." + }, + { + "left": "issue:38617", + "right": "issue:43502", + "accept": false, + "reason": "Different failure modes: missing import from configuration_utils vs unexpected network requests despite local_files_only=True." + }, + { + "left": "issue:44483", + "right": "issue:45290", + "accept": false, + "reason": "Both concern apply_chat_template, but the failures are different: request handling in /v1/chat/completions vs tool-call assistant messages with no content." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 120, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36683", + "issue:42994", + "issue:43066", + "issue:43299", + "issue:43352", + "issue:43475", + "issue:43756", + "issue:43867", + "issue:43901", + "issue:43937", + "issue:44038", + "issue:44220", + "issue:44230", + "issue:44483", + "issue:44485", + "issue:44655", + "issue:45216", + "issue:45381" + ], + "soft_pairs": [ + "issue:44038|issue:44230", + "issue:44483|issue:45381", + "issue:43299|issue:43867", + "issue:43756|issue:44485", + "issue:43901|issue:44655", + "issue:43867|issue:45216", + "issue:42994|issue:44655", + "issue:36683|issue:43475", + "issue:43352|issue:43937", + "issue:44220|issue:44655", + "issue:43066|issue:45381" + ], + "prepared_review_unit_hash": "ef01d4af9a076acecd33fdb0b182fd8ba269c4ee4a4cd7bdbdb3510d5c11172e", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13443, + "estimated_input_tokens": 3361, + "estimated_eval_tokens": 6978 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: it mixes unrelated bugs around model loading/saving, RoPE, tokenizer/docs, flash-attn support, and API validation. 
The soft links are mostly superficial (shared model families or shared serialization terminology), not the same underlying bug or change.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44038", + "right": "issue:44230", + "accept": false, + "reason": "Both mention Qwen3-VL-Moe/fp8, but one is a broad Transformers 5 breakage/loading report and the other is a specific fp8 inference support request; different underlying problems." + }, + { + "left": "issue:44483", + "right": "issue:45381", + "accept": false, + "reason": "One is chat/completions request rejection in v5.3, the other is a qwen2.5-vl video position-id bug; different code paths and symptoms." + }, + { + "left": "issue:43299", + "right": "issue:43867", + "accept": false, + "reason": "Both are model-loading failures, but one is Qwen3-VL-Moe regression and the other is state_dict ordering causing load errors; not the same bug." + }, + { + "left": "issue:43756", + "right": "issue:44485", + "accept": false, + "reason": "Both reference RoPE, but one is a Smollm3 layer-count mismatch and the other is a GLM-5 RoPE implementation discussion; not mergeable as one fix." + }, + { + "left": "issue:43901", + "right": "issue:44655", + "accept": false, + "reason": "Docs mentioning return_all_scores is a documentation mismatch, while the pipeline issue is a save_pretrained serialization failure; unrelated." + }, + { + "left": "issue:43867", + "right": "issue:45216", + "accept": false, + "reason": "Both involve checkpoint/load/save behavior, but one is sorted state_dict loading and the other is Qwen3.5 save_pretrained producing a bad checkpoint; different failure modes." 
+ }, + { + "left": "issue:42994", + "right": "issue:44655", + "accept": false, + "reason": "Quantized model saving and pipeline object saving are separate serialization paths; same broad theme, but not the same concrete bug." + }, + { + "left": "issue:36683", + "right": "issue:43475", + "accept": false, + "reason": "Both are attribute errors on model-related objects, but they concern different missing fields on different model families." + }, + { + "left": "issue:43352", + "right": "issue:43937", + "accept": false, + "reason": "Flash Attention 2.0 unsupported is a capability/runtime guard, while GenerationConfig invalid is a config-validation issue; unrelated." + }, + { + "left": "issue:44220", + "right": "issue:44655", + "accept": false, + "reason": "Audio feature extraction and pipeline saving are unrelated code paths; no evidence of the same defect." + }, + { + "left": "issue:43066", + "right": "issue:45381", + "accept": false, + "reason": "Tokenizer decoder-type documentation/regression and qwen2.5-vl video vision_position_ids are different issues with different fixes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 121, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36683", + "issue:43010", + "issue:43064", + "issue:43525", + "issue:43618", + "issue:43976", + "issue:43992", + "issue:44077", + "issue:44188", + "issue:44242", + "issue:44263", + "issue:44380", + "issue:44492", + "issue:44938", + "issue:45216", + "issue:45278", + "issue:45310", + "issue:45440" + ], + "soft_pairs": [ + "issue:43064|issue:44492", + "issue:44077|issue:44380", + "issue:36683|issue:44077", + "issue:44263|issue:45440", + "issue:43976|issue:44938", + "issue:36683|issue:43525", + "issue:44188|issue:44380", + "issue:43992|issue:45310", + "issue:43618|issue:45216", + "issue:36683|issue:44242", + "issue:43010|issue:45278" + ], + "prepared_review_unit_hash": "f1c0781a821c537cdbf52a73173ce8202ea13717b925a974d67bf79c72b1a4bb", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13575, + "estimated_input_tokens": 3394, + "estimated_eval_tokens": 7044 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are mostly unrelated issues spanning different models, loading paths, attention backends, platform compatibility, and one training/FSDP bug. 
No PRs are present, and all soft-similarity pairs should be rejected as duplicates.", + "confidence": 0.93, + "canonical_issue_reason": "No clear canonical issue: the set does not form one duplicate cluster, and the reports describe distinct failures in different code paths and model families.", + "canonical_pr_reason": null, + "best_issue_reason": "No single issue is a good global representative. The closest broad report is still too general and does not subsume the other bugs without conflating unrelated problems.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43064", + "right": "issue:44492", + "accept": false, + "reason": "Completely different bugs: FSDP/PEFT optimizer-state corruption vs a typo in cache strategies." + }, + { + "left": "issue:44077", + "right": "issue:44380", + "accept": false, + "reason": "Different concrete problems: optional post_init handling vs GPT2 attention scaling being ignored under SDPA/FlashAttention." + }, + { + "left": "issue:36683", + "right": "issue:44077", + "accept": false, + "reason": "Different failures and code paths: missing Gemma3Config.vocab_size vs patchtsmixer post_init policy." + }, + { + "left": "issue:44263", + "right": "issue:45440", + "accept": false, + "reason": "Unrelated: a torch.split return-value issue in an indexer vs DeepseekV3MoE behavioral divergence." + }, + { + "left": "issue:43976", + "right": "issue:44938", + "accept": false, + "reason": "Both are runtime compatibility reports, but they are not the same bug: Python version support failure vs Python 3.14 import/load failure." + }, + { + "left": "issue:36683", + "right": "issue:43525", + "accept": false, + "reason": "Similar symptom class (missing config attribute), but different model attributes and fixes: Gemma3 vocab_size vs Llama4 pad_token_id." 
+ }, + { + "left": "issue:44188", + "right": "issue:44380", + "accept": false, + "reason": "Both touch attention behavior, but one is a torch.compile branching divergence and the other is backend-specific scaling config handling." + }, + { + "left": "issue:43992", + "right": "issue:45310", + "accept": false, + "reason": "Both concern model loading, but they affect different model families and failure modes; not the same underlying bug." + }, + { + "left": "issue:43618", + "right": "issue:45216", + "accept": false, + "reason": "Different model/output regressions: CLIPOutput attentions assignment vs Qwen3.5 save_pretrained checkpoint corruption." + }, + { + "left": "issue:36683", + "right": "issue:44242", + "accept": false, + "reason": "Gemma3Config missing vocab_size is unrelated to MoE load-balancing loss being skipped when output_router_logits=False." + }, + { + "left": "issue:43010", + "right": "issue:45278", + "accept": false, + "reason": "Generic similarity only: a no_grad decoration request is unrelated to broad import errors after upgrading Transformers." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 122, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36683", + "issue:41669", + "issue:43010", + "issue:43064", + "issue:43352", + "issue:43867", + "issue:43931", + "issue:43976", + "issue:43992", + "issue:44246", + "issue:44360", + "issue:44938", + "issue:45071", + "issue:45357", + "issue:45375", + "issue:45381", + "issue:45440", + "issue:45478" + ], + "soft_pairs": [ + "issue:41669|issue:44246", + "issue:36683|issue:43352", + "issue:43352|issue:43931", + "issue:43992|issue:45478", + "issue:43867|issue:45375", + "issue:36683|issue:43064", + "issue:44938|issue:45071", + "issue:43867|issue:45357", + "issue:43010|issue:43976", + "issue:44938|issue:45381", + "issue:44360|issue:45440" + ], + "prepared_review_unit_hash": "abc9509323b6ef70af94ef79c459d1c5f5265f605200dbb09c60f43f883d4c8a", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13577, + "estimated_input_tokens": 3395, + "estimated_eval_tokens": 7046 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a single duplicate set; the soft pairs are mostly false positives across unrelated bugs, regressions, and performance issues. 
Several pairs only share a broad subsystem or wording, not the same concrete failure mode.", + "confidence": 0.93, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41669", + "right": "issue:44246", + "accept": false, + "reason": "Both are about import performance, but one is specifically removing `import *` from models while the other is an intermittent slow `import transformers` complaint. No concrete same fix path is evident." + }, + { + "left": "issue:36683", + "right": "issue:43352", + "accept": false, + "reason": "Different model/runtime failures: Gemma3 config missing `vocab_size` vs NemotronHForCausalLM rejecting Flash Attention 2.0. Not the same bug." + }, + { + "left": "issue:43352", + "right": "issue:43931", + "accept": false, + "reason": "Unrelated model-specific loading/support issues: Flash Attention 2.0 support gating vs Qwen3-VL weight shape mismatch." + }, + { + "left": "issue:43992", + "right": "issue:45478", + "accept": false, + "reason": "Both mention Qwen3.5/MoE loading, but one is a missing `embed_tokens.weight` in UMT5Encoder and the other is a `from_pretrained` error for Qwen3.5 MoE. Too different to treat as the same bug." + }, + { + "left": "issue:43867", + "right": "issue:45375", + "accept": false, + "reason": "Both concern loading/configuration, but one is a state_dict ordering load error and the other is a missing `deepstack_visual_indexes` config field. Different root causes." + }, + { + "left": "issue:36683", + "right": "issue:43064", + "accept": false, + "reason": "No overlap beyond general training/model code: Gemma3 config attribute error vs FSDP2/PEFT optimizer-state corruption. Different code paths and symptoms." 
+ }, + { + "left": "issue:44938", + "right": "issue:45071", + "accept": false, + "reason": "Both are Python 3.14 / v5-related compatibility problems, but one is import/load failure and the other is `PretrainedConfig` type-checking regression. Not the same change." + }, + { + "left": "issue:43867", + "right": "issue:45357", + "accept": false, + "reason": "One is a generic state_dict load failure; the other is a Qwen3.5 save_pretrained visual-encoder key regression. Different direction and failure point." + }, + { + "left": "issue:43010", + "right": "issue:43976", + "accept": false, + "reason": "`@torch.no_grad` on cache/layer update methods is unrelated to the Python 3.9+/5.1.0 compatibility failure. No shared bug." + }, + { + "left": "issue:44938", + "right": "issue:45381", + "accept": false, + "reason": "Different versioned Python/Qwen-VL issues: general Python 3.14 load failure vs Qwen2.5-VL video `vision_position_ids` mismatch." + }, + { + "left": "issue:44360", + "right": "issue:45440", + "accept": false, + "reason": "The DSA indexer discussion is unrelated to DeepseekV3MoE divergence from remote implementation." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 123, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:28282", + "issue:33453", + "issue:36296", + "issue:42890", + "issue:43502", + "issue:43575", + "issue:43618", + "issue:43644", + "issue:43867", + "issue:44062", + "issue:44077", + "issue:44112", + "issue:44484", + "issue:44857", + "issue:44938", + "issue:44964", + "issue:44977" + ], + "soft_pairs": [ + "issue:43618|issue:44077", + "issue:43644|issue:44938", + "issue:36296|issue:43575", + "issue:43867|issue:44484", + "issue:42890|issue:44112", + "issue:42890|issue:44964", + "issue:43618|issue:44857", + "issue:28282|issue:33453", + "issue:43618|issue:44977", + "issue:43502|issue:44062" + ], + "prepared_review_unit_hash": "c3bfe4103107eeb8be315646ccf7e06c203fd2dd0859e24315d4cb839e7e34de", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12610, + "estimated_input_tokens": 3153, + "estimated_eval_tokens": 6562 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is highly heterogeneous: the issues cover unrelated bugs in model loading, tensor parallelism, CI flakiness, buffer initialization, tokenizer construction, and config/runtime behavior. 
The shared wording around \"load\" or \"bug\" is superficial; there is no strong duplicate group here.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue stands out because the items describe different concrete failures in different code paths and models.", + "canonical_pr_reason": null, + "best_issue_reason": "If forced to pick the most self-contained report, issue 43502 is a clear reproducible bug description, but it is not representative of the rest of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43618", + "right": "issue:44077", + "accept": false, + "reason": "Different problems: CLIPOutput attentions missing vs PatchTSMixer post_init allowance. No shared code-path bug." + }, + { + "left": "issue:43644", + "right": "issue:44938", + "accept": false, + "reason": "One is buffer initialization junk in Transformers 5.0.0; the other is a Python 3.14 import/load failure. Unrelated." + }, + { + "left": "issue:36296", + "right": "issue:43575", + "accept": false, + "reason": "Both mention tensor parallelism, but one is a generic training bug and the other is an OOM when loading a specific Qwen model. Not the same bug." + }, + { + "left": "issue:43867", + "right": "issue:44484", + "accept": false, + "reason": "State-dict loading error and a question about save_pretrained shard size are different behaviors and code paths." + }, + { + "left": "issue:42890", + "right": "issue:44112", + "accept": false, + "reason": "Both are flaky tests, but for different models and causes; not the same underlying failure." + }, + { + "left": "issue:42890", + "right": "issue:44964", + "accept": false, + "reason": "Test seed flakiness vs model loading failure for Phi-4 multimodal. Different issues." + }, + { + "left": "issue:43618", + "right": "issue:44857", + "accept": false, + "reason": "CLIPOutput attentions regression and LwDetrImageLoss AMP/CUDA crash are unrelated." 
+ }, + { + "left": "issue:28282", + "right": "issue:33453", + "accept": false, + "reason": "PyTorch missing ImportError vs tokenizer-loading regression. Different subsystems and failure modes." + }, + { + "left": "issue:43618", + "right": "issue:44977", + "accept": false, + "reason": "Missing CLIPOutput attentions is unrelated to Qwen3.5 generation problems with flash-attention." + }, + { + "left": "issue:43502", + "right": "issue:44062", + "accept": false, + "reason": "Local-files-only API leakage and AddedToken keyword-argument error are unrelated bugs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 124, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42548", + "issue:42890", + "issue:43010", + "issue:43065", + "issue:43232", + "issue:43723", + "issue:44075", + "issue:44222", + "issue:44351", + "issue:44355", + "issue:44387", + "issue:44556", + "issue:44749", + "issue:45125", + "issue:45341", + "issue:45375", + "issue:45478" + ], + "soft_pairs": [ + "issue:43010|issue:44075", + "issue:42890|issue:45341", + "issue:43723|issue:44556", + "issue:42548|issue:44351", + "issue:44387|issue:44556", + "issue:43010|issue:43065", + "issue:43010|issue:43232", + "issue:44222|issue:45125", + "issue:44355|issue:44749", + "issue:45125|issue:45478", + "issue:45125|issue:45375" + ], + "prepared_review_unit_hash": "f766ccf5eababff706548df5c6cd1782d7f07389ffed24e7090e643593dcf589", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + 
"node_count": 17, + "item_count": 17, + "soft_pair_count": 11, + "serialized_chars": 12995, + "estimated_input_tokens": 3249, + "estimated_eval_tokens": 6754 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: it contains several small thematic groups (Qwen3.5 MoE config/tensor-parallel issues, v5 loading/reload regressions, import/export failures, and unrelated test/cache/generation bugs), but the soft pairs are not close enough to be true duplicates.", + "confidence": 0.78, + "canonical_issue_reason": "issue:45478 is the strongest representative of a concrete, user-facing regression: a specific from_pretrained failure for Qwen3.5.5 MoE on transformers>=5.4.0, with a clear runtime symptom and current relevance.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45478 is the best global issue candidate because it is specific, reproducible, and broad enough to matter to affected users, while the other issues are either narrower, older migration problems, or clearly different defects.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43010", + "right": "issue:44075", + "accept": false, + "reason": "Both mention model/optimizer behavior, but one is about cache/layer update under no_grad and the other is about SGD args being ignored; different code paths and fixes." + }, + { + "left": "issue:42890", + "right": "issue:45341", + "accept": false, + "reason": "These are unrelated test flakiness reports in different utilities/models; no shared bug mechanism is evident." + }, + { + "left": "issue:43723", + "right": "issue:44556", + "accept": false, + "reason": "Both are v5 loading-related, but one is tokenizer import/loading and the other is checkpoint reload after upgrading versions; related theme, but not the same concrete failure." 
+ }, + { + "left": "issue:42548", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but for different symbols (PreTrainedModel vs HybridCache) and likely different missing exports or package issues." + }, + { + "left": "issue:44387", + "right": "issue:44556", + "accept": false, + "reason": "One is an int4 quantization memory/OOM regression, the other is a model reload compatibility issue; different symptoms and fixes." + }, + { + "left": "issue:43010", + "right": "issue:43065", + "accept": false, + "reason": "Cache update no_grad and Sam3PixelDecoder dummy Conv2d are unrelated implementation concerns." + }, + { + "left": "issue:43010", + "right": "issue:43232", + "accept": false, + "reason": "Generation kwargs syncing and cache update decoration are different generation internals, but not the same bug." + }, + { + "left": "issue:44222", + "right": "issue:45125", + "accept": false, + "reason": "FP8 save_pretrained for MoE and missing _tp_plan for tensor parallelism affect different features and failure modes." + }, + { + "left": "issue:44355", + "right": "issue:44749", + "accept": false, + "reason": "Both concern a slowdown/behavior change after upgrading versions, but one is compiled Python file errors and the other is data filtering becoming much slower; distinct issues." + }, + { + "left": "issue:45125", + "right": "issue:45478", + "accept": false, + "reason": "Both involve Qwen3.5 MoE, but one is missing tensor-parallel planning and the other is a from_pretrained error in newer transformers; not the same concrete bug." + }, + { + "left": "issue:45125", + "right": "issue:45375", + "accept": false, + "reason": "Missing _tp_plan and missing deepstack_visual_indexes are different config/model metadata omissions." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 125, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:30990", + "issue:31795", + "issue:36296", + "issue:38175", + "issue:42890", + "issue:43010", + "issue:43299", + "issue:43502", + "issue:44075", + "issue:44315", + "issue:44355", + "issue:44360", + "issue:44368", + "issue:44373", + "issue:44485", + "issue:44556", + "issue:44655", + "issue:44961" + ], + "soft_pairs": [ + "issue:30990|issue:31795", + "issue:43502|issue:44485", + "issue:44315|issue:44360", + "issue:36296|issue:44075", + "issue:43010|issue:44368", + "issue:38175|issue:43502", + "issue:42890|issue:44373", + "issue:43299|issue:44556", + "issue:43502|issue:44961", + "issue:44355|issue:44655" + ], + "prepared_review_unit_hash": "810fee4be05e9bd413beb97dc672a6d77e4ed201a36c5d76fb6c4ba3592f9eb0", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13169, + "estimated_input_tokens": 3293, + "estimated_eval_tokens": 6842 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are largely unrelated issues spanning docs, model loading, training, tests, and optimization. No soft pair looks like the same underlying bug, so none should be merged as duplicates. 
If a single issue must be chosen as the best representative, the clearest concrete user-facing bug is #43502.", + "confidence": 0.96, + "canonical_issue_reason": "#43502 is the most concrete and reproducible runtime bug in the set; the rest are mostly unrelated or narrower follow-ups.", + "canonical_pr_reason": null, + "best_issue_reason": "#43502 describes a specific, actionable failure mode with clear user impact ('local_files_only=True' still triggering network access).", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:30990", + "right": "issue:31795", + "accept": false, + "reason": "Different problems: model loading hangs vs confusing forward() docs for inputs/caching." + }, + { + "left": "issue:43502", + "right": "issue:44485", + "accept": false, + "reason": "Different subsystems and symptoms: offline file loading/network requests vs GLM-5 RoPE implementation discussion." + }, + { + "left": "issue:44315", + "right": "issue:44360", + "accept": false, + "reason": "Unrelated bugs: Liger kernel not applied with model_init vs DSA indexer ReLU discussion." + }, + { + "left": "issue:36296", + "right": "issue:44075", + "accept": false, + "reason": "Different training/optimization issues: tensor parallel bug vs SGD arguments not being used." + }, + { + "left": "issue:43010", + "right": "issue:44368", + "accept": false, + "reason": "Different concerns: no_grad decoration for cache/layer updates vs a tie_word_embeddings warning during LoRA fine-tuning." + }, + { + "left": "issue:38175", + "right": "issue:43502", + "accept": false, + "reason": "Different model behavior: zero probabilities in SigLIP2 output vs offline loading still making API requests." + }, + { + "left": "issue:42890", + "right": "issue:44373", + "accept": false, + "reason": "Different scope: flaky integration test due to missing seeds vs a documentation typo for position_ids." 
+ }, + { + "left": "issue:43299", + "right": "issue:44556", + "accept": false, + "reason": "Both are loading/version-related, but the concrete failures differ: Qwen3VL MoE load breakage vs checkpoint reload incompatibility across versions." + }, + { + "left": "issue:43502", + "right": "issue:44961", + "accept": false, + "reason": "No substantive similarity; 'racoon' is not evidence of the same bug as offline network requests." + }, + { + "left": "issue:44355", + "right": "issue:44655", + "accept": false, + "reason": "Different failure modes: compiled Python file errors vs inability to save Pipeline objects." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 126, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33453", + "issue:34689", + "issue:36296", + "issue:38617", + "issue:42831", + "issue:42890", + "issue:43232", + "issue:43576", + "issue:43673", + "issue:43824", + "issue:43827", + "issue:43874", + "issue:44222", + "issue:44230", + "issue:44355", + "issue:44492", + "issue:44961", + "issue:45071" + ], + "soft_pairs": [ + "issue:44222|issue:44230", + "issue:44355|issue:45071", + "issue:42831|issue:43874", + "issue:42890|issue:43232", + "issue:33453|issue:34689", + "issue:33453|issue:36296", + "issue:43827|issue:44961", + "issue:43576|issue:45071", + "issue:43576|issue:43824", + "issue:43673|issue:44492", + "issue:38617|issue:43576" + ], + "prepared_review_unit_hash": "f5a87a3c372b1121e6940665d64bcec4d9cf63706e0266d57149f6faa67a2277", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + 
"estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13420, + "estimated_input_tokens": 3355, + "estimated_eval_tokens": 6966 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a loose semantic grouping of unrelated Transformers issues (v5 regressions, loading/import failures, generation/cache bugs, FP8, docs, tests). None of the soft pairs look like the same underlying bug or change, so I would not merge any of them as duplicates.", + "confidence": 0.23, + "canonical_issue_reason": "Issue 43576 is the closest thing to a thematic anchor because it is a broad v5 breakage report and has the most cluster-like linkage, but the overall set is too heterogeneous to treat as a true duplicate family.", + "canonical_pr_reason": null, + "best_issue_reason": "43576 is the best available representative only by breadth and centrality, not because it matches the other reports; the remaining issues each point to different code paths or failure modes.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44222", + "right": "issue:44230", + "accept": false, + "reason": "Both mention FP8/MoE, but one is about save_pretrained and the other about inference support; different behaviors and likely different fixes." + }, + { + "left": "issue:44355", + "right": "issue:45071", + "accept": false, + "reason": "Compiled Python file errors and PretrainedConfig type-checking breakage are unrelated problems." + }, + { + "left": "issue:42831", + "right": "issue:43874", + "accept": false, + "reason": "Accuracy regression in FineGrainedFP8 is not the same as a missing method causing an AttributeError in GLM46V image token counting." 
+ }, + { + "left": "issue:42890", + "right": "issue:43232", + "accept": false, + "reason": "A flaky integration test due to missing set_seed is separate from a generation kwargs bug after sync_gpus." + }, + { + "left": "issue:33453", + "right": "issue:34689", + "accept": false, + "reason": "Both are loading-related, but tokenizer loading regression and a specific model-loading failure are different failure modes." + }, + { + "left": "issue:33453", + "right": "issue:36296", + "accept": false, + "reason": "Tokenizer loading regression and tensor-parallel training bug do not look like the same defect." + }, + { + "left": "issue:43827", + "right": "issue:44961", + "accept": false, + "reason": "Docs referencing removed pipeline() is unrelated to the racoon issue." + }, + { + "left": "issue:43576", + "right": "issue:45071", + "accept": false, + "reason": "Both are v5 regressions, but one is an env CLI command issue and the other is a type-checking breakage in PretrainedConfig." + }, + { + "left": "issue:43576", + "right": "issue:43824", + "accept": false, + "reason": "Broken env command and a missing model import are separate v5 breakages in different code paths." + }, + { + "left": "issue:43673", + "right": "issue:44492", + "accept": false, + "reason": "Generation cache missing during chunked prefill is unrelated to a typo in cache strategy docs/code." + }, + { + "left": "issue:38617", + "right": "issue:43576", + "accept": false, + "reason": "An import error from configuration_utils is not the same as the broken env command." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 127, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:31795", + "issue:33357", + "issue:33453", + "issue:36296", + "issue:38617", + "issue:41628", + "issue:42548", + "issue:43526", + "issue:43576", + "issue:43901", + "issue:44273", + "issue:44351", + "issue:44485", + "issue:44908", + "issue:44938", + "issue:44961", + "issue:45003", + "issue:45468" + ], + "soft_pairs": [ + "issue:33357|issue:33453", + "issue:41628|issue:42548", + "issue:44961|issue:45468", + "issue:31795|issue:38617", + "issue:43576|issue:44351", + "issue:44908|issue:44961", + "issue:43576|issue:44938", + "issue:44485|issue:44961", + "issue:44273|issue:45003", + "issue:36296|issue:43901", + "issue:36296|issue:43526" + ], + "prepared_review_unit_hash": "db88692f20a49d561f0739affc22e457a50cf8ff6b6615504ebeaa044057e72c", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13329, + "estimated_input_tokens": 3333, + "estimated_eval_tokens": 6922 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are a grab bag of unrelated issues: import errors, docs regressions, scheduler behavior, model-specific bugs, and lazy-loading problems. 
None of the soft-edge pairs look like the same underlying bug or change, so I rejected all of them. If a single anchor is needed, issue #45003 is the most concrete standalone bug report in the set.", + "confidence": 0.95, + "canonical_issue_reason": "Issue #45003 is the most concrete, actionable bug report here: it names a specific unsafe sys.modules access path and describes a clear runtime failure mode.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #45003 is the best representative issue in this set because it is specific, technical, and actionable, unlike several broader or documentation-only reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:33357", + "right": "issue:33453", + "accept": false, + "reason": "MacOS bus error in a CLIP model load is not the same bug as a tokenizer-loading regression; the failure modes and likely code paths differ." + }, + { + "left": "issue:41628", + "right": "issue:42548", + "accept": false, + "reason": "Both are import errors, but for different symbols with different causes; this is too broad to treat as the same underlying issue." + }, + { + "left": "issue:44961", + "right": "issue:45468", + "accept": false, + "reason": "'racoon' is unrelated to Gemma-4 audio positional encoding; no shared code path or failure signature is evident." + }, + { + "left": "issue:31795", + "right": "issue:38617", + "accept": false, + "reason": "Documentation confusion about forward arguments is unrelated to an ImportError for layer_type_validation." + }, + { + "left": "issue:43576", + "right": "issue:44351", + "accept": false, + "reason": "A broken env CLI command and a HybridCache import error are different problems affecting different surfaces." + }, + { + "left": "issue:44908", + "right": "issue:44961", + "accept": false, + "reason": "Scheduler kwargs being ignored is unrelated to the 'racoon' issue; there is no evidence of a shared bug." 
+ }, + { + "left": "issue:43576", + "right": "issue:44938", + "accept": false, + "reason": "A v5 env command regression and a Python 3.14 load failure are different failures, not one concrete bug." + }, + { + "left": "issue:44485", + "right": "issue:44961", + "accept": false, + "reason": "GLM-5 RoPE implementation discussion does not match the unrelated 'racoon' issue." + }, + { + "left": "issue:44273", + "right": "issue:45003", + "accept": false, + "reason": "Both mention loading/lazy behavior, but one is a broad lazy-loading report and the other is a specific sys.modules access bug; not the same underlying issue." + }, + { + "left": "issue:36296", + "right": "issue:43901", + "accept": false, + "reason": "A tensor-parallel training bug is unrelated to a docs/behavior mismatch in TextClassificationPipeline." + }, + { + "left": "issue:36296", + "right": "issue:43526", + "accept": false, + "reason": "Tensor parallel training and BeitImageProcessorFast label reduction are different subsystems and different bug types." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 128, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36296", + "issue:41628", + "issue:41669", + "issue:43012", + "issue:43576", + "issue:43673", + "issue:44038", + "issue:44261", + "issue:44355", + "issue:44360", + "issue:44393", + "issue:44556", + "issue:44623", + "issue:45071", + "issue:45092", + "issue:45125", + "issue:45230" + ], + "soft_pairs": [ + "issue:41628|issue:44355", + "issue:36296|issue:43012", + "issue:44623|issue:45230", + "issue:44261|issue:44360", + "issue:43576|issue:44556", + "issue:44393|issue:45071", + "issue:41669|issue:43673", + "issue:44038|issue:45125", + "issue:44556|issue:45092" + ], + "prepared_review_unit_hash": "643abf8bdf78d0808345718941c351412ecf2514c861503511964270023ae973", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12216, + "estimated_input_tokens": 3054, + "estimated_eval_tokens": 6364 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the items cover unrelated bugs in import/export, compilation, config typing, generation, tensor parallelism, and model-specific compatibility. 
None of the soft pairs look like the same underlying issue, so there is no strong duplicate canonicalization here.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:44623 is the clearest, most actionable report in the set: it describes a specific save_pretrained() missing-files failure with a concrete symptom, even though it does not unify the rest of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41628", + "right": "issue:44355", + "accept": false, + "reason": "Different failure modes: AutoImageProcessor import failure vs errors running compiled Python files; no shared code path or fix." + }, + { + "left": "issue:36296", + "right": "issue:43012", + "accept": false, + "reason": "Tensor-parallel training bug and bf16 precision warning are unrelated symptoms in different parts of the stack." + }, + { + "left": "issue:44623", + "right": "issue:45230", + "accept": false, + "reason": "44623 is a specific processor.save_pretrained file-loss bug; 45230 is a generic, underspecified bug report with no evidence of the same issue." + }, + { + "left": "issue:44261", + "right": "issue:44360", + "accept": false, + "reason": "MLA q_a_layernorm rms_norm_eps precision issue and DSA indexer missing ReLU are distinct model/kernel bugs." + }, + { + "left": "issue:43576", + "right": "issue:44556", + "accept": false, + "reason": "transformers env CLI breakage and checkpoint reload incompatibility after upgrade are different user-facing problems." + }, + { + "left": "issue:44393", + "right": "issue:45071", + "accept": false, + "reason": "Qwen3-VL 2D bounding-box output issues are model-specific generation problems, while PretrainedConfig type checking is a separate API/type-system regression." 
+ }, + { + "left": "issue:41669", + "right": "issue:43673", + "accept": false, + "reason": "Import-* cleanup in models and missing GenerationMixin cache in chunked_prefill are not the same bug and would not be merged as one fix." + }, + { + "left": "issue:44038", + "right": "issue:45125", + "accept": false, + "reason": "Both mention Qwen3 MoE, but one is a general v5 bug and the other is a missing _tp_plan for tensor parallelism; different concrete code-path failures." + }, + { + "left": "issue:44556", + "right": "issue:45092", + "accept": false, + "reason": "Both concern v5 compatibility, but one is checkpoint reload after upgrade and the other is remote-code/meta-init incompatibility for InternVL2; too different to be duplicates." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 129, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42371", + "issue:42890", + "issue:43352", + "issue:43408", + "issue:43576", + "issue:43976", + "issue:44246", + "issue:44273", + "issue:44295", + "issue:44297", + "issue:44315", + "issue:44393", + "issue:44492", + "issue:44704", + "issue:44908", + "issue:45071", + "issue:45230" + ], + "soft_pairs": [ + "issue:42371|issue:44492", + "issue:42890|issue:44908", + "issue:44704|issue:44908", + "issue:44246|issue:44273", + "issue:44297|issue:45230", + "issue:43408|issue:44315", + "issue:43576|issue:44908", + "issue:43352|issue:44393", + "issue:43976|issue:44393", + "issue:44295|issue:45230", + "issue:44908|issue:45071" + ], + "prepared_review_unit_hash": "e64fdc118c70d4f58579559ed04d5cc0533116fc97f677a4125ed8917966b9a0", + "component_budget": { + 
"node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 11, + "serialized_chars": 12906, + "estimated_input_tokens": 3227, + "estimated_eval_tokens": 6710 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a grab bag of unrelated issue reports; there are no PRs. The only plausible duplicate pair is the lazy-loading/import regression pair (#44246, #44273).", + "confidence": 0.89, + "canonical_issue_reason": "#44273 is the most direct and specific report of the shared lazy-loading problem; #44246 reads like a symptom of the same import/lazy-loading regression.", + "canonical_pr_reason": null, + "best_issue_reason": "#44273 is the best representative issue because it names the underlying mechanism (lazy loading) rather than just the downstream symptom (slow import).", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42371", + "right": "issue:44492", + "accept": false, + "reason": "TF32 settings and a cache-strategy typo are unrelated topics with no shared bug path." + }, + { + "left": "issue:42890", + "right": "issue:44908", + "accept": false, + "reason": "Missing set_seed in integration tests is unrelated to scheduler kwargs handling." + }, + { + "left": "issue:44704", + "right": "issue:44908", + "accept": false, + "reason": "Both involve ignored kwargs, but in different code paths and components; not the same bug." + }, + { + "left": "issue:44246", + "right": "issue:44273", + "accept": true, + "reason": "Both point to the same lazy-loading/import regression: one reports slow imports, the other says lazy loading is not working properly." 
+ }, + { + "left": "issue:44297", + "right": "issue:45230", + "accept": false, + "reason": "Tokenizer save_pretrained metadata mismatch is unrelated to an unspecified generic bug report." + }, + { + "left": "issue:43408", + "right": "issue:44315", + "accept": false, + "reason": "SAM tracker/model-type warning and Liger Kernel application in model_init are separate issues." + }, + { + "left": "issue:43576", + "right": "issue:44908", + "accept": false, + "reason": "A broken env command in v5 and a scheduler kwarg bug are different features with different failure modes." + }, + { + "left": "issue:43352", + "right": "issue:44393", + "accept": false, + "reason": "Flash Attention support for Nemotron and Qwen3-VL 2D bbox errors are unrelated model-specific bugs." + }, + { + "left": "issue:43976", + "right": "issue:44393", + "accept": false, + "reason": "Python version compatibility and Qwen3-VL output errors are unrelated." + }, + { + "left": "issue:44295", + "right": "issue:45230", + "accept": false, + "reason": "Position_ids buffer access error is too specific and unrelated to an unspecified generic bug report." + }, + { + "left": "issue:44908", + "right": "issue:45071", + "accept": false, + "reason": "Scheduler kwargs being ignored and PretrainedConfig type-checking regressions are different problems." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 130, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:31795", + "issue:33453", + "issue:41669", + "issue:43723", + "issue:43827", + "issue:44077", + "issue:44315", + "issue:44351", + "issue:44393", + "issue:44492", + "issue:44655", + "issue:44829", + "issue:44861", + "issue:44908", + "issue:44945", + "issue:45341", + "issue:45478" + ], + "soft_pairs": [ + "issue:41669|issue:44351", + "issue:41669|issue:45341", + "issue:44315|issue:44829", + "issue:44492|issue:44945", + "issue:41669|issue:44492", + "issue:44393|issue:45478", + "issue:44077|issue:44861", + "issue:31795|issue:33453", + "issue:43827|issue:44908", + "issue:41669|issue:44908", + "issue:43723|issue:44393", + "issue:44655|issue:44861" + ], + "prepared_review_unit_hash": "aafcbf85e8a880f2b557e502c1b7e5b1c2b4c0b13f03717cd06ecf5d0e4ddd55", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 12, + "serialized_chars": 13239, + "estimated_input_tokens": 3310, + "estimated_eval_tokens": 6876 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a mix of unrelated issues: docs, tokenizer/model loading, v5 migration fallout, cache/type errors, training bugs, and model-specific failures. 
I do not see a single underlying bug or change linking them as duplicates.", + "confidence": 0.93, + "canonical_issue_reason": "No exact canonical issue: the items span several distinct problems rather than one shared defect.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 41669 is the closest cluster hub by discussion/activity and broad loader/import-related scope, but it is still not a true representative for the rest of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41669", + "right": "issue:44351", + "accept": false, + "reason": "Import cleanup/perf regression and a HybridCache import failure are different problems with different failure modes." + }, + { + "left": "issue:41669", + "right": "issue:45341", + "accept": false, + "reason": "A model import/perf issue and a testing_utils bug are unrelated." + }, + { + "left": "issue:44315", + "right": "issue:44829", + "accept": false, + "reason": "Both involve model creation/training, but one is about Liger Kernel application with model_init and the other is a flash_attention_3 training degeneration bug; not the same code-path failure." + }, + { + "left": "issue:44492", + "right": "issue:44945", + "accept": false, + "reason": "A cache-strategy typo and incorrect pipeline-parallel output are unrelated." + }, + { + "left": "issue:41669", + "right": "issue:44492", + "accept": false, + "reason": "Import-* cleanup and a cache-strategy typo are not the same underlying issue." + }, + { + "left": "issue:44393", + "right": "issue:45478", + "accept": false, + "reason": "Both are Qwen-related, but one is a vision/VL bounding-box output problem and the other is a from_pretrained failure in Qwen3.5 MoE; different bugs." + }, + { + "left": "issue:44077", + "right": "issue:44861", + "accept": false, + "reason": "Optional post_init validation for patchtsmixer and an AttributeError in tied-weight key handling are unrelated." 
+ }, + { + "left": "issue:31795", + "right": "issue:33453", + "accept": false, + "reason": "Documentation confusion around forward args is not a duplicate of a tokenizer loading regression." + }, + { + "left": "issue:43827", + "right": "issue:44908", + "accept": false, + "reason": "Pipeline-removal documentation drift and inverse_sqrt scheduler kwargs handling are different issues." + }, + { + "left": "issue:41669", + "right": "issue:44908", + "accept": false, + "reason": "A model import/perf issue does not match a scheduler kwargs bug." + }, + { + "left": "issue:43723", + "right": "issue:44393", + "accept": false, + "reason": "Tokenizer loading in v5 and Qwen3-VL output errors affect different subsystems and behaviors." + }, + { + "left": "issue:44655", + "right": "issue:44861", + "accept": false, + "reason": "Saving Pipeline objects and tied-weight-key attribute handling are separate code paths and bugs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 131, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:31795", + "issue:34689", + "issue:37428", + "issue:41669", + "issue:42371", + "issue:43519", + "issue:43576", + "issue:43704", + "issue:44273", + "issue:44315", + "issue:44360", + "issue:44393", + "issue:44485", + "issue:44908", + "issue:45071", + "issue:45230", + "issue:45310", + "issue:45468" + ], + "soft_pairs": [ + "issue:43576|issue:44273", + "issue:44360|issue:45468", + "issue:45230|issue:45310", + "issue:44273|issue:44908", + "issue:41669|issue:43704", + "issue:43519|issue:44485", + "issue:43519|issue:44908", + "issue:31795|issue:34689", + "issue:31795|issue:37428", + 
"issue:44315|issue:44393", + "issue:44273|issue:45071", + "issue:43704|issue:44485", + "issue:44485|issue:45468", + "issue:42371|issue:43704" + ], + "prepared_review_unit_hash": "e27e06600d74effacb209489a79ab138e0906942517d08dd3361f55cade42f14", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 14, + "serialized_chars": 14276, + "estimated_input_tokens": 3569, + "estimated_eval_tokens": 7394 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No soft pair looks like a true duplicate; the matches are broad topical similarities at best, not the same underlying bug or change. The strongest standalone issue is the Qwen3.5 MoE from_pretrained regression because it is concrete and version-scoped.", + "confidence": 0.91, + "canonical_issue_reason": "issue:45310 is specific, reproducible, and version-scoped, making it a better canonical issue than the vague or overly broad reports in this set.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45310 has a clear model, API path, and failing version range, so it is the most actionable and representative issue here.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43576", + "right": "issue:44273", + "accept": false, + "reason": "Different problems: one is a broken CLI command in v5, the other is a generic lazy-loading complaint. Same release area, but not the same bug." + }, + { + "left": "issue:44360", + "right": "issue:45468", + "accept": false, + "reason": "Unrelated model/component issues: DSA indexer ReLU discussion vs Gemma-4 audio positional encoding bug." 
+ }, + { + "left": "issue:45230", + "right": "issue:45310", + "accept": false, + "reason": "issue:45230 is too generic to prove identity; 45310 is a specific Qwen3.5 MoE from_pretrained regression. No evidence they are the same bug." + }, + { + "left": "issue:44273", + "right": "issue:44908", + "accept": false, + "reason": "Lazy loading and inverse_sqrt scheduler kwargs are unrelated subsystems and failure modes." + }, + { + "left": "issue:41669", + "right": "issue:43704", + "accept": false, + "reason": "One is about import-star usage in models, the other about VRAM leakage in Qwen3ForCausalLM across dataloader threads. Different code paths and symptoms." + }, + { + "left": "issue:43519", + "right": "issue:44485", + "accept": false, + "reason": "Timestamp calculation in Qwen3VL Processor and GLM-5 RoPE implementation are different model-specific bugs." + }, + { + "left": "issue:43519", + "right": "issue:44908", + "accept": false, + "reason": "Vision timestamp math and scheduler kwarg handling are unrelated issues." + }, + { + "left": "issue:31795", + "right": "issue:34689", + "accept": false, + "reason": "Documentation confusion versus a model loading regression are not the same underlying problem." + }, + { + "left": "issue:31795", + "right": "issue:37428", + "accept": false, + "reason": "Docs wording issue and flash-attention import error are distinct; no shared failing code path is evident." + }, + { + "left": "issue:44315", + "right": "issue:44393", + "accept": false, + "reason": "Liger Kernel application during model_init and Qwen3-VL 2D bbox output errors are different behaviors in different components." + }, + { + "left": "issue:44273", + "right": "issue:45071", + "accept": false, + "reason": "Lazy loading complaint and PretrainedConfig type-checking breakage are separate issues, even if both relate to v5 changes." 
+ }, + { + "left": "issue:43704", + "right": "issue:44485", + "accept": false, + "reason": "VRAM leak in multi-threaded loading and RoPE implementation are unrelated." + }, + { + "left": "issue:44485", + "right": "issue:45468", + "accept": false, + "reason": "GLM-5 RoPE discussion and Gemma-4 audio positional encoding are different model-specific implementations." + }, + { + "left": "issue:42371", + "right": "issue:43704", + "accept": false, + "reason": "TF32 behavior settings and Qwen3ForCausalLM VRAM leakage are unrelated; same project area is not enough." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 132, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42890", + "issue:42913", + "issue:43066", + "issue:43502", + "issue:43519", + "issue:43606", + "issue:43618", + "issue:43704", + "issue:44261", + "issue:44263", + "issue:44492", + "issue:44623", + "issue:44861", + "issue:44908", + "issue:44998", + "issue:45290", + "issue:45341", + "issue:45468" + ], + "soft_pairs": [ + "issue:43502|issue:43519", + "issue:43519|issue:44623", + "issue:43519|issue:43704", + "issue:44263|issue:45468", + "issue:44261|issue:45468", + "issue:43606|issue:45468", + "issue:42913|issue:43066", + "issue:42890|issue:43704", + "issue:44908|issue:44998", + "issue:44861|issue:45290", + "issue:44998|issue:45341", + "issue:44492|issue:44998", + "issue:43618|issue:44998" + ], + "prepared_review_unit_hash": "34a816c11f9bfd7e098f5f80c2cbfc74bc1596a2c10c31a3ea6b6e828569ddfd", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + 
"estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 13, + "serialized_chars": 14035, + "estimated_input_tokens": 3509, + "estimated_eval_tokens": 7274 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues do not form a true duplicate cluster: they span unrelated tokenizer, processor, model, scheduler, cache, and test bugs. No soft edge should be merged.", + "confidence": 0.98, + "canonical_issue_reason": "No single canonical issue exists here because the items are not the same underlying bug; they are mostly unrelated standalone reports.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45290 is the strongest standalone report: it describes a specific crash condition in apply_chat_template with a clear edge case. Even so, it is not a canonical duplicate representative for the whole set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43502", + "right": "issue:43519", + "accept": false, + "reason": "Both are API/processor-related, but one is about local_files_only network calls and the other about Qwen3VL timestamp math; different code paths and bugs." + }, + { + "left": "issue:43519", + "right": "issue:44623", + "accept": false, + "reason": "Processor timestamp calculation vs save_pretrained missing files are unrelated failures in different subsystems." + }, + { + "left": "issue:43519", + "right": "issue:43704", + "accept": false, + "reason": "Qwen3VL timestamp computation and VRAM leakage in Qwen3ForCausalLM are distinct issues with different symptoms and fixes." + }, + { + "left": "issue:44263", + "right": "issue:45468", + "accept": false, + "reason": "torch.split return handling in GlmMoeDsaIndexer is unrelated to Gemma4AudioRelPositionalEncoding." 
+ }, + { + "left": "issue:44261", + "right": "issue:45468", + "accept": false, + "reason": "Missing rms_norm_eps in MLA q_a_layernorm is a precision/config bug, not the same as Gemma-4 audio positional encoding." + }, + { + "left": "issue:43606", + "right": "issue:45468", + "accept": false, + "reason": "CPU offload device mismatch for bark-small is unrelated to Gemma-4 audio positional encoding." + }, + { + "left": "issue:42913", + "right": "issue:43066", + "accept": false, + "reason": "Both mention tokenizer v5, but one reports a broad behavior change from v4 and the other a specific decoder-type bug; not the same concrete defect." + }, + { + "left": "issue:42890", + "right": "issue:43704", + "accept": false, + "reason": "A flaky integration test due to missing set_seed is unrelated to VRAM leakage in Qwen3ForCausalLM." + }, + { + "left": "issue:44908", + "right": "issue:44998", + "accept": false, + "reason": "inverse_sqrt scheduler kwargs are unrelated to an issue titled 'Unemployment'." + }, + { + "left": "issue:44861", + "right": "issue:45290", + "accept": false, + "reason": "_get_tied_weight_keys AttributeError and apply_chat_template crashing on tool-call assistant messages are separate code paths." + }, + { + "left": "issue:44998", + "right": "issue:45341", + "accept": false, + "reason": "The titles are unrelated; neither indicates the same bug or subsystem." + }, + { + "left": "issue:44492", + "right": "issue:44998", + "accept": false, + "reason": "A cache-strategy typo does not match the unrelated 'Unemployment' issue." + }, + { + "left": "issue:43618", + "right": "issue:44998", + "accept": false, + "reason": "CLIPOutput attentions assignment regression and 'Unemployment' are clearly unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 133, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:31515", + "issue:31795", + "issue:33453", + "issue:36296", + "issue:36683", + "issue:38617", + "issue:43298", + "issue:43525", + "issue:43572", + "issue:44230", + "issue:44556", + "issue:44623", + "issue:44861", + "issue:44998", + "issue:45310" + ], + "soft_pairs": [ + "issue:44556|issue:44998", + "issue:43525|issue:44861", + "issue:43572|issue:44861", + "issue:31515|issue:38617", + "issue:44230|issue:44623", + "issue:31515|issue:36296", + "issue:44230|issue:45310", + "issue:31515|issue:31795", + "issue:31515|issue:33453", + "issue:43298|issue:44230", + "issue:31515|issue:36683" + ], + "prepared_review_unit_hash": "85e71e9da3a939a76a2813c467648668026ec55dc0cc018e9dbaac84131acbdc", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 15, + "item_count": 15, + "soft_pair_count": 11, + "serialized_chars": 11714, + "estimated_input_tokens": 2929, + "estimated_eval_tokens": 6114 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are mostly unrelated. 
A few revolve around loading/from_pretrained, but they describe different symptoms or code paths (performance, tokenizer regressions, config attributes, version-specific reload failures), so none of the soft candidates should be merged as duplicates.", + "confidence": 0.88, + "canonical_issue_reason": "issue:44556 is the most representative concrete loading bug in the set: it targets from_pretrained/reload breakage after a version upgrade, which is closer to the core loading/reload theme than the performance-only or model-specific reports.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44556 is the best single issue to anchor this cluster because it is specific, actionable, and about an actual checkpoint reload failure rather than a docs or performance complaint.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44556", + "right": "issue:44998", + "accept": false, + "reason": "Completely unrelated: checkpoint reload failure vs an off-topic 'Unemployment' issue." + }, + { + "left": "issue:43525", + "right": "issue:44861", + "accept": false, + "reason": "Different bugs: missing config attribute on Llama4Config vs _tied_weights_keys list handling." + }, + { + "left": "issue:43572", + "right": "issue:44861", + "accept": false, + "reason": "Unrelated code paths: StableLmConfig pad_token_idx regression vs tied-weight key extraction crash." + }, + { + "left": "issue:31515", + "right": "issue:38617", + "accept": false, + "reason": "Different symptoms and subsystems: slow from_pretrained checkpoint loading vs import error for layer_type_validation." + }, + { + "left": "issue:44230", + "right": "issue:44623", + "accept": false, + "reason": "Feature/support request for fp8 inference is not the same as processor.save_pretrained missing files." + }, + { + "left": "issue:31515", + "right": "issue:36296", + "accept": false, + "reason": "Checkpoint loading performance issue is unrelated to tensor-parallel training behavior." 
+ }, + { + "left": "issue:44230", + "right": "issue:45310", + "accept": false, + "reason": "Both mention Qwen models, but one is fp8 inference support and the other is a from_pretrained failure; not the same bug." + }, + { + "left": "issue:31515", + "right": "issue:31795", + "accept": false, + "reason": "Loading speed problem and forward-pass documentation confusion are unrelated." + }, + { + "left": "issue:31515", + "right": "issue:33453", + "accept": false, + "reason": "Both involve loading, but one is a speed complaint and the other is a tokenizer-loading regression; different concrete failures." + }, + { + "left": "issue:43298", + "right": "issue:44230", + "accept": false, + "reason": "Continuous batching audio-model support is a different feature gap from fp8 inference support." + }, + { + "left": "issue:31515", + "right": "issue:36683", + "accept": false, + "reason": "from_pretrained slowness is unrelated to Gemma3Config missing vocab_size." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 3, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-26350-2", + "nodes": [ + "issue:26350", + "issue:33260" + ], + "soft_pairs": [ + "issue:26350|issue:33260" + ], + "prepared_review_unit_hash": "aaf6c048daf322a888ee8aa4206847c55185a3f69683e0795283f8b25dc47495", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1589, + "estimated_input_tokens": 398, + "estimated_eval_tokens": 1052 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1589, + "estimated_input_tokens": 398, + "estimated_eval_tokens": 1052 + }, + 
"overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are two separate feature requests about extending support to different backends/features; they are not duplicates.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "If one must be surfaced, issue 26350 is the older and more discussed tracker, but it is not a duplicate of 33260.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:26350", + "right": "issue:33260", + "accept": false, + "reason": "Different requested capabilities (Flash Attention 2 support vs GGUF support) and likely different implementation paths; the shared theme of broad architecture support is too general to merge them." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 4, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-27088-4", + "nodes": [ + "issue:27088", + "issue:43834", + "issue:44365", + "issue:44370" + ], + "soft_pairs": [ + "issue:27088|issue:44370", + "issue:27088|issue:44365", + "issue:27088|issue:43834", + "issue:43834|issue:44370", + "issue:43834|issue:44365", + "issue:44365|issue:44370" + ], + "prepared_review_unit_hash": "cc91f282234355fa2b4aaa9604c184bfd8f6e8e836d922aa67b3b39faf4d4a00", + "component_budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4455, + "estimated_input_tokens": 1114, + "estimated_eval_tokens": 2484 + }, + "budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4455, + 
"estimated_input_tokens": 1114, + "estimated_eval_tokens": 2484 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These look like separate documentation-translation issue templates/requests rather than duplicates of one concrete task. The Turkish issue is the only specific one; the others are generic language placeholders and should not be merged just on template similarity.", + "confidence": 0.89, + "canonical_issue_reason": "issue:27088 is the only concrete, language-specific request ([i18n-TR] Turkish) and is the best issue to anchor this cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:27088 is the clearest and most specific issue in the set; the others are generic placeholders with no concrete target language visible.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:27088", + "right": "issue:44370", + "accept": false, + "reason": "Both are translation-request issues, but the target language/topic is not the same concrete task; this is not enough to treat them as the same underlying issue." + }, + { + "left": "issue:27088", + "right": "issue:44365", + "accept": false, + "reason": "The Turkish-specific request is distinct from another language translation request; shared template wording does not indicate a duplicate." + }, + { + "left": "issue:27088", + "right": "issue:43834", + "accept": false, + "reason": "This appears to be a different language translation request, not the same bug/change as the Turkish docs translation issue." + }, + { + "left": "issue:43834", + "right": "issue:44370", + "accept": false, + "reason": "These are generic translation-template issues, but there is no evidence they refer to the same language or same concrete task." 
+ }, + { + "left": "issue:43834", + "right": "issue:44365", + "accept": false, + "reason": "High text similarity alone is insufficient; these look like separate translation requests, not one duplicate issue." + }, + { + "left": "issue:44365", + "right": "issue:44370", + "accept": false, + "reason": "The available evidence only shows similar template text, not the same underlying translation request." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 5, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-36817-13", + "nodes": [ + "issue:36817", + "issue:37015", + "issue:41211", + "issue:42503", + "issue:42738", + "issue:43671", + "issue:43837", + "issue:43909", + "issue:44121", + "issue:44205", + "issue:45027", + "issue:45177", + "issue:45322" + ], + "soft_pairs": [ + "issue:43909|issue:44205", + "issue:43671|issue:43837", + "issue:44121|issue:45322", + "issue:43837|issue:43909", + "issue:43837|issue:45322", + "issue:37015|issue:43909", + "issue:43671|issue:45177", + "issue:37015|issue:43837", + "issue:43671|issue:45027", + "issue:37015|issue:44121", + "issue:43837|issue:44121", + "issue:43837|issue:45177", + "issue:43671|issue:43909", + "issue:43671|issue:44121", + "issue:41211|issue:43671", + "issue:36817|issue:41211", + "issue:37015|issue:42503", + "issue:37015|issue:45027", + "issue:36817|issue:43671", + "issue:42738|issue:45027", + "issue:43837|issue:45027", + "issue:42503|issue:43837", + "issue:42503|issue:42738", + "issue:42503|issue:43671" + ], + "prepared_review_unit_hash": "ff22bed71b64b670ea2b2f680c1d17496b90585069ffa734aa19436190181c4c", + "component_budget": { + "node_count": 13, + "item_count": 13, + 
"soft_pair_count": 25, + "serialized_chars": 16006, + "estimated_input_tokens": 4002, + "estimated_eval_tokens": 8260 + }, + "budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 24, + "serialized_chars": 14121, + "estimated_input_tokens": 3531, + "estimated_eval_tokens": 7318 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set; it is a loose collection of unrelated model-add/support requests across text, audio/ASR/TTS, OCR, vision, and architecture-specific models. None of the soft edges look like the same underlying change.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "No single issue is a true canonical representative because the requests cover different models and modalities rather than one shared bug/change.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43909", + "right": "issue:44205", + "accept": false, + "reason": "Different targets: LFM2.5 Audio support vs SAM3-LiteText. Separate model requests." + }, + { + "left": "issue:43671", + "right": "issue:43837", + "accept": false, + "reason": "Same vendor family but different tasks/modality: Qwen3-TTS vs Qwen3-ASR. Not the same underlying change." + }, + { + "left": "issue:44121", + "right": "issue:45322", + "accept": false, + "reason": "Unrelated architectures and use cases: weight-sparse transformer vs EUPE encoder." + }, + { + "left": "issue:43837", + "right": "issue:43909", + "accept": false, + "reason": "Different audio models and support paths: Qwen3-ASR vs LFM2.5 Audio." + }, + { + "left": "issue:43837", + "right": "issue:45322", + "accept": false, + "reason": "ASR support request vs a perception encoder model request; unrelated." 
+ }, + { + "left": "issue:37015", + "right": "issue:43909", + "accept": false, + "reason": "NeoBERT text model request is unrelated to LFM2.5 Audio." + }, + { + "left": "issue:43671", + "right": "issue:45177", + "accept": false, + "reason": "TTS support request vs DeepSeek-OCR2; different modalities and code paths." + }, + { + "left": "issue:37015", + "right": "issue:43837", + "accept": false, + "reason": "NeoBERT addition vs Qwen3-ASR support; distinct models." + }, + { + "left": "issue:43671", + "right": "issue:45027", + "accept": false, + "reason": "Both are audio-related but different concrete models (Qwen3-TTS vs Voxtral TTS) and likely separate integration work." + }, + { + "left": "issue:37015", + "right": "issue:44121", + "accept": false, + "reason": "NeoBERT model addition is not the same as adding a weight-sparse transformer." + }, + { + "left": "issue:43837", + "right": "issue:44121", + "accept": false, + "reason": "ASR model support and sparse transformer support are unrelated changes." + }, + { + "left": "issue:43837", + "right": "issue:45177", + "accept": false, + "reason": "ASR support vs OCR model support; different modalities." + }, + { + "left": "issue:43671", + "right": "issue:43909", + "accept": false, + "reason": "Qwen3-TTS and LFM2.5 Audio are different models; not a duplicate." + }, + { + "left": "issue:43671", + "right": "issue:44121", + "accept": false, + "reason": "TTS support request vs sparse transformer architecture request." + }, + { + "left": "issue:41211", + "right": "issue:43671", + "accept": false, + "reason": "DEIMv2 addition is unrelated to Qwen3-TTS support." + }, + { + "left": "issue:36817", + "right": "issue:41211", + "accept": false, + "reason": "EuroBert config addition vs DEIMv2; separate model requests." + }, + { + "left": "issue:37015", + "right": "issue:42503", + "accept": false, + "reason": "Different model families: NeoBERT vs ModernVBERT." 
+ }, + { + "left": "issue:37015", + "right": "issue:45027", + "accept": false, + "reason": "NeoBERT text model request is unrelated to Voxtral TTS support." + }, + { + "left": "issue:36817", + "right": "issue:43671", + "accept": false, + "reason": "EuroBert config request is unrelated to Qwen3-TTS support." + }, + { + "left": "issue:42738", + "right": "issue:45027", + "accept": false, + "reason": "Generic BERT-like/RoPE discussion vs a specific TTS model support request." + }, + { + "left": "issue:43837", + "right": "issue:45027", + "accept": false, + "reason": "Both are audio-related, but ASR vs TTS for different model families; not the same concrete change." + }, + { + "left": "issue:42503", + "right": "issue:43837", + "accept": false, + "reason": "ModernVBERT model addition vs Qwen3-ASR support; different targets." + }, + { + "left": "issue:42503", + "right": "issue:42738", + "accept": false, + "reason": "ModernVBERT models and BERT-like models with RoPE are not the same request; one is specific, the other is a broad architecture discussion." + }, + { + "left": "issue:42503", + "right": "issue:43671", + "accept": false, + "reason": "ModernVBERT models are unrelated to Qwen3-TTS support." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 5, + "component_count": 13, + "review_unit_index": 2, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-36817-13", + "nodes": [ + "issue:42503", + "issue:45027" + ], + "soft_pairs": [ + "issue:42503|issue:45027" + ], + "prepared_review_unit_hash": "fcf04445269dcf344adc9b86a47051d33e0ecd82f77ee6760afeca428a5efc5e", + "component_budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 25, + "serialized_chars": 16006, + "estimated_input_tokens": 4002, + "estimated_eval_tokens": 8260 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1433, + "estimated_input_tokens": 359, + "estimated_eval_tokens": 974 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are not duplicates: one requests adding ModernVBERT models, while the other requests support for Voxtral-4B-TTS-2603. 
They target different model families and different functionality (generic model addition vs TTS support).", + "confidence": 0.97, + "canonical_issue_reason": "No canonical issue: the two tickets describe different requests for different models and code paths, so neither should subsume the other.", + "canonical_pr_reason": null, + "best_issue_reason": "Neither issue is a good cluster canonical because they are unrelated; if forced to choose a representative, issue #45027 is the more concrete feature request, but it should not absorb #42503.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42503", + "right": "issue:45027", + "accept": false, + "reason": "Different underlying requests: ModernVBERT model addition vs Voxtral-4B-TTS-2603 support. They do not describe the same bug or change and should not be merged." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 6, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-37934-6", + "nodes": [ + "issue:37934", + "issue:42936", + "issue:43240", + "issue:43668", + "issue:43813", + "issue:44031" + ], + "soft_pairs": [ + "issue:37934|issue:43668", + "issue:42936|issue:43240", + "issue:42936|issue:44031", + "issue:42936|issue:43813", + "issue:42936|issue:43668", + "issue:43240|issue:43668", + "issue:43668|issue:44031", + "issue:43668|issue:43813" + ], + "prepared_review_unit_hash": "adfee87fd10a6f609a0cf5f5dd8b7fccee791790523526507cf7bc368d336366", + "component_budget": { + "node_count": 6, + "item_count": 6, + "soft_pair_count": 8, + "serialized_chars": 6292, + "estimated_input_tokens": 1573, + "estimated_eval_tokens": 3402 + }, + "budget": { + 
"node_count": 6, + "item_count": 6, + "soft_pair_count": 8, + "serialized_chars": 6292, + "estimated_input_tokens": 1573, + "estimated_eval_tokens": 3402 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These items are not duplicates; they are a loose cluster of unrelated model/config/tokenizer issues that only share generic ML-library similarity.", + "confidence": 0.96, + "canonical_issue_reason": "No canonical issue: the titles point to different bugs in different subsystems/models, not the same underlying problem.", + "canonical_pr_reason": null, + "best_issue_reason": "No strong representative issue; 42936 is the most discussed, but it does not cover the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:37934", + "right": "issue:43668", + "accept": false, + "reason": "Different model components and bug types: Llama4TextL2Norm vs ModernBERT config type hint." + }, + { + "left": "issue:42936", + "right": "issue:43240", + "accept": false, + "reason": "Mask2Former ignore_value handling is unrelated to kwargs not being passed to a loss function." + }, + { + "left": "issue:42936", + "right": "issue:44031", + "accept": false, + "reason": "Mask2Former ignore_value bug is unrelated to tokenizer regex warning behavior." + }, + { + "left": "issue:42936", + "right": "issue:43813", + "accept": false, + "reason": "Different subsystems: Mask2Former loss masking vs a typo in a quantization attribute name." + }, + { + "left": "issue:42936", + "right": "issue:43668", + "accept": false, + "reason": "Different issues in different models: Mask2Former ignore_value vs ModernBERT norm_eps typing." + }, + { + "left": "issue:43240", + "right": "issue:43668", + "accept": false, + "reason": "Loss kwargs plumbing and config type-hint correctness are unrelated bugs." 
+ }, + { + "left": "issue:43668", + "right": "issue:44031", + "accept": false, + "reason": "ModernBERT config typing issue and tokenizer regex warning are separate problems." + }, + { + "left": "issue:43668", + "right": "issue:43813", + "accept": false, + "reason": "A config type-hint typo is unrelated to a quantization-operation attribute typo." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 7, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41084-2", + "nodes": [ + "issue:41084", + "issue:43037" + ], + "soft_pairs": [ + "issue:41084|issue:43037" + ], + "prepared_review_unit_hash": "2c3661ea485823452c8fda7c4dc3cc85d434aed57fd1b4466414ff0aadf1e892", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1479, + "estimated_input_tokens": 370, + "estimated_eval_tokens": 996 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1479, + "estimated_input_tokens": 370, + "estimated_eval_tokens": 996 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The two issues are not duplicates: one requests a generic \"Set Block Decoding\" capability, while the other asks for \"DeepSeek v3.2 support.\" They describe different features and do not point to the same underlying bug or change.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:43037 is the better representative issue because it is more specific and actionable, naming a concrete model/version 
support request.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41084", + "right": "issue:43037", + "accept": false, + "reason": "Different requested features with no shared code path, target, or explicit linkage; not the same underlying issue." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 8, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43598-2", + "nodes": [ + "issue:43598", + "issue:43600" + ], + "soft_pairs": [ + "issue:43598|issue:43600" + ], + "prepared_review_unit_hash": "06fca2f379985d728236dd16de09c917f28e1c9b3756c5a128045ccccbbeb088", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1584, + "estimated_input_tokens": 396, + "estimated_eval_tokens": 1048 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1584, + "estimated_input_tokens": 396, + "estimated_eval_tokens": 1048 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are two separate Trainer customizability issues: one about `remove_unused_column` behavior, the other about FLOPS estimation for custom trainers. 
They share a broad theme but not the same underlying bug or fix.", + "confidence": 0.94, + "canonical_issue_reason": "Issue 43598 is the more concrete/feature-specific request, focused on revisiting `remove_unused_column` in `Trainer` to improve customizability.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43598 is the best issue anchor because it is narrowly scoped and describes a specific Trainer behavior change request.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43598", + "right": "issue:43600", + "accept": false, + "reason": "Rejected: the titles and likely problems are different. One concerns `remove_unused_column` customization, while the other concerns FLOPS estimation for custom trainers; they do not describe the same bug or change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 9, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43684-2", + "nodes": [ + "issue:43684", + "issue:44405" + ], + "soft_pairs": [ + "issue:43684|issue:44405" + ], + "prepared_review_unit_hash": "c228be1521990ed314b34580fe1c9670fddaf80e7c0c2225989e6d136e529a82", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1612, + "estimated_input_tokens": 403, + "estimated_eval_tokens": 1062 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1612, + "estimated_input_tokens": 403, + "estimated_eval_tokens": 1062 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are both 
model-registration support issues, but they target different model families and different AutoModel entry points: Qwen3-Omni for AutoModel/AutoModelForConditionalGeneration vs. Qwen3.5 for AutoModelForSequenceClassification. They do not look like duplicates of the same underlying change.", + "confidence": 0.95, + "canonical_issue_reason": "Issue #43684 is the better canonical issue because it is broader and more directly about adding model registration support, with more discussion activity.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #43684 is the best representative of the cluster since it covers the general registration gap and is the broader, more canonical support request.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43684", + "right": "issue:44405", + "accept": false, + "reason": "Different models and different AutoModel heads; this is related support work, not the same bug/change. They would not plausibly be merged into one fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 10, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43835-4", + "nodes": [ + "issue:43835", + "issue:43845", + "issue:43846", + "issue:43859" + ], + "soft_pairs": [ + "issue:43835|issue:43845", + "issue:43835|issue:43846", + "issue:43835|issue:43859", + "issue:43845|issue:43846", + "issue:43845|issue:43859", + "issue:43846|issue:43859" + ], + "prepared_review_unit_hash": "326262ddbfa7659213b0f549b61914c101946f82c629fc23b7c2f69999ecb1bc", + "component_budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4266, + "estimated_input_tokens": 1067, + 
"estimated_eval_tokens": 2390 + }, + "budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4266, + "estimated_input_tokens": 1067, + "estimated_eval_tokens": 2390 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Three issues titled \"huggingface\" look like probable duplicates of each other, while \"Fett\" appears unrelated and should not be merged into that cluster. With no body details or explicit targets, only very weak triage is possible.", + "confidence": 0.39, + "canonical_issue_reason": "Issue 43845 is the earliest of the near-identical \"huggingface\" reports, so it is the safest placeholder canonical issue among the likely duplicates.", + "canonical_pr_reason": null, + "best_issue_reason": "43845 is the best issue candidate because it is one of the clearly similar reports and was created first; however, the lack of body content makes this only a tentative choice.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43835", + "right": "issue:43845", + "accept": false, + "reason": "Titles are unrelated (\u0000b0Fett\u0000b0 vs \u0000b0huggingface\u0000b0), and there is no body or target evidence tying them to the same underlying issue." + }, + { + "left": "issue:43835", + "right": "issue:43846", + "accept": false, + "reason": "No concrete overlap beyond weak similarity; the titles do not indicate the same bug or request." + }, + { + "left": "issue:43835", + "right": "issue:43859", + "accept": false, + "reason": "\u0000b0Fett\u0000b0 does not plausibly refer to the same underlying issue as the \u0000b0huggingface\u0000b0 reports." + }, + { + "left": "issue:43845", + "right": "issue:43846", + "accept": false, + "reason": "These are both titled \u0000b0huggingface\u0000b0, but there is no description/target evidence to confirm they are the same underlying issue rather than separate reports." 
+ }, + { + "left": "issue:43845", + "right": "issue:43859", + "accept": false, + "reason": "Same broad title is not enough to safely deduplicate without body details or explicit links." + }, + { + "left": "issue:43846", + "right": "issue:43859", + "accept": false, + "reason": "The available evidence is only title similarity; that's too weak to accept as the same issue." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 11, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44371-5", + "nodes": [ + "issue:44371", + "issue:44418", + "issue:44572", + "issue:44573", + "issue:44937" + ], + "soft_pairs": [ + "issue:44371|issue:44572", + "issue:44371|issue:44573", + "issue:44371|issue:44418", + "issue:44371|issue:44937", + "issue:44418|issue:44572", + "issue:44418|issue:44573", + "issue:44572|issue:44573", + "issue:44572|issue:44937", + "issue:44573|issue:44937" + ], + "prepared_review_unit_hash": "3c9726f83b8d85e5cb7d0cd87c486f7412e87becc21ffc47a643fdac9a8dd38a", + "component_budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 9, + "serialized_chars": 5791, + "estimated_input_tokens": 1448, + "estimated_eval_tokens": 3152 + }, + "budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 9, + "serialized_chars": 5791, + "estimated_input_tokens": 1448, + "estimated_eval_tokens": 3152 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All items look like unrelated low-information/spam issues rather than duplicates of one concrete bug or request. 
The identical \"\" titles are not enough to treat them as the same underlying issue.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44371", + "right": "issue:44572", + "accept": false, + "reason": "Same spammy title, but no body detail, targets, or evidence of a shared underlying bug/change." + }, + { + "left": "issue:44371", + "right": "issue:44573", + "accept": false, + "reason": "Same spammy title, but these look like separate junk reports rather than one concrete issue." + }, + { + "left": "issue:44371", + "right": "issue:44418", + "accept": false, + "reason": "One is spam and the other is a generic documentation suggestion; no common problem statement." + }, + { + "left": "issue:44371", + "right": "issue:44937", + "accept": false, + "reason": "Spam title versus unrelated branded text; no indication they describe the same issue." + }, + { + "left": "issue:44418", + "right": "issue:44572", + "accept": false, + "reason": "Generic docs suggestion and spam are unrelated; no shared technical content." + }, + { + "left": "issue:44418", + "right": "issue:44573", + "accept": false, + "reason": "Generic docs suggestion and spam are unrelated; not the same underlying request." + }, + { + "left": "issue:44572", + "right": "issue:44573", + "accept": false, + "reason": "Identical spam titles are not sufficient to merge as duplicates without any concrete shared bug or change description." + }, + { + "left": "issue:44572", + "right": "issue:44937", + "accept": false, + "reason": "Spam issue and unrelated marketing-style title; no common subject matter." + }, + { + "left": "issue:44573", + "right": "issue:44937", + "accept": false, + "reason": "Spam issue and unrelated marketing-style title; no evidence of the same underlying issue." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 12, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45145-2", + "nodes": [ + "issue:45145", + "issue:45151" + ], + "soft_pairs": [ + "issue:45145|issue:45151" + ], + "prepared_review_unit_hash": "b0f2278ebda48cfb39cce3644c42d5871fcb0958bfdb4225404c4ac67b84ef31", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1646, + "estimated_input_tokens": 412, + "estimated_eval_tokens": 1080 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1646, + "estimated_input_tokens": 412, + "estimated_eval_tokens": 1080 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These issues are very likely duplicates: they have identical titles, similar size, and a soft-similarity signal. #45145 is the better canonical issue because it is earlier and already has an inbound reference. 
", + "confidence": 0.93, + "canonical_issue_reason": "#45145 is earlier and already has an inbound reference, so it is the best canonical thread for consolidating this likely duplicate report.", + "canonical_pr_reason": null, + "best_issue_reason": "#45145 is the best issue candidate because it is the earliest matching report and already has inbound discussion.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45145", + "right": "issue:45151", + "accept": true, + "reason": "The titles are identical and the soft-similarity signal is strong, so these appear to be likely duplicates even though the packet does not expose the full bodies." + } + ] + }, + "evaluator_result": { + "accept": false, + "feedback": "The canonical choice is grounded (earlier issue with an inbound reference and identical title), but the duplicate judgment is overstated. The packet only shows exact title match plus a soft-similarity signal; it does not provide the bodies, so \"near-identical content/body\" and \"straightforward duplicate\" are stronger than the evidence supports. A more conservative wording would be that they are likely duplicates based on identical titles and similarity signals." 
+ }, + "evaluator_used": true, + "retried": true, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 13, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "cache", + "cache_hit": true, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45295-2", + "nodes": [ + "issue:45295", + "issue:45373" + ], + "soft_pairs": [], + "prepared_review_unit_hash": "f5fd95fada2f530ab0999e343c17c16266cde0c7182bb22509972ca44827cd49", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 0, + "serialized_chars": 1286, + "estimated_input_tokens": 322, + "estimated_eval_tokens": 900 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 0, + "serialized_chars": 1286, + "estimated_input_tokens": 322, + "estimated_eval_tokens": 900 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both items are the same missing feature request for Gemma 4 sequence classification support; the newer issue is more specific and better documented.", + "confidence": 0.97, + "canonical_issue_reason": "Issue #45373 is the clearest canonical issue because it precisely names the missing class (`Gemma4ForSequenceClassification`) and states the gap in the `gemma4` module.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #45373 is the best representative: it is open, more detailed, and directly describes the exact API missing from Gemma 4.", + "best_pr_reason": null, + "soft_edge_verdicts": [] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 1, + "component_count": 24, + "review_unit_index": 1, + 
"review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-39895-14", + "nodes": [ + "pull_request:39895", + "pull_request:42668", + "pull_request:43251", + "pull_request:43254", + "pull_request:43424", + "pull_request:43823", + "pull_request:43995", + "pull_request:44827", + "pull_request:45078", + "pull_request:45170", + "pull_request:45213", + "pull_request:45435", + "pull_request:45454", + "pull_request:45489" + ], + "soft_pairs": [ + "pull_request:43251|pull_request:43254", + "pull_request:45454|pull_request:45489", + "pull_request:44827|pull_request:45489", + "pull_request:42668|pull_request:43823", + "pull_request:42668|pull_request:45078", + "pull_request:45078|pull_request:45454", + "pull_request:43823|pull_request:45489", + "pull_request:45213|pull_request:45435", + "pull_request:43823|pull_request:45454", + "pull_request:43823|pull_request:45435", + "pull_request:44827|pull_request:45170", + "pull_request:43424|pull_request:44827", + "pull_request:43823|pull_request:43995", + "pull_request:43251|pull_request:43823", + "pull_request:43995|pull_request:45078", + "pull_request:45078|pull_request:45435", + "pull_request:43995|pull_request:45435", + "pull_request:43424|pull_request:45435", + "pull_request:43251|pull_request:44827", + "pull_request:43424|pull_request:43823", + "pull_request:43995|pull_request:45213", + "pull_request:39895|pull_request:43424", + "pull_request:44827|pull_request:45213", + "pull_request:43995|pull_request:44827" + ], + "prepared_review_unit_hash": "0e3c5f39a143fc8545c980985ff4c6ceac70c4fa2dac26d4446a9ee58f303c0a", + "component_budget": { + "node_count": 14, + "item_count": 14, + "soft_pair_count": 28, + "serialized_chars": 41382, + "estimated_input_tokens": 10346, + "estimated_eval_tokens": 20948 + }, + "budget": { + "node_count": 14, + "item_count": 14, + "soft_pair_count": 24, + "serialized_chars": 37962, + 
"estimated_input_tokens": 9491, + "estimated_eval_tokens": 19238 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These PRs are mostly unrelated. The only clear duplicate pair is the two `fixed_cross_entropy` patches, which both add `weight` and `label_smoothing` support and forward them to `nn.functional.cross_entropy` in the same helper.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 43251 is the best canonical representative for the loss fix: it has the explicit issue link, the more complete patch, and the broader discussion around passing kwargs through `fixed_cross_entropy`.", + "best_issue_reason": null, + "best_pr_reason": "PR 43251 is the strongest global canonical PR among these because it most directly addresses a specific bug, has the linked target issue, and cleanly represents the duplicate loss-helper change.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43251", + "right": "pull_request:43254", + "accept": true, + "reason": "Same underlying change in `src/transformers/loss/loss_utils.py`: both add `weight` and `label_smoothing` support to `fixed_cross_entropy` and pass them into `nn.functional.cross_entropy`." + }, + { + "left": "pull_request:45454", + "right": "pull_request:45489", + "accept": false, + "reason": "Both touch Gemma model code, but one is about text-only training samples in Gemma4; the other is about Gemma3n cache-sharing/attention behavior. Different bugs/changes." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45489", + "accept": false, + "reason": "Mistral4 test fixes and Gemma3n/Gemma4 cache-sharing refactoring are unrelated changes; shared ecosystem and model files are not enough to make them duplicates." 
+ }, + { + "left": "pull_request:42668", + "right": "pull_request:43823", + "accept": false, + "reason": "Processor auto-loading robustness vs adding a new MobileLLM model are unrelated PRs, even though both involve model infrastructure." + }, + { + "left": "pull_request:42668", + "right": "pull_request:45078", + "accept": false, + "reason": "One broadens processor/tokenizer loading behavior; the other changes tokenizer auto-selection error handling. Different code paths and bugs." + }, + { + "left": "pull_request:45078", + "right": "pull_request:45454", + "accept": false, + "reason": "Tokenizer auto-selection logic and Gemma4 training-mask behavior are completely different areas of the codebase." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45489", + "accept": false, + "reason": "New MobileLLM model support and Gemma3n/Gemma4 cache-sharing alignment are unrelated model additions/refactors." + }, + { + "left": "pull_request:45213", + "right": "pull_request:45435", + "accept": false, + "reason": "A model-creation skill PR and a Whisper tokenizer bounds fix are unrelated." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45454", + "accept": false, + "reason": "MobileLLM model addition and Gemma4 training-only sample handling are unrelated." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45435", + "accept": false, + "reason": "MobileLLM model addition does not overlap with the Whisper special-token indexing bug fix." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45170", + "accept": false, + "reason": "Mistral4 test fixes and a `layrnorm`/`layernorm` rename across CLIP-like models are different changes." + }, + { + "left": "pull_request:43424", + "right": "pull_request:44827", + "accept": false, + "reason": "Executorch dynamic-shape export tests and Mistral4 test fixes are unrelated test additions." 
+ }, + { + "left": "pull_request:43823", + "right": "pull_request:43995", + "accept": false, + "reason": "Adding MobileLLM and refactoring Falcon output collection are unrelated model changes." + }, + { + "left": "pull_request:43251", + "right": "pull_request:43823", + "accept": false, + "reason": "Loss helper kwargs support and MobileLLM model addition have no shared bug or code path." + }, + { + "left": "pull_request:43995", + "right": "pull_request:45078", + "accept": false, + "reason": "Falcon output-collection refactoring and tokenizer auto-selection error handling are unrelated." + }, + { + "left": "pull_request:45078", + "right": "pull_request:45435", + "accept": false, + "reason": "Tokenizer auto-selection behavior and Whisper tokenization bounds checking are distinct tokenizer bugs." + }, + { + "left": "pull_request:43995", + "right": "pull_request:45435", + "accept": false, + "reason": "Falcon forward/output refactoring and Whisper special-token indexing are unrelated." + }, + { + "left": "pull_request:43424", + "right": "pull_request:45435", + "accept": false, + "reason": "Executorch export tests and Whisper tokenization fix do not address the same underlying issue." + }, + { + "left": "pull_request:43251", + "right": "pull_request:44827", + "accept": false, + "reason": "Cross-entropy kwargs support and Mistral4 test fixes are unrelated." + }, + { + "left": "pull_request:43424", + "right": "pull_request:43823", + "accept": false, + "reason": "Executorch dynamic-shape test coverage and MobileLLM model addition are unrelated." + }, + { + "left": "pull_request:43995", + "right": "pull_request:45213", + "accept": false, + "reason": "Falcon standardized-output refactor and the model-creation skill draft are unrelated." + }, + { + "left": "pull_request:39895", + "right": "pull_request:43424", + "accept": false, + "reason": "VideoPrism model/docs addition and Executorch export tests are unrelated additions." 
+ }, + { + "left": "pull_request:44827", + "right": "pull_request:45213", + "accept": false, + "reason": "Mistral4 test fixes and the model-creation skill draft are unrelated." + }, + { + "left": "pull_request:43995", + "right": "pull_request:44827", + "accept": false, + "reason": "Falcon output-collection refactor and Mistral4 test fixes are different model-specific changes." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 1, + "component_count": 24, + "review_unit_index": 2, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-39895-14", + "nodes": [ + "pull_request:43823", + "pull_request:44827", + "pull_request:45078", + "pull_request:45213" + ], + "soft_pairs": [ + "pull_request:43823|pull_request:44827", + "pull_request:43823|pull_request:45213", + "pull_request:45078|pull_request:45213", + "pull_request:43823|pull_request:45078" + ], + "prepared_review_unit_hash": "9550ced32705c66e6f2fd580ac700e34c207e21ac6d896f0fe4fa9351bf50e4e", + "component_budget": { + "node_count": 14, + "item_count": 14, + "soft_pair_count": 28, + "serialized_chars": 41382, + "estimated_input_tokens": 10346, + "estimated_eval_tokens": 20948 + }, + "budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 4, + "serialized_chars": 10285, + "estimated_input_tokens": 2572, + "estimated_eval_tokens": 5400 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These four PRs are not duplicates: one adds MobileLLM, one fixes Mistral4 tests and grouped-mm behavior, one changes tokenizer auto-selection/error handling, and one is a large draft model-creation skill/tooling PR.", + 
"confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": "PR 43823 is the most self-contained substantive code addition with model files and tests, but it is unrelated to the other PRs.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43823", + "right": "pull_request:44827", + "accept": false, + "reason": "Different changes and code paths: new MobileLLM model vs Mistral4 test fix and grouped_mm contiguity tweak." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45213", + "accept": false, + "reason": "MobileLLM model implementation is unrelated to the model-creation skill/tooling and docs workflow PR." + }, + { + "left": "pull_request:45078", + "right": "pull_request:45213", + "accept": false, + "reason": "Tokenizer auto-loading logic change is a separate runtime behavior fix, not the model-creation skill or scaffolding PR." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45078", + "accept": false, + "reason": "New model implementation vs tokenizer_auto conversion/error handling; they do not fix the same underlying bug or change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 2, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-40546-2", + "nodes": [ + "pull_request:40546", + "pull_request:42781" + ], + "soft_pairs": [ + "pull_request:40546|pull_request:42781" + ], + "prepared_review_unit_hash": "6258f3819f6acfa2fdab78d7e7a3247eaeb585d87fb5e74413ff5b199f334567", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 7225, + "estimated_input_tokens": 1807, + "estimated_eval_tokens": 3870 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 7225, + "estimated_input_tokens": 1807, + "estimated_eval_tokens": 3870 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs are VibeVoice-related, but they are not the same change: #40546 is the base VibeVoice implementation, while #42781 adds a separate realtime extension and acoustic tokenizer work. 
The overlap is mostly shared subsystem files, not a single concrete fix/change.", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": "#40546 is the primary, non-draft VibeVoice implementation with substantial discussion and broader initial coverage; it is the better anchor PR for this cluster.", + "best_issue_reason": null, + "best_pr_reason": "#40546 best represents the cluster because it is the main implementation PR, whereas #42781 is a draft follow-up for realtime support and is narrower/specialized.", + "soft_edge_verdicts": [ + { + "left": "pull_request:40546", + "right": "pull_request:42781", + "accept": false, + "reason": "Shared VibeVoice files and auto-registry edits indicate related area, but #42781 adds realtime/acoustic-tokenizer functionality and generation changes rather than fixing or implementing the same concrete code path as #40546. They would not plausibly be merged as one PR." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 3, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-40755-2", + "nodes": [ + "pull_request:40755", + "pull_request:41224" + ], + "soft_pairs": [ + "pull_request:40755|pull_request:41224" + ], + "prepared_review_unit_hash": "ca08cfafffb81953b0a8dd69f72b038e36d33eb0ee0c1977f4bceb0c4fd8a2ea", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4674, + "estimated_input_tokens": 1169, + "estimated_eval_tokens": 2594 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4674, + "estimated_input_tokens": 1169, + "estimated_eval_tokens": 
2594 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The two PRs are unrelated: one adds TimesFM covariate forecasting support, while the other adds DINOv3 image classification support. They do not fix the same bug or implement the same change, so this is not a duplicate cluster.", + "confidence": 0.99, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": "No single best PR for deduplication: the items target different model families, different files, and different user-facing capabilities.", + "soft_edge_verdicts": [ + { + "left": "pull_request:40755", + "right": "pull_request:41224", + "accept": false, + "reason": "Different features in different subsystems: TimesFM covariate forecasting vs. DINOv3 image classification. No shared code-path problem or mergeable overlap." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 4, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41356-2", + "nodes": [ + "pull_request:41356", + "pull_request:44339" + ], + "soft_pairs": [ + "pull_request:41356|pull_request:44339" + ], + "prepared_review_unit_hash": "bf853edcac36b9d46727ab7091e6d4edc2525d28b28d3cc612346441fafe02a8", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5934, + "estimated_input_tokens": 1484, + "estimated_eval_tokens": 3224 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5934, + "estimated_input_tokens": 1484, + 
"estimated_eval_tokens": 3224 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both pull requests are the same feature family: adding DEIMv2 support to Transformers. PR 44339 is the more complete and later integration, while PR 41356 is an earlier/basic scaffold for the same model addition.", + "confidence": 0.84, + "canonical_issue_reason": null, + "canonical_pr_reason": "pull_request:44339 is the better canonical PR because it is the fuller DEIMv2 integration (model, loss, auto mappings, conversion script, modular code, and tests) and appears to supersede the earlier partial implementation.", + "best_issue_reason": null, + "best_pr_reason": "pull_request:44339 is the strongest representative for the cluster since it contains the most complete implementation path for adding DEIMv2 and covers the same underlying change as the earlier PR.", + "soft_edge_verdicts": [ + { + "left": "pull_request:41356", + "right": "pull_request:44339", + "accept": true, + "reason": "Yes: both PRs implement DEIMv2 support for the same model addition; 44339 is a superset/follow-up of the earlier 41356 scaffold and could plausibly be merged as one feature PR." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet: both PRs target the same issue (41211) and add DEIMv2 support, with 44339 clearly covering a broader implementation surface than 41356. The soft-edge verdict is conservative enough, since the overlap is real and the two changes appear to be successive parts of the same feature rather than unrelated work." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 5, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41797-2", + "nodes": [ + "pull_request:41797", + "pull_request:45186" + ], + "soft_pairs": [ + "pull_request:41797|pull_request:45186" + ], + "prepared_review_unit_hash": "73cb19002c31055ec111d538563a22c523e9281928f2d85f19cf165b9e871102", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6723, + "estimated_input_tokens": 1681, + "estimated_eval_tokens": 3618 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6723, + "estimated_input_tokens": 1681, + "estimated_eval_tokens": 3618 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The two PRs are both new-model integration patches, but they add different models (DeepSeek OCR vs Isaac) and do not fix the same underlying bug/change. 
The shared files are mostly generic Transformers scaffolding, so this is not a duplicate pair.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": "PR 41797 is the better representative artifact for the cluster because it is a complete model-addition PR with broad integration coverage and substantial review context, but it is still unrelated to PR 45186.", + "soft_edge_verdicts": [ + { + "left": "pull_request:41797", + "right": "pull_request:45186", + "accept": false, + "reason": "Different model additions with distinct codepaths and artifacts; overlap is limited to shared framework registration/docs boilerplate, so they are not the same change and should not be merged as duplicates." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 6, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42166-2", + "nodes": [ + "pull_request:42166", + "pull_request:45426" + ], + "soft_pairs": [ + "pull_request:42166|pull_request:45426" + ], + "prepared_review_unit_hash": "c44458b8a8a9be4fe66685cd762a1528157fa896c200c396e7af829c061a9514", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5407, + "estimated_input_tokens": 1352, + "estimated_eval_tokens": 2960 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5407, + "estimated_input_tokens": 1352, + "estimated_eval_tokens": 2960 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": 
"These are two unrelated model-addition PRs that only overlap in shared auto-registration plumbing. They do not appear to fix the same bug or implement the same change, so the soft duplicate link should be rejected.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "No canonical PR: #42166 adds InternVLFlash, while #45426 adds AXK1; they are distinct feature PRs with different model implementations and docs.", + "best_issue_reason": null, + "best_pr_reason": "#42166 is the better representative PR for the cluster only because it is larger and more mature, but it is not a duplicate of #45426.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42166", + "right": "pull_request:45426", + "accept": false, + "reason": "Shared transformer registry files are incidental; the PRs add different models (InternVLFlash vs AXK1) with different code paths and cannot plausibly be merged as one fix/change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 7, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42227-4", + "nodes": [ + "pull_request:42227", + "pull_request:43291", + "pull_request:43665", + "pull_request:44815" + ], + "soft_pairs": [ + "pull_request:42227|pull_request:43291", + "pull_request:42227|pull_request:43665", + "pull_request:42227|pull_request:44815", + "pull_request:43291|pull_request:43665", + "pull_request:43291|pull_request:44815", + "pull_request:43665|pull_request:44815" + ], + "prepared_review_unit_hash": "af869772927016408959bba30c7230e0e7560e2048c770e9a1f3ae5f753d376b", + "component_budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + 
"serialized_chars": 10167, + "estimated_input_tokens": 2542, + "estimated_eval_tokens": 5340 + }, + "budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 10167, + "estimated_input_tokens": 2542, + "estimated_eval_tokens": 5340 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "This cluster is not a duplicate set: it mixes a Whisper ASR bugfix, a Whisper tokenizer/test change, a CLIP/ViT test-only diff, and an FP8 dequant/model-loading change. They do not look like the same underlying bug or change.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "No single canonical PR fits the cluster: the items touch different code paths and purposes (Whisper ASR return_language plumbing, Whisper tokenizer timestamp decoding/tests, unrelated CLIP/ViT test edits, and FP8 dequant/model-loading changes).", + "best_issue_reason": null, + "best_pr_reason": "PR 42227 is the strongest standalone fix to keep as representative because it contains a concrete runtime bugfix with an explicit issue target; the others are either test-only, unrelated, or broader refactors.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42227", + "right": "pull_request:43291", + "accept": false, + "reason": "Both are Whisper-related, but they fix different paths: 42227 plumbs return_language through the ASR pipeline, while 43291 changes tokenizer decode/timestamp handling and tests. Not the same change." + }, + { + "left": "pull_request:42227", + "right": "pull_request:43665", + "accept": false, + "reason": "Unrelated changes: 42227 is Whisper ASR logic, while 43665 is test-only edits for CLIP/ViT. Not the same bug or fix." + }, + { + "left": "pull_request:42227", + "right": "pull_request:44815", + "accept": false, + "reason": "Different subsystems and problems: Whisper ASR language return vs FP8 dequant/model-loading work. 
Not mergeable as one PR." + }, + { + "left": "pull_request:43291", + "right": "pull_request:43665", + "accept": false, + "reason": "Whisper tokenizer behavior vs CLIP/ViT tests; different code paths and no shared underlying bug." + }, + { + "left": "pull_request:43291", + "right": "pull_request:44815", + "accept": false, + "reason": "Whisper tokenizer decoding changes are unrelated to FP8 dequant/model-loading fixes." + }, + { + "left": "pull_request:43665", + "right": "pull_request:44815", + "accept": false, + "reason": "Test-only CLIP/ViT edits are unrelated to the FP8 dequant/model-loading change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 8, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42975-2", + "nodes": [ + "pull_request:42975", + "pull_request:42976" + ], + "soft_pairs": [ + "pull_request:42975|pull_request:42976" + ], + "prepared_review_unit_hash": "60617b591df0c371a7ec5114a7742ef002d411e8dd0d437e1219354434e3ae26", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5935, + "estimated_input_tokens": 1484, + "estimated_eval_tokens": 3224 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5935, + "estimated_input_tokens": 1484, + "estimated_eval_tokens": 3224 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two unrelated maintenance PRs that both update GitHub Actions versions across workflows, but they target different actions and different scopes/motivations. 
They are similar in theme, not the same underlying change.", + "confidence": 0.89, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #42975 is the more representative cluster member because it broadly upgrades multiple GitHub Actions to Node 24-compatible versions across many workflows, matching the cluster\u2019s shared maintenance theme.", + "best_issue_reason": null, + "best_pr_reason": "PR #42975 is the best single representative of the cluster because it is the broader, more general GitHub Actions compatibility upgrade; #42976 is a narrower version bump focused mainly on docker/build-push-action and a few related workflow updates.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42975", + "right": "pull_request:42976", + "accept": false, + "reason": "Both are CI maintenance PRs, but they update different third-party actions (Node 24 compatibility vs latest docker/build-push-action versions) and do not appear to fix the same concrete problem or be mergeable into one PR as duplicates." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 9, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42978-4", + "nodes": [ + "pull_request:42978", + "pull_request:43448", + "pull_request:43451", + "pull_request:45490" + ], + "soft_pairs": [ + "pull_request:42978|pull_request:43448", + "pull_request:42978|pull_request:43451", + "pull_request:42978|pull_request:45490", + "pull_request:43448|pull_request:43451", + "pull_request:43448|pull_request:45490", + "pull_request:43451|pull_request:45490" + ], + "prepared_review_unit_hash": "a9cb13311be3ecb2735116917f14c051e4d842f81e9f9df85f0ae86d04a68992", + "component_budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 14136, + "estimated_input_tokens": 3534, + "estimated_eval_tokens": 7324 + }, + "budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 14136, + "estimated_input_tokens": 3534, + "estimated_eval_tokens": 7324 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All four PRs are separate model-addition changes, not duplicates: ViT NEPA, Molmo, Molmo2, and CTSM. 
The overlap is limited to shared boilerplate areas like docs, auto-registry mappings, and test scaffolding.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "No canonical PR: each PR introduces a different model family and distinct implementation files, so they should not be merged as one underlying change.", + "best_issue_reason": null, + "best_pr_reason": "No single best PR for duplicate triage; none of these is a duplicate of the others, and the shared files are generic model-registration/docs plumbing rather than the same code-path fix.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42978", + "right": "pull_request:43448", + "accept": false, + "reason": "ViT NEPA vs Molmo are different model additions with different configs/modeling/tests; shared registry/docs files are boilerplate only." + }, + { + "left": "pull_request:42978", + "right": "pull_request:43451", + "accept": false, + "reason": "ViT NEPA and Molmo2 are distinct model families with separate source trees; no same underlying bug/change." + }, + { + "left": "pull_request:42978", + "right": "pull_request:45490", + "accept": false, + "reason": "ViT NEPA and CTSM add unrelated models; overlap is limited to generic docs/auto-mapping updates." + }, + { + "left": "pull_request:43448", + "right": "pull_request:43451", + "accept": false, + "reason": "Molmo and Molmo2 are related by name but implement different model families and codepaths; not mergeable as one PR." + }, + { + "left": "pull_request:43448", + "right": "pull_request:45490", + "accept": false, + "reason": "Molmo and CTSM are separate model additions; shared changes are only broad integration boilerplate." + }, + { + "left": "pull_request:43451", + "right": "pull_request:45490", + "accept": false, + "reason": "Molmo2 and CTSM add different models with different implementation files; not the same concrete change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 10, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43488-3", + "nodes": [ + "pull_request:43488", + "pull_request:43532", + "pull_request:45476" + ], + "soft_pairs": [ + "pull_request:43488|pull_request:43532", + "pull_request:43488|pull_request:45476", + "pull_request:43532|pull_request:45476" + ], + "prepared_review_unit_hash": "598280c219cb888b3aa248bab5b45f510020632848474e2671034e1feaf3725f", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7469, + "estimated_input_tokens": 1868, + "estimated_eval_tokens": 3992 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7469, + "estimated_input_tokens": 1868, + "estimated_eval_tokens": 3992 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All three pull requests are unrelated test/CI or placeholder changes; they do not appear to fix the same underlying bug or implement the same change. The shared Vit test file is incidental and not enough to merge any pair.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "pull_request:43488", + "right": "pull_request:43532", + "accept": false, + "reason": "Different changesets: one alters setup/versioning and a Vit test, the other rewires self-comment CI workflows and testing utilities. Shared filename alone is incidental." 
+ }, + { + "left": "pull_request:43488", + "right": "pull_request:45476", + "accept": false, + "reason": "Unrelated purposes: one is a repo-bot/version formatting check, the other adds a PR CI workflow and unrelated code/test edits. No common concrete bug or fix." + }, + { + "left": "pull_request:43532", + "right": "pull_request:45476", + "accept": false, + "reason": "Both touch CI/test areas, but they are not the same code-path problem. The workflow rewrites and added dummy edits are distinct and would not plausibly be merged as one fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 11, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43612-2", + "nodes": [ + "pull_request:43612", + "pull_request:43613" + ], + "soft_pairs": [ + "pull_request:43612|pull_request:43613" + ], + "prepared_review_unit_hash": "4a48926e4e2e05016c6e89b018607273646819f9d25b7f41bd77599a14767b8e", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6082, + "estimated_input_tokens": 1521, + "estimated_eval_tokens": 3298 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6082, + "estimated_input_tokens": 1521, + "estimated_eval_tokens": 3298 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are related only at the tooling/docs level; they add different pipelines for different tasks and should not be deduplicated.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "No single canonical 
PR: the two pull requests implement different pipeline features (concept segmentation vs visual segmentation) with different model/test surfaces.", + "best_issue_reason": null, + "best_pr_reason": "PR 43613 is the broader and more complete pipeline addition, touching more model integrations and tests, so it is the better representative of the cluster even though it is not a duplicate of 43612.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43612", + "right": "pull_request:43613", + "accept": false, + "reason": "Different concrete changes: one adds Promptable Concept Segmentation, the other Promptable Visual Segmentation. Shared docs/pipeline plumbing is too broad to treat as the same fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 12, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43651-2", + "nodes": [ + "pull_request:43651", + "pull_request:43663" + ], + "soft_pairs": [ + "pull_request:43651|pull_request:43663" + ], + "prepared_review_unit_hash": "0085a96816bcdd342f6b2ba4f2084924e960065c7e6d22dc441574e0b5ca7b57", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4292, + "estimated_input_tokens": 1073, + "estimated_eval_tokens": 2402 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4292, + "estimated_input_tokens": 1073, + "estimated_eval_tokens": 2402 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both items are small trainer.py PRs, but they address different code paths: 
gradient-accumulation loss scaling vs dataloader signature-column filtering. They should not be deduplicated.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "No single PR cleanly represents the cluster as a duplicate set; 43651 and 43663 change unrelated Trainer behaviors despite both touching trainer.py.", + "best_issue_reason": null, + "best_pr_reason": "Neither PR is a strong canonical representative for the pair. If one must be chosen, 43651 is the more self-contained behavior fix, but it is not a duplicate of 43663.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43651", + "right": "pull_request:43663", + "accept": false, + "reason": "Different underlying changes: 43651 alters loss scaling during gradient accumulation, while 43663 adds a hook for signature-column filtering. Same file/subsystem is not enough to treat them as the same fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 13, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43656-3", + "nodes": [ + "pull_request:43656", + "pull_request:43836", + "pull_request:43842" + ], + "soft_pairs": [ + "pull_request:43656|pull_request:43842", + "pull_request:43836|pull_request:43842" + ], + "prepared_review_unit_hash": "1ea3ba19090d629f59021f2199adc50692738eaa6c65ef7f4687326c0123a4a2", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 5992, + "estimated_input_tokens": 1498, + "estimated_eval_tokens": 3252 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 5992, + "estimated_input_tokens": 1498, + 
"estimated_eval_tokens": 3252 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All three PRs touch `src/transformers/cli/serve.py` and the CLI\u2019s TypeAdapter/runtime typing problem, but they are not all the same fix. PR 43842 is the most focused patch for the actual import-time NameError; 43836 looks like a broader alternative workaround in the same area; 43656 appears to be a separate, broader typing cleanup despite the similar title.", + "confidence": 0.69, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 43842 is the best canonical PR: it directly targets the TypeAdapter NameError in the CLI with the smallest, clearest change set and the strongest issue linkage.", + "best_issue_reason": null, + "best_pr_reason": "PR 43842 is the best overall fit because it is the most narrowly scoped and directly addresses the concrete runtime NameError without introducing broader unrelated typing changes.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43656", + "right": "pull_request:43842", + "accept": false, + "reason": "Both mention the CLI TypeAdapter error, but 43656 is a broader typing/`__future__` cleanup tied to a different issue target and does not clearly look like the same concrete fix as 43842." + }, + { + "left": "pull_request:43836", + "right": "pull_request:43842", + "accept": true, + "reason": "Both target the same serve.py import-time TypeAdapter NameError in the CLI, and the fixes are alternative ways of deferring annotation evaluation in the same code path." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays appropriately conservative. The rejection of 43656 vs 43842 is justified by the different issue target and the fact that the diff preview suggests a broader typing cleanup rather than the same concrete fix. 
The acceptance of 43836 vs 43842 is also reasonable: both target the same issue in the same file and appear to be alternative ways of fixing the same import-time TypeAdapter problem." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 14, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43757-3", + "nodes": [ + "pull_request:43757", + "pull_request:45500", + "pull_request:45506" + ], + "soft_pairs": [ + "pull_request:45500|pull_request:45506", + "pull_request:43757|pull_request:45500", + "pull_request:43757|pull_request:45506" + ], + "prepared_review_unit_hash": "01c33bc963e7f0ba36ed7a427db39f572861f21fabed6710902203b682053d7b", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7783, + "estimated_input_tokens": 1946, + "estimated_eval_tokens": 4148 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7783, + "estimated_input_tokens": 1946, + "estimated_eval_tokens": 4148 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "45500 and 45506 are near-identical duplicate PRs for GPT-OSS GGUF loading support; 45506 is the newer copy. 
43757 is related but only a narrower fallback patch, not the same full change.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "45506 is the latest duplicate of the full GPT-OSS GGUF loading support PR, with the same title, files, and issue target as 45500.", + "best_issue_reason": null, + "best_pr_reason": "45506 is the best representative because it is the newest identical full-support PR and includes the complete code/docs/tests update, not just the earlier fallback workaround.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45500", + "right": "pull_request:45506", + "accept": true, + "reason": "Same PR content: identical title, same files, same issue target, and effectively the same GGUF GPT-OSS support change; 45506 is just the newer duplicate." + }, + { + "left": "pull_request:43757", + "right": "pull_request:45500", + "accept": false, + "reason": "43757 is only a small fallback to avoid hard failure for gpt-oss GGUF, while 45500 adds full loading support plus docs/tests; they are related but not the same concrete change." + }, + { + "left": "pull_request:43757", + "right": "pull_request:45506", + "accept": false, + "reason": "Same as 45500: 43757 is a partial workaround, not the same full GPT-OSS GGUF support PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 15, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "cache", + "cache_hit": true, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43779-2", + "nodes": [ + "pull_request:43779", + "pull_request:43816" + ], + "soft_pairs": [ + "pull_request:43779|pull_request:43816" + ], + "prepared_review_unit_hash": "e7c674adcc031aa8883109a5dfc678b4407fb20b10764302092e384d031ba83a", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4362, + "estimated_input_tokens": 1091, + "estimated_eval_tokens": 2438 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4362, + "estimated_input_tokens": 1091, + "estimated_eval_tokens": 2438 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs target the same SwanLabCallback enhancement in the same file and appear to implement the same underlying run-resume/id support for SwanLab init. 
PR 43816 looks like the more complete formulation, with explicit parameter docs plus the code-path change.", + "confidence": 0.79, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 43816 is the better canonical representative because it more clearly and completely describes the same SwanLab resume/id feature, including documentation for the new `SWANLAB_RUN_ID` and `SWANLAB_RESUME` parameters alongside the integration change.", + "best_issue_reason": null, + "best_pr_reason": "PR 43816 is the strongest overall candidate: it targets the same issue, touches the same code path, and appears to be the more complete implementation of the SwanLab `id`/`resume` support.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43779", + "right": "pull_request:43816", + "accept": true, + "reason": "Same underlying SwanLabCallback change: both modify `integration_utils.py` to support `id`/`resume`-style run resumption, share the same issue target, and could plausibly be merged into one PR." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The packet supports the analyst\u2019s conclusion: both PRs target issue 43698, touch the same file, and are clearly about the same SwanLab `id`/`resume` resume-support bugfix. The soft-edge mergeability verdict is conservative enough here. The only caveat is that PR 43816\u2019s preview is truncated, so the \u201cmore complete\u201d claim should be treated as tentative, but it is still grounded by the titles and diff previews." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 16, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43838-3", + "nodes": [ + "pull_request:43838", + "pull_request:43973", + "pull_request:45283" + ], + "soft_pairs": [ + "pull_request:43838|pull_request:45283", + "pull_request:43973|pull_request:45283" + ], + "prepared_review_unit_hash": "be2c93a177e9dfc00b51bc0ba9b57f20502e1545c380122387f4dc914c3770ba", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 8699, + "estimated_input_tokens": 2175, + "estimated_eval_tokens": 4606 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 8699, + "estimated_input_tokens": 2175, + "estimated_eval_tokens": 4606 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are three separate feature PRs: Qwen3-ASR support, Lfm2Audio support, and Qwen3.5 GGUF loading. 
They share only loose naming/subsystem similarity and do not look like the same underlying change.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 43838 is the most representative item only because it is the largest, most developed PR and has an explicit issue link, but it is still a distinct Qwen3-ASR feature PR, not a duplicate of the others.", + "best_issue_reason": null, + "best_pr_reason": "PR 43838 is the best single representative for the cluster due to its scope, review activity, and explicit tracking issue, but it should not be merged with the other two PRs.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43838", + "right": "pull_request:45283", + "accept": false, + "reason": "Qwen3-ASR model support versus Qwen3.5 GGUF loading are different changes touching different code paths; shared Qwen naming is not enough to treat them as the same PR." + }, + { + "left": "pull_request:43973", + "right": "pull_request:45283", + "accept": false, + "reason": "Lfm2Audio model integration and Qwen3.5 GGUF loading are unrelated feature additions in different subsystems and are not plausibly one merged fix/change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 17, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 4, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43996", + "pull_request:44007", + "pull_request:44013", + "pull_request:44018", + "pull_request:44019", + "pull_request:44044", + "pull_request:44066", + "pull_request:44068", + "pull_request:44071", + "pull_request:44072", + "pull_request:44085", + "pull_request:44086", + "pull_request:44116", + "pull_request:44722" + ], + "soft_pairs": [ + "pull_request:43996|pull_request:44085", + "pull_request:43996|pull_request:44044", + "pull_request:44066|pull_request:44085", + "pull_request:44007|pull_request:44072", + "pull_request:44072|pull_request:44722", + "pull_request:44066|pull_request:44072", + "pull_request:44013|pull_request:44044", + "pull_request:44066|pull_request:44086", + "pull_request:44066|pull_request:44071", + "pull_request:44018|pull_request:44068", + "pull_request:44066|pull_request:44068", + "pull_request:44018|pull_request:44066", + "pull_request:44019|pull_request:44722", + "pull_request:44086|pull_request:44722", + "pull_request:44019|pull_request:44085", + "pull_request:44018|pull_request:44019", + "pull_request:44019|pull_request:44071", + "pull_request:44068|pull_request:44116", + "pull_request:44068|pull_request:44722", + "pull_request:44071|pull_request:44722", + "pull_request:44018|pull_request:44116", + "pull_request:44018|pull_request:44086", + "pull_request:44018|pull_request:44085", + "pull_request:44071|pull_request:44085" + ], + "prepared_review_unit_hash": "60eb5f5e06020335410adf6c9e23b5e5831db61c1b09ee926747a29ff899ce93", + 
"component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 77, + "serialized_chars": 84507, + "estimated_input_tokens": 21127, + "estimated_eval_tokens": 42510 + }, + "budget": { + "node_count": 14, + "item_count": 14, + "soft_pair_count": 24, + "serialized_chars": 34769, + "estimated_input_tokens": 8693, + "estimated_eval_tokens": 17642 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a wave of model-specific output-tracing refactors. I only see a couple of near-duplicate pairs within the same model family; most other pairs are just related by the shared umbrella issue or refactor theme, not the same concrete change.", + "confidence": 0.83, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44066 is the clearest representative: it has an explicit refactor scope, an issue link, and a concrete decorator-based output-tracing conversion across GPT-J/CodeGen.", + "best_issue_reason": null, + "best_pr_reason": "PR 44066 is the best overall exemplar of the cluster because it is issue-linked, concrete, and more representative than the narrower per-model refactors.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43996", + "right": "pull_request:44085", + "accept": false, + "reason": "Different model families and files; same umbrella output-tracing theme is not enough to make these the same change." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44044", + "accept": false, + "reason": "CVT/FNet refactor vs DeBERTa refactor; only the general tracing pattern overlaps." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44085", + "accept": false, + "reason": "Both touch GPT-J-style output tracing, but the evidence is too weak to treat them as the same concrete PR-level change." 
+ }, + { + "left": "pull_request:44007", + "right": "pull_request:44072", + "accept": false, + "reason": "ResNet-family refactor vs EfficientNet refactor; shared issue target does not imply duplicate code change." + }, + { + "left": "pull_request:44072", + "right": "pull_request:44722", + "accept": false, + "reason": "EfficientNet and GPT-J are different model implementations, so these are not the same underlying fix." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44072", + "accept": false, + "reason": "Different model families and different code paths; only the standardized output-tracing pattern matches." + }, + { + "left": "pull_request:44013", + "right": "pull_request:44044", + "accept": false, + "reason": "MobileNetV2 vs DeBERTa; same refactor style, but not the same bug or change." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT-J/CodeGen refactor versus MGP-STR refactor; no evidence of the same concrete change." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44071", + "accept": false, + "reason": "Different model stack and different files; this is only broad similarity around output capture." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44068", + "accept": true, + "reason": "Same model, same file, and very similar GPT-Neo output-tracing decorator refactor; these look like duplicate implementations of the same change." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44068", + "accept": false, + "reason": "GPT-J/CodeGen refactor and GPT-Neo refactor are different model-specific changes." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44066", + "accept": false, + "reason": "GPT-Neo versus GPT-J/CodeGen; they share the umbrella refactor theme but not the same code path." 
+ }, + { + "left": "pull_request:44019", + "right": "pull_request:44722", + "accept": false, + "reason": "ResNet output-tracing refactor and GPT-J output-tracing refactor are different concrete changes." + }, + { + "left": "pull_request:44086", + "right": "pull_request:44722", + "accept": false, + "reason": "MGP-STR and GPT-J are unrelated model implementations for duplicate-triage purposes." + }, + { + "left": "pull_request:44019", + "right": "pull_request:44085", + "accept": false, + "reason": "ResNet refactor versus a GPT-J-related refactor; not the same underlying bug/change." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44071", + "accept": false, + "reason": "GPT-Neo and MPT are different model code paths, so these are not duplicates." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44116", + "accept": false, + "reason": "GPT-Neo refactor versus Flaubert refactor; only the output-tracing mechanism is shared." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44722", + "accept": false, + "reason": "GPT-Neo and GPT-J are separate model implementations, so these are not the same concrete change." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44116", + "accept": false, + "reason": "GPT-Neo versus Flaubert; this is broad architectural similarity, not duplicate content." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT-Neo and MGP-STR are different model-specific refactors." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44085", + "accept": false, + "reason": "Different model families and no strong evidence that the exact same implementation change is being made." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44085", + "accept": false, + "reason": "MPT and GPT-J are unrelated concrete refactors despite both being about output tracing." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 17, + "component_count": 24, + "review_unit_index": 2, + "review_unit_count": 4, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43997", + "pull_request:44017", + "pull_request:44018", + "pull_request:44056", + "pull_request:44059", + "pull_request:44068", + "pull_request:44071", + "pull_request:44073", + "pull_request:44074", + "pull_request:44076", + "pull_request:44085", + "pull_request:44086", + "pull_request:44098", + "pull_request:44101", + "pull_request:44161", + "pull_request:44722" + ], + "soft_pairs": [ + "pull_request:44068|pull_request:44086", + "pull_request:44059|pull_request:44086", + "pull_request:44059|pull_request:44722", + "pull_request:44085|pull_request:44086", + "pull_request:44098|pull_request:44101", + "pull_request:44018|pull_request:44071", + "pull_request:44017|pull_request:44722", + "pull_request:44017|pull_request:44161", + "pull_request:44017|pull_request:44073", + "pull_request:44068|pull_request:44071", + "pull_request:44071|pull_request:44086", + "pull_request:44059|pull_request:44071", + "pull_request:43997|pull_request:44073", + "pull_request:44017|pull_request:44071", + "pull_request:44059|pull_request:44068", + "pull_request:44073|pull_request:44101", + "pull_request:44018|pull_request:44059", + "pull_request:44056|pull_request:44101", + "pull_request:44073|pull_request:44161", + "pull_request:44101|pull_request:44161", + "pull_request:43997|pull_request:44017", + "pull_request:43997|pull_request:44056", + "pull_request:44071|pull_request:44076", + "pull_request:44074|pull_request:44101" + ], + "prepared_review_unit_hash": 
"46eec002f913899a4b70e884219c51975a8aaf8827132ae7314272fb1244b899", + "component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 77, + "serialized_chars": 84507, + "estimated_input_tokens": 21127, + "estimated_eval_tokens": 42510 + }, + "budget": { + "node_count": 16, + "item_count": 16, + "soft_pair_count": 24, + "serialized_chars": 38236, + "estimated_input_tokens": 9559, + "estimated_eval_tokens": 19374 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly separate model-specific refactors to standardized output tracing/capture_outputs, not duplicates of one another. The only real commonality is the refactor pattern; the concrete codepaths and model files differ, so all soft-edge candidate merges should be rejected.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": "No single PR is the same underlying change as the others; they are distinct per-model refactors. If one must be chosen as the representative PR, 44161 is the clearest because it explicitly targets the shared output-tracing migration and is the most broadly framed of the set.", + "best_issue_reason": null, + "best_pr_reason": "44161 is the best representative PR for the cluster because it explicitly references the shared decorator-based output-tracing migration, has the broadest refactor scope, and aligns with the common theme across the other PRs.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44068", + "right": "pull_request:44086", + "accept": false, + "reason": "Different models and codepaths (GPT-Neo vs MGP-STR); same refactor style only, not the same change." + }, + { + "left": "pull_request:44059", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT-2 vs MGP-STR are unrelated model implementations; both mention output tracing but fix different forward paths." 
+ }, + { + "left": "pull_request:44059", + "right": "pull_request:44722", + "accept": false, + "reason": "Both touch output tracing, but one is GPT-2 and the other is GPT-J/CodeGen; separate model-specific refactors." + }, + { + "left": "pull_request:44085", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT-J and MGP-STR are different architectures with different forward logic; not the same underlying bug or change." + }, + { + "left": "pull_request:44098", + "right": "pull_request:44101", + "accept": false, + "reason": "ViLT vs Flaubert/XLM are different model families; these are parallel refactors, not duplicates." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44071", + "accept": false, + "reason": "GPT-Neo and MPT are distinct codepaths; similar standardized-output cleanup, but not one mergeable duplicate PR." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44722", + "accept": false, + "reason": "SegFormer output tracing refactor is unrelated to GPT-J/CodeGen implementation details." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44161", + "accept": false, + "reason": "SegFormer and LongT5 are different models; same infrastructure pattern, different concrete changes." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44073", + "accept": false, + "reason": "SegFormer vs VisualBert targets different forward/capture paths; not the same underlying fix." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44071", + "accept": false, + "reason": "GPT-Neo and MPT are separate model implementations; the refactors are analogous but not duplicates." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44086", + "accept": false, + "reason": "MPT and MGP-STR touch different model-specific output handling; no shared concrete bug." 
+ }, + { + "left": "pull_request:44059", + "right": "pull_request:44071", + "accept": false, + "reason": "GPT-2 and MPT are different models; only the refactor theme matches." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44073", + "accept": false, + "reason": "RegNet and VisualBert have different output structures and files; these are separate standardized-output ports." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44071", + "accept": false, + "reason": "SegFormer and MPT refactors affect different model internals and output capture paths." + }, + { + "left": "pull_request:44059", + "right": "pull_request:44068", + "accept": false, + "reason": "GPT-2 vs GPT-Neo are distinct model files and forward implementations; not mergeable as one PR." + }, + { + "left": "pull_request:44073", + "right": "pull_request:44101", + "accept": false, + "reason": "VisualBert and Flaubert/XLM are unrelated model families; same pattern, different changes." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44059", + "accept": false, + "reason": "GPT-Neo and GPT-2 are different model codepaths; these are parallel refactors, not duplicates." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44101", + "accept": false, + "reason": "MPNet and Flaubert/XLM are different model implementations; no evidence of the same bug or patch." + }, + { + "left": "pull_request:44073", + "right": "pull_request:44161", + "accept": false, + "reason": "VisualBert and LongT5 are unrelated model-specific output-tracing refactors." + }, + { + "left": "pull_request:44101", + "right": "pull_request:44161", + "accept": false, + "reason": "Flaubert/XLM and LongT5 are different model families; same standardized-output theme only." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44017", + "accept": false, + "reason": "RegNet and SegFormer are separate vision model implementations with different output-capture behavior." 
+ }, + { + "left": "pull_request:43997", + "right": "pull_request:44056", + "accept": false, + "reason": "RegNet and MPNet are unrelated models; these are not the same underlying change." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44076", + "accept": false, + "reason": "MPT and ImageGPT are different model files and output paths; similar refactor pattern only." + }, + { + "left": "pull_request:44074", + "right": "pull_request:44101", + "accept": false, + "reason": "TextNet and Flaubert/XLM are different architectures; no shared concrete fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 17, + "component_count": 24, + "review_unit_index": 3, + "review_unit_count": 4, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43996", + "pull_request:43997", + "pull_request:44007", + "pull_request:44010", + "pull_request:44017", + "pull_request:44018", + "pull_request:44044", + "pull_request:44056", + "pull_request:44066", + "pull_request:44071", + "pull_request:44073", + "pull_request:44074", + "pull_request:44085", + "pull_request:44129", + "pull_request:44154", + "pull_request:44161", + "pull_request:44722" + ], + "soft_pairs": [ + "pull_request:44056|pull_request:44161", + "pull_request:43997|pull_request:44018", + "pull_request:43997|pull_request:44161", + "pull_request:44074|pull_request:44161", + "pull_request:43997|pull_request:44071", + "pull_request:44010|pull_request:44071", + "pull_request:44010|pull_request:44073", + "pull_request:44018|pull_request:44129", + "pull_request:43997|pull_request:44074", + "pull_request:43997|pull_request:44129", + "pull_request:44010|pull_request:44018", + 
"pull_request:44071|pull_request:44129", + "pull_request:43996|pull_request:44161", + "pull_request:43996|pull_request:44017", + "pull_request:44010|pull_request:44044", + "pull_request:44044|pull_request:44074", + "pull_request:44066|pull_request:44722", + "pull_request:44085|pull_request:44722", + "pull_request:44085|pull_request:44129", + "pull_request:43996|pull_request:44007", + "pull_request:43996|pull_request:44129", + "pull_request:44044|pull_request:44129", + "pull_request:44044|pull_request:44154" + ], + "prepared_review_unit_hash": "0bd87223b3bd5fc5bafd82823b0d52d503a0971cfa1ba04def4f32e58cd949e0", + "component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 77, + "serialized_chars": 84507, + "estimated_input_tokens": 21127, + "estimated_eval_tokens": 42510 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 23, + "serialized_chars": 40366, + "estimated_input_tokens": 10092, + "estimated_eval_tokens": 20440 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of output-tracing refactor PRs across different model families. Most are related by the same broader initiative, but they are not duplicates because they change different model code paths. 
The only clear duplicate-like overlap is among the GPT-J/CodeGen PRs.", + "confidence": 0.93, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44066 is the best representative: it directly targets #43979, covers the central GPT-J/CodeGen output-tracing refactor, and appears to be the earliest complete version in this set.", + "best_issue_reason": null, + "best_pr_reason": "44066 is the strongest global PR choice because it is the most central and complete match to the cluster\u2019s underlying refactor theme.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44056", + "right": "pull_request:44161", + "accept": false, + "reason": "Both are output-tracing refactors, but for different models (MPNet vs LongT5) and different code paths." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44018", + "accept": false, + "reason": "RegNet and GPT-Neo are unrelated model implementations; same broad refactor theme is not enough." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44161", + "accept": false, + "reason": "Different model families and different forward paths; not the same underlying change." + }, + { + "left": "pull_request:44074", + "right": "pull_request:44161", + "accept": false, + "reason": "TextNet and LongT5 are separate model refactors, not one shared concrete bug fix." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44071", + "accept": false, + "reason": "RegNet and MPT are different code paths; only the output-tracing theme overlaps." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44071", + "accept": false, + "reason": "SqueezeBert and MPT are unrelated model-specific changes." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44073", + "accept": false, + "reason": "SqueezeBert and VisualBert refactors are not the same underlying change." 
+ }, + { + "left": "pull_request:44018", + "right": "pull_request:44129", + "accept": false, + "reason": "GPT-Neo and SpeechT5 touch different architectures and output code paths." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44074", + "accept": false, + "reason": "RegNet and TextNet are distinct model-specific output refactors." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44129", + "accept": false, + "reason": "Different models and different implementation details; not mergeable as one fix." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44018", + "accept": false, + "reason": "Different models and separate decorator migrations; not the same concrete bug." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44129", + "accept": false, + "reason": "MPT and SpeechT5 are unrelated changes despite the common refactor pattern." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44161", + "accept": false, + "reason": "CVT/FNet versus LongT5 are different model code paths, so this is not a duplicate." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44017", + "accept": false, + "reason": "CVT/FNet and SegFormer are separate refactors across different model families." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44044", + "accept": false, + "reason": "SqueezeBert and DeBERTa-v2 are unrelated model-specific output changes." + }, + { + "left": "pull_request:44044", + "right": "pull_request:44074", + "accept": false, + "reason": "DeBERTa-v2 and TextNet do not share the same concrete implementation change." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44722", + "accept": true, + "reason": "Both target the GPT-J/CodeGen output-tracing refactor for #43979 and touch the same code paths." 
+ }, + { + "left": "pull_request:44085", + "right": "pull_request:44722", + "accept": true, + "reason": "Both are GPT-J output-tracing refactors on the same underlying code path, so they are plausible duplicates/merge candidates." + }, + { + "left": "pull_request:44085", + "right": "pull_request:44129", + "accept": false, + "reason": "GPT-J and SpeechT5 are different architectures and unrelated fixes." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44007", + "accept": false, + "reason": "CVT/FNet versus RegNet/ResNet/RT-DETR are different model families and separate refactors." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44129", + "accept": false, + "reason": "Different model families and different output paths; not the same change." + }, + { + "left": "pull_request:44044", + "right": "pull_request:44129", + "accept": false, + "reason": "DeBERTa-v2 and SpeechT5 do not share the same concrete bug or patch." + }, + { + "left": "pull_request:44044", + "right": "pull_request:44154", + "accept": false, + "reason": "DeBERTa-v2 and VITS are separate model-specific changes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 17, + "component_count": 24, + "review_unit_index": 4, + "review_unit_count": 4, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43997", + "pull_request:44010", + "pull_request:44024", + "pull_request:44056", + "pull_request:44073", + "pull_request:44074", + "pull_request:44076", + "pull_request:44129" + ], + "soft_pairs": [ + "pull_request:44024|pull_request:44076", + "pull_request:44073|pull_request:44074", + "pull_request:44056|pull_request:44073", + "pull_request:44056|pull_request:44074", + "pull_request:43997|pull_request:44010", + "pull_request:44010|pull_request:44129" + ], + "prepared_review_unit_hash": "5ea2a279e80ab312807e208b5c76a3c0dfba1175efe7b4d5cd80e05cc01ba1df", + "component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 77, + "serialized_chars": 84507, + "estimated_input_tokens": 21127, + "estimated_eval_tokens": 42510 + }, + "budget": { + "node_count": 8, + "item_count": 8, + "soft_pair_count": 6, + "serialized_chars": 17126, + "estimated_input_tokens": 4282, + "estimated_eval_tokens": 8820 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a set of model-specific refactors to standardized output tracing/capture_outputs across unrelated architectures, not duplicate fixes for the same underlying code path.", + "confidence": 0.86, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44073 is the clearest representative of the cluster: its title matches the migration theme, and the diff shows concrete output-tracing refactoring 
with capture_outputs/can_return_tuple plus model-specific hook mapping.", + "best_issue_reason": null, + "best_pr_reason": "PR 44073 is the best overall representative because it is clearly scoped, self-explanatory, and directly implements the standardized output-tracing migration in a concrete model file.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44024", + "right": "pull_request:44076", + "accept": false, + "reason": "Different models (FocalNet vs ImageGPT) and separate model-specific output-capture migrations; same umbrella pattern, but not the same concrete change." + }, + { + "left": "pull_request:44073", + "right": "pull_request:44074", + "accept": false, + "reason": "VisualBert and TextNet are different architectures with distinct implementation details; both refactor output tracing, but they are not the same code-path fix." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44073", + "accept": false, + "reason": "MPNet and VisualBert each add their own output-recording hooks and decorator usage; similar refactor theme, but no shared concrete bug or mergeable single PR." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44074", + "accept": false, + "reason": "MPNet vs TextNet are unrelated model implementations; the diffs are separate output-tracing refactors, not duplicates." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44010", + "accept": false, + "reason": "RegNet and SqueezeBert touch different model families and different output-handling code paths; same migration style, not the same change." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44129", + "accept": false, + "reason": "SqueezeBert and SpeechT5 are unrelated model code paths; despite both involving output tracing, they are distinct refactors and should not be merged as duplicates." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 18, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43998-13", + "nodes": [ + "pull_request:43998", + "pull_request:43999", + "pull_request:44000", + "pull_request:44001", + "pull_request:44002", + "pull_request:44003", + "pull_request:44004", + "pull_request:44025", + "pull_request:44026", + "pull_request:44027", + "pull_request:44028", + "pull_request:44029", + "pull_request:44030" + ], + "soft_pairs": [ + "pull_request:43998|pull_request:44003", + "pull_request:44001|pull_request:44003", + "pull_request:44002|pull_request:44003", + "pull_request:44000|pull_request:44003", + "pull_request:44003|pull_request:44004", + "pull_request:44000|pull_request:44004", + "pull_request:43999|pull_request:44001", + "pull_request:43999|pull_request:44002", + "pull_request:43999|pull_request:44025", + "pull_request:43999|pull_request:44026", + "pull_request:43999|pull_request:44027", + "pull_request:43998|pull_request:44002", + "pull_request:43998|pull_request:44000", + "pull_request:43998|pull_request:44001", + "pull_request:44026|pull_request:44029", + "pull_request:44029|pull_request:44030", + "pull_request:44025|pull_request:44026", + "pull_request:44025|pull_request:44027", + "pull_request:44025|pull_request:44028", + "pull_request:44026|pull_request:44027", + "pull_request:44026|pull_request:44028", + "pull_request:44027|pull_request:44028", + "pull_request:44001|pull_request:44002", + "pull_request:44000|pull_request:44001" + ], + "prepared_review_unit_hash": "6b42c5fd153c729b9730320dec6b8ac7cf0f10ebf71e14d4195ba269a94e2eee", + "component_budget": { + "node_count": 
13, + "item_count": 13, + "soft_pair_count": 42, + "serialized_chars": 40936, + "estimated_input_tokens": 10234, + "estimated_eval_tokens": 20724 + }, + "budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 24, + "serialized_chars": 32101, + "estimated_input_tokens": 8026, + "estimated_eval_tokens": 16308 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a set of similarly named but separate PRs that all refactor output tracing for different model families. They share a theme, but they are not the same underlying change and should not be deduplicated together.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": "No issue is present; as a representative PR, #44003 is the clearest anchor because it shows the common output-tracing refactor pattern across a related model family and has a broader, multi-file change than most others.", + "best_issue_reason": null, + "best_pr_reason": "#44003 is the best single PR representative for the cluster theme, but it is still only one model-family refactor rather than a superset of the others.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43998", + "right": "pull_request:44003", + "accept": false, + "reason": "Both are output-tracing refactors, but they touch different model code paths (`timm_backbone` vs `falcon_mamba`/`mamba`) and are not a single mergeable change." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44003", + "accept": false, + "reason": "Same broad refactor theme, but `univnet` and `mamba` are unrelated implementations with different forward signatures and helper usage." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44003", + "accept": false, + "reason": "`upernet` and `mamba` are different architectures; the similarity is only in the output-tracing refactor pattern, not the concrete bug/change." 
+ }, + { + "left": "pull_request:44000", + "right": "pull_request:44003", + "accept": false, + "reason": "These modify different model families (`vision_text_dual_encoder` vs `mamba`) and do not appear to fix the same code-path problem." + }, + { + "left": "pull_request:44003", + "right": "pull_request:44004", + "accept": false, + "reason": "`mamba`/`falcon_mamba` output tracing and `codegen` cache/output handling are distinct implementations, so this is not one combined change." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44004", + "accept": false, + "reason": "Both are refactors around model outputs, but they affect unrelated code paths (`vision_text_dual_encoder` vs `codegen`) and should stay separate." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44001", + "accept": false, + "reason": "Different model families (`mobilenet_v1` vs `univnet`) and different output-capture mechanics; not the same underlying fix." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44002", + "accept": false, + "reason": "`mobilenet_v1` and `upernet` are separate models with only a shared refactor style, not a shared concrete bug." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44025", + "accept": false, + "reason": "These are separate output-tracing updates in different models (`mobilenet_v1` vs `depth_anything`/`prompt_depth_anything`), not one PR-sized change." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44026", + "accept": false, + "reason": "`mobilenet_v1` and `vision_encoder_decoder` are unrelated code paths; the overlap is only in the refactoring theme." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44027", + "accept": false, + "reason": "`mobilenet_v1` and `speech_encoder_decoder` are distinct model implementations, so they are not duplicates." 
+ }, + { + "left": "pull_request:43998", + "right": "pull_request:44002", + "accept": false, + "reason": "`timm_backbone` and `upernet` both live in vision, but the patch targets different classes and forward/output plumbing, so they are not the same change." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44000", + "accept": false, + "reason": "`timm_backbone` and `vision_text_dual_encoder` have different model contracts and output paths; this is only a superficial similarity." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44001", + "accept": false, + "reason": "These are separate model refactors (`timm_backbone` vs `univnet`) with no shared concrete code-path bug." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44029", + "accept": false, + "reason": "`vision_encoder_decoder` and `rwkv` are unrelated architectures; the PRs do not look mergeable into one concrete fix." + }, + { + "left": "pull_request:44029", + "right": "pull_request:44030", + "accept": false, + "reason": "`rwkv` and `dpr` change different model internals and output signatures, so they are not the same underlying bug/change." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44026", + "accept": false, + "reason": "Both are output-tracing refactors, but they target different encoder/decoder and depth models; the code paths are separate." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44027", + "accept": false, + "reason": "`depth_anything` and `speech_encoder_decoder` are unrelated implementations, so this is not a duplicate change." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44028", + "accept": false, + "reason": "`depth_anything` and `superpoint` both update output handling, but they are distinct model families and not one underlying fix." 
+ }, + { + "left": "pull_request:44026", + "right": "pull_request:44027", + "accept": false, + "reason": "`vision_encoder_decoder` and `speech_encoder_decoder` are separate encoder-decoder stacks; same theme, different code paths." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44028", + "accept": false, + "reason": "`vision_encoder_decoder` and `superpoint` are unrelated models with different forward logic, so they should not be merged as duplicates." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44028", + "accept": false, + "reason": "`speech_encoder_decoder` and `superpoint` are different architectures with no shared concrete patch target." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44002", + "accept": false, + "reason": "`univnet` and `upernet` share only the refactor motif; their model internals and output contracts are different." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44001", + "accept": false, + "reason": "`vision_text_dual_encoder` and `univnet` are unrelated implementations and not the same bug fix." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 18, + "component_count": 24, + "review_unit_index": 2, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43998-13", + "nodes": [ + "pull_request:43998", + "pull_request:44000", + "pull_request:44001", + "pull_request:44002", + "pull_request:44004", + "pull_request:44025", + "pull_request:44026", + "pull_request:44027", + "pull_request:44028", + "pull_request:44029", + "pull_request:44030" + ], + "soft_pairs": [ + "pull_request:44000|pull_request:44002", + "pull_request:43998|pull_request:44004", + "pull_request:43998|pull_request:44029", + "pull_request:44002|pull_request:44029", + "pull_request:44001|pull_request:44004", + "pull_request:44002|pull_request:44004", + "pull_request:44025|pull_request:44029", + "pull_request:44025|pull_request:44030", + "pull_request:44026|pull_request:44030", + "pull_request:44027|pull_request:44029", + "pull_request:44027|pull_request:44030", + "pull_request:44028|pull_request:44029", + "pull_request:44028|pull_request:44030", + "pull_request:44001|pull_request:44025", + "pull_request:44001|pull_request:44026", + "pull_request:44002|pull_request:44025", + "pull_request:44000|pull_request:44025", + "pull_request:44000|pull_request:44026" + ], + "prepared_review_unit_hash": "d6e4727c76c17e9ac429ab7a9b4fed50a1888231983726aff88d4426ab4aba22", + "component_budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 42, + "serialized_chars": 40936, + "estimated_input_tokens": 10234, + "estimated_eval_tokens": 20724 + }, + "budget": { + "node_count": 11, + "item_count": 11, + "soft_pair_count": 18, + "serialized_chars": 26343, + "estimated_input_tokens": 6586, + 
"estimated_eval_tokens": 13428 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are separate model-specific output-tracing refactors with similar wording, but they do not appear to be duplicate fixes or mergeable into one PR. The shared theme is broad; the concrete code paths and files differ across models.", + "confidence": 0.92, + "canonical_issue_reason": null, + "canonical_pr_reason": "No single PR is a true duplicate target for the cluster: each PR updates a different model implementation and forward signature. If one must be chosen as representative, PR 44025 is the broadest example because it spans two related model files, but it is still not a canonical duplicate of the others.", + "best_issue_reason": null, + "best_pr_reason": "PR 44025 is the best representative PR in this set because it is a clean, self-contained output-tracing refactor affecting two closely related files. That said, the cluster is not a duplicate set, so this is only a representative choice, not a merge target.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44000", + "right": "pull_request:44002", + "accept": false, + "reason": "Same refactor theme, but different model families and different forward paths/files; not the same underlying change." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44004", + "accept": false, + "reason": "Both touch output-tracing logic, but they are unrelated model implementations with different APIs and code paths." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44029", + "accept": false, + "reason": "Different models and different implementation details; shared wording is too broad to treat as one change." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44029", + "accept": false, + "reason": "Different subsystems and files; this is a similar refactor pattern, not the same bug or fix." 
+ }, + { + "left": "pull_request:44001", + "right": "pull_request:44004", + "accept": false, + "reason": "Distinct models and forward signatures; they do not fix the same concrete code-path problem." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44004", + "accept": false, + "reason": "Only the high-level refactor theme matches; the actual code paths and model targets differ." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44029", + "accept": false, + "reason": "Unrelated models and files; no evidence they address the same underlying issue." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44030", + "accept": false, + "reason": "Different model implementations with different forward behavior; not plausibly one merged PR." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44030", + "accept": false, + "reason": "Different encoder-decoder and DPR code paths; this is not the same concrete fix." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44029", + "accept": false, + "reason": "Different model families and different output-tracing changes; too broad to merge as duplicates." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44030", + "accept": false, + "reason": "Shared refactor wording only; the affected code and behavior are different." + }, + { + "left": "pull_request:44028", + "right": "pull_request:44029", + "accept": false, + "reason": "Different model targets and implementation changes; not the same underlying change." + }, + { + "left": "pull_request:44028", + "right": "pull_request:44030", + "accept": false, + "reason": "These are separate model-specific refactors, not duplicate fixes." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44025", + "accept": false, + "reason": "Different architectures and files; the overlap is only the output-tracing refactor pattern." 
+ }, + { + "left": "pull_request:44001", + "right": "pull_request:44026", + "accept": false, + "reason": "No shared concrete bug or fix path; just similar API cleanup across different models." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44025", + "accept": false, + "reason": "Different model code paths and outputs; not the same change." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44025", + "accept": false, + "reason": "Different model families and different forward implementations; too dissimilar for duplicate merging." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44026", + "accept": false, + "reason": "Same style of refactor, but unrelated model targets and behavior." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 19, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44662-3", + "nodes": [ + "pull_request:44662", + "pull_request:45075", + "pull_request:45401" + ], + "soft_pairs": [ + "pull_request:44662|pull_request:45401", + "pull_request:44662|pull_request:45075", + "pull_request:45075|pull_request:45401" + ], + "prepared_review_unit_hash": "ab317a3058e004cc23c01a74311c78198d533eebb7988efc3933cd0e8336c847", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 11073, + "estimated_input_tokens": 2769, + "estimated_eval_tokens": 5794 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 11073, + "estimated_input_tokens": 2769, + "estimated_eval_tokens": 5794 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + 
"aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All three PRs are separate model-integrations with shared Hugging Face boilerplate, not duplicate bugfixes or the same concrete change. No pair should be merged as a duplicate.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #45075 is the strongest standalone representative: it is a complete model add, non-draft, and has the most review activity among the three, but it is not a duplicate of the others.", + "best_issue_reason": null, + "best_pr_reason": "PR #45075 is the best representative PR in this cluster because it is the most mature and fully-scoped integration, with broader implementation coverage and more review feedback.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44662", + "right": "pull_request:45401", + "accept": false, + "reason": "Different model integrations (PenguinVL vs Voxtral TTS). They share only generic auto-registration/docs scaffolding, not the same code-path or feature." + }, + { + "left": "pull_request:44662", + "right": "pull_request:45075", + "accept": false, + "reason": "Different model integrations (PenguinVL vs DeepSeek-OCR-2). Overlap is limited to standard model boilerplate and registry updates, so they are not the same change." + }, + { + "left": "pull_request:45075", + "right": "pull_request:45401", + "accept": false, + "reason": "Different models and different implementation targets (DeepSeek-OCR-2 vs Voxtral TTS). Shared framework files are expected for new model additions, but the concrete changes are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 20, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44979-2", + "nodes": [ + "pull_request:44979", + "pull_request:45363" + ], + "soft_pairs": [ + "pull_request:44979|pull_request:45363" + ], + "prepared_review_unit_hash": "247a8b461d08abde754a61f6383060cbf593f427f418fd093c4a7f6b5ac240a0", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4408, + "estimated_input_tokens": 1102, + "estimated_eval_tokens": 2460 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4408, + "estimated_input_tokens": 1102, + "estimated_eval_tokens": 2460 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These PRs are related to module/kernel fusion, but they are not the same concrete change: #44979 adds a new module fusion API and tests, while #45363 wires n-to-1 kernel fusion through `KernelConfig` and related integration points. 
They could land independently and are not a mergeable duplicate pair.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "#45363 is the more complete user-facing fusion feature, spanning `KernelConfig`, integration hooks, and an example; #44979 is a lower-level API foundation rather than the same end-to-end change.", + "best_issue_reason": null, + "best_pr_reason": "#45363 is the best representative of the cluster because it shows the concrete fusion workflow and broader integration surface, whereas #44979 is just the underlying API scaffold.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44979", + "right": "pull_request:45363", + "accept": false, + "reason": "Different layers of the fusion stack: one introduces the module fusion API, the other integrates n-to-1 kernel fusion via `KernelConfig`. Related, but not the same bug/fix or a single plausible merged PR." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 21, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45086-2", + "nodes": [ + "pull_request:45086", + "pull_request:45317" + ], + "soft_pairs": [ + "pull_request:45086|pull_request:45317" + ], + "prepared_review_unit_hash": "7d7c419fbad4f8ceeeae1af97c511ade8eb19d28975c021389c0640e201e0456", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4414, + "estimated_input_tokens": 1104, + "estimated_eval_tokens": 2464 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4414, + "estimated_input_tokens": 1104, + "estimated_eval_tokens": 2464 + }, + 
"overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The two pull requests are near-identical fixes for the same Mistral regex AttributeError in `tokenization_utils_tokenizers.py`, both targeting issue #45081. They patch the same code path and differ mainly in added tests and PR polish, so they should be clustered together.", + "confidence": 0.99, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #45317 is the better representative: it contains the same core fix as #45086 plus test coverage and more review activity, making it the more complete implementation of the same bug fix.", + "best_issue_reason": null, + "best_pr_reason": "PR #45317 is the strongest cluster member for the underlying change because it implements the same fix with additional validation and appears to be the more mature version of the patch.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45086", + "right": "pull_request:45317", + "accept": true, + "reason": "Same bug, same file, same issue target, and effectively the same code change fixing `_patch_mistral_regex`; the added tests in #45317 do not change the underlying fix." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 22, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45332-2", + "nodes": [ + "pull_request:45332", + "pull_request:45333" + ], + "soft_pairs": [ + "pull_request:45332|pull_request:45333" + ], + "prepared_review_unit_hash": "3e4e003158cf9f75f31d454f0830428b3e67187aa75e017ffc7d3f0ae0bc9724", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5398, + "estimated_input_tokens": 1350, + "estimated_eval_tokens": 2956 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5398, + "estimated_input_tokens": 1350, + "estimated_eval_tokens": 2956 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two related open PRs for heterogeneous model support, but they split different layers of the feature: one adds per-layer config infrastructure, the other adds broader modeling/cache/masking support. 
They share files and theme, but they do not look like the same concrete change.", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45332 is the better canonical representative because it is the broader feature PR and appears to encompass the downstream modeling work, while 45333 is a narrower config-only slice.", + "best_issue_reason": null, + "best_pr_reason": "PR 45332 is the strongest single representative of the cluster: it includes the most complete implementation surface (config, cache, masking, modeling, tests) and is closer to the overall heterogeneous support effort.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45332", + "right": "pull_request:45333", + "accept": false, + "reason": "Related feature work, but not the same underlying change. 45333 adds per-layer heterogeneous config support; 45332 adds heterogeneous model/runtime support and only partially overlaps via shared config infrastructure. They could be separate PRs and do not look merge-equivalent." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 23, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45415-2", + "nodes": [ + "pull_request:45415", + "pull_request:45425" + ], + "soft_pairs": [ + "pull_request:45415|pull_request:45425" + ], + "prepared_review_unit_hash": "6e4909aaaea00bafd711cf7883af51cd69ffd6dcfad919d7d6544a45101d8fa7", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5304, + "estimated_input_tokens": 1326, + "estimated_eval_tokens": 2908 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5304, + "estimated_input_tokens": 1326, + "estimated_eval_tokens": 2908 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both items are typing-related pull requests, but they address different changes: PR 45415 is a broad repo-wide type-checking sweep plus pyproject ignore tuning, while PR 45425 adds specific typing protocols/aliases to support modeling_utils and PEFT integration. They are related in theme, not the same underlying change.", + "confidence": 0.92, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45425 is the more self-contained and coherent typing change, centered on `_typing.py` and specific downstream annotations/support. 
PR 45415 is a much broader type-checking sweep across many files plus lint config changes, so it is less clean as a canonical representative.", + "best_issue_reason": null, + "best_pr_reason": "PR 45425 best represents the cluster because it has a focused code change with a clear purpose, whereas PR 45415 is a large cross-cutting typing cleanup that is not the same concrete fix/change.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45415", + "right": "pull_request:45425", + "accept": false, + "reason": "They are not the same underlying bug or change. 45415 is a broad typing/configuration sweep across many files, while 45425 adds specific typing abstractions for modeling_utils/PEFT; they would not plausibly be merged into one PR as a single fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 24, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45470-2", + "nodes": [ + "pull_request:45470", + "pull_request:45487" + ], + "soft_pairs": [ + "pull_request:45470|pull_request:45487" + ], + "prepared_review_unit_hash": "143e61e685e46b05b7f94a899a20fadbe95580e2e71f5c82773eae430c6827eb", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4769, + "estimated_input_tokens": 1193, + "estimated_eval_tokens": 2642 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4769, + "estimated_input_tokens": 1193, + "estimated_eval_tokens": 2642 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two PRs are 
unrelated: one is a test skip for Sam3 Lite Text flash-attn dispatch behavior, while the other fixes model-parallel/device handling in AltCLIP/ChineseClip and related text models.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #45487 is the better canonical PR for the cluster because it contains the substantive code fix and a clear bug target (model parallel/device placement) across implementation files, whereas #45470 is only a test skip.", + "best_issue_reason": null, + "best_pr_reason": "PR #45487 is the strongest representative because it addresses a concrete runtime bug with actual model code changes; #45470 is a narrow test adjustment and not a comparable fix.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45470", + "right": "pull_request:45487", + "accept": false, + "reason": "Different underlying problems and code paths: #45470 skips a flash-attn test for Sam3 Lite Text, while #45487 fixes token-type/device handling for AltCLIP/ChineseClip model parallelism." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + } + ] +} diff --git a/analysis/current/manifest.json b/analysis/current/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..5f72f7df618784daeb601592395ea9448c1c1ff3 --- /dev/null +++ b/analysis/current/manifest.json @@ -0,0 +1,18 @@ +{ + "analysis_id": "hybrid-gpt54mini-20260418t150720z", + "archived_artifacts": { + "hybrid": "snapshots/20260418T150536Z/analysis-runs/hybrid-gpt54mini-20260418t150720z/analysis-report-hybrid.json", + "hybrid_reviews": "snapshots/20260418T150536Z/analysis-runs/hybrid-gpt54mini-20260418t150720z/analysis-report-hybrid.llm-reviews.json" + }, + "artifacts": { + "hybrid": "analysis/current/analysis-report-hybrid.json", + "hybrid_reviews": "analysis/current/analysis-report-hybrid.llm-reviews.json" + }, + "channel": "canonical", + "model": null, + "published_at": "2026-04-18T15:42:46Z", + "repo": "huggingface/transformers", + "schema_version": 1, + "snapshot_id": "20260418T150536Z", + "variant": "hybrid" +} diff --git a/snapshots/20260418T150536Z/analysis-runs/hybrid-gpt54mini-20260418t150720z/analysis-report-hybrid.json b/snapshots/20260418T150536Z/analysis-runs/hybrid-gpt54mini-20260418t150720z/analysis-report-hybrid.json new file mode 100644 index 0000000000000000000000000000000000000000..71f10390197d6b7b3e60ff6748e7abfb366ebf9a --- /dev/null +++ b/snapshots/20260418T150536Z/analysis-runs/hybrid-gpt54mini-20260418t150720z/analysis-report-hybrid.json @@ -0,0 +1,2002 @@ +{ + "schema_version": "1.0", + "repo": "huggingface/transformers", + "snapshot_id": "20260418T150536Z", + "generated_at": "2026-04-18T15:42:30Z", + "evidence_quality": "full", + "llm_enrichment": true, + "meta_bugs": [ + { + "cluster_id": "cluster-43979-11", + "summary": "Cluster of 1 issues and 10 PRs centered on issue #43979.", + "status": "open", + "confidence": 
0.8, + "canonical_issue_number": 43979, + "canonical_pr_number": 44007, + "issue_numbers": [ + 43979 + ], + "pr_numbers": [ + 43996, + 44007, + 44013, + 44044, + 44066, + 44072, + 44085, + 44129, + 44154, + 44722 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 43996, + "right_pr_number": 44007, + "code_similarity": 0.179, + "size_similarity": 0.576, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.429, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44013, + "code_similarity": 0.122, + "size_similarity": 0.318, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.392, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44044, + "code_similarity": 0.245, + "size_similarity": 0.864, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.479, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44066, + "code_similarity": 0.225, + "size_similarity": 0.818, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.408, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44072, + "code_similarity": 0.14, + "size_similarity": 0.303, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.528, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44085, + "code_similarity": 0.216, + "size_similarity": 0.783, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.398, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44129, + "code_similarity": 0.163, + "size_similarity": 0.643, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 
0.229, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44154, + "code_similarity": 0.153, + "size_similarity": 0.535, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.31, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 43996, + "right_pr_number": 44722, + "code_similarity": 0.225, + "size_similarity": 0.848, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.368, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44013, + "code_similarity": 0.19, + "size_similarity": 0.553, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.531, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44044, + "code_similarity": 0.186, + "size_similarity": 0.667, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.354, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44066, + "code_similarity": 0.188, + "size_similarity": 0.704, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.315, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44072, + "code_similarity": 0.212, + "size_similarity": 0.526, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.708, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44085, + "code_similarity": 0.195, + "size_similarity": 0.735, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.318, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44129, + "code_similarity": 0.103, + "size_similarity": 0.37, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.191, + 
"shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44154, + "code_similarity": 0.238, + "size_similarity": 0.93, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.344, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44007, + "right_pr_number": 44722, + "code_similarity": 0.178, + "size_similarity": 0.679, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.28, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44044, + "code_similarity": 0.126, + "size_similarity": 0.368, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.351, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44066, + "code_similarity": 0.127, + "size_similarity": 0.389, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.325, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44072, + "code_similarity": 0.29, + "size_similarity": 0.952, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.667, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44085, + "code_similarity": 0.131, + "size_similarity": 0.406, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.329, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44129, + "code_similarity": 0.07, + "size_similarity": 0.205, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.192, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44154, + "code_similarity": 0.177, + "size_similarity": 0.594, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.389, + "shared_filenames": [], + 
"shared_file_areas": [] + }, + { + "left_pr_number": 44013, + "right_pr_number": 44722, + "code_similarity": 0.118, + "size_similarity": 0.375, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.287, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44066, + "code_similarity": 0.25, + "size_similarity": 0.947, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.404, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44072, + "code_similarity": 0.136, + "size_similarity": 0.351, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.442, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44085, + "code_similarity": 0.24, + "size_similarity": 0.906, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.394, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44129, + "code_similarity": 0.147, + "size_similarity": 0.555, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.243, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44154, + "code_similarity": 0.17, + "size_similarity": 0.62, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.306, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44044, + "right_pr_number": 44722, + "code_similarity": 0.257, + "size_similarity": 0.982, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.402, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44072, + "code_similarity": 0.133, + "size_similarity": 0.37, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.393, + "shared_filenames": [], + "shared_file_areas": [] + }, 
+ { + "left_pr_number": 44066, + "right_pr_number": 44085, + "code_similarity": 0.763, + "size_similarity": 0.957, + "file_overlap": 0.5, + "area_overlap": 0.825, + "patch_similarity": 0.887, + "shared_filenames": [ + "src/transformers/models/gptj/modeling_gptj.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/gptj/modeling_gptj.py", + "left_ranges": [ + [ + 33, + 41 + ], + [ + 174, + 181 + ], + [ + 250, + 257 + ], + [ + 398, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 466 + ], + [ + 471, + 488 + ], + [ + 518, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 606 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ], + "right_ranges": [ + [ + 33, + 40 + ], + [ + 173, + 180 + ], + [ + 249, + 256 + ], + [ + 397, + 405 + ], + [ + 408, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 465 + ], + [ + 470, + 489 + ], + [ + 517, + 539 + ], + [ + 553, + 559 + ], + [ + 565, + 574 + ], + [ + 579, + 597 + ], + [ + 600, + 611 + ], + [ + 633, + 639 + ], + [ + 645, + 652 + ], + [ + 657, + 673 + ], + [ + 716, + 728 + ], + [ + 737, + 743 + ], + [ + 748, + 771 + ], + [ + 789, + 794 + ] + ] + } + ] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44129, + "code_similarity": 0.145, + "size_similarity": 0.526, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.263, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44154, + "code_similarity": 0.174, + "size_similarity": 0.654, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.286, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44066, + "right_pr_number": 44722, + "code_similarity": 0.907, + "size_similarity": 0.964, + "file_overlap": 1.0, + "area_overlap": 0.808, + "patch_similarity": 0.874, + "shared_filenames": [ + 
"src/transformers/models/codegen/modeling_codegen.py", + "src/transformers/models/gptj/modeling_gptj.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/codegen/modeling_codegen.py", + "left_ranges": [ + [ + 245, + 266 + ] + ], + "right_ranges": [ + [ + 228, + 234 + ] + ] + }, + { + "filename": "src/transformers/models/gptj/modeling_gptj.py", + "left_ranges": [ + [ + 33, + 41 + ], + [ + 174, + 181 + ], + [ + 250, + 257 + ], + [ + 398, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 466 + ], + [ + 471, + 488 + ], + [ + 518, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 606 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ], + "right_ranges": [ + [ + 33, + 42 + ], + [ + 175, + 182 + ], + [ + 251, + 258 + ], + [ + 399, + 420 + ], + [ + 426, + 435 + ], + [ + 460, + 467 + ], + [ + 472, + 489 + ], + [ + 519, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 612 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 663 + ], + [ + 666, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ] + } + ] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44085, + "code_similarity": 0.137, + "size_similarity": 0.387, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.398, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44129, + "code_similarity": 0.074, + "size_similarity": 0.195, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.231, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44154, + "code_similarity": 0.175, + "size_similarity": 0.566, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.414, + "shared_filenames": [], + 
"shared_file_areas": [] + }, + { + "left_pr_number": 44072, + "right_pr_number": 44722, + "code_similarity": 0.124, + "size_similarity": 0.357, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.347, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44085, + "right_pr_number": 44129, + "code_similarity": 0.141, + "size_similarity": 0.503, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.272, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44085, + "right_pr_number": 44154, + "code_similarity": 0.18, + "size_similarity": 0.684, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.289, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44085, + "right_pr_number": 44722, + "code_similarity": 0.728, + "size_similarity": 0.923, + "file_overlap": 0.5, + "area_overlap": 0.791, + "patch_similarity": 0.78, + "shared_filenames": [ + "src/transformers/models/gptj/modeling_gptj.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/gptj/modeling_gptj.py", + "left_ranges": [ + [ + 33, + 40 + ], + [ + 173, + 180 + ], + [ + 249, + 256 + ], + [ + 397, + 405 + ], + [ + 408, + 419 + ], + [ + 425, + 434 + ], + [ + 459, + 465 + ], + [ + 470, + 489 + ], + [ + 517, + 539 + ], + [ + 553, + 559 + ], + [ + 565, + 574 + ], + [ + 579, + 597 + ], + [ + 600, + 611 + ], + [ + 633, + 639 + ], + [ + 645, + 652 + ], + [ + 657, + 673 + ], + [ + 716, + 728 + ], + [ + 737, + 743 + ], + [ + 748, + 771 + ], + [ + 789, + 794 + ] + ], + "right_ranges": [ + [ + 33, + 42 + ], + [ + 175, + 182 + ], + [ + 251, + 258 + ], + [ + 399, + 420 + ], + [ + 426, + 435 + ], + [ + 460, + 467 + ], + [ + 472, + 489 + ], + [ + 519, + 540 + ], + [ + 554, + 560 + ], + [ + 566, + 575 + ], + [ + 580, + 598 + ], + [ + 601, + 612 + ], + [ + 634, + 640 + ], + [ + 646, + 653 + ], + [ + 658, + 663 + ], + [ + 666, + 674 + ], + [ + 717, + 722 + ], + [ + 738, + 
744 + ], + [ + 749, + 772 + ], + [ + 790, + 795 + ] + ] + } + ] + }, + { + "left_pr_number": 44129, + "right_pr_number": 44154, + "code_similarity": 0.099, + "size_similarity": 0.344, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.199, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44129, + "right_pr_number": 44722, + "code_similarity": 0.146, + "size_similarity": 0.545, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.247, + "shared_filenames": [], + "shared_file_areas": [] + }, + { + "left_pr_number": 44154, + "right_pr_number": 44722, + "code_similarity": 0.164, + "size_similarity": 0.631, + "file_overlap": 0.0, + "area_overlap": 0.0, + "patch_similarity": 0.255, + "shared_filenames": [], + "shared_file_areas": [] + } + ] + }, + { + "cluster_id": "cluster-41211-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #41211.", + "status": "open", + "confidence": 0.75, + "canonical_issue_number": 41211, + "canonical_pr_number": 44339, + "issue_numbers": [ + 41211 + ], + "pr_numbers": [ + 41356, + 44339 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target" + ], + "pr_comparisons": [ + { + "left_pr_number": 41356, + "right_pr_number": 44339, + "code_similarity": 0.164, + "size_similarity": 0.096, + "file_overlap": 0.3, + "area_overlap": 0.078, + "patch_similarity": 0.182, + "shared_filenames": [ + "docs/source/en/model_doc/deimv2.md", + "src/transformers/models/deimv2/__init__.py", + "src/transformers/models/deimv2/configuration_deimv2.py", + "src/transformers/models/deimv2/modeling_deimv2.py", + "tests/models/deimv2/__init__.py", + "tests/models/deimv2/test_modeling_deimv2.py" + ], + "shared_file_areas": [ + { + "filename": "docs/source/en/model_doc/deimv2.md", + "left_ranges": [ + [ + 1, + 132 + ] + ], + "right_ranges": [ + [ + 1, + 65 + ] + ] + }, + { + "filename": "src/transformers/models/deimv2/__init__.py", + "left_ranges": [ + [ + 1, + 15 + ] + ], + 
"right_ranges": [ + [ + 1, + 29 + ] + ] + }, + { + "filename": "src/transformers/models/deimv2/configuration_deimv2.py", + "left_ranges": [ + [ + 1, + 74 + ] + ], + "right_ranges": [ + [ + 1, + 266 + ] + ] + }, + { + "filename": "tests/models/deimv2/test_modeling_deimv2.py", + "left_ranges": [ + [ + 1, + 15 + ] + ], + "right_ranges": [ + [ + 1, + 1753 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-36246-4", + "summary": "Cluster of 2 issues and 2 PRs centered on issue #43824.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 43824, + "canonical_pr_number": 43836, + "issue_numbers": [ + 36246, + 43824 + ], + "pr_numbers": [ + 43836, + 43842 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 43836, + "right_pr_number": 43842, + "code_similarity": 0.332, + "size_similarity": 0.088, + "file_overlap": 1.0, + "area_overlap": 0.033, + "patch_similarity": 0.017, + "shared_filenames": [ + "src/transformers/cli/serve.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/cli/serve.py", + "left_ranges": [ + [ + 11, + 18 + ], + [ + 359, + 429 + ], + [ + 584, + 590 + ], + [ + 1892, + 1910 + ], + [ + 1917, + 1923 + ] + ], + "right_ranges": [ + [ + 54, + 61 + ], + [ + 587, + 593 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-43366-4", + "summary": "Cluster of 1 issues and 3 PRs centered on issue #43366.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 43366, + "canonical_pr_number": 43757, + "issue_numbers": [ + 43366 + ], + "pr_numbers": [ + 43757, + 45500, + 45506 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 43757, + "right_pr_number": 45500, + "code_similarity": 0.096, + "size_similarity": 0.149, + "file_overlap": 0.2, + "area_overlap": 0.005, + "patch_similarity": 0.031, + "shared_filenames": [ + 
"src/transformers/modeling_gguf_pytorch_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/modeling_gguf_pytorch_utils.py", + "left_ranges": [ + [ + 462, + 474 + ] + ], + "right_ranges": [ + [ + 171, + 277 + ], + [ + 456, + 462 + ], + [ + 518, + 525 + ], + [ + 620, + 627 + ], + [ + 709, + 753 + ] + ] + } + ] + }, + { + "left_pr_number": 43757, + "right_pr_number": 45506, + "code_similarity": 0.096, + "size_similarity": 0.149, + "file_overlap": 0.2, + "area_overlap": 0.005, + "patch_similarity": 0.031, + "shared_filenames": [ + "src/transformers/modeling_gguf_pytorch_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/modeling_gguf_pytorch_utils.py", + "left_ranges": [ + [ + 462, + 474 + ] + ], + "right_ranges": [ + [ + 171, + 277 + ], + [ + 456, + 462 + ], + [ + 518, + 525 + ], + [ + 620, + 627 + ], + [ + 709, + 753 + ] + ] + } + ] + }, + { + "left_pr_number": 45500, + "right_pr_number": 45506, + "code_similarity": 1.0, + "size_similarity": 1.0, + "file_overlap": 1.0, + "area_overlap": 1.0, + "patch_similarity": 1.0, + "shared_filenames": [ + "docs/source/en/model_doc/gpt_oss.md", + "src/transformers/integrations/ggml.py", + "src/transformers/modeling_gguf_pytorch_utils.py", + "tests/quantization/ggml/test_ggml.py" + ], + "shared_file_areas": [ + { + "filename": "docs/source/en/model_doc/gpt_oss.md", + "left_ranges": [ + [ + 70, + 93 + ] + ], + "right_ranges": [ + [ + 70, + 93 + ] + ] + }, + { + "filename": "src/transformers/integrations/ggml.py", + "left_ranges": [ + [ + 89, + 109 + ] + ], + "right_ranges": [ + [ + 89, + 109 + ] + ] + }, + { + "filename": "src/transformers/modeling_gguf_pytorch_utils.py", + "left_ranges": [ + [ + 171, + 277 + ], + [ + 456, + 462 + ], + [ + 518, + 525 + ], + [ + 620, + 627 + ], + [ + 709, + 753 + ] + ], + "right_ranges": [ + [ + 171, + 277 + ], + [ + 456, + 462 + ], + [ + 518, + 525 + ], + [ + 620, + 627 + ], + [ + 709, + 753 + ] + ] + }, + { + "filename": 
"tests/quantization/ggml/test_ggml.py", + "left_ranges": [ + [ + 351, + 358 + ], + [ + 386, + 406 + ] + ], + "right_ranges": [ + [ + 351, + 358 + ], + [ + 386, + 406 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-43240-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #43240.", + "status": "open", + "confidence": 0.75, + "canonical_issue_number": 43240, + "canonical_pr_number": 43251, + "issue_numbers": [ + 43240 + ], + "pr_numbers": [ + 43251, + 43254 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target" + ], + "pr_comparisons": [ + { + "left_pr_number": 43251, + "right_pr_number": 43254, + "code_similarity": 0.794, + "size_similarity": 0.64, + "file_overlap": 1.0, + "area_overlap": 0.667, + "patch_similarity": 0.882, + "shared_filenames": [ + "src/transformers/loss/loss_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/loss/loss_utils.py", + "left_ranges": [ + [ + 30, + 50 + ] + ], + "right_ranges": [ + [ + 30, + 43 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-45081-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #45081.", + "status": "open", + "confidence": 0.8, + "canonical_issue_number": 45081, + "canonical_pr_number": 45317, + "issue_numbers": [ + 45081 + ], + "pr_numbers": [ + 45086, + 45317 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target", + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 45086, + "right_pr_number": 45317, + "code_similarity": 0.589, + "size_similarity": 0.34, + "file_overlap": 0.5, + "area_overlap": 1.0, + "patch_similarity": 0.136, + "shared_filenames": [ + "src/transformers/tokenization_utils_tokenizers.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/tokenization_utils_tokenizers.py", + "left_ranges": [ + [ + 1360, + 1370 + ], + [ + 1374, + 1380 + ] + ], + "right_ranges": [ + [ + 1360, + 1370 + ], + [ + 1374, + 1380 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": 
"cluster-43698-3", + "summary": "Cluster of 1 issues and 2 PRs centered on issue #43698.", + "status": "open", + "confidence": 0.75, + "canonical_issue_number": 43698, + "canonical_pr_number": 43779, + "issue_numbers": [ + 43698 + ], + "pr_numbers": [ + 43779, + 43816 + ], + "evidence_types": [ + "closing_reference", + "shared_issue_target" + ], + "pr_comparisons": [ + { + "left_pr_number": 43779, + "right_pr_number": 43816, + "code_similarity": 0.418, + "size_similarity": 0.538, + "file_overlap": 1.0, + "area_overlap": 0.02, + "patch_similarity": 0.02, + "shared_filenames": [ + "src/transformers/integrations/integration_utils.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/integrations/integration_utils.py", + "left_ranges": [ + [ + 2227, + 2241 + ], + [ + 2303, + 2309 + ] + ], + "right_ranges": [ + [ + 2278, + 2291 + ], + [ + 2309, + 2322 + ] + ] + } + ] + } + ] + }, + { + "cluster_id": "cluster-44018-2", + "summary": "Cluster of 2 related pull requests linked by soft_similarity.", + "status": "open", + "confidence": 0.5, + "canonical_issue_number": null, + "canonical_pr_number": 44068, + "issue_numbers": [], + "pr_numbers": [ + 44018, + 44068 + ], + "evidence_types": [ + "soft_similarity" + ], + "pr_comparisons": [ + { + "left_pr_number": 44018, + "right_pr_number": 44068, + "code_similarity": 0.766, + "size_similarity": 0.939, + "file_overlap": 1.0, + "area_overlap": 0.425, + "patch_similarity": 0.866, + "shared_filenames": [ + "src/transformers/models/gpt_neo/modeling_gpt_neo.py", + "tests/models/gpt_neo/test_modeling_gpt_neo.py" + ], + "shared_file_areas": [ + { + "filename": "src/transformers/models/gpt_neo/modeling_gpt_neo.py", + "left_ranges": [ + [ + 26, + 46 + ], + [ + 138, + 143 + ], + [ + 182, + 187 + ], + [ + 283, + 295 + ], + [ + 327, + 341 + ], + [ + 348, + 354 + ], + [ + 360, + 369 + ], + [ + 399, + 406 + ], + [ + 411, + 419 + ], + [ + 428, + 433 + ], + [ + 472, + 492 + ], + [ + 507, + 513 + ], + [ + 519, + 528 + ], + [ + 541, 
+ 559 + ], + [ + 562, + 567 + ], + [ + 595, + 601 + ], + [ + 607, + 614 + ], + [ + 627, + 643 + ], + [ + 685, + 690 + ], + [ + 708, + 714 + ], + [ + 720, + 727 + ], + [ + 740, + 757 + ], + [ + 761, + 766 + ], + [ + 780, + 786 + ], + [ + 791, + 798 + ], + [ + 807, + 822 + ], + [ + 840, + 845 + ] + ], + "right_ranges": [ + [ + 26, + 31 + ], + [ + 34, + 44 + ], + [ + 136, + 141 + ], + [ + 180, + 185 + ], + [ + 281, + 293 + ], + [ + 325, + 339 + ], + [ + 346, + 352 + ], + [ + 358, + 367 + ], + [ + 397, + 404 + ], + [ + 409, + 417 + ], + [ + 426, + 434 + ], + [ + 463, + 483 + ], + [ + 498, + 504 + ], + [ + 510, + 519 + ], + [ + 532, + 550 + ], + [ + 553, + 564 + ], + [ + 586, + 592 + ], + [ + 598, + 605 + ], + [ + 618, + 634 + ], + [ + 676, + 688 + ], + [ + 699, + 705 + ], + [ + 711, + 718 + ], + [ + 731, + 748 + ], + [ + 752, + 762 + ], + [ + 771, + 777 + ], + [ + 782, + 789 + ], + [ + 798, + 813 + ], + [ + 831, + 836 + ] + ] + }, + { + "filename": "tests/models/gpt_neo/test_modeling_gpt_neo.py", + "left_ranges": [ + [ + 458, + 464 + ] + ], + "right_ranges": [ + [ + 458, + 464 + ] + ] + } + ] + } + ] + } + ], + "duplicate_issues": [ + { + "cluster_id": "cluster-36246-4", + "canonical_issue_number": 43824, + "duplicate_issue_numbers": [ + 36246 + ], + "reason": "Issues in cluster-36246-4 are treated as duplicates because they share closing_reference, shared_issue_target, soft_similarity evidence." + } + ], + "duplicate_prs": [ + { + "cluster_id": "cluster-36246-4", + "canonical_pr_number": 43836, + "duplicate_pr_numbers": [ + 43842 + ], + "target_issue_number": 43824, + "reason": "PRs in cluster-36246-4 are treated as duplicates because they converge on issue #43824 with closing_reference, shared_issue_target, soft_similarity evidence." 
+ }, + { + "cluster_id": "cluster-41211-3", + "canonical_pr_number": 44339, + "duplicate_pr_numbers": [ + 41356 + ], + "target_issue_number": 41211, + "reason": "PRs in cluster-41211-3 are treated as duplicates because they converge on issue #41211 with closing_reference, shared_issue_target evidence." + }, + { + "cluster_id": "cluster-43240-3", + "canonical_pr_number": 43251, + "duplicate_pr_numbers": [ + 43254 + ], + "target_issue_number": 43240, + "reason": "PRs in cluster-43240-3 are treated as duplicates because they converge on issue #43240 with closing_reference, shared_issue_target evidence." + }, + { + "cluster_id": "cluster-43366-4", + "canonical_pr_number": 43757, + "duplicate_pr_numbers": [ + 45500, + 45506 + ], + "target_issue_number": 43366, + "reason": "PRs in cluster-43366-4 are treated as duplicates because they converge on issue #43366 with closing_reference, shared_issue_target, soft_similarity evidence." + }, + { + "cluster_id": "cluster-43698-3", + "canonical_pr_number": 43779, + "duplicate_pr_numbers": [ + 43816 + ], + "target_issue_number": 43698, + "reason": "PRs in cluster-43698-3 are treated as duplicates because they converge on issue #43698 with closing_reference, shared_issue_target evidence." + }, + { + "cluster_id": "cluster-43979-11", + "canonical_pr_number": 44007, + "duplicate_pr_numbers": [ + 43996, + 44013, + 44044, + 44066, + 44072, + 44085, + 44129, + 44154, + 44722 + ], + "target_issue_number": 43979, + "reason": "PRs in cluster-43979-11 are treated as duplicates because they converge on issue #43979 with closing_reference, shared_issue_target, soft_similarity evidence." + }, + { + "cluster_id": "cluster-44018-2", + "canonical_pr_number": 44068, + "duplicate_pr_numbers": [ + 44018 + ], + "target_issue_number": null, + "reason": "PRs in cluster-44018-2 are treated as duplicates because they share soft_similarity evidence." 
+ }, + { + "cluster_id": "cluster-45081-3", + "canonical_pr_number": 45317, + "duplicate_pr_numbers": [ + 45086 + ], + "target_issue_number": 45081, + "reason": "PRs in cluster-45081-3 are treated as duplicates because they converge on issue #45081 with closing_reference, shared_issue_target, soft_similarity evidence." + } + ], + "best_issue": { + "cluster_id": "cluster-43366-4", + "issue_number": 43366, + "reason": "Issue #43366 is the strongest global issue candidate because it is open, belongs to a cluster with 4 artifacts, and carries 5 discussion comments plus 7 inbound references.", + "score": 157.0 + }, + "best_pr": { + "cluster_id": "cluster-43979-11", + "pr_number": 44007, + "reason": "PR #44007 is the strongest global PR candidate because it is open, belongs to a cluster with 11 artifacts, links to 1 issues, and carries 2 review/discussion events.", + "score": 238.886 + } +} diff --git a/snapshots/20260418T150536Z/analysis-runs/hybrid-gpt54mini-20260418t150720z/analysis-report-hybrid.llm-reviews.json b/snapshots/20260418T150536Z/analysis-runs/hybrid-gpt54mini-20260418t150720z/analysis-report-hybrid.llm-reviews.json new file mode 100644 index 0000000000000000000000000000000000000000..ad1972b821b55694c7847d20c20d71fecc98274b --- /dev/null +++ b/snapshots/20260418T150536Z/analysis-runs/hybrid-gpt54mini-20260418t150720z/analysis-report-hybrid.llm-reviews.json @@ -0,0 +1,23066 @@ +{ + "schema_version": "1.0", + "repo": "huggingface/transformers", + "snapshot_id": "20260418T150536Z", + "generated_at": "2026-04-18T15:42:30Z", + "model": "gpt-5.4-mini?service_tier=flex", + "reviews": [ + { + "label": "issue", + "component_index": 1, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-13244-3", + "nodes": [ + "issue:13244", + "issue:15980", + "issue:16618" + ], + "soft_pairs": [ + 
"issue:13244|issue:16618", + "issue:15980|issue:16618" + ], + "prepared_review_unit_hash": "dcd9f595a958e37b4890a6765530239dc1f6801a44b3ae39d5bee95b8f597333", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 2479, + "estimated_input_tokens": 620, + "estimated_eval_tokens": 1496 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 2479, + "estimated_input_tokens": 620, + "estimated_eval_tokens": 1496 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These issues are not duplicates: one is about Tapas tokenization vs TensorFlow behavior, one is about a misleading private-model auth error, and one is a generic model-loading failure for bert-base-uncased. The only plausible cluster center is the broad model-loading report (#16618), but the soft links should be rejected.", + "confidence": 0.79, + "canonical_issue_reason": "#16618 is the broadest and most central of the three, focused on model loading, while the other two are narrower and clearly different problems.", + "canonical_pr_reason": null, + "best_issue_reason": "#16618 is the best representative for this cluster because it is the most general issue title and the only one plausibly related to the soft-similarity links.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:13244", + "right": "issue:16618", + "accept": false, + "reason": "Tapas tokenization mismatch is a different code path and symptom than failing to load bert-base-uncased; they are not the same underlying bug." + }, + { + "left": "issue:15980", + "right": "issue:16618", + "accept": false, + "reason": "A bad error when downloading a private model without login is an authentication/error-reporting issue, not the same as a generic model-loading failure." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:30277", + "issue:35545", + "issue:36010", + "issue:41720", + "issue:42915", + "issue:43596", + "issue:43638", + "issue:43716", + "issue:43828", + "issue:44292", + "issue:44322", + "issue:44509", + "issue:44512", + "issue:44805", + "issue:44918", + "issue:45161", + "issue:45237", + "issue:45464" + ], + "soft_pairs": [ + "issue:44322|issue:45464", + "issue:43716|issue:45237", + "issue:43828|issue:45237", + "issue:44292|issue:45237", + "issue:30277|issue:35545", + "issue:35545|issue:42915", + "issue:35545|issue:41720", + "issue:44509|issue:44512", + "issue:43638|issue:44805", + "issue:35545|issue:36010", + "issue:43596|issue:44805", + "issue:44805|issue:45161", + "issue:44805|issue:44918" + ], + "prepared_review_unit_hash": "56f2c4dcf5377f5ae1f4ecea8ce3c641dc1271d3fa3d5658198cfd0f2f793b44", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 13, + "serialized_chars": 14253, + "estimated_input_tokens": 3564, + "estimated_eval_tokens": 7384 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous. The only near-duplicate pair is the v5 docs cleanup pair, but even that is not the same concrete change. 
Most other soft links are model-specific runtime errors that share broad symptoms, not the same underlying bug.", + "confidence": 0.7, + "canonical_issue_reason": "issue:44805 is the best representative of the most generic failure mode in the cluster: a shape/indexing runtime error. It is broader than the more model-specific zero3/BERT or Qwen-specific reports, so it is the most plausible anchor if one issue must represent the set.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44805 is the strongest global representative because it is the least model-specific and closest to a reusable bug pattern. The rest are either narrower variants or unrelated runtime/doc issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44322", + "right": "issue:45464", + "accept": false, + "reason": "Both are Qwen3.5-related, but one is a config attribute error and the other is a streaming chat/completions failure; different code paths and symptoms." + }, + { + "left": "issue:43716", + "right": "issue:45237", + "accept": false, + "reason": "Both mention Qwen/GPT-OSS-style model execution problems, but one is a dtype mismatch in preprocessing and the other is an AMD GPU compatibility failure." + }, + { + "left": "issue:43828", + "right": "issue:45237", + "accept": false, + "reason": "A torch.autocast dtype mismatch for Phi-tiny-MoE is not the same underlying bug as GPT-OSS failing on AMD GPUs." + }, + { + "left": "issue:44292", + "right": "issue:45237", + "accept": false, + "reason": "Qwen-3 NVFP4 execution and GPT-OSS-on-AMD are different model/backends failures with no clear shared code path." + }, + { + "left": "issue:30277", + "right": "issue:35545", + "accept": false, + "reason": "Jamba + DeepSpeed ZeRO3 collective mismatch and ModernBERT ONNX export are unrelated bugs." 
+ }, + { + "left": "issue:35545", + "right": "issue:42915", + "accept": false, + "reason": "ONNX export error for ModernBERT and Qwen3Moe FP8 failure are different features and failure modes." + }, + { + "left": "issue:35545", + "right": "issue:41720", + "accept": false, + "reason": "ModernBERT ONNX export and Qwen3 auto-device-map CUDA assert are unrelated model/runtime paths." + }, + { + "left": "issue:44509", + "right": "issue:44512", + "accept": false, + "reason": "Both are v5 docs removals, but they target different removed entries and likely need separate documentation edits." + }, + { + "left": "issue:43638", + "right": "issue:44805", + "accept": false, + "reason": "Both are indexing errors, but one is a DeepSpeed ZeRO3/BERT initialization issue while the other is a mask-shape mismatch; not the same concrete bug." + }, + { + "left": "issue:35545", + "right": "issue:36010", + "accept": false, + "reason": "ModernBERT export to ONNX and GenerationMixin import failure are distinct issues." + }, + { + "left": "issue:43596", + "right": "issue:44805", + "accept": false, + "reason": "DeepSpeed ZeRO3 + BertModel out-of-bounds index and a tensor mask shape mismatch are different error conditions and code paths." + }, + { + "left": "issue:44805", + "right": "issue:45161", + "accept": false, + "reason": "A generic mask/tensor indexing mismatch is not the same bug as GPT-OSS MoE tensor-parallel-only failure." + }, + { + "left": "issue:44805", + "right": "issue:44918", + "accept": false, + "reason": "TRL SFT unpacking failure for Qwen3.5 and a mask-shape IndexError are different training-time bugs." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 2, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:24643", + "issue:30277", + "issue:34634", + "issue:34928", + "issue:35545", + "issue:36331", + "issue:39290", + "issue:43039", + "issue:43596", + "issue:43638", + "issue:43975", + "issue:44292", + "issue:44457", + "issue:44661", + "issue:44805", + "issue:45081", + "issue:45161", + "issue:45237" + ], + "soft_pairs": [ + "issue:44661|issue:44805", + "issue:30277|issue:43638", + "issue:34634|issue:35545", + "issue:43596|issue:43638", + "issue:43975|issue:44457", + "issue:43039|issue:45161", + "issue:44292|issue:44457", + "issue:34928|issue:44805", + "issue:24643|issue:30277", + "issue:43975|issue:45237", + "issue:43638|issue:45161", + "issue:34928|issue:36331", + "issue:43596|issue:45161", + "issue:39290|issue:45081" + ], + "prepared_review_unit_hash": "a273f6ab58c2c009e9fa5a99599110ad7ea3bccb669713d081a342feb93f55f6", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 14, + "serialized_chars": 14841, + "estimated_input_tokens": 3711, + "estimated_eval_tokens": 7678 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous. 
The only likely duplicate pair is the two DeepSpeed ZeRO3 + Bert IndexError reports (43596/43638); the rest describe unrelated models, tokenizer, training, or export issues.", + "confidence": 0.79, + "canonical_issue_reason": "Issue 43596 is the cleaner representative of the shared DeepSpeed ZeRO3/Bert zero-sized tensor IndexError; 43638 appears to be a narrower variant of the same failure mode.", + "canonical_pr_reason": null, + "best_issue_reason": "43596 best captures the underlying bug with a concise, general title and clear error signature, making it the best canonical issue for the one true duplicate pair.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44661", + "right": "issue:44805", + "accept": false, + "reason": "Different subsystems and failure modes: tokenizer mapping/add-new-model support vs a mask/tensor shape mismatch IndexError." + }, + { + "left": "issue:30277", + "right": "issue:43638", + "accept": false, + "reason": "Both involve DeepSpeed, but the concrete errors differ: collective mismatch in Jamba-v01 vs zero-sized tensor IndexError in Bert init." + }, + { + "left": "issue:34634", + "right": "issue:35545", + "accept": false, + "reason": "Unrelated problems: BarkProcessor voice_preset handling vs ModernBERT ONNX export." + }, + { + "left": "issue:43596", + "right": "issue:43638", + "accept": true, + "reason": "Same concrete DeepSpeed ZeRO3/Bert initialization failure pattern with the same IndexError class; the non-pretrained Bert detail looks like a repro variant." + }, + { + "left": "issue:43975", + "right": "issue:44457", + "accept": false, + "reason": "Different model/output issues: detokenization bug vs LoRA merge/save/reload output mismatch." + }, + { + "left": "issue:43039", + "right": "issue:45161", + "accept": false, + "reason": "Different code paths: Liger Kernel cross_entropy fallback vs GPT-OSS MoE tensor-parallel behavior." 
+ }, + { + "left": "issue:44292", + "right": "issue:44457", + "accept": false, + "reason": "Qwen NVFP4 runtime failure is unrelated to LoRA merge/reload consistency." + }, + { + "left": "issue:34928", + "right": "issue:44805", + "accept": false, + "reason": "Both are tensor shape errors, but the contexts differ substantially: FSDP checkpointing recompute vs mask/indexing mismatch." + }, + { + "left": "issue:24643", + "right": "issue:30277", + "accept": false, + "reason": "Different DeepSpeed-related errors and model paths; one is a weight dimensionality runtime error, the other a collective mismatch." + }, + { + "left": "issue:43975", + "right": "issue:45237", + "accept": false, + "reason": "No shared underlying bug: detokenization in DeepSeek vs GPT-OSS AMD GPU execution." + }, + { + "left": "issue:43638", + "right": "issue:45161", + "accept": false, + "reason": "Both mention Bert/GPT-OSS style distributed setups, but the specific failures are different and not the same code-path bug." + }, + { + "left": "issue:34928", + "right": "issue:36331", + "accept": false, + "reason": "Activation-checkpointing/FSDP recompute mismatch is unrelated to CustomTrainer's unexpected num_items_in_batch argument." + }, + { + "left": "issue:43596", + "right": "issue:45161", + "accept": false, + "reason": "DeepSpeed ZeRO3 Bert IndexError is unrelated to GPT-OSS MoE tensor-parallel failure." + }, + { + "left": "issue:39290", + "right": "issue:45081", + "accept": false, + "reason": "Different tokenizer/runtime bugs: Gemma3 config missing attribute under vLLM vs Mistral regex patch crashing on tokenizer backend access." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 3, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:30277", + "issue:34928", + "issue:36331", + "issue:41720", + "issue:41762", + "issue:43039", + "issue:43425", + "issue:43541", + "issue:43638", + "issue:43716", + "issue:43827", + "issue:43828", + "issue:44292", + "issue:44512", + "issue:44560", + "issue:44805", + "issue:44918", + "issue:45161" + ], + "soft_pairs": [ + "issue:43828|issue:45161", + "issue:43425|issue:44292", + "issue:43541|issue:43716", + "issue:44918|issue:45161", + "issue:34928|issue:41720", + "issue:43828|issue:44292", + "issue:43716|issue:44292", + "issue:44292|issue:44918", + "issue:43039|issue:44918", + "issue:41762|issue:43638", + "issue:43827|issue:44512", + "issue:43716|issue:44560", + "issue:43716|issue:44918", + "issue:43039|issue:43828", + "issue:30277|issue:36331", + "issue:43039|issue:44805" + ], + "prepared_review_unit_hash": "b355ccd8c574bcbbc1eea51b8caf2e88dbea5f1e555545e32673d0be126e1773", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 16, + "serialized_chars": 15284, + "estimated_input_tokens": 3821, + "estimated_eval_tokens": 7898 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous and does not form a broad duplicate set. 
The only strong near-duplicate is the DeepSpeed ZeRO-3 \"index 0 out of bounds for dimension 0 with size 0\" pair (41762, 43638); the rest are different bugs, models, or subsystems and should stay separate.", + "confidence": 0.87, + "canonical_issue_reason": "issue:41762 is the clearest representative of the shared ZeRO-3 zero-length indexing failure and is slightly more specific/reproducible than 43638.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:41762 is the best issue to anchor the one real duplicate-like pair in this set; it captures the common failure mode most clearly.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43828", + "right": "issue:45161", + "accept": false, + "reason": "Both mention model/runtime limitations around MoE/dtype/device behavior, but they describe different failures and code paths; not the same underlying bug." + }, + { + "left": "issue:43425", + "right": "issue:44292", + "accept": false, + "reason": "Torch-version incompatibility is not the same as a Qwen NVFP4 runtime failure; too broad and unrelated." + }, + { + "left": "issue:43541", + "right": "issue:43716", + "accept": false, + "reason": "Both involve dtype-ish runtime problems, but one is a grouped_mm tracing error and the other is an image preprocessor/model mismatch; different root causes." + }, + { + "left": "issue:44918", + "right": "issue:45161", + "accept": false, + "reason": "Different model/trainer behavior: SFT embedding unpacking vs GPT-OSS TP failure. Shared MoE/model wording is not enough." + }, + { + "left": "issue:34928", + "right": "issue:41720", + "accept": false, + "reason": "Activation checkpointing/FSDP tensor-size mismatch and Qwen auto device mapping cudaErrorAssert are distinct distributed/runtime issues." + }, + { + "left": "issue:43828", + "right": "issue:44292", + "accept": false, + "reason": "One is a torch.autocast dtype mismatch on Phi-tiny-MoE, the other is a Qwen NVFP4 runtime error; not the same bug." 
+ }, + { + "left": "issue:43716", + "right": "issue:44292", + "accept": false, + "reason": "Both are model/runtime dtype-related, but one is an image preprocessor mismatch and the other is a Qwen NVFP4 execution problem." + }, + { + "left": "issue:44292", + "right": "issue:44918", + "accept": false, + "reason": "Different failure modes and subsystems; no concrete evidence they share a root cause." + }, + { + "left": "issue:43039", + "right": "issue:44918", + "accept": false, + "reason": "Liger Kernel cross_entropy routing and TRL SFT embedding unpacking are unrelated changes/failures." + }, + { + "left": "issue:41762", + "right": "issue:43638", + "accept": true, + "reason": "Both report the same ZeRO-3 failure pattern: 'index 0 is out of bounds for dimension 0 with size 0' during model loading/training, making them very likely the same underlying bug." + }, + { + "left": "issue:43827", + "right": "issue:44512", + "accept": false, + "reason": "Both are docs-related references to removed commands, but they are different documentation pages and not the same concrete issue." + }, + { + "left": "issue:43716", + "right": "issue:44560", + "accept": false, + "reason": "Both involve model-specific runtime issues, but the reported errors and affected components differ (dtype mismatch vs StopIteration in video input handling)." + }, + { + "left": "issue:43716", + "right": "issue:44918", + "accept": false, + "reason": "Image-preprocessor dtype mismatch and TRL SFT embedding unpacking are different bugs." + }, + { + "left": "issue:43039", + "right": "issue:43828", + "accept": false, + "reason": "Liger Kernel cross_entropy dispatch and a torch.autocast dtype mismatch are not the same code-path problem." + }, + { + "left": "issue:30277", + "right": "issue:36331", + "accept": false, + "reason": "DeepSpeed collective mismatch during Jamba training and a Trainer API signature mismatch are completely different issues." 
+ }, + { + "left": "issue:43039", + "right": "issue:44805", + "accept": false, + "reason": "Cross-entropy dispatch under Liger Kernel is unrelated to a mask/index shape mismatch error." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 4, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:34928", + "issue:36010", + "issue:39290", + "issue:41093", + "issue:41720", + "issue:41762", + "issue:43039", + "issue:43425", + "issue:43531", + "issue:43596", + "issue:43638", + "issue:43854", + "issue:43866", + "issue:43901", + "issue:44512", + "issue:44560", + "issue:44863", + "issue:45070" + ], + "soft_pairs": [ + "issue:34928|issue:36010", + "issue:41762|issue:43596", + "issue:43901|issue:44512", + "issue:43039|issue:44560", + "issue:39290|issue:41720", + "issue:43866|issue:44863", + "issue:43854|issue:43866", + "issue:43425|issue:45070", + "issue:39290|issue:43531", + "issue:41093|issue:43638" + ], + "prepared_review_unit_hash": "f2f70eb1ca163cbf28eea131a8016f7aa406cf3084ead543432d729dc1c7acf3", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13481, + "estimated_input_tokens": 3371, + "estimated_eval_tokens": 6998 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is heterogeneous: most items are unrelated regressions or docs issues 
that only share broad error patterns. A few pairs are superficially similar (especially zero3/index-error reports and Qwen3 sliding_window reports), but not close enough to be the same underlying bug or change.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue stands out for the whole cluster; the items span distinct bugs across checkpointing/FSDP, imports, vLLM/Gemma, masking/indexing, device mapping, DeepSpeed ZeRO-3, checkpoint corruption, docs, and config compatibility.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:41762 is the strongest representative of one real duplicate family because it gives a concrete, specific DeepSpeed ZeRO-3 index-out-of-bounds failure; among the listed items it is one of the clearest and most actionable reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:34928", + "right": "issue:36010", + "accept": false, + "reason": "Different failure modes and subsystems: activation checkpointing/FSDP tensor-size mismatch vs a GenerationMixin import error." + }, + { + "left": "issue:41762", + "right": "issue:43596", + "accept": false, + "reason": "Both involve ZeRO-3 index errors, but the affected model/setup and exact failure context differ; not enough evidence of the same underlying bug." + }, + { + "left": "issue:43901", + "right": "issue:44512", + "accept": false, + "reason": "Both are docs-related, but one is about return_all_scores behavior and the other about a removed run command; different documentation issues." + }, + { + "left": "issue:43039", + "right": "issue:44560", + "accept": false, + "reason": "Liger Kernel cross_entropy call vs Qwen3-vl-embedding video StopIteration are unrelated change areas and error paths." + }, + { + "left": "issue:39290", + "right": "issue:41720", + "accept": false, + "reason": "Both mention model/runtime errors, but Gemma3 sliding_window_pattern with vLLM is a different bug than Qwen3 auto-device-mapping cudaErrorAssert." 
+ }, + { + "left": "issue:43866", + "right": "issue:44863", + "accept": false, + "reason": "Both are model-loading problems, but they concern different models and likely different root causes; not the same concrete bug." + }, + { + "left": "issue:43854", + "right": "issue:43866", + "accept": false, + "reason": "One is a unit-test load failure for GLM-4.7-Flash; the other is a corrupted Ovis2 checkpoint. Different models and failure causes." + }, + { + "left": "issue:43425", + "right": "issue:45070", + "accept": false, + "reason": "Torch 2.10 incompatibility is a broad dependency issue, while the pydantic PretrainedConfig field regression is a separate configuration-model bug." + }, + { + "left": "issue:39290", + "right": "issue:43531", + "accept": false, + "reason": "Both reference sliding_window, but they are different models and different symptoms; not a single concrete fixable code-path problem." + }, + { + "left": "issue:41093", + "right": "issue:43638", + "accept": false, + "reason": "Both are index-related, but one is a mask/tensor shape mismatch and the other is a ZeRO-3 BertModel index-out-of-bounds issue; too different to merge." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 5, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36010", + "issue:39290", + "issue:41093", + "issue:41762", + "issue:43296", + "issue:43366", + "issue:43404", + "issue:43541", + "issue:43572", + "issue:43792", + "issue:43828", + "issue:43866", + "issue:44841", + "issue:45070", + "issue:45084", + "issue:45362", + "issue:45464" + ], + "soft_pairs": [ + "issue:43792|issue:45070", + "issue:45362|issue:45464", + "issue:43404|issue:43866", + "issue:43541|issue:43828", + "issue:36010|issue:39290", + "issue:44841|issue:45084", + "issue:41093|issue:41762", + "issue:43572|issue:45070", + "issue:43296|issue:43366" + ], + "prepared_review_unit_hash": "a52bc6fff58a03ef6e44031962dd0f1e20e164832b2a6aeaaf7d3c4feca32011", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12473, + "estimated_input_tokens": 3119, + "estimated_eval_tokens": 6494 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly unrelated model-specific regressions and API/config issues, not a true duplicate cluster. 
The only broadly similar items are the two Qwen3.5 chat-related reports, but they still appear to describe different failure modes.", + "confidence": 0.86, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:45070 is the broadest, most general regression report in the set (v5.4.0 / PretrainedConfig pydantic break) and is the best single issue to use as a loose cluster anchor, though it is not a duplicate of the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43792", + "right": "issue:45070", + "accept": false, + "reason": "Different model and failure mode: Whisper loading/runtime issue vs a PretrainedConfig pydantic regression." + }, + { + "left": "issue:45362", + "right": "issue:45464", + "accept": false, + "reason": "Both involve Qwen3.5 and chat, but one is a crash in transformers chat while the other is a streaming chat/completions API failure on a different model size; not the same concrete bug." + }, + { + "left": "issue:43404", + "right": "issue:43866", + "accept": false, + "reason": "Mistral3 weight-tying bug and Ovis2 checkpoint corruption are unrelated issues." + }, + { + "left": "issue:43541", + "right": "issue:43828", + "accept": false, + "reason": "Grouped_mm tracing error in Mixtral is a different code path from a torch.autocast dtype mismatch in Phi-tiny-MoE." + }, + { + "left": "issue:36010", + "right": "issue:39290", + "accept": false, + "reason": "ImportError for GenerationMixin is unrelated to the Gemma3 sliding_window_pattern attribute error with vLLM." + }, + { + "left": "issue:44841", + "right": "issue:45084", + "accept": false, + "reason": "Processor failure for Voxtral differs from a template-node compilation TypeError; no shared underlying bug is evident." 
+ }, + { + "left": "issue:41093", + "right": "issue:41762", + "accept": false, + "reason": "Mask shape mismatch during indexing is a different failure from zero-sized dimension errors while loading Gemma3 with DeepSpeed ZeRO-3." + }, + { + "left": "issue:43572", + "right": "issue:45070", + "accept": false, + "reason": "Missing pad_token_idx in StableLmConfig is a specific config field regression, not the broader PretrainedConfig pydantic-model break." + }, + { + "left": "issue:43296", + "right": "issue:43366", + "accept": false, + "reason": "PaddleOCR-VL load failure in vLLM and GGUF gpt-oss support are different model/support requests, not the same bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 6, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:39290", + "issue:41093", + "issue:43054", + "issue:43366", + "issue:43531", + "issue:43541", + "issue:43596", + "issue:43742", + "issue:43828", + "issue:43994", + "issue:44291", + "issue:44387", + "issue:44589", + "issue:44661", + "issue:45005", + "issue:45070", + "issue:45161", + "issue:45464" + ], + "soft_pairs": [ + "issue:44387|issue:45005", + "issue:43541|issue:45161", + "issue:43828|issue:45464", + "issue:44291|issue:45070", + "issue:43531|issue:45070", + "issue:43366|issue:45464", + "issue:44661|issue:45464", + "issue:43541|issue:44589", + "issue:39290|issue:43742", + "issue:43054|issue:43994", + "issue:41093|issue:43596" + ], + "prepared_review_unit_hash": "6716d1a3fef821f8f949b815891237d8fa3bcc3dd83c3704ddafe2d18f250a53", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + 
"serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13714, + "estimated_input_tokens": 3429, + "estimated_eval_tokens": 7114 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are a loose set of unrelated bugs and feature requests across different model families and failure modes. None of the soft pairs look like the same underlying issue or a mergeable duplicate PR cluster.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44387", + "right": "issue:45005", + "accept": false, + "reason": "Different problems: int4 quantization OOM vs tied-weights handling for translation models in v5." + }, + { + "left": "issue:43541", + "right": "issue:45161", + "accept": false, + "reason": "Both mention MoE, but one is a Mixtral torch-dynamo grouped_mm runtime error and the other is TP-only failure on GPT-OSS MoE; not the same bug." + }, + { + "left": "issue:43828", + "right": "issue:45464", + "accept": false, + "reason": "Unrelated failures: autocast dtype mismatch in Phi-tiny-MoE vs streaming chat/completions API failure on Qwen3.5." + }, + { + "left": "issue:44291", + "right": "issue:45070", + "accept": false, + "reason": "Different regressions: unexpected _is_hf_initialized argument during init_empty_weights vs pydantic PretrainedConfig field breakage." + }, + { + "left": "issue:43531", + "right": "issue:45070", + "accept": false, + "reason": "Sliding-window handling for Qwen3-MoE is unrelated to the v5.4.0 pydantic config-field issue." 
+ }, + { + "left": "issue:43366", + "right": "issue:45464", + "accept": false, + "reason": "GPT-OSS GGUF architecture support request is not the same as a streaming inference API failure." + }, + { + "left": "issue:44661", + "right": "issue:45464", + "accept": false, + "reason": "Tokenizer mapping/add-new-model-like failure is a model-registration bug, not the Qwen3.5 streaming bug." + }, + { + "left": "issue:43541", + "right": "issue:44589", + "accept": false, + "reason": "Grouped_mm tracing error and Float8 storage lookup failure are distinct runtime issues." + }, + { + "left": "issue:39290", + "right": "issue:43742", + "accept": false, + "reason": "Gemma3 sliding_window_pattern attribute error and MobileLLM key error are different loading/config bugs." + }, + { + "left": "issue:43054", + "right": "issue:43994", + "accept": false, + "reason": "Both involve SigLIP2, but one reports poor embedding quality while the other reports nonsensical AutoModel/pipeline outputs; not clearly the same defect." + }, + { + "left": "issue:41093", + "right": "issue:43596", + "accept": false, + "reason": "Mask/tensor shape mismatch and deepspeed zero3 BertModel index error are different index errors with different contexts." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 7, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:39186", + "issue:40990", + "issue:42915", + "issue:43404", + "issue:43541", + "issue:43645", + "issue:43873", + "issue:43975", + "issue:44292", + "issue:44360", + "issue:44492", + "issue:44512", + "issue:44530", + "issue:44863", + "issue:44918", + "issue:44960", + "issue:45005", + "issue:45399" + ], + "soft_pairs": [ + "issue:42915|issue:43541", + "issue:39186|issue:44918", + "issue:40990|issue:44960", + "issue:43404|issue:45005", + "issue:43645|issue:45399", + "issue:44530|issue:45005", + "issue:44863|issue:45005", + "issue:43873|issue:45005", + "issue:42915|issue:45399", + "issue:44360|issue:44512", + "issue:43975|issue:44292", + "issue:44492|issue:44512" + ], + "prepared_review_unit_hash": "b6126ea0ddb933391761634cc9223b0f4da11ae39ced67338a4b118b407fcccd", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 12, + "serialized_chars": 13740, + "estimated_input_tokens": 3435, + "estimated_eval_tokens": 7126 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous: model-specific runtime/loading bugs, one quantization/offloading issue, and a couple of documentation nits. 
None of the soft-similarity pairs look like the same underlying bug/change, so I would not merge any of them as duplicates. No PRs are present.", + "confidence": 0.93, + "canonical_issue_reason": "No true canonical duplicate stands out because the reports cover different models, error paths, and even docs-only changes. If forced to pick the closest umbrella, issue 45005 is the broadest v5/tied-weights report, but it does not represent most of the cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45005 is the best representative only in a loose sense: it is the broadest and most umbrella-like item here, centered on a v5 regression around tied weights. Even so, it is not a good fit for the majority of the other issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42915", + "right": "issue:43541", + "accept": false, + "reason": "Different models and failures: FineGrainedFP8Config setup vs torch-dynamo grouped_mm tracing. Not the same bug path." + }, + { + "left": "issue:39186", + "right": "issue:44918", + "accept": false, + "reason": "FSDP 2-D weight runtime error and Qwen3.5 embedding unpacking in TRL are unrelated paths." + }, + { + "left": "issue:40990", + "right": "issue:44960", + "accept": false, + "reason": "Perplexity degradation on gpt-oss and GLM5 loading/runtime behavior do not point to the same underlying issue." + }, + { + "left": "issue:43404", + "right": "issue:45005", + "accept": false, + "reason": "Both mention tied weights, but the concrete cases differ too much (Mistral3 image-text-to-text vs translation models) to treat as one fix." + }, + { + "left": "issue:43645", + "right": "issue:45399", + "accept": false, + "reason": "Custom model initialization in Jupyter is unrelated to the flash-attn2 fallback gating problem." 
+ }, + { + "left": "issue:44530", + "right": "issue:45005", + "accept": false, + "reason": "PagedAttentionCache linear_attention crash is a different code path from tied-weight regressions." + }, + { + "left": "issue:44863", + "right": "issue:45005", + "accept": false, + "reason": "NemotronH checkpoint-loading failure is model-loading specific and not the same as the tied-weights regression." + }, + { + "left": "issue:43873", + "right": "issue:45005", + "accept": false, + "reason": "Quantization/offloading behavior is a different subsystem from tied-weight handling." + }, + { + "left": "issue:42915", + "right": "issue:45399", + "accept": false, + "reason": "Qwen3Moe FP8 failure and flash-attn2 fallback checks are unrelated." + }, + { + "left": "issue:44360", + "right": "issue:44512", + "accept": false, + "reason": "A DSA indexer discussion is not the same as a docs note about a removed command." + }, + { + "left": "issue:43975", + "right": "issue:44292", + "accept": false, + "reason": "DeepSeek detokenization and Qwen NVFP4 runtime errors are different model-specific bugs." + }, + { + "left": "issue:44492", + "right": "issue:44512", + "accept": false, + "reason": "Docs typo in cache strategies and docs mentioning a removed command are separate documentation changes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 8, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36331", + "issue:38175", + "issue:39692", + "issue:41553", + "issue:41762", + "issue:43054", + "issue:43644", + "issue:43828", + "issue:43866", + "issue:43872", + "issue:43881", + "issue:43883", + "issue:43950", + "issue:44403", + "issue:44534", + "issue:44589", + "issue:44928" + ], + "soft_pairs": [ + "issue:43950|issue:44534", + "issue:43644|issue:43950", + "issue:43828|issue:44928", + "issue:39692|issue:43054", + "issue:41553|issue:43883", + "issue:43866|issue:44403", + "issue:36331|issue:41762", + "issue:41553|issue:43881", + "issue:43872|issue:44589", + "issue:38175|issue:43054" + ], + "prepared_review_unit_hash": "b9b5daa647367ccba6f14a4a53934f014f7e99d458cdc9cd663cf1057d2a565a", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12847, + "estimated_input_tokens": 3212, + "estimated_eval_tokens": 6680 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of unrelated issues; the only clear duplicate subcluster is the non-persistent buffer corruption regression reported in 43644/43950/44534. 
All other soft pairs look superficially similar but describe different bugs or model-specific failures.", + "confidence": 0.95, + "canonical_issue_reason": "issue:43950 is the strongest canonical for the buffer-corruption subcluster: it names the regression precisely (`from_pretrained()` corrupting `register_buffer(persistent=False)` buffers) and is the most specific/root-cause-oriented report among the three matching titles.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43950 is the best representative overall because it most clearly describes the actual bug and is the most actionable anchor for the duplicate set; the remaining items in the cluster are not duplicates of it.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43950", + "right": "issue:44534", + "accept": true, + "reason": "Same underlying regression: both report non-persistent buffers being filled/corrupted in Transformers v5." + }, + { + "left": "issue:43644", + "right": "issue:43950", + "accept": true, + "reason": "Same bug and same code path: v5 regression corrupting non-persistent buffers during load." + }, + { + "left": "issue:43828", + "right": "issue:44928", + "accept": false, + "reason": "Different failures: autocast dtype mismatch in Phi MoE vs RLHF gradient explosion from 3D position_ids/SDPA fallback." + }, + { + "left": "issue:39692", + "right": "issue:43054", + "accept": false, + "reason": "Both involve SigLIP2, but one is documentation/example errors and the other is degraded text embeddings; not the same bug." + }, + { + "left": "issue:41553", + "right": "issue:43883", + "accept": false, + "reason": "Unrelated model-loading problems: Voxtral tokenizer error message vs Molmo missing `all_tied_weights_keys`." + }, + { + "left": "issue:43866", + "right": "issue:44403", + "accept": false, + "reason": "Corrupted checkpoint for Ovis2 is not the same as generic loading noise." 
+ }, + { + "left": "issue:36331", + "right": "issue:41762", + "accept": false, + "reason": "Different errors and subsystems: Trainer API argument mismatch vs Gemma3 load failure under ZeRO-3." + }, + { + "left": "issue:41553", + "right": "issue:43881", + "accept": false, + "reason": "Different model-specific loading issues with no evidence of the same root cause." + }, + { + "left": "issue:43872", + "right": "issue:44589", + "accept": false, + "reason": "Both are bitsandbytes/storage-related, but they describe different exceptions and likely different incompatibilities." + }, + { + "left": "issue:38175", + "right": "issue:43054", + "accept": false, + "reason": "Same model family only; zero probabilities is a different symptom from worse text embeddings." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 9, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41762", + "issue:42491", + "issue:42915", + "issue:42947", + "issue:43054", + "issue:43493", + "issue:43643", + "issue:43782", + "issue:43856", + "issue:43881", + "issue:43883", + "issue:44368", + "issue:44387", + "issue:44403", + "issue:44451", + "issue:44488", + "issue:44960", + "issue:45020" + ], + "soft_pairs": [ + "issue:42947|issue:44387", + "issue:43881|issue:43883", + "issue:43054|issue:43493", + "issue:44403|issue:44451", + "issue:42947|issue:43856", + "issue:44403|issue:44488", + "issue:43643|issue:45020", + "issue:43782|issue:44960", + "issue:42491|issue:44368", + "issue:41762|issue:42915" + ], + "prepared_review_unit_hash": "c3bd6576ee565a6ea24f91305281adb5d5d4d2456ee8c821819beb26ea5ab8f6", + "component_budget": { + 
"node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13331, + "estimated_input_tokens": 3333, + "estimated_eval_tokens": 6922 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Only one soft pair looks like a true duplicate: the SigLIP2 text-embedding regression. Most other pairs share a subsystem but not the same concrete failure or code path.", + "confidence": 0.88, + "canonical_issue_reason": "issue:43493 is the clearest root-cause report for the SigLIP2 problem; issue:43054 reads like the user-facing symptom, while 43493 names the HF-vs-JAX implementation discrepancy directly.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43643 is the most actionable standalone issue in the set: it has a specific API surface (`AutoConfig.from_pretrained` with `trust_remote_code=True`) and a crisp, reproducible failure mode.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42947", + "right": "issue:44387", + "accept": false, + "reason": "Both are memory-related, but one is a LoRA/gradient-checkpointing correctness issue and the other is int4 quantization reserved-memory growth leading to OOM; different bugs." + }, + { + "left": "issue:43881", + "right": "issue:43883", + "accept": false, + "reason": "Different models and different failures (`glm-4v-9b` load failure vs Molmo missing `all_tied_weights_keys`); not the same underlying issue." + }, + { + "left": "issue:43054", + "right": "issue:43493", + "accept": true, + "reason": "Same SigLIP2 symptom space: 43054 reports worse text embeddings, and 43493 identifies the likely root cause as an HF vs JAX implementation discrepancy." 
+ }, + { + "left": "issue:44403", + "right": "issue:44451", + "accept": false, + "reason": "44403 is about loading noise/warnings, while 44451 is a concrete model load failure; not the same bug." + }, + { + "left": "issue:42947", + "right": "issue:43856", + "accept": false, + "reason": "Both mention Qwen3 MoE and memory, but one is about LoRA checkpointing behavior and the other about training memory inefficiency; too different to merge." + }, + { + "left": "issue:44403", + "right": "issue:44488", + "accept": false, + "reason": "Generic loading noise does not match a specific inability to load a model; different failure modes." + }, + { + "left": "issue:43643", + "right": "issue:45020", + "accept": false, + "reason": "Related to `trust_remote_code`, but 45020 is a broad regression report while 43643 is a specific missing-fields bug in `AutoConfig`; not enough evidence of the same concrete code-path." + }, + { + "left": "issue:43782", + "right": "issue:44960", + "accept": false, + "reason": "Different model families and different errors (`weight_only=True` load error vs a generic GLM5 issue); not the same underlying change." + }, + { + "left": "issue:42491", + "right": "issue:44368", + "accept": false, + "reason": "One is a compatibility break for a trained LoRA on hf4.x/hf5.x, the other is a warning about `tie_word_embeddings`; same ecosystem, but not the same bug." + }, + { + "left": "issue:41762", + "right": "issue:42915", + "accept": false, + "reason": "Different models and different failure modes (`IndexError` under ZeRO-3 vs FineGrainedFP8Config failure); not a duplicate." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 10, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43054", + "issue:43278", + "issue:43792", + "issue:43824", + "issue:43827", + "issue:43872", + "issue:43975", + "issue:44220", + "issue:44451", + "issue:44488", + "issue:44509", + "issue:44661", + "issue:44991", + "issue:45020", + "issue:45081", + "issue:45356", + "issue:45399" + ], + "soft_pairs": [ + "issue:43975|issue:44451", + "issue:45020|issue:45356", + "issue:43824|issue:43872", + "issue:44661|issue:45020", + "issue:43975|issue:44488", + "issue:43054|issue:43278", + "issue:44991|issue:45399", + "issue:43792|issue:44220", + "issue:45081|issue:45399", + "issue:43827|issue:44509" + ], + "prepared_review_unit_hash": "6ac239300b4ce6ad7c573470d7e15d7afead875758e1345a30beef9519e26699", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12932, + "estimated_input_tokens": 3233, + "estimated_eval_tokens": 6722 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of unrelated Transformers regression/issues around model loading, tokenizers, docs, and audio/vision paths. 
Only the two docs issues about removed pipeline task references look like true duplicates; the rest should stay separate.", + "confidence": 0.84, + "canonical_issue_reason": "issue:45020 is the broadest and most central open report in the set, covering recent-version regressions that break model loading via remote code, so it works best as the umbrella issue for this mixed cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45020 is the strongest global representative because it is broad, actively open, and describes the most general failure mode among the issues here.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43975", + "right": "issue:44451", + "accept": false, + "reason": "Different symptoms and model families: one is a detokenization bug for DeepSeek-Coder, the other is a tokenizer load failure for ScandiBERT." + }, + { + "left": "issue:45020", + "right": "issue:45356", + "accept": false, + "reason": "Both concern regressions, but the concrete bugs differ: generic remote-code loading breakage vs. Kimi-K2.5 tokenizer codec / warning behavior." + }, + { + "left": "issue:43824", + "right": "issue:43872", + "accept": false, + "reason": "ImportError for a specific VL class and a bitsandbytes Int8Params constructor mismatch are unrelated failures in different subsystems." + }, + { + "left": "issue:44661", + "right": "issue:45020", + "accept": false, + "reason": "The first is an add-new-model-like tokenizer-mapping edge case; the second is a broader remote_code loading regression. Not the same bug." + }, + { + "left": "issue:43975", + "right": "issue:44488", + "accept": false, + "reason": "Both involve loading or output issues for different models, but the reported failures are not the same code path or symptom." + }, + { + "left": "issue:43054", + "right": "issue:43278", + "accept": false, + "reason": "A quality regression in SigLIP2 text embeddings is not the same as a BF16-to-FP32 dtype mismatch in evaluation." 
+ }, + { + "left": "issue:44991", + "right": "issue:45399", + "accept": false, + "reason": "Tokenizer loading for a specific model and flash-attn fallback selection are unrelated problems." + }, + { + "left": "issue:43792", + "right": "issue:44220", + "accept": false, + "reason": "The Whisper model runtime failure may involve fbank extraction, but the issue titles and descriptions indicate different concrete bugs and no clear shared fix." + }, + { + "left": "issue:45081", + "right": "issue:45399", + "accept": false, + "reason": "Mistral regex patch crashes during tokenizer loading; the other issue is about flash-attn2 fallback being blocked by checks. Different paths, not duplicates." + }, + { + "left": "issue:43827", + "right": "issue:44509", + "accept": true, + "reason": "Both report the same docs drift: references to removed v5 pipeline tasks (summarization/translation/text2text-generation) still appearing in documentation." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 11, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41553", + "issue:42915", + "issue:43257", + "issue:43404", + "issue:43742", + "issue:43854", + "issue:43866", + "issue:43872", + "issue:43940", + "issue:43950", + "issue:44291", + "issue:44488", + "issue:44863", + "issue:44991", + "issue:45020", + "issue:45305", + "issue:45313", + "issue:45399" + ], + "soft_pairs": [ + "issue:43404|issue:43950", + "issue:42915|issue:45020", + "issue:41553|issue:44991", + "issue:43257|issue:43866", + "issue:43742|issue:45399", + "issue:43742|issue:45020", + "issue:43872|issue:44291", + "issue:43940|issue:45313", + 
"issue:42915|issue:43872", + "issue:41553|issue:44488", + "issue:43854|issue:44863", + "issue:43257|issue:45305" + ], + "prepared_review_unit_hash": "ff1c2f92c0d259e8bb0e73ad84c6fb17446b5118b99a4c71aa1db4a161f95b0d", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 12, + "serialized_chars": 14029, + "estimated_input_tokens": 3508, + "estimated_eval_tokens": 7272 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most items are separate model-loading/regression reports. The only strong duplicate match is the `_is_hf_initialized` / `init_empty_weights` TypeError pair; the rest are too model-specific or describe different failure modes.", + "confidence": 0.91, + "canonical_issue_reason": "issue:44291 is the clearest, most general report of the shared `_is_hf_initialized` regression and best represents the accepted duplicate pair.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44291 is the best global anchor because it states the underlying incompatibility most directly and broadly.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43404", + "right": "issue:43950", + "accept": false, + "reason": "Both are loading-related, but one is about lm_head weight tying in Mistral3 while the other is a `from_pretrained()` buffer corruption regression; different bugs and code paths." + }, + { + "left": "issue:42915", + "right": "issue:45020", + "accept": false, + "reason": "Qwen3Moe FP8 failure vs. remote-code loading breakage are different problems, only loosely related by model loading." 
+ }, + { + "left": "issue:41553", + "right": "issue:44991", + "accept": false, + "reason": "Both mention tokenizer loading, but they target different models and likely different tokenizer issues; not the same underlying bug." + }, + { + "left": "issue:43257", + "right": "issue:43866", + "accept": false, + "reason": "Qwen3 MOE weight conversion under accelerate+deepspeed is unrelated to a corrupted Ovis2 checkpoint." + }, + { + "left": "issue:43742", + "right": "issue:45399", + "accept": false, + "reason": "A MobileLLM key error and a flash-attn2 fallback check issue are distinct failures." + }, + { + "left": "issue:43742", + "right": "issue:45020", + "accept": false, + "reason": "MobileLLM key error is not the same as the broad remote_code regression." + }, + { + "left": "issue:43872", + "right": "issue:44291", + "accept": true, + "reason": "These describe the same `_is_hf_initialized` TypeError regression during model loading / `init_empty_weights`, with one being the bitsandbytes manifestation." + }, + { + "left": "issue:43940", + "right": "issue:45313", + "accept": false, + "reason": "Both are Qwen DeepSpeed ZeRO-3 loading failures, but the model variants and reported missing-weight symptoms differ too much to treat as the same concrete bug." + }, + { + "left": "issue:42915", + "right": "issue:43872", + "accept": false, + "reason": "FP8 config failure for Qwen3Moe is unrelated to the bitsandbytes `_is_hf_initialized` keyword incompatibility." + }, + { + "left": "issue:41553", + "right": "issue:44488", + "accept": false, + "reason": "Different tokenizer-loading failures for different models; no evidence of the same root cause." + }, + { + "left": "issue:43854", + "right": "issue:44863", + "accept": false, + "reason": "GLM-4.7-Flash test loading failure and NemotronH checkpoint loading failure are separate model-specific issues." 
+ }, + { + "left": "issue:43257", + "right": "issue:45305", + "accept": false, + "reason": "Weight conversion/loading under DeepSpeed is not the same as gradient averaging with GAS and `model_accepts_loss_kwargs=True`." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 12, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:24643", + "issue:30064", + "issue:36331", + "issue:41093", + "issue:43366", + "issue:43531", + "issue:43550", + "issue:43828", + "issue:43872", + "issue:44479", + "issue:44530", + "issue:44560", + "issue:44589", + "issue:44805", + "issue:44863", + "issue:44910", + "issue:44918", + "issue:45084" + ], + "soft_pairs": [ + "issue:36331|issue:41093", + "issue:41093|issue:44805", + "issue:44479|issue:44560", + "issue:44530|issue:44863", + "issue:43872|issue:45084", + "issue:43366|issue:43828", + "issue:24643|issue:30064", + "issue:43366|issue:43531", + "issue:43550|issue:44910", + "issue:44560|issue:44918", + "issue:43366|issue:44589" + ], + "prepared_review_unit_hash": "5d0924a5d684775dd7223fe15100e68bd9f93fbc61a1e2668242aae0dce121e8", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13761, + "estimated_input_tokens": 3441, + "estimated_eval_tokens": 7138 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "One likely 
duplicate pair stands out: the two generic mask/tensor shape-mismatch IndexError reports. The remaining items describe different models, error types, or subsystems and should stay separate.", + "confidence": 0.76, + "canonical_issue_reason": "issue:44805 is the best canonical issue for the accepted duplicate pair because it reports the same mask-vs-indexed-tensor shape mismatch as issue:41093 and provides the fuller, later report.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44805 is the strongest representative issue in this set: it matches the duplicate mask-shape bug and has the most detailed report of that failure mode.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:36331", + "right": "issue:41093", + "accept": false, + "reason": "Different failures: a Trainer API keyword-argument breakage vs a tensor mask shape mismatch. Not the same underlying bug." + }, + { + "left": "issue:41093", + "right": "issue:44805", + "accept": true, + "reason": "Same concrete error family: both report an IndexError from a mask shape not matching the indexed tensor shape, indicating the same underlying bug." + }, + { + "left": "issue:44479", + "right": "issue:44560", + "accept": false, + "reason": "Both involve video/Qwen models, but the reported regressions and error paths differ; too little evidence they are the same bug." + }, + { + "left": "issue:44530", + "right": "issue:44863", + "accept": false, + "reason": "Different subsystems and symptoms: PagedAttentionCache linear_attention crash vs inability to load NemotronH checkpoints." + }, + { + "left": "issue:43872", + "right": "issue:45084", + "accept": false, + "reason": "Unrelated errors: bitsandbytes constructor incompatibility vs a torch compile/template-node TypeError." + }, + { + "left": "issue:43366", + "right": "issue:43828", + "accept": false, + "reason": "No shared concrete code-path problem; one is GGUF architecture support, the other is a dtype mismatch under autocast." 
+ }, + { + "left": "issue:24643", + "right": "issue:30064", + "accept": false, + "reason": "Completely different areas: DeepSpeed training weight-dimension error vs image processor segmentation-map handling." + }, + { + "left": "issue:43366", + "right": "issue:43531", + "accept": false, + "reason": "Both mention model support issues, but they concern different models and different failure modes." + }, + { + "left": "issue:43550", + "right": "issue:44910", + "accept": false, + "reason": "Different attention/compile failures on different models; not enough evidence of the same underlying code-path bug." + }, + { + "left": "issue:44560", + "right": "issue:44918", + "accept": false, + "reason": "One is a video StopIteration in Qwen3-vl-embedding, the other is an SFT trainer embedding-unpacking failure; distinct errors." + }, + { + "left": "issue:43366", + "right": "issue:44589", + "accept": false, + "reason": "Different problem classes: GGUF architecture support vs missing Float8 storage type." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 13, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36010", + "issue:38175", + "issue:42617", + "issue:42915", + "issue:43366", + "issue:43638", + "issue:43645", + "issue:43646", + "issue:43824", + "issue:43950", + "issue:43957", + "issue:43994", + "issue:44560", + "issue:44661", + "issue:44805", + "issue:44877", + "issue:45030", + "issue:45325" + ], + "soft_pairs": [ + "issue:43645|issue:43646", + "issue:43950|issue:45325", + "issue:43824|issue:43957", + "issue:42915|issue:43957", + "issue:42617|issue:43366", + "issue:38175|issue:43994", + "issue:44560|issue:44805", + "issue:43646|issue:43950", + "issue:36010|issue:42915", + "issue:43638|issue:44661", + "issue:44877|issue:45030" + ], + "prepared_review_unit_hash": "fad29bbafc79ebadd92068896db66da9b982d88e689f972a53b631b7169a8877", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13806, + "estimated_input_tokens": 3452, + "estimated_eval_tokens": 7160 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is heterogeneous, but there are two clear duplicate-like issue pairs: the Transformers 5.0 custom model initialization regression, and the Siglip2 bad-output regression. 
A third pair about stricter config validation also looks likely to share the same underlying loading regression. The rest look unrelated.", + "confidence": 0.78, + "canonical_issue_reason": "issue:43646 is the broadest and cleanest representative of the custom-model-initialization regression; issue:43645 is the narrower Jupyter-specific report.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue:43646 is the best overall representative among the cluster items because it states the core custom-model initialization breakage without extra environment-specific noise.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43645", + "right": "issue:43646", + "accept": true, + "reason": "Same underlying regression: Transformers 5.0 breaks custom model initialization; 43645 just adds the Jupyter notebook context." + }, + { + "left": "issue:43950", + "right": "issue:45325", + "accept": false, + "reason": "Both involve model-loading behavior, but the symptoms and code paths differ: non-persistent buffer corruption vs. Qwen2.5-VL rope/position-id scaling." + }, + { + "left": "issue:43824", + "right": "issue:43957", + "accept": false, + "reason": "Different failures: one is an import/export issue for a missing model class, the other is a meta-device loading regression for some models." + }, + { + "left": "issue:42915", + "right": "issue:43957", + "accept": false, + "reason": "Both are loading-related, but the concrete problems differ too much: FineGrainedFP8Config failure vs. meta-device initialization breakage." + }, + { + "left": "issue:42617", + "right": "issue:43366", + "accept": false, + "reason": "Unrelated topics: a 3d_parallel.py runtime failure versus GGUF support for gpt-oss architecture." 
+ }, + { + "left": "issue:38175", + "right": "issue:43994", + "accept": true, + "reason": "Same model-family regression with the same user-visible symptom class: Siglip2 produces invalid/garbled outputs, described as zero probabilities in one report and nonsensical results in the other." + }, + { + "left": "issue:44560", + "right": "issue:44805", + "accept": false, + "reason": "Both are runtime errors, but they point to different multimodal tensor-shape issues and do not clearly share the same bug." + }, + { + "left": "issue:43646", + "right": "issue:43950", + "accept": false, + "reason": "These are separate regressions: custom model initialization in Transformers 5.0 versus silent corruption of non-persistent buffers during from_pretrained()." + }, + { + "left": "issue:36010", + "right": "issue:42915", + "accept": false, + "reason": "Different areas and failures: a GenerationMixin import error versus a FineGrainedFP8Config model-load failure." + }, + { + "left": "issue:43638", + "right": "issue:44661", + "accept": false, + "reason": "Different bug classes: a zero-sized tensor/indexing failure under deepspeed zero3 versus add-new-model-like failing inside TOKENIZER_MAPPING_NAMES." + }, + { + "left": "issue:44877", + "right": "issue:45030", + "accept": true, + "reason": "Both report the same strict config-validation regression preventing model config loading; the specific model names differ, but the underlying change is the same." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 14, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:38175", + "issue:39692", + "issue:42491", + "issue:43317", + "issue:43596", + "issue:43644", + "issue:43708", + "issue:43720", + "issue:44075", + "issue:44451", + "issue:44488", + "issue:44534", + "issue:44661", + "issue:44756", + "issue:44960", + "issue:44977", + "issue:45313", + "issue:45357" + ], + "soft_pairs": [ + "issue:43596|issue:44661", + "issue:43317|issue:44756", + "issue:43644|issue:44534", + "issue:38175|issue:39692", + "issue:45313|issue:45357", + "issue:44960|issue:44977", + "issue:44451|issue:44488", + "issue:43708|issue:44075", + "issue:42491|issue:43720" + ], + "prepared_review_unit_hash": "8021c8db19b0c4d0f9358d4cc7ad563c29e3dc9bc13454a27b53ac6cb3ee1d2a", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13002, + "estimated_input_tokens": 3251, + "estimated_eval_tokens": 6758 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "One clear duplicate pair is the Transformers v5 non-persistent buffer regression; the other soft pairs are only loosely related by subsystem or model family and should stay separate.", + "confidence": 0.88, + "canonical_issue_reason": "issue:43644 is the better canonical issue for the duplicate 
pair: it is older and more specific about the regression ('v5.0.0 fills non-persistent buffers with junk'), while issue:44534 is a later rewording of the same bug.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43644 is the strongest issue candidate overall because it describes a concrete, reproducible regression and has a clear duplicate counterpart, making it the best anchor for triage.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43596", + "right": "issue:44661", + "accept": false, + "reason": "Different bugs: one is a DeepSpeed ZeRO-3/BertModel indexing failure, the other is an add-new-model-like tokenizer mapping failure." + }, + { + "left": "issue:43317", + "right": "issue:44756", + "accept": false, + "reason": "Both mention loading/offload pain, but the concrete problems differ: dequantized model gpu+cpu offload failure vs Strix Halo mmap OOM avoidance." + }, + { + "left": "issue:43644", + "right": "issue:44534", + "accept": true, + "reason": "Same underlying regression in Transformers v5 about non-persistent buffers being filled with junk; titles are near-paraphrases of the same bug." + }, + { + "left": "issue:38175", + "right": "issue:39692", + "accept": false, + "reason": "SigLIP2 model probability issue is not the same as a documentation-example mismatch and quantization failure; different concrete failures." + }, + { + "left": "issue:45313", + "right": "issue:45357", + "accept": false, + "reason": "Different code paths and symptoms: ZeRO-3 weight loading for language_model vs save_pretrained writing incorrect visual encoder keys." + }, + { + "left": "issue:44960", + "right": "issue:44977", + "accept": false, + "reason": "Different models and failures; no evidence they share the same concrete bug or change." 
+ }, + { + "left": "issue:44451", + "right": "issue:44488", + "accept": false, + "reason": "Both are BERT load failures, but they concern different models and there is no clear evidence of the same underlying defect." + }, + { + "left": "issue:43708", + "right": "issue:44075", + "accept": false, + "reason": "Unrelated training-resume step-count bug vs optimizer SGD arguments not being applied." + }, + { + "left": "issue:42491", + "right": "issue:43720", + "accept": false, + "reason": "Different regressions in different areas: Qwen3 MoE LoRA compatibility vs BitNet packed-weight loading." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 15, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:39692", + "issue:41720", + "issue:42491", + "issue:42915", + "issue:43854", + "issue:43856", + "issue:43873", + "issue:43931", + "issue:43994", + "issue:44387", + "issue:44488", + "issue:44661", + "issue:44683", + "issue:44756", + "issue:44991", + "issue:45276", + "issue:45362", + "issue:45406" + ], + "soft_pairs": [ + "issue:43856|issue:43873", + "issue:41720|issue:42915", + "issue:43854|issue:43931", + "issue:44488|issue:44991", + "issue:39692|issue:43994", + "issue:43856|issue:44387", + "issue:45362|issue:45406", + "issue:44661|issue:44683", + "issue:42491|issue:45276", + "issue:42915|issue:44756" + ], + "prepared_review_unit_hash": "9fe0264396228b40520412c4b14d56a288d1f28a4aa25642843bfa00e8167148", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, 
+ "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13311, + "estimated_input_tokens": 3328, + "estimated_eval_tokens": 6912 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the issues span unrelated models, loading paths, quantization/offloading, tokenizer registration, and UI/serve code. None of the soft pairs looks like the same underlying bug, so no duplicate group stands out.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue fits this cluster because the items are about different models and different failure modes, not one shared bug.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a strong global canonical candidate; the cluster is too mixed for one representative issue to cover it well.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43856", + "right": "issue:43873", + "accept": false, + "reason": "Both mention quantization/memory, but one is Qwen3 MoE training memory use and the other is offloading behavior during quantization; different bugs and code paths." + }, + { + "left": "issue:41720", + "right": "issue:42915", + "accept": false, + "reason": "Both involve Qwen3 family failures, but one is auto device mapping cudaErrorAssert and the other is FineGrainedFP8Config loading; not the same concrete issue." + }, + { + "left": "issue:43854", + "right": "issue:43931", + "accept": false, + "reason": "Both are model-loading problems, but they affect different models with different symptoms (unit-test load failure vs weight-shape mismatch)." + }, + { + "left": "issue:44488", + "right": "issue:44991", + "accept": false, + "reason": "Both are tokenizer-load failures, but the affected models differ and there is no clear evidence of one shared underlying tokenizer bug." 
+ }, + { + "left": "issue:39692", + "right": "issue:43994", + "accept": false, + "reason": "Both are SigLIP2-related, but one is a docs example with model/processor mismatch plus quantization failure, while the other reports nonsensical inference results in AutoModel/pipeline usage." + }, + { + "left": "issue:43856", + "right": "issue:44387", + "accept": false, + "reason": "Both discuss memory under quantization, but one is offloading behavior and the other is increased CUDA reserved memory causing OOM; not the same fix." + }, + { + "left": "issue:45362", + "right": "issue:45406", + "accept": false, + "reason": "Different models and different surfaces: Qwen3.5 chat crashes versus Gemma4Processor missing _tokenizer in serve." + }, + { + "left": "issue:44661", + "right": "issue:44683", + "accept": false, + "reason": "Completely unrelated: tokenizer-mapping registration bug versus compiled flex_attention failing on torch >= 2.9." + }, + { + "left": "issue:42491", + "right": "issue:45276", + "accept": false, + "reason": "Different models and unrelated behaviors: Qwen3 MoE LoRA compatibility across versions versus Gemma4 token embedding resize not propagating." + }, + { + "left": "issue:42915", + "right": "issue:44756", + "accept": false, + "reason": "Qwen3Moe FP8 loading failure and Strix Halo mmap OOM are unrelated issues with no shared code path." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 16, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36010", + "issue:39404", + "issue:42491", + "issue:43257", + "issue:43404", + "issue:43454", + "issue:43644", + "issue:43645", + "issue:43854", + "issue:43856", + "issue:43873", + "issue:43931", + "issue:44164", + "issue:44291", + "issue:44387", + "issue:45276", + "issue:45335" + ], + "soft_pairs": [ + "issue:43257|issue:43854", + "issue:43644|issue:43645", + "issue:42491|issue:43931", + "issue:45276|issue:45335", + "issue:42491|issue:43856", + "issue:43404|issue:43454", + "issue:36010|issue:39404", + "issue:44164|issue:44291", + "issue:43873|issue:44387" + ], + "prepared_review_unit_hash": "c3024abf2eaf56089aeb1469a11b0a75d084357997f67f4096630ea22ada8e0f", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12588, + "estimated_input_tokens": 3147, + "estimated_eval_tokens": 6550 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is mostly a bag of unrelated Transformers issues. 
None of the soft pairs look like clear duplicates with the same concrete bug, though the Gemma/T5Gemma resize-token-embeddings pair is the closest match.", + "confidence": 0.67, + "canonical_issue_reason": "No single canonical issue cleanly covers the whole set; the items span several unrelated bugs. If forced to pick the closest anchor, issue #45276 is the strongest representative of the most similar pair.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #45276 is the best global issue candidate because it sits in the strongest near-duplicate pair and has the broadest wording of the resize_token_embeddings regression.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43257", + "right": "issue:43854", + "accept": false, + "reason": "Both are Qwen/GLM model-loading problems, but they concern different models and different failure modes; not the same underlying bug." + }, + { + "left": "issue:43644", + "right": "issue:43645", + "accept": false, + "reason": "One is about non-persistent buffers getting junk values, the other about custom model initialization in Jupyter; related release regressions but distinct code paths." + }, + { + "left": "issue:42491", + "right": "issue:43931", + "accept": false, + "reason": "Different symptoms and targets: a LoRA compatibility break on hf5.x vs a weight-shape mismatch for a specific Qwen3-VL model." + }, + { + "left": "issue:45276", + "right": "issue:45335", + "accept": false, + "reason": "Both mention resize_token_embeddings, but they target different model architectures and different missing updates; not clearly one duplicate bug." + }, + { + "left": "issue:42491", + "right": "issue:43856", + "accept": false, + "reason": "Both involve Qwen3 MoE, but one is a cross-version LoRA usability issue and the other is a training memory-efficiency issue." 
+ }, + { + "left": "issue:43404", + "right": "issue:43454", + "accept": false, + "reason": "Both are about lm_head tying in multimodal models, but they are different model families and likely separate implementation fixes." + }, + { + "left": "issue:36010", + "right": "issue:39404", + "accept": false, + "reason": "An import path break for GenerationMixin is unrelated to Whisper pipeline return_language behavior." + }, + { + "left": "issue:44164", + "right": "issue:44291", + "accept": false, + "reason": "Both touch save/load initialization, but extra_state handling and unexpected _is_hf_initialized argument are different failures." + }, + { + "left": "issue:43873", + "right": "issue:44387", + "accept": false, + "reason": "Both concern quantization memory/offloading, but one is offloading malfunction and the other is increased reserved CUDA memory/OOM; different concrete bugs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 17, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:39692", + "issue:41720", + "issue:42947", + "issue:43381", + "issue:43425", + "issue:43854", + "issue:43957", + "issue:44155", + "issue:44291", + "issue:44292", + "issue:44387", + "issue:44402", + "issue:44488", + "issue:44756", + "issue:44912", + "issue:44960", + "issue:45216" + ], + "soft_pairs": [ + "issue:44402|issue:44488", + "issue:41720|issue:44155", + "issue:44387|issue:44756", + "issue:39692|issue:44960", + "issue:43425|issue:43957", + "issue:42947|issue:43381", + "issue:43854|issue:45216", + "issue:44292|issue:44912", + "issue:44291|issue:44756" + ], + "prepared_review_unit_hash": 
"7ece9605aa9003cc6f75ca36360af41156f01681905054d055e19675dc27bbbc", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12485, + "estimated_input_tokens": 3122, + "estimated_eval_tokens": 6500 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a mix of unrelated issues across loading, quantization, checkpointing, and model-specific regressions. None of the soft pairs look like the same underlying bug/change, so I would not merge any of them.", + "confidence": 0.96, + "canonical_issue_reason": "No single canonical issue: the items span distinct bugs and regressions, so there is no true duplicate anchor for the cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "If one standalone report had to be kept as the representative, issue 44387 is the strongest candidate because it is concrete and well-scoped; however, it does not subsume the rest of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44402", + "right": "issue:44488", + "accept": false, + "reason": "Both concern model loading failures, but one is specifically about tokenizer vocab inconsistencies while the other is a separate model-load report; the root cause is not clearly the same." + }, + { + "left": "issue:41720", + "right": "issue:44155", + "accept": false, + "reason": "Completely different problems: Qwen3 auto device mapping CUDA assert vs AudioFlamingo3 batched inference token/embedding leakage." 
+ }, + { + "left": "issue:44387", + "right": "issue:44756", + "accept": false, + "reason": "Int4 quantization OOM is unrelated to disabling mmap on Strix Halo; different failure modes and different triggers." + }, + { + "left": "issue:39692", + "right": "issue:44960", + "accept": false, + "reason": "SigLIP2 docs/model-processor mismatch and quantization failure is unrelated to a GLM5 issue." + }, + { + "left": "issue:43425", + "right": "issue:43957", + "accept": false, + "reason": "Both mention loading/compatibility, but one is a torch version incompatibility report and the other is a meta-device loading regression for specific models; not the same bug." + }, + { + "left": "issue:42947", + "right": "issue:43381", + "accept": false, + "reason": "Both involve gradient checkpointing, but one is about PEFT LoRA not enabling it effectively and the other is about checkpointing being invalid in eval mode; separate issues." + }, + { + "left": "issue:43854", + "right": "issue:45216", + "accept": false, + "reason": "Different models and different regressions: GLM-4.7-Flash unit-test loading vs Qwen3.5 save_pretrained checkpoint correctness." + }, + { + "left": "issue:44292", + "right": "issue:44912", + "accept": false, + "reason": "Both are quantization-related model-loading reports, but they affect different models and different quantization paths (NVFP4 vs MXFP4 fallback)." + }, + { + "left": "issue:44291", + "right": "issue:44756", + "accept": false, + "reason": "init_empty_weights argument mismatch is unrelated to mmap-induced OOM on Strix Halo; different code paths and symptoms." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 18, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43381", + "issue:43645", + "issue:43646", + "issue:43708", + "issue:43957", + "issue:44164", + "issue:44402", + "issue:44451", + "issue:44479", + "issue:44743", + "issue:44756", + "issue:44877", + "issue:44928", + "issue:44960", + "issue:45276", + "issue:45292", + "issue:45335" + ], + "soft_pairs": [ + "issue:45276|issue:45292", + "issue:44743|issue:44960", + "issue:43957|issue:44756", + "issue:43381|issue:44928", + "issue:44164|issue:44756", + "issue:43646|issue:43708", + "issue:44479|issue:44877", + "issue:45292|issue:45335", + "issue:43645|issue:44164", + "issue:44402|issue:44451" + ], + "prepared_review_unit_hash": "27f4dec53feec6e06ee90a3bb34d9afaef71d7a0e627c212fe91e110cec1ea4d", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12814, + "estimated_input_tokens": 3204, + "estimated_eval_tokens": 6664 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The only strong duplicate family here is the resize_token_embeddings/output_embeddings regression around Gemma/T5Gemma. 
The other soft pairs look like unrelated bugs in different subsystems and should stay separate.", + "confidence": 0.79, + "canonical_issue_reason": "issue 45292 is the broadest clean statement of the resize_token_embeddings/output_embeddings bug and best represents the related Gemma/T5Gemma reports.", + "canonical_pr_reason": null, + "best_issue_reason": "issue 45292 is the best overall representative because it captures the shared resizing bug without being too model-specific, while still covering the related variants.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45276", + "right": "issue:45292", + "accept": true, + "reason": "Same underlying resize_token_embeddings failure: both report output_embeddings not being updated, with 45276 just adding a Gemma4-specific manifestation." + }, + { + "left": "issue:44743", + "right": "issue:44960", + "accept": false, + "reason": "Different bugs: one is recurrent state reset with cache in modular_qwen3_5.py, the other is a GLM5 issue with no clear code-path overlap." + }, + { + "left": "issue:43957", + "right": "issue:44756", + "accept": false, + "reason": "Different problem domains: meta-device model loading failure versus mmap/OOM behavior on Strix Halo." + }, + { + "left": "issue:43381", + "right": "issue:44928", + "accept": false, + "reason": "Unrelated failures: gradient checkpointing in eval mode versus RLHF gradient explosion from 3D position_ids/SDPA fallback." + }, + { + "left": "issue:44164", + "right": "issue:44756", + "accept": false, + "reason": "save/from_pretrained extra_state handling is unrelated to disabling mmap to avoid OOM." + }, + { + "left": "issue:43646", + "right": "issue:43708", + "accept": false, + "reason": "Custom model initialization in notebooks is unrelated to Trainer resume_from_checkpoint max_steps calculation." 
+ }, + { + "left": "issue:44479", + "right": "issue:44877", + "accept": false, + "reason": "Different regressions in different areas: Qwen VL video input versus strict config loading for granite_speech." + }, + { + "left": "issue:45292", + "right": "issue:45335", + "accept": true, + "reason": "Same resize_token_embeddings bug family; 45335 is a model-specific manifestation where decoder.embed_tokens is not updated." + }, + { + "left": "issue:43645", + "right": "issue:44164", + "accept": false, + "reason": "Notebook custom-model initialization is not the same bug as extra_state save/load handling." + }, + { + "left": "issue:44402", + "right": "issue:44451", + "accept": false, + "reason": "Too little evidence they share the same concrete bug; both mention loading/tokenizer problems, but the affected models and failure modes are different." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 19, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:39404", + "issue:43257", + "issue:43425", + "issue:43611", + "issue:43645", + "issue:43716", + "issue:43828", + "issue:43906", + "issue:43931", + "issue:43957", + "issue:44265", + "issue:44530", + "issue:44589", + "issue:44756", + "issue:44863", + "issue:44898", + "issue:44912", + "issue:45084" + ], + "soft_pairs": [ + "issue:43425|issue:43828", + "issue:43257|issue:44863", + "issue:43828|issue:43957", + "issue:44589|issue:44756", + "issue:43906|issue:44530", + "issue:43425|issue:43716", + "issue:43931|issue:44863", + "issue:39404|issue:44912", + "issue:43611|issue:43645", + "issue:44530|issue:45084", + "issue:44265|issue:44898" + ], + 
"prepared_review_unit_hash": "ba265471f02343c7ba5146bb40111b0550faf23faf4cc672b01c06977ececb9f", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13664, + "estimated_input_tokens": 3416, + "estimated_eval_tokens": 7088 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the issues span unrelated model-loading, dtype/quantization, cache, and compilation bugs. None of the soft pairs look like true duplicates, so all soft edges are rejected.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43425", + "right": "issue:43828", + "accept": false, + "reason": "Different bugs: one is a broad Torch 2.10 compatibility report, the other is an autocast dtype-mismatch on Phi-tiny-MoE." + }, + { + "left": "issue:43257", + "right": "issue:44863", + "accept": false, + "reason": "Different model families and failure modes: Qwen3 MoE conversion/loading with accelerate+deepspeed vs NemotronH checkpoint loading." + }, + { + "left": "issue:43828", + "right": "issue:43957", + "accept": false, + "reason": "Both mention dtype/loading breakage, but the concrete paths differ: autocast on Phi-tiny-MoE vs meta-device model loading regressions." + }, + { + "left": "issue:44589", + "right": "issue:44756", + "accept": false, + "reason": "Unrelated symptoms: missing Float8 storage vs mmap/OOM on Strix Halo." 
+ }, + { + "left": "issue:43906", + "right": "issue:44530", + "accept": false, + "reason": "The first is an isolated repro of another issue; the second is a PagedAttentionCache linear_attention group-type crash on Qwen3.5. Different code paths and models." + }, + { + "left": "issue:43425", + "right": "issue:43716", + "accept": false, + "reason": "General Torch version incompatibility is not the same as a Mistral-3 image-preprocessor/model dtype mismatch." + }, + { + "left": "issue:43931", + "right": "issue:44863", + "accept": false, + "reason": "Different loading failures for different architectures: Qwen3-VL weight-shape mismatch vs NemotronH checkpoint incompatibility." + }, + { + "left": "issue:39404", + "right": "issue:44912", + "accept": false, + "reason": "Whisper pipeline return_language regression is unrelated to MXFP4 quantization fallback on gpt-oss-20b." + }, + { + "left": "issue:43611", + "right": "issue:43645", + "accept": false, + "reason": "Both are Transformers 5.0 regressions, but one concerns base_model_prefix loading and the other notebook-based custom model initialization; not the same concrete bug." + }, + { + "left": "issue:44530", + "right": "issue:45084", + "accept": false, + "reason": "PagedAttentionCache group-type crash and a template-node compilation error are different failures in different subsystems." + }, + { + "left": "issue:44265", + "right": "issue:44898", + "accept": false, + "reason": "torch.export.export failures with torch_compilable_check are unrelated to Perceiver interpolation failures at non-default resolution." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 20, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:40990", + "issue:42947", + "issue:43299", + "issue:43425", + "issue:43596", + "issue:43856", + "issue:43901", + "issue:43931", + "issue:43994", + "issue:44079", + "issue:44479", + "issue:44509", + "issue:44589", + "issue:44683", + "issue:44756", + "issue:44805", + "issue:44918", + "issue:45084" + ], + "soft_pairs": [ + "issue:43901|issue:44509", + "issue:43299|issue:43931", + "issue:44479|issue:44805", + "issue:44589|issue:45084", + "issue:43596|issue:44683", + "issue:43425|issue:44918", + "issue:43856|issue:44756", + "issue:40990|issue:42947", + "issue:43994|issue:44079" + ], + "prepared_review_unit_hash": "0a4a74facb6020f8a5f16d05512d5acc2a0db91a7e83abbb6292bc152a5ed388", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13037, + "estimated_input_tokens": 3260, + "estimated_eval_tokens": 6776 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster does not look like a duplicate set: the items cover unrelated bugs and docs regressions across loading, training, memory, storage, and compiler compatibility. 
None of the soft pairs appear to be the same underlying issue/change.", + "confidence": 0.88, + "canonical_issue_reason": "No clear canonical duplicate exists because the issues are heterogeneous. If a representative issue must be chosen, #43931 is the most concrete and reproducible model-loading report.", + "canonical_pr_reason": null, + "best_issue_reason": "#43931 is the strongest representative: it has a specific model name, a clear loading failure, and a well-scoped error signature, making it easier to triage than the broader or more ambiguous reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43901", + "right": "issue:44509", + "accept": false, + "reason": "Both are docs-related, but they refer to different pipeline API/docs regressions and do not describe the same change." + }, + { + "left": "issue:43299", + "right": "issue:43931", + "accept": false, + "reason": "Both involve Qwen3-VL loading, but one is a MoE-loading breakage and the other is a weight-shape mismatch on a different model variant; not enough to treat as the same bug." + }, + { + "left": "issue:44479", + "right": "issue:44805", + "accept": false, + "reason": "Both mention shape-related failures in multimodal inputs, but one is a video-input regression across several models and the other is a specific mask/tensor indexing mismatch." + }, + { + "left": "issue:44589", + "right": "issue:45084", + "accept": false, + "reason": "These are unrelated failure modes: missing Float8 storage vs compilation of non-template nodes." + }, + { + "left": "issue:43596", + "right": "issue:44683", + "accept": false, + "reason": "Different subsystems and error classes: ZeRO-3/Bert indexing vs flex_attention compilation on newer Torch." + }, + { + "left": "issue:43425", + "right": "issue:44918", + "accept": false, + "reason": "Torch version incompatibility is not the same bug as TRL SFT input-embedding unpacking failure." 
+ }, + { + "left": "issue:43856", + "right": "issue:44756", + "accept": false, + "reason": "Both mention memory/OOM, but they are different causes in different areas: Qwen3 MoE training memory inefficiency vs Strix Halo mmap OOM." + }, + { + "left": "issue:40990", + "right": "issue:42947", + "accept": false, + "reason": "Unrelated topics: perplexity on a model/eval setup vs gradient checkpointing ineffectiveness with PEFT LoRA." + }, + { + "left": "issue:43994", + "right": "issue:44079", + "accept": false, + "reason": "A SigLIP2 inference problem and a ModelOutput key-assignment bug are not the same concrete issue." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 21, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:32090", + "issue:36032", + "issue:39404", + "issue:40990", + "issue:43421", + "issue:43425", + "issue:43452", + "issue:43582", + "issue:43632", + "issue:43883", + "issue:43957", + "issue:44164", + "issue:44479", + "issue:44488", + "issue:44671", + "issue:44991", + "issue:45081", + "issue:45161" + ], + "soft_pairs": [ + "issue:44671|issue:44991", + "issue:39404|issue:44671", + "issue:36032|issue:43452", + "issue:44991|issue:45081", + "issue:43582|issue:43632", + "issue:32090|issue:39404", + "issue:44488|issue:45081", + "issue:43425|issue:45161", + "issue:43957|issue:44164", + "issue:43883|issue:44488", + "issue:40990|issue:43421", + "issue:44164|issue:44479" + ], + "prepared_review_unit_hash": "a9c5a5dbf39934a3c132c6cb044d5e0edd9d5df12813ea7d2aed8d48e185a5fa", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + 
"serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 12, + "serialized_chars": 14087, + "estimated_input_tokens": 3522, + "estimated_eval_tokens": 7300 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are broadly related to Transformers v5 regressions, mostly around tokenizers/model loading, but the paired issues describe different models, symptoms, and code paths. None of the soft edges look like true duplicates.", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "No single issue cleanly represents the cluster because it is heterogeneous; if forced, issue 44991 is the closest broad tokenizer-loading regression, but it is still too specific to serve as a real canonical.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44671", + "right": "issue:44991", + "accept": false, + "reason": "Both mention v5 model behavior, but one is CamemBERT MLM prediction correctness and the other is tokenizer loading for a specific model; different bugs." + }, + { + "left": "issue:39404", + "right": "issue:44671", + "accept": false, + "reason": "Whisper pipeline return_language regression is unrelated to CamemBERT masked-LM prediction errors." + }, + { + "left": "issue:36032", + "right": "issue:43452", + "accept": false, + "reason": "T5 tokenizer add_special_tokens conflict and gguf_file/from_pretrained breakage are different tokenizer-loading failures with different triggers." + }, + { + "left": "issue:44991", + "right": "issue:45081", + "accept": false, + "reason": "est-roberta tokenizer loading failure is not the same as the Mistral regex patch crash; different models and failure points." 
+ }, + { + "left": "issue:43582", + "right": "issue:43632", + "accept": false, + "reason": "Apple Silicon caching_allocator_warmup TypeError is a runtime/device-specific error, not the same as the _is_hf_initialized v5 flag regression." + }, + { + "left": "issue:32090", + "right": "issue:39404", + "accept": false, + "reason": "Trainer GPU broadcast NoneType error is unrelated to Whisper pipeline return_language behavior." + }, + { + "left": "issue:44488", + "right": "issue:45081", + "accept": false, + "reason": "Unable to load cjvt/sleng-bert is a tokenizer/model loading issue, but not the same as the Mistral backend_tokenizer AttributeError." + }, + { + "left": "issue:43425", + "right": "issue:45161", + "accept": false, + "reason": "Torch 2.10 incompatibility is an environment/version issue, not the same as GPT-OSS MoE tensor-parallel failure." + }, + { + "left": "issue:43957", + "right": "issue:44164", + "accept": false, + "reason": "meta-device loading breakage and save/from_pretrained extra_state handling involve different mechanisms and errors." + }, + { + "left": "issue:43883", + "right": "issue:44488", + "accept": false, + "reason": "Molmo missing all_tied_weights_keys and sleng-bert loading failure are distinct model-specific issues." + }, + { + "left": "issue:40990", + "right": "issue:43421", + "accept": false, + "reason": "High perplexity on gpt-oss-20b and runtime post-processor update behavior are unrelated problems." + }, + { + "left": "issue:44164", + "right": "issue:44479", + "accept": false, + "reason": "extra_state save/load handling and Qwen video input regression affect different subsystems and code paths." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 22, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:34567", + "issue:40444", + "issue:42947", + "issue:43541", + "issue:43611", + "issue:43643", + "issue:43644", + "issue:43646", + "issue:43716", + "issue:43883", + "issue:44206", + "issue:44220", + "issue:44488", + "issue:44530", + "issue:44756", + "issue:44987", + "issue:44991", + "issue:45084" + ], + "soft_pairs": [ + "issue:42947|issue:44756", + "issue:43716|issue:45084", + "issue:44488|issue:44987", + "issue:43611|issue:43644", + "issue:44206|issue:44220", + "issue:43883|issue:44991", + "issue:34567|issue:40444", + "issue:43541|issue:45084", + "issue:43644|issue:43646", + "issue:43643|issue:44530" + ], + "prepared_review_unit_hash": "8936f938395211e2636331c041a32e66c6f7e281d388e87a5dda36941f001b45", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13314, + "estimated_input_tokens": 3329, + "estimated_eval_tokens": 6914 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is mostly heterogeneous. Several pairs are only loosely related by subsystem or release version, but none clearly appear to be the same concrete bug/change. 
I rejected all soft edges; the closest one is the audio feature-extraction pair, but the evidence is still too weak.", + "confidence": 0.79, + "canonical_issue_reason": "Issue 43611 is the clearest, broadest standalone regression report in the set, centered on model loading in Transformers 5.0.0.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43611 is the most representative and well-scoped issue here if one must pick an anchor, though the overall set is not a true duplicate cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42947", + "right": "issue:44756", + "accept": false, + "reason": "Different bugs: one is about `num_input_tokens_seen`/training accounting, the other about mmap/OOM on Strix Halo." + }, + { + "left": "issue:43716", + "right": "issue:45084", + "accept": false, + "reason": "Different failure modes in different code paths: image preprocessor dtype mismatch vs template compilation error." + }, + { + "left": "issue:44488", + "right": "issue:44987", + "accept": false, + "reason": "Both are loading failures, but for different models and likely different causes; no shared concrete bug is evident." + }, + { + "left": "issue:43611", + "right": "issue:43644", + "accept": false, + "reason": "Both are Transformers 5.0.0 regressions, but one is about `base_model_prefix` loading and the other about non-persistent buffers being initialized incorrectly." + }, + { + "left": "issue:44206", + "right": "issue:44220", + "accept": false, + "reason": "Both involve audio feature extraction, but one is specifically about an unsupported `center` argument in `LasrFeatureExtractor`; the other is a broader `_torch_extract_fbank_features()` issue. Not enough to treat as the same bug." + }, + { + "left": "issue:43883", + "right": "issue:44991", + "accept": false, + "reason": "Unrelated model/tokenizer loading issues affecting different models and components." 
+ }, + { + "left": "issue:34567", + "right": "issue:40444", + "accept": false, + "reason": "Completely different areas: trainer token accounting vs multimodal IterableDataset failure." + }, + { + "left": "issue:43541", + "right": "issue:45084", + "accept": false, + "reason": "Different problems: torch dynamo tracing on Mixtral grouped_mm vs template compilation error." + }, + { + "left": "issue:43644", + "right": "issue:43646", + "accept": false, + "reason": "Related to Transformers 5.0.0 initialization/loading, but the symptoms and likely root causes differ." + }, + { + "left": "issue:43643", + "right": "issue:44530", + "accept": false, + "reason": "Different subsystems: AutoConfig fields with `trust_remote_code` vs PagedAttentionCache linear_attention failure." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 23, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36032", + "issue:39404", + "issue:42915", + "issue:43262", + "issue:43317", + "issue:43377", + "issue:43404", + "issue:43452", + "issue:43482", + "issue:43645", + "issue:43825", + "issue:44451", + "issue:44466", + "issue:44530", + "issue:44849", + "issue:44987", + "issue:44991", + "issue:45325" + ], + "soft_pairs": [ + "issue:44451|issue:44987", + "issue:43317|issue:43482", + "issue:36032|issue:42915", + "issue:43452|issue:43482", + "issue:43262|issue:43377", + "issue:39404|issue:43825", + "issue:42915|issue:44530", + "issue:44849|issue:45325", + "issue:36032|issue:43645", + "issue:44987|issue:44991", + "issue:43404|issue:44466" + ], + "prepared_review_unit_hash": 
"e6f31c14914534f0097182d5ed8395b6baa008044507d0a58515067da0e5e51e", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13897, + "estimated_input_tokens": 3475, + "estimated_eval_tokens": 7206 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a grab bag of unrelated Transformers regressions across tokenizers, Whisper/pipeline behavior, audio processors, GGUF loading, tied weights, Qwen variants, and custom-model notebook init. None of the soft pairs look like the same concrete bug or change, so all are rejected; there are no PRs here.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44451", + "right": "issue:44987", + "accept": false, + "reason": "Both are model-loading failures in v5, but they affect different checkpoints and likely different failure points; not enough evidence of the same underlying bug." + }, + { + "left": "issue:43317", + "right": "issue:43482", + "accept": false, + "reason": "One is a dequantized model offload/device_map issue, the other is a GGUF loading failure for Qwen2.5; different code paths and symptoms." + }, + { + "left": "issue:36032", + "right": "issue:42915", + "accept": false, + "reason": "T5 tokenizer import/attribute conflict is unrelated to Qwen3Moe + FineGrainedFP8Config loading failure." 
+ }, + { + "left": "issue:43452", + "right": "issue:43482", + "accept": false, + "reason": "Both mention GGUF, but one is a generic `gguf_file` API break in auto loaders while the other is a Qwen2.5-specific v5 loading failure; not the same concrete bug." + }, + { + "left": "issue:43262", + "right": "issue:43377", + "accept": false, + "reason": "Audio chat-template sampling-rate defaulting and MIMI batch-vs-single padding-mask mismatch are different bugs in different components." + }, + { + "left": "issue:39404", + "right": "issue:43825", + "accept": false, + "reason": "Whisper `return_language` regression and pipeline translation-task error messaging are distinct pipeline issues." + }, + { + "left": "issue:42915", + "right": "issue:44530", + "accept": false, + "reason": "Different models and different failures: FP8 config loading vs PagedAttentionCache linear_attention crash." + }, + { + "left": "issue:44849", + "right": "issue:45325", + "accept": false, + "reason": "Qwen3.5 hidden-state bug and Qwen2.5-VL rope-index scaling bug are unrelated model-specific defects." + }, + { + "left": "issue:36032", + "right": "issue:43645", + "accept": false, + "reason": "Tokenizer method conflict in T5 is unrelated to custom model definition/initialization breakage in Jupyter notebooks." + }, + { + "left": "issue:44987", + "right": "issue:44991", + "accept": false, + "reason": "Both are v5 loading regressions, but they involve different models and likely different tokenizer/model-loading failures; too broad to merge." + }, + { + "left": "issue:43404", + "right": "issue:44466", + "accept": false, + "reason": "One is a missing `lm_head` tie in Mistral3ForConditionalGeneration, the other is device-dependent serialization of tied weights; related theme, but not the same bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 24, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41553", + "issue:43322", + "issue:43377", + "issue:43540", + "issue:43582", + "issue:43645", + "issue:43646", + "issue:43819", + "issue:43994", + "issue:44220", + "issue:44403", + "issue:44451", + "issue:44534", + "issue:44610", + "issue:44683", + "issue:45081", + "issue:45216", + "issue:45357" + ], + "soft_pairs": [ + "issue:43646|issue:44534", + "issue:44403|issue:45081", + "issue:43994|issue:44610", + "issue:43645|issue:44534", + "issue:43377|issue:43819", + "issue:44451|issue:45081", + "issue:43322|issue:43540", + "issue:44220|issue:44683", + "issue:45216|issue:45357", + "issue:41553|issue:43582" + ], + "prepared_review_unit_hash": "2e87fbc4e1bda6df2a75db829113f4b637913be5178851cf5bb7596aa990c863", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13333, + "estimated_input_tokens": 3334, + "estimated_eval_tokens": 6924 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most pairs are clearly unrelated despite superficial theme overlap. 
The only likely duplicate-style match is the Qwen3.5 `save_pretrained` regression pair, where the second issue looks like a continuation of the same incorrect visual-encoder-key saving bug.", + "confidence": 0.89, + "canonical_issue_reason": "issue:45216 is the better canonical issue: it is the earlier and broader report of the Qwen3.5 `save_pretrained` regression, and issue:45357 reads like a follow-up continuation of the same serialization bug.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45216 is the strongest issue to anchor the cluster because it describes the core regression more generally and has follow-up references, while issue:45357 is narrower and version-specific.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43646", + "right": "issue:44534", + "accept": false, + "reason": "Different failure modes: custom model init in notebooks vs non-persistent buffers being corrupted. Same version surface, but not the same underlying bug." + }, + { + "left": "issue:44403", + "right": "issue:45081", + "accept": false, + "reason": "One is a loading noise/logging complaint, the other is an AttributeError crash in Mistral regex patching. Not the same code-path problem." + }, + { + "left": "issue:43994", + "right": "issue:44610", + "accept": false, + "reason": "Siglip2 inference correctness and OmDet-Turbo image sizing are unrelated model/processor bugs." + }, + { + "left": "issue:43645", + "right": "issue:44534", + "accept": false, + "reason": "Notebook custom-model initialization regression is distinct from the buffer initialization/junk data issue." + }, + { + "left": "issue:43377", + "right": "issue:43819", + "accept": false, + "reason": "MIMI padding-mask batching mismatch and DAC `from_latents`/STE mismatch concern different models and different invariants." 
+ }, + { + "left": "issue:44451", + "right": "issue:45081", + "accept": false, + "reason": "ScandiBERT loading failure and Mistral tokenizer regex patch crash are separate tokenizer/model issues." + }, + { + "left": "issue:43322", + "right": "issue:43540", + "accept": false, + "reason": "Llava-Next segfault and Qwen3OmniMoe video ValueError are different multimodal pipelines and different symptom classes." + }, + { + "left": "issue:44220", + "right": "issue:44683", + "accept": false, + "reason": "Audio feature extraction failure and compiled flex_attention torch-compatibility failure are unrelated subsystems." + }, + { + "left": "issue:45216", + "right": "issue:45357", + "accept": true, + "reason": "Both report the same Qwen3.5 `save_pretrained` serialization bug, specifically incorrect visual encoder keys; the second looks like a continuation of the same regression." + }, + { + "left": "issue:41553", + "right": "issue:43582", + "accept": false, + "reason": "AutoTokenizer Voxtral error messaging and Apple Silicon allocator warmup TypeError are unrelated issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 25, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:40990", + "issue:42915", + "issue:43296", + "issue:43404", + "issue:43531", + "issue:43632", + "issue:44062", + "issue:44265", + "issue:44291", + "issue:44479", + "issue:44610", + "issue:44756", + "issue:44811", + "issue:44987", + "issue:45081", + "issue:45084", + "issue:45127", + "issue:45325" + ], + "soft_pairs": [ + "issue:42915|issue:45084", + "issue:44265|issue:44610", + "issue:43632|issue:44756", + "issue:44987|issue:45081", + "issue:43296|issue:43531", + "issue:43404|issue:45127", + "issue:44062|issue:44987", + "issue:44479|issue:45325", + "issue:40990|issue:44811", + "issue:42915|issue:44291" + ], + "prepared_review_unit_hash": "24b857bc7f851cd6217d09835e10eddd7fc8153660660f264aa36681bb3ae3fe", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13579, + "estimated_input_tokens": 3395, + "estimated_eval_tokens": 7046 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly distinct regression reports across different models and code paths; no soft pair looks like a true duplicate, so all candidate merges are rejected.", + "confidence": 0.95, + "canonical_issue_reason": "Issue 44291 is the closest thing to a cluster 
anchor because it describes a broad v5 loading/init regression around `_is_hf_initialized`, which is the most general theme among the loading-related reports.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44291 is the best representative issue: broad, root-cause-like, and centered on a core transformers v5 loading failure rather than a model-specific symptom.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42915", + "right": "issue:45084", + "accept": false, + "reason": "Different failures and subsystems: Qwen3Moe FP8 config vs a template compilation TypeError." + }, + { + "left": "issue:44265", + "right": "issue:44610", + "accept": false, + "reason": "Unrelated bugs: torch.export with torch_compilable_check vs OmDet-Turbo image size mismatch." + }, + { + "left": "issue:43632", + "right": "issue:44756", + "accept": false, + "reason": "Different code paths and symptoms: `_is_hf_initialized` regression vs mmap/OOM behavior on Strix Halo." + }, + { + "left": "issue:44987", + "right": "issue:45081", + "accept": false, + "reason": "Both are loading-related, but one is a model load failure and the other is a Mistral tokenizer regex crash; not the same bug." + }, + { + "left": "issue:43296", + "right": "issue:43531", + "accept": false, + "reason": "Different models and defects: PaddleOCR-VL load failure vs Qwen3-MoE sliding window behavior." + }, + { + "left": "issue:43404", + "right": "issue:45127", + "accept": false, + "reason": "Both mention tied weights, but one is an untied lm_head bug and the other is LoRA merge collapse with extended vocab; not the same concrete issue." + }, + { + "left": "issue:44062", + "right": "issue:44987", + "accept": false, + "reason": "Generic loading regressions, but the concrete errors and affected code paths are different." 
+ }, + { + "left": "issue:44479", + "right": "issue:45325", + "accept": false, + "reason": "Same broad VL area, but one is a video-input regression and the other is rope-index scaling for still images." + }, + { + "left": "issue:40990", + "right": "issue:44811", + "accept": false, + "reason": "Completely unrelated: perplexity evaluation on gpt-oss-20b vs Whisper batch_decode special-token handling." + }, + { + "left": "issue:42915", + "right": "issue:44291", + "accept": false, + "reason": "Both are model-loading regressions, but the concrete failures differ: FP8 config vs unexpected `_is_hf_initialized` argument." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 26, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36032", + "issue:40444", + "issue:42915", + "issue:43317", + "issue:43531", + "issue:43611", + "issue:43632", + "issue:43742", + "issue:43856", + "issue:44534", + "issue:44589", + "issue:44683", + "issue:44821", + "issue:44877", + "issue:45020", + "issue:45084", + "issue:45325" + ], + "soft_pairs": [ + "issue:43317|issue:43856", + "issue:42915|issue:44589", + "issue:36032|issue:43742", + "issue:43632|issue:44534", + "issue:43611|issue:44534", + "issue:40444|issue:45325", + "issue:43531|issue:44877", + "issue:44821|issue:45084", + "issue:44683|issue:45020" + ], + "prepared_review_unit_hash": "e88a022b2baa47183d59438968480cc63a533008efee82749ba6464a0e156ef8", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + 
"node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12330, + "estimated_input_tokens": 3083, + "estimated_eval_tokens": 6422 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set: it mixes many unrelated Transformers regressions and model-specific bugs (tokenizer loading, Qwen/Qwen2.5-VL issues, v5 loading changes, config/loading edge cases).", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:45020 is the broadest umbrella item in the set and the most plausible representative if one issue must be chosen, but it still does not duplicate the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43317", + "right": "issue:43856", + "accept": false, + "reason": "Both involve Qwen3/MoE and memory/offload, but one is a dequantized model loading failure with device_map offload and the other is training memory usage; different bugs and code paths." + }, + { + "left": "issue:42915", + "right": "issue:44589", + "accept": false, + "reason": "Qwen3MoE + FP8 appears in both, but one is a FineGrainedFP8Config failure and the other is a missing Float8 storage object error; these are distinct failures." + }, + { + "left": "issue:36032", + "right": "issue:43742", + "accept": false, + "reason": "T5 tokenizer special-token conflict and MobileLLM-125M loading key error are unrelated model-loading problems." + }, + { + "left": "issue:43632", + "right": "issue:44534", + "accept": false, + "reason": "Both are Transformers v5 regressions, but one is about `_is_hf_initialized` and the other about non-persistent buffers being filled with junk; not the same bug." 
+ }, + { + "left": "issue:43611", + "right": "issue:44534", + "accept": false, + "reason": "Loading via `base_model_prefix` and non-persistent buffer initialization are different regression surfaces and not plausibly one merged fix." + }, + { + "left": "issue:40444", + "right": "issue:45325", + "accept": false, + "reason": "Both touch Qwen2.5-VL, but one is an IterableDataset/multi-image finetuning failure and the other is a rope index bug for still-image temporal positions; separate issues." + }, + { + "left": "issue:43531", + "right": "issue:44877", + "accept": false, + "reason": "Qwen3-MoE sliding-window behavior and granite_speech strict-config loading are unrelated." + }, + { + "left": "issue:44821", + "right": "issue:45084", + "accept": false, + "reason": "AutoImageProcessor URL loading and a non-template-node compile error are unrelated subsystems and failure modes." + }, + { + "left": "issue:44683", + "right": "issue:45020", + "accept": false, + "reason": "Compiled flex_attention on torch >= 2.9 and remote_code model breakage are different compatibility regressions with different code paths." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 27, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33666", + "issue:35707", + "issue:36032", + "issue:40444", + "issue:41720", + "issue:43377", + "issue:43421", + "issue:43425", + "issue:43792", + "issue:43818", + "issue:44403", + "issue:44568", + "issue:44589", + "issue:44829", + "issue:44991", + "issue:45357", + "issue:45479", + "issue:45491" + ], + "soft_pairs": [ + "issue:43421|issue:44568", + "issue:43818|issue:45357", + "issue:43425|issue:44403", + "issue:43792|issue:44589", + "issue:36032|issue:41720", + "issue:44403|issue:44991", + "issue:44829|issue:45479", + "issue:43377|issue:45491", + "issue:33666|issue:40444", + "issue:35707|issue:40444" + ], + "prepared_review_unit_hash": "a02d8785ed8eeb2c864a56a7047101c6837882a40e030a72a922cccff9a783f6", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13618, + "estimated_input_tokens": 3405, + "estimated_eval_tokens": 7066 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items do not form a duplicate cluster; they span unrelated bugs/features across tokenizers, vision models, training, loading, and runtime compatibility. 
All soft-similarity pairs look like false positives.", + "confidence": 0.94, + "canonical_issue_reason": "No single canonical issue: the issues cover distinct subsystems and failure modes, so none is a clear duplicate anchor for the group.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44403 is the broadest/general loading-related report, but only as a loose representative; it is not a true canonical duplicate target.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43421", + "right": "issue:44568", + "accept": false, + "reason": "Both are tokenizer-related, but one is about runtime post-processor updates and the other is a specific BOS/EOS regression in mdeberta-v3-base; different bugs and fixes." + }, + { + "left": "issue:43818", + "right": "issue:45357", + "accept": false, + "reason": "Different Qwen/VL problems: missing temporal attention and weight sharing vs incorrect visual encoder keys on save_pretrained." + }, + { + "left": "issue:43425", + "right": "issue:44403", + "accept": false, + "reason": "Torch 2.10 incompatibility is an environment/version issue, while 44403 is about noisy loading logs; not the same bug." + }, + { + "left": "issue:43792", + "right": "issue:44589", + "accept": false, + "reason": "Whisper failing to run and missing Float8 storage are unrelated failure modes with different code paths." + }, + { + "left": "issue:36032", + "right": "issue:41720", + "accept": false, + "reason": "T5Tokenizer constructor conflict and Qwen3 auto device-map cudaErrorAssert are unrelated issues." + }, + { + "left": "issue:44403", + "right": "issue:44991", + "accept": false, + "reason": "Both touch loading, but one is benign log noise and the other is a concrete tokenizer loading failure for a specific model." 
+ }, + { + "left": "issue:44829", + "right": "issue:45479", + "accept": false, + "reason": "Both concern degenerate sequence-classification behavior, but the triggers differ: flash_attention_3 vs problem_type/num_labels loss configuration." + }, + { + "left": "issue:43377", + "right": "issue:45491", + "accept": false, + "reason": "Both involve batched/masked sequence behavior, but they affect different models and mechanisms: MIMI padding mask vs Gemma3 sliding-window all-padding windows." + }, + { + "left": "issue:33666", + "right": "issue:40444", + "accept": false, + "reason": "Both are Qwen2-VL fine-tuning reports, but one is multi-GPU training and the other is iterable-dataset/multiple-images failure; not the same bug." + }, + { + "left": "issue:35707", + "right": "issue:40444", + "accept": false, + "reason": "Progressive generation with inputs_embeds/past_key_values is unrelated to Qwen2.5-VL iterable-dataset multi-image failures." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 28, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41720", + "issue:42175", + "issue:43296", + "issue:43482", + "issue:43525", + "issue:43531", + "issue:43632", + "issue:43644", + "issue:43749", + "issue:43881", + "issue:44079", + "issue:44265", + "issue:44291", + "issue:44488", + "issue:44534", + "issue:44589", + "issue:45072", + "issue:45081" + ], + "soft_pairs": [ + "issue:44079|issue:44534", + "issue:44291|issue:44589", + "issue:43482|issue:43525", + "issue:43749|issue:44079", + "issue:43881|issue:44488", + "issue:43644|issue:44079", + "issue:44079|issue:45072", + "issue:43632|issue:45072", + 
"issue:43531|issue:44291", + "issue:42175|issue:43296", + "issue:44265|issue:45072", + "issue:41720|issue:45081" + ], + "prepared_review_unit_hash": "4c1f5f1c3305431cc0d0ec5977a470568ea0cde6db2e1d557525e1ce04be6d15", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 12, + "serialized_chars": 13941, + "estimated_input_tokens": 3486, + "estimated_eval_tokens": 7228 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of unrelated Transformers/vLLM bug reports. The only clear duplicate-looking pair is the non-persistent buffers regression (issues 43644 and 44534), while the provided soft-edge candidates are all different bugs or different model-specific failures and should not be merged.", + "confidence": 0.86, + "canonical_issue_reason": "Issue 43644 is the strongest canonical issue: it is the original, clearer report of the non-persistent buffers regression and has more discussion/inbound attention than the later reworded duplicate 44534.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43644 is the best representative issue for the cluster because it captures the clearest duplicated bug and has the most established thread.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44079", + "right": "issue:44534", + "accept": false, + "reason": "Both are Transformers v5 regressions, but they describe different failures: ModelOutput key assignment vs non-persistent buffer initialization. Different code paths." 
+ }, + { + "left": "issue:44291", + "right": "issue:44589", + "accept": false, + "reason": "Both mention loading/type errors under v5, but one is about the unexpected _is_hf_initialized argument in init_empty_weights and the other about missing Float8 storage support. Not the same bug." + }, + { + "left": "issue:43482", + "right": "issue:43525", + "accept": false, + "reason": "Different model-specific loading problems: Qwen2.5-GGUF with Transformers v5 versus Llama4Config missing pad_token_id. Same area, different defects." + }, + { + "left": "issue:43749", + "right": "issue:44079", + "accept": false, + "reason": "FSDP CPU-efficient loading failure and ModelOutput key assignment failure are unrelated regressions." + }, + { + "left": "issue:43881", + "right": "issue:44488", + "accept": false, + "reason": "Different models and symptoms; these are separate loading failures, not the same underlying bug." + }, + { + "left": "issue:43644", + "right": "issue:44079", + "accept": false, + "reason": "Non-persistent buffer junk initialization is unrelated to ModelOutput key handling." + }, + { + "left": "issue:44079", + "right": "issue:45072", + "accept": false, + "reason": "ModelOutput key assignment and bfloat16 dtype mismatch in inference are different issues." + }, + { + "left": "issue:43632", + "right": "issue:45072", + "accept": false, + "reason": "_is_hf_initialized flag breakage is a different Transformers v5 issue from the dtype mismatch in inference." + }, + { + "left": "issue:43531", + "right": "issue:44291", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior and init_empty_weights/_is_hf_initialized argument handling are unrelated." + }, + { + "left": "issue:42175", + "right": "issue:43296", + "accept": false, + "reason": "Backend-install packaging issue for TensorFlow vs model loading failure for PaddleOCR-VL; different problem domains." 
+ }, + { + "left": "issue:44265", + "right": "issue:45072", + "accept": false, + "reason": "torch.export/torch_compilable_check failure is a different code path from the bfloat16 dtype mismatch bug." + }, + { + "left": "issue:41720", + "right": "issue:45081", + "accept": false, + "reason": "A800 cudaErrorAssert during Qwen3 auto device mapping is unrelated to the Mistral tokenizer backend_tokenizer AttributeError." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 29, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:40444", + "issue:42915", + "issue:43454", + "issue:43482", + "issue:43611", + "issue:43646", + "issue:43792", + "issue:43828", + "issue:43957", + "issue:43994", + "issue:44387", + "issue:44466", + "issue:44617", + "issue:44683", + "issue:44898", + "issue:45325", + "issue:45381" + ], + "soft_pairs": [ + "issue:40444|issue:45381", + "issue:44387|issue:44617", + "issue:43828|issue:44683", + "issue:45325|issue:45381", + "issue:43611|issue:43646", + "issue:43994|issue:44898", + "issue:42915|issue:43482", + "issue:43454|issue:44466", + "issue:43792|issue:43957" + ], + "prepared_review_unit_hash": "2cada510c6043fe0b2fe95e33ccd81afdc95ed6feeec753751cbc55027d99033", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12543, + "estimated_input_tokens": 3136, + "estimated_eval_tokens": 6528 + }, + "overflow_policy": "truncate_then_skip", 
+ "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No clear duplicates here: the soft-similarity pairs are mostly broad subsystem overlaps, not the same concrete bug/change. I would reject all edges and treat this as a heterogeneous set of unrelated issues.", + "confidence": 0.83, + "canonical_issue_reason": "No single canonical issue for the cluster; the items span unrelated regressions and only share broad themes (Qwen2.5-VL position ids, tied weights, loading/init regressions).", + "canonical_pr_reason": null, + "best_issue_reason": "If forced to pick the closest anchor, 45325 is the most code-path-specific report (Qwen2.5-VL get_rope_index / position_ids), but it still does not subsume the other issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:40444", + "right": "issue:45381", + "accept": false, + "reason": "Both are Qwen2.5-VL, but one is finetuning with multiple images and the other is video-input position ids; related area, not the same bug." + }, + { + "left": "issue:44387", + "right": "issue:44617", + "accept": false, + "reason": "Different problems: a Transformers 5.x int4 memory regression vs a model-specific Sam3Video OOM." + }, + { + "left": "issue:43828", + "right": "issue:44683", + "accept": false, + "reason": "Different failure modes and code paths: autocast dtype mismatch vs compiled flex_attention on torch>=2.9." + }, + { + "left": "issue:45325", + "right": "issue:45381", + "accept": false, + "reason": "Same model and helper area, but one is still-image temporal scaling and the other is video vision_position_ids; too different to merge as one duplicate." + }, + { + "left": "issue:43611", + "right": "issue:43646", + "accept": false, + "reason": "Both involve Transformers 5 loading/init regressions, but one is base_model_prefix handling and the other is custom model initialization; not clearly the same bug." 
+ }, + { + "left": "issue:43994", + "right": "issue:44898", + "accept": false, + "reason": "Different vision-model regressions with different symptoms and models; only broad similarity in classification/image handling." + }, + { + "left": "issue:42915", + "right": "issue:43482", + "accept": false, + "reason": "Unrelated models and failures: Qwen3Moe FP8 config vs Qwen2.5 GGUF loading on Transformers v5." + }, + { + "left": "issue:43454", + "right": "issue:44466", + "accept": false, + "reason": "Both mention lm_head/tied weights, but one is a model config failing to tie weights and the other is device-dependent serialization behavior; related family, not clearly the same bug." + }, + { + "left": "issue:43792", + "right": "issue:43957", + "accept": false, + "reason": "Different reports: whisper-large-v2 runtime failure vs meta-device loading regression affecting some models on Transformers 5+." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 30, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:34634", + "issue:35707", + "issue:43262", + "issue:43317", + "issue:43526", + "issue:43606", + "issue:43701", + "issue:43746", + "issue:43873", + "issue:43881", + "issue:43883", + "issue:44186", + "issue:44265", + "issue:44291", + "issue:44877", + "issue:44991", + "issue:45325" + ], + "soft_pairs": [ + "issue:43526|issue:43883", + "issue:43701|issue:45325", + "issue:43881|issue:44991", + "issue:43317|issue:43873", + "issue:43262|issue:43746", + "issue:44291|issue:44877", + "issue:43606|issue:44265", + "issue:34634|issue:35707", + "issue:44186|issue:44265" + ], + "prepared_review_unit_hash": 
"098dc4a96059c07005fd222b1dc563eea43affe3dad045442866a546d8778603", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12620, + "estimated_input_tokens": 3155, + "estimated_eval_tokens": 6566 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set does not form a tight duplicate cluster: the issues span unrelated areas like audio processors, tokenizer loading, model offloading, export, and vision-language internals. No PRs are present, and none of the soft pairs look like the same concrete bug.", + "confidence": 0.27, + "canonical_issue_reason": "If forced to choose, issue #43873 is the broadest and most reusable representative: it concerns offloading/quantization, is still open, and has inbound references. That said, the cluster is too heterogeneous for a strong canonical issue.", + "canonical_pr_reason": null, + "best_issue_reason": "#43873 is the best single issue candidate because it is the only open, externally referenced, broadly scoped bug here and could subsume nearby offloading-related reports better than the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43526", + "right": "issue:43883", + "accept": false, + "reason": "Different components and failure modes: BeitImageProcessorFast label reduction vs Molmo weight-tying attribute error." + }, + { + "left": "issue:43701", + "right": "issue:45325", + "accept": false, + "reason": "Unrelated topics: checkpoint resume key mismatch vs Qwen2.5-VL rope/position-id scaling." 
+ }, + { + "left": "issue:43881", + "right": "issue:44991", + "accept": false, + "reason": "Both are loading problems, but one is glm-4v-9b model load failure and the other is tokenizer loading for a different model; no shared bug path." + }, + { + "left": "issue:43317", + "right": "issue:43873", + "accept": false, + "reason": "Both mention offload/quantization, but the concrete problems differ: dequantized model loading with device_map=auto vs general offloading behavior." + }, + { + "left": "issue:43262", + "right": "issue:43746", + "accept": false, + "reason": "Audio sampling-rate defaulting and PEFT checkpoint loading are unrelated." + }, + { + "left": "issue:44291", + "right": "issue:44877", + "accept": false, + "reason": "Both touch loading in recent transformers, but one is an init_empty_weights argument regression and the other is strict config handling; different code paths." + }, + { + "left": "issue:43606", + "right": "issue:44265", + "accept": false, + "reason": "Bark CPU-offload device mismatch and torch.export/torch_compilable_check export failures are distinct issues." + }, + { + "left": "issue:34634", + "right": "issue:35707", + "accept": false, + "reason": "Voice preset handling in BarkProcessor is unrelated to progressive generation with inputs_embeds/past_key_values." + }, + { + "left": "issue:44186", + "right": "issue:44265", + "accept": false, + "reason": "LayoutLMv2 tokenizer crashes and torch.export failures come from different subsystems and different failure mechanisms." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 31, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:35707", + "issue:39692", + "issue:40444", + "issue:41720", + "issue:43296", + "issue:43377", + "issue:43454", + "issue:43550", + "issue:43606", + "issue:43632", + "issue:43819", + "issue:44164", + "issue:44610", + "issue:44877", + "issue:44898", + "issue:44991", + "issue:45072", + "issue:45325" + ], + "soft_pairs": [ + "issue:43632|issue:44164", + "issue:43606|issue:43819", + "issue:35707|issue:41720", + "issue:44610|issue:45325", + "issue:43377|issue:44898", + "issue:43377|issue:44610", + "issue:43454|issue:43550", + "issue:43454|issue:45072", + "issue:43296|issue:44991", + "issue:43296|issue:44877", + "issue:39692|issue:40444" + ], + "prepared_review_unit_hash": "8f12b2e686ce8b4ed53a3c43e37b1522324e623e2b609a8fb44fb4ab80765f97", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13778, + "estimated_input_tokens": 3445, + "estimated_eval_tokens": 7146 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is over-broad: the soft pairs mostly share only a general subsystem or regression theme, not the same concrete bug. 
I would not merge any of the proposed issue pairs, and there are no PRs in the cluster.", + "confidence": 0.88, + "canonical_issue_reason": "Issue #43632 is the broadest, most regression-like report in the set and has the clearest cross-cutting impact, so it is the best anchor issue if one must be chosen, though the cluster itself does not look like true duplicates.", + "canonical_pr_reason": null, + "best_issue_reason": "#43632 is the strongest representative issue because it describes a clear framework-level regression, is well-scoped, and is more central than the other items that are mostly model-specific or documentation/example failures.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43632", + "right": "issue:44164", + "accept": false, + "reason": "Both are loader/state-related regressions, but one is about `_is_hf_initialized` in Transformers v5 and the other is about `extra_state` handling in save/from_pretrained; different failure modes and fixes." + }, + { + "left": "issue:43606", + "right": "issue:43819", + "accept": false, + "reason": "Device mismatch with CPU offload and DAC `from_latents`/forward mismatch are unrelated bugs in different model paths." + }, + { + "left": "issue:35707", + "right": "issue:41720", + "accept": false, + "reason": "Progressive generation with `inputs_embeds`/`past_key_values` is unrelated to Qwen3 auto device mapping CUDA asserts." + }, + { + "left": "issue:44610", + "right": "issue:45325", + "accept": false, + "reason": "Both mention Qwen2.5-VL, but one is a processor input-size mismatch and the other is a rope/indexing bug; different code paths." + }, + { + "left": "issue:43377", + "right": "issue:44898", + "accept": false, + "reason": "Missing padding mask support in MIMI encoder is not the same as Perceiver non-default-resolution handling." 
+ }, + { + "left": "issue:43377", + "right": "issue:44610", + "accept": false, + "reason": "These concern different models and different preprocessing/model-consistency bugs." + }, + { + "left": "issue:43454", + "right": "issue:43550", + "accept": false, + "reason": "Tying `lm_head` weights and torch.compile SDPA failures are separate model implementation issues." + }, + { + "left": "issue:43454", + "right": "issue:45072", + "accept": false, + "reason": "Garbage text generation from untied weights is unrelated to dtype mismatches in bfloat16 inference." + }, + { + "left": "issue:43296", + "right": "issue:44991", + "accept": false, + "reason": "Both are load-time compatibility problems, but they affect different models and different breakpoints; not the same bug." + }, + { + "left": "issue:43296", + "right": "issue:44877", + "accept": false, + "reason": "PaddleOCR-VL loading in vLLM and strict config rejection for `granite_speech` are separate configuration/load failures." + }, + { + "left": "issue:39692", + "right": "issue:40444", + "accept": false, + "reason": "A broken SigLIP2 documentation example and Qwen2.5-VL finetuning with multi-image iterable data are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 32, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41720", + "issue:43322", + "issue:43404", + "issue:43454", + "issue:43526", + "issue:43720", + "issue:43749", + "issue:43792", + "issue:43881", + "issue:44186", + "issue:44423", + "issue:44466", + "issue:44534", + "issue:44683", + "issue:44977", + "issue:45072", + "issue:45357", + "issue:45406" + ], + "soft_pairs": [ + "issue:43792|issue:44683", + "issue:43749|issue:44534", + "issue:44466|issue:45357", + "issue:41720|issue:43322", + "issue:43454|issue:43720", + "issue:41720|issue:44977", + "issue:43526|issue:43881", + "issue:43404|issue:45357", + "issue:44423|issue:45406", + "issue:44186|issue:45072" + ], + "prepared_review_unit_hash": "d4e5fbb0da0ce86544c663320a2d6d66f7a79d47c8bf30a696ffdae0c69a5cf0", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13455, + "estimated_input_tokens": 3364, + "estimated_eval_tokens": 6984 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is heterogeneous: most items are distinct bugs across different models/features, and none of the soft pairs look like the same underlying defect closely enough to merge. 
The strongest single issue report is the FSDP loading regression, but it is not a duplicate of the others.", + "confidence": 0.41, + "canonical_issue_reason": "Issue 43749 has the most discussion and inbound references, and it describes a concrete, high-signal regression with a clear reproduction area (FSDP_CPU_RAM_EFFICIENT_LOADING).", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43749 is the best standalone issue in the set because it is the most developed report and the clearest actionable regression; the rest are mostly unrelated model-specific failures.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43792", + "right": "issue:44683", + "accept": false, + "reason": "Different bugs: Whisper model runtime failure vs. compiled flex_attention breakage on torch>=2.9." + }, + { + "left": "issue:43749", + "right": "issue:44534", + "accept": false, + "reason": "One is FSDP CPU-efficient loading regression; the other is non-persistent buffers being filled with junk in v5. Different failure modes." + }, + { + "left": "issue:44466", + "right": "issue:45357", + "accept": false, + "reason": "Both mention model serialization, but one is tied lm_head serialization and the other is incorrect visual encoder keys in Qwen3.5; not the same code-path." + }, + { + "left": "issue:41720", + "right": "issue:43322", + "accept": false, + "reason": "Qwen3 auto device map cudaErrorAssert on A800 is unrelated to Llava Next segmentation fault during loading." + }, + { + "left": "issue:43454", + "right": "issue:43720", + "accept": false, + "reason": "AyaVision lm_head tying bug is unrelated to BitNet packed-weight unpacking during accelerate loading." + }, + { + "left": "issue:41720", + "right": "issue:44977", + "accept": false, + "reason": "Both involve Qwen3-family models, but one is a device-mapping cuda assert and the other is a flash-attention generation bug." 
+ }, + { + "left": "issue:43526", + "right": "issue:43881", + "accept": false, + "reason": "BeitImageProcessorFast reduce_labels bug is unrelated to glm-4v-9b loading failure." + }, + { + "left": "issue:43404", + "right": "issue:45357", + "accept": false, + "reason": "lm_head weight tying in Mistral3 is not the same as Qwen3.5 visual encoder key serialization." + }, + { + "left": "issue:44423", + "right": "issue:45406", + "accept": false, + "reason": "Both crash in serve multimodal paths, but they are different AttributeErrors in different processor/model setups." + }, + { + "left": "issue:44186", + "right": "issue:45072", + "accept": false, + "reason": "LayoutLMv2 tokenizer crashes on NER/padding are unrelated to dtype mismatches in SwitchTransformers/TimmWrapperModel bfloat16 inference." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 33, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:34567", + "issue:40990", + "issue:42617", + "issue:42915", + "issue:43550", + "issue:43577", + "issue:43708", + "issue:43720", + "issue:43873", + "issue:44186", + "issue:44479", + "issue:44610", + "issue:44756", + "issue:44877", + "issue:44898", + "issue:44918", + "issue:44991" + ], + "soft_pairs": [ + "issue:34567|issue:40990", + "issue:43873|issue:44756", + "issue:43577|issue:43708", + "issue:42617|issue:42915", + "issue:44877|issue:44991", + "issue:44479|issue:44918", + "issue:44610|issue:44898", + "issue:43550|issue:43720", + "issue:44186|issue:44610" + ], + "prepared_review_unit_hash": "5a2363bcfd33f5a4d52fd7ce879d73ccda0d37628ba3f0cd4bed3e8eca3e30ba", + "component_budget": { + 
"node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12519, + "estimated_input_tokens": 3130, + "estimated_eval_tokens": 6516 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are a heterogeneous mix of unrelated bugs across Trainer state, perplexity, model loading, quantization, tokenization, vision preprocessing, and config handling. The soft-similarity links are topical, but none look like the same underlying defect or change.", + "confidence": 0.96, + "canonical_issue_reason": "No single issue is a good canonical representative; the candidates span unrelated subsystems and failure modes.", + "canonical_pr_reason": "No pull requests were provided.", + "best_issue_reason": "None of the issues is a strong global representative for the cluster. If forced, the broadest is 43873, but it is still not representative of the rest.", + "best_pr_reason": "No pull requests were provided.", + "soft_edge_verdicts": [ + { + "left": "issue:34567", + "right": "issue:40990", + "accept": false, + "reason": "Trainer token accounting and perplexity on gpt-oss/WikiText are unrelated problems." + }, + { + "left": "issue:43873", + "right": "issue:44756", + "accept": false, + "reason": "Both mention offloading/memory behavior, but one is quantization-specific and the other is a Strix Halo mmap OOM issue; not the same bug." + }, + { + "left": "issue:43577", + "right": "issue:43708", + "accept": false, + "reason": "One is a Blip2 dtype loading bug; the other is checkpoint resume/max_steps logic. Different code paths and symptoms." 
+ }, + { + "left": "issue:42617", + "right": "issue:42915", + "accept": false, + "reason": "3d_parallel.py runtime failure and Qwen3Moe FineGrainedFP8Config failure are distinct model/config issues." + }, + { + "left": "issue:44877", + "right": "issue:44991", + "accept": false, + "reason": "Strict config loading for granite_speech and tokenizer loading for EMBEDDIA/est-roberta are unrelated loader regressions." + }, + { + "left": "issue:44479", + "right": "issue:44918", + "accept": false, + "reason": "Video-input regression for several Qwen VL variants and Qwen3.5 embedding unpacking with TRL are different failures." + }, + { + "left": "issue:44610", + "right": "issue:44898", + "accept": false, + "reason": "OmDet-Turbo processor output size mismatch and Perceiver non-default-resolution failure are separate preprocessing/model issues." + }, + { + "left": "issue:43550", + "right": "issue:43720", + "accept": false, + "reason": "torch.compile SDPA failure in Bamba-9B-v2 and packed-weights unpacking during accelerate loading are different bugs." + }, + { + "left": "issue:44186", + "right": "issue:44610", + "accept": false, + "reason": "LayoutLMv2Tokenizer crashing on NER/batched padding and OmDet-Turbo image-size mismatch are not the same underlying issue." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 34, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36331", + "issue:41720", + "issue:42617", + "issue:42915", + "issue:42994", + "issue:43316", + "issue:43317", + "issue:43482", + "issue:43531", + "issue:43550", + "issue:43572", + "issue:43606", + "issue:44292", + "issue:44479", + "issue:44671", + "issue:44743", + "issue:44913", + "issue:45161" + ], + "soft_pairs": [ + "issue:42994|issue:43482", + "issue:36331|issue:42915", + "issue:36331|issue:42617", + "issue:44671|issue:44743", + "issue:42915|issue:43317", + "issue:43316|issue:44913", + "issue:43550|issue:43606", + "issue:43531|issue:43572", + "issue:44292|issue:44479", + "issue:41720|issue:42617", + "issue:41720|issue:45161" + ], + "prepared_review_unit_hash": "a0bb96444379894fe38136daa1e693669b9ef5ffbef601d7563b0711929e2c63", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13431, + "estimated_input_tokens": 3358, + "estimated_eval_tokens": 6972 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is highly heterogeneous: the issues span unrelated trainer API errors, quantization/loading failures, model-specific regressions, config serialization, and device-mapping/offload problems. 
None of the soft pairs look like the same underlying bug or change.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:44479 is the clearest standalone regression report, but the set is too mixed to treat any issue as a true canonical representative of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42994", + "right": "issue:43482", + "accept": false, + "reason": "Saving a quantized model and loading a Qwen2.5-GGUF model under v5 are different failure points and likely different code paths." + }, + { + "left": "issue:36331", + "right": "issue:42915", + "accept": false, + "reason": "CustomTrainer compute_loss kwarg mismatch is unrelated to Qwen3Moe failing with FineGrainedFP8Config." + }, + { + "left": "issue:36331", + "right": "issue:42617", + "accept": false, + "reason": "Trainer compute_loss API break and failure to run 3d_parallel.py are different bugs with different surfaces." + }, + { + "left": "issue:44671", + "right": "issue:44743", + "accept": false, + "reason": "CamemBERT masked-LM prediction regression and Qwen3.5 recurrent-state reset are unrelated model-specific issues." + }, + { + "left": "issue:42915", + "right": "issue:43317", + "accept": false, + "reason": "Qwen3Moe FP8 config failure and dequantized model load/offload failure concern different mechanisms and symptoms." + }, + { + "left": "issue:43316", + "right": "issue:44913", + "accept": false, + "reason": "Gemma3TextConfig API discrepancy does not match GPTNeoX rotary_pct reverting on reload." + }, + { + "left": "issue:43550", + "right": "issue:43606", + "accept": false, + "reason": "torch.compile + SDPA failure in Bamba is a different bug from CPU offload device mismatch in bark-small." + }, + { + "left": "issue:43531", + "right": "issue:43572", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior and StableLmConfig pad_token_idx persistence are unrelated regressions." 
+ }, + { + "left": "issue:44292", + "right": "issue:44479", + "accept": false, + "reason": "Qwen-3-8B-NVFP4 runtime failure and a video-input regression across Qwen VL models are different failure modes." + }, + { + "left": "issue:41720", + "right": "issue:42617", + "accept": false, + "reason": "Auto device mapping CUDA assert on Qwen3 is not the same underlying issue as 3d_parallel.py failing to run." + }, + { + "left": "issue:41720", + "right": "issue:45161", + "accept": false, + "reason": "Qwen3 auto device mapping failure and GPT-OSS MoE tensor-parallel failure are separate model/runtime problems." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 35, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:30064", + "issue:34634", + "issue:36010", + "issue:41720", + "issue:42617", + "issue:42915", + "issue:43322", + "issue:43388", + "issue:43540", + "issue:43782", + "issue:43873", + "issue:44186", + "issue:44361", + "issue:44514", + "issue:44545", + "issue:45059", + "issue:45406", + "issue:45464" + ], + "soft_pairs": [ + "issue:43540|issue:43782", + "issue:43388|issue:45059", + "issue:44186|issue:44361", + "issue:41720|issue:43873", + "issue:42915|issue:43322", + "issue:45406|issue:45464", + "issue:36010|issue:42617", + "issue:44514|issue:44545", + "issue:30064|issue:34634" + ], + "prepared_review_unit_hash": "7b0bef9c6536ba9e9be3aee069722fcad9aed7f6234855d3fa60ec0fb024f5ff", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": 
{ + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 12988, + "estimated_input_tokens": 3247, + "estimated_eval_tokens": 6750 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most items are unrelated issue reports. The only clear duplicate pair is the two Qwen2_5_VLProcessor batched-input padding=False crashes; the rest are similar only at a subsystem level, not the same bug.", + "confidence": 0.94, + "canonical_issue_reason": "Issue 44545 is the cleanest canonical representative for the duplicated Qwen2_5_VLProcessor.apply_chat_template batched-input crash: it has the concise title, matches the same failure mode as 44514, and has slightly more reference activity.", + "canonical_pr_reason": null, + "best_issue_reason": "44545 is the best issue to keep as the representative duplicate target among this set because it states the bug clearly and appears to be the more central report for the exact same crash.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43540", + "right": "issue:43782", + "accept": false, + "reason": "Both are Qwen multimodal issues, but one is a video-input ValueError in Qwen3OmniMoe and the other is a weight_only load error in Qwen3VL; different code paths and failure modes." + }, + { + "left": "issue:43388", + "right": "issue:45059", + "accept": false, + "reason": "gather_for_metrics label truncation in evaluation is unrelated to SAM3 PCS odd behavior with text and bounding boxes; same broad area only." + }, + { + "left": "issue:44186", + "right": "issue:44361", + "accept": false, + "reason": "Both are tokenizer-related, but they affect different models and different bugs: LayoutLMv2 NER/batched padding crashes vs MLukeTokenizer AttributeError on tasks." 
+ }, + { + "left": "issue:41720", + "right": "issue:43873", + "accept": false, + "reason": "Qwen3 auto device mapping cudaErrorAssert and offloading-with-quantization not working are both memory/device-management topics, but not the same concrete bug." + }, + { + "left": "issue:42915", + "right": "issue:43322", + "accept": false, + "reason": "Qwen3Moe FineGrainedFP8Config failure and Llava Next segfault are unrelated model-specific loading/runtime issues." + }, + { + "left": "issue:45406", + "right": "issue:45464", + "accept": false, + "reason": "Gemma4Processor missing _tokenizer in serve and Qwen3.5 streaming inference failure are distinct processor/runtime problems." + }, + { + "left": "issue:36010", + "right": "issue:42617", + "accept": false, + "reason": "An ImportError for GenerationMixin and inability to run 3d_parallel.py do not describe the same bug or code path." + }, + { + "left": "issue:44514", + "right": "issue:44545", + "accept": true, + "reason": "These are the same reported crash on Qwen2_5_VLProcessor.apply_chat_template with batched input and padding=False; only wording differs." + }, + { + "left": "issue:30064", + "right": "issue:34634", + "accept": false, + "reason": "Image Processor void segmentation maps and BarkProcessor voice_preset are completely different processor bugs with no shared failure path." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 36, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33357", + "issue:35707", + "issue:41720", + "issue:43421", + "issue:43550", + "issue:43606", + "issue:43653", + "issue:43720", + "issue:43746", + "issue:43927", + "issue:44368", + "issue:44492", + "issue:44509", + "issue:44625", + "issue:44918", + "issue:44936", + "issue:45357", + "issue:45479" + ], + "soft_pairs": [ + "issue:33357|issue:35707", + "issue:43606|issue:43720", + "issue:41720|issue:43746", + "issue:43550|issue:43653", + "issue:44492|issue:44509", + "issue:44625|issue:45479", + "issue:44368|issue:45357", + "issue:43421|issue:43927", + "issue:43606|issue:43653", + "issue:44918|issue:44936" + ], + "prepared_review_unit_hash": "729f36681e778deb95363e7e60e1bcaeba67941a09c0b5286aa65dda3da19213", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13640, + "estimated_input_tokens": 3410, + "estimated_eval_tokens": 7076 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items do not form a single duplicate cluster; they cover unrelated bugs and docs issues across different models, tokenizers, and trainer paths. 
No canonical issue or PR stands out for the whole set.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:33357", + "right": "issue:35707", + "accept": false, + "reason": "Different failure modes and code paths: MacOS bus error with CLIP vs generation behavior with inputs_embeds/past_key_values." + }, + { + "left": "issue:43606", + "right": "issue:43720", + "accept": false, + "reason": "Both are runtime-loading problems, but one is CPU offload device mismatch and the other is packed-weight unpacking during accelerate loading; not the same bug." + }, + { + "left": "issue:41720", + "right": "issue:43746", + "accept": false, + "reason": "Unrelated subsystems and symptoms: auto device mapping cuda assert vs PEFT adapter loading from local checkpoints." + }, + { + "left": "issue:43550", + "right": "issue:43653", + "accept": false, + "reason": "One is torch.compile + SDPA failure in a model, the other is tokenizer special-token registration; no shared underlying defect." + }, + { + "left": "issue:44492", + "right": "issue:44509", + "accept": false, + "reason": "A typo in cache strategy docs/code is unrelated to documentation still mentioning removed pipeline tasks." + }, + { + "left": "issue:44625", + "right": "issue:45479", + "accept": false, + "reason": "Both mention num_labels, but one is config propagation for Qwen3.5 and the other is a broader zero-loss classification bug across models." + }, + { + "left": "issue:44368", + "right": "issue:45357", + "accept": false, + "reason": "Different model/config issues: tie_word_embeddings warning during LoRA fine-tuning vs incorrect visual encoder keys saved by save_pretrained." 
+ }, + { + "left": "issue:43421", + "right": "issue:43927", + "accept": false, + "reason": "TokenizerBackend post-processor refresh on runtime special-token edits is distinct from DiaConfig losing custom token IDs after save/load." + }, + { + "left": "issue:43606", + "right": "issue:43653", + "accept": false, + "reason": "Device mismatch under CPU offload is unrelated to BigBirdTokenizer mask token special-token registration and decode output." + }, + { + "left": "issue:44918", + "right": "issue:44936", + "accept": false, + "reason": "Both involve trainer usage, but one is unpacking Qwen3.5 input embeddings with TRL SFT and the other is evaluate() failing after train(); different bugs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 37, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:34567", + "issue:34634", + "issue:35707", + "issue:41720", + "issue:43381", + "issue:43421", + "issue:43531", + "issue:43653", + "issue:43782", + "issue:43844", + "issue:44062", + "issue:44451", + "issue:44589", + "issue:44898", + "issue:44991", + "issue:45072", + "issue:45381" + ], + "soft_pairs": [ + "issue:43782|issue:45381", + "issue:44451|issue:44991", + "issue:34634|issue:41720", + "issue:43381|issue:43844", + "issue:44898|issue:45072", + "issue:44062|issue:44451", + "issue:34567|issue:35707", + "issue:43421|issue:43653", + "issue:43531|issue:44589" + ], + "prepared_review_unit_hash": "5919d0a49ba3d03da0ce15a666fd7497addc4a014eb5787de71b19cd7af83edb", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + 
"estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12514, + "estimated_input_tokens": 3129, + "estimated_eval_tokens": 6514 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: it spans tokenizer special-token handling, model loading, generation, training, device mapping, quantization/storage, and architecture-specific bugs. None of the soft pairs look like the same concrete bug/change, so all soft edges should be rejected and there is no clear canonical issue/PR for the whole set.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43782", + "right": "issue:45381", + "accept": false, + "reason": "Both involve Qwen3-family vision/language models, but one is a from_pretrained weight-loading failure and the other is a video vision_position_ids bug. Different code paths and likely different fixes." + }, + { + "left": "issue:44451", + "right": "issue:44991", + "accept": false, + "reason": "Both are tokenizer-loading regressions for specific checkpoints, but they affect different models and report different failure modes. Too broad to be the same underlying bug." + }, + { + "left": "issue:34634", + "right": "issue:41720", + "accept": false, + "reason": "BarkProcessor voice_preset handling and Qwen3 auto device mapping cudaErrorAssert are unrelated subsystems and failures." + }, + { + "left": "issue:43381", + "right": "issue:43844", + "accept": false, + "reason": "Both touch training/gradient behavior, but one is about checkpointing in eval mode and the other about gradients exploding with DeepSpeed ZeRO-3. Not the same bug." 
+ }, + { + "left": "issue:44898", + "right": "issue:45072", + "accept": false, + "reason": "Different model families and different failure classes: Perceiver positional interpolation vs dtype mismatches in SwitchTransformers/TimmWrapperModel." + }, + { + "left": "issue:44062", + "right": "issue:44451", + "accept": false, + "reason": "Special-token/AddedToken construction issues are tokenizer-adjacent, but these are different concrete problems: duplicate special argument vs inability to load a specific tokenizer." + }, + { + "left": "issue:34567", + "right": "issue:35707", + "accept": false, + "reason": "TrainerState token counting and generation with inputs_embeds/past_key_values are separate code paths with different symptoms and fixes." + }, + { + "left": "issue:43421", + "right": "issue:43653", + "accept": false, + "reason": "Both concern special tokens, but one is a TokenizersBackend post-processor update issue and the other is BigBirdTokenizer mask-token registration/empty decode. Related area, not the same bug." + }, + { + "left": "issue:43531", + "right": "issue:44589", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior and Float8 storage lookup errors are unrelated; one is model config logic, the other is a storage/type support problem." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 38, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:39692", + "issue:42886", + "issue:43421", + "issue:43531", + "issue:43596", + "issue:43645", + "issue:43994", + "issue:44077", + "issue:44186", + "issue:44220", + "issue:44610", + "issue:44625", + "issue:44671", + "issue:44843", + "issue:44987", + "issue:45042", + "issue:45081", + "issue:45356" + ], + "soft_pairs": [ + "issue:42886|issue:44843", + "issue:44987|issue:45042", + "issue:44077|issue:44625", + "issue:43994|issue:44671", + "issue:43421|issue:44186", + "issue:39692|issue:44610", + "issue:43596|issue:44220", + "issue:43531|issue:43645", + "issue:45081|issue:45356" + ], + "prepared_review_unit_hash": "4f199d7f5faedf08f080f29a1d62dc42533d4efbcdb7a81f4a95580676fea0e8", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13285, + "estimated_input_tokens": 3322, + "estimated_eval_tokens": 6900 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous, with no clear duplicate group across the issues. 
The only near-match is the Mistral/fix_mistral_regex pair, but the reported symptoms still differ enough to keep them separate.", + "confidence": 0.89, + "canonical_issue_reason": "No single issue stands out as the canonical duplicate target; the set spans unrelated tokenizer, model, processor, and runtime problems.", + "canonical_pr_reason": "No PRs are present in this cluster.", + "best_issue_reason": "No best issue: the cluster is too diverse to use one issue as the representative canonical item.", + "best_pr_reason": "No PRs are present in this cluster.", + "soft_edge_verdicts": [ + { + "left": "issue:42886", + "right": "issue:44843", + "accept": false, + "reason": "Both involve offline tokenizer loading, but one is general cache/offline failure and the other is a specific _patch_mistral_regex model_info call path; not the same concrete bug." + }, + { + "left": "issue:44987", + "right": "issue:45042", + "accept": false, + "reason": "These are unrelated failures: loading a specific model breaks in one case, while PIL image processors incorrectly requiring torchvision is a separate image backend issue." + }, + { + "left": "issue:44077", + "right": "issue:44625", + "accept": false, + "reason": "Different components and symptoms: patchtsmixer post_init validation vs Qwen3.5 num_labels propagation." + }, + { + "left": "issue:43994", + "right": "issue:44671", + "accept": false, + "reason": "Both are model output quality problems, but they affect different models and code paths (SigLIP2 AutoModel/pipeline vs CamemBERT masked LM)." + }, + { + "left": "issue:43421", + "right": "issue:44186", + "accept": false, + "reason": "Both are tokenizer-related, but one is about runtime special-token/post-processor updates and the other is LayoutLMv2 crashes with NER padding/truncation; different bugs." 
+ }, + { + "left": "issue:39692", + "right": "issue:44610", + "accept": false, + "reason": "Both mention processor/model mismatch, but they concern different models and different failures (SigLIP2 doc example vs OmDet-Turbo image size expectations)." + }, + { + "left": "issue:43596", + "right": "issue:44220", + "accept": false, + "reason": "A deepspeed/zero3 BertModel initialization error is unrelated to a feature extraction failure in _torch_extract_fbank_features()." + }, + { + "left": "issue:43531", + "right": "issue:43645", + "accept": false, + "reason": "These are separate runtime/modeling issues: Qwen3-MoE sliding_window behavior vs custom model initialization in Jupyter notebooks." + }, + { + "left": "issue:45081", + "right": "issue:45356", + "accept": false, + "reason": "Closest pair, but still not clearly the same bug: 45081 is an AttributeError in fix_mistral_regex loading Mistral tokenizers, while 45356 is a Kimi-K2.5 regression about codec handling and a misleading warning." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 39, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:39692", + "issue:40444", + "issue:43540", + "issue:43618", + "issue:43697", + "issue:43716", + "issue:43720", + "issue:43746", + "issue:43819", + "issue:43873", + "issue:44008", + "issue:44220", + "issue:44661", + "issue:44843", + "issue:45081", + "issue:45183", + "issue:45381", + "issue:45479" + ], + "soft_pairs": [ + "issue:45183|issue:45479", + "issue:43540|issue:44008", + "issue:43746|issue:45381", + "issue:44220|issue:44661", + "issue:39692|issue:43873", + "issue:43746|issue:43873", + 
"issue:40444|issue:43746", + "issue:43716|issue:44008", + "issue:43618|issue:43697", + "issue:44843|issue:45081", + "issue:43720|issue:43819" + ], + "prepared_review_unit_hash": "66755ac57e607b85b6f5b2ec9049c979adf494e688c1dced04d1d6a8c6a07120", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 14014, + "estimated_input_tokens": 3504, + "estimated_eval_tokens": 7264 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: it groups distinct bugs across multimodal inference, tokenizer patching, loading/offloading, and model-specific forward-path issues. I do not see any true duplicates among the soft-edge pairs.", + "confidence": 0.91, + "canonical_issue_reason": "No single canonical issue fits the cluster: the reports target different models and different code paths, so they should not be merged into one issue.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43746 is the most self-contained and actionable report, but it is still not a duplicate of the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45183", + "right": "issue:45479", + "accept": false, + "reason": "Different bugs: XOR input validation/error messaging vs degenerate zero loss in single-label classification." + }, + { + "left": "issue:43540", + "right": "issue:44008", + "accept": false, + "reason": "Different models and failures: Qwen3OmniMoe video input processing vs Gemma3n audio-mask name collision." + }, + { + "left": "issue:43746", + "right": "issue:45381", + "accept": false, + "reason": "Unrelated code paths: PEFT local checkpoint loading vs Qwen2.5-VL video position_ids bug." 
+ }, + { + "left": "issue:44220", + "right": "issue:44661", + "accept": false, + "reason": "Audio feature extraction bug vs tokenizer-mapping add-new-model-like failure; no shared underlying defect." + }, + { + "left": "issue:39692", + "right": "issue:43873", + "accept": false, + "reason": "Both mention quantization, but one is a doc example/model-processor mismatch and the other is offloading behavior with quantization." + }, + { + "left": "issue:43746", + "right": "issue:43873", + "accept": false, + "reason": "Different failure modes: local checkpoint loading with adapters vs quantization/offloading mismatch." + }, + { + "left": "issue:40444", + "right": "issue:43746", + "accept": false, + "reason": "Multimodal finetuning with iterable datasets vs checkpoint loading; these are distinct bugs." + }, + { + "left": "issue:43716", + "right": "issue:44008", + "accept": false, + "reason": "Different model-specific issues: image preprocessor dtype mismatch vs variable collision in Gemma3n forward." + }, + { + "left": "issue:43618", + "right": "issue:43697", + "accept": false, + "reason": "Different problems in different models: missing CLIP attentions assignment vs RTDetrV2 output divergence." + }, + { + "left": "issue:44843", + "right": "issue:45081", + "accept": false, + "reason": "Same Mistral regex area, but one is offline-mode model_info access and the other is a backend_tokenizer attribute crash; separate bugs." + }, + { + "left": "issue:43720", + "right": "issue:43819", + "accept": false, + "reason": "Different underlying defects: packed-weight loading in BitNet vs DAC.from_latents mismatch with missing STE." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 40, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:30064", + "issue:36010", + "issue:43335", + "issue:43421", + "issue:43540", + "issue:43550", + "issue:43577", + "issue:43653", + "issue:43720", + "issue:43756", + "issue:43818", + "issue:44062", + "issue:44186", + "issue:44488", + "issue:44811", + "issue:44898", + "issue:45072", + "issue:45381" + ], + "soft_pairs": [ + "issue:43335|issue:43756", + "issue:43653|issue:43720", + "issue:44062|issue:44488", + "issue:30064|issue:36010", + "issue:43421|issue:44811", + "issue:44186|issue:44898", + "issue:43540|issue:45381", + "issue:43550|issue:45072", + "issue:43577|issue:43818" + ], + "prepared_review_unit_hash": "1e82bad5f347011117dbd3ccf7aa2f4d29ca320a95c4ae260eddc97cce8720ea", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13329, + "estimated_input_tokens": 3333, + "estimated_eval_tokens": 6922 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly unrelated bug reports across different models and subsystems. 
The soft pairs share broad areas like tokenizers or video inputs, but not the same concrete root cause, so none should be merged as duplicates.", + "confidence": 0.97, + "canonical_issue_reason": "No single issue clearly subsumes the rest; the cluster is heterogeneous rather than one duplicate bug.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43653 is the clearest stand-alone bug report with direct user/CI impact and a narrow, actionable tokenizer failure, so it is the best representative issue in this mixed set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43335", + "right": "issue:43756", + "accept": false, + "reason": "Different model/config bugs: sparse-layer construction vs RoPE layer dropping; same general architecture space, not the same defect." + }, + { + "left": "issue:43653", + "right": "issue:43720", + "accept": false, + "reason": "Unrelated failure modes: BigBird special-token registration vs BitNet accelerate loading/unpacking." + }, + { + "left": "issue:44062", + "right": "issue:44488", + "accept": false, + "reason": "Both are tokenizer/load errors, but the titles point to different concrete bugs and models; not enough to treat as the same root cause." + }, + { + "left": "issue:30064", + "right": "issue:36010", + "accept": false, + "reason": "Image processor segmentation-map handling is unrelated to the GenerationMixin import error." + }, + { + "left": "issue:43421", + "right": "issue:44811", + "accept": false, + "reason": "Both involve special tokens, but one is runtime post-processor updates and the other is Whisper batch_decode skip_special_tokens behavior; different code paths." + }, + { + "left": "issue:44186", + "right": "issue:44898", + "accept": false, + "reason": "LayoutLMv2 tokenizer crashes and Perceiver interpolation failures are separate model/input bugs." 
+ }, + { + "left": "issue:43540", + "right": "issue:45381", + "accept": false, + "reason": "Both concern video inputs, but they affect different Qwen model families and describe different positional/input bugs." + }, + { + "left": "issue:43550", + "right": "issue:45072", + "accept": false, + "reason": "torch.compile + SDPA failure in Bamba is unrelated to dtype mismatches in SwitchTransformers/TimmWrapperModel." + }, + { + "left": "issue:43577", + "right": "issue:43818", + "accept": false, + "reason": "BLIP2 dtype loading issue and Video-LLaVA temporal-attention/weight-sharing bug are distinct model-loading vs architecture problems." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 41, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43531", + "issue:43550", + "issue:43577", + "issue:43582", + "issue:43643", + "issue:43644", + "issue:43653", + "issue:43749", + "issue:43782", + "issue:43819", + "issue:43927", + "issue:44448", + "issue:44561", + "issue:44671", + "issue:44877", + "issue:44977", + "issue:45020" + ], + "soft_pairs": [ + "issue:43550|issue:43927", + "issue:43643|issue:44877", + "issue:43782|issue:44977", + "issue:43577|issue:43582", + "issue:44561|issue:45020", + "issue:44448|issue:44671", + "issue:43531|issue:43643", + "issue:43644|issue:43749", + "issue:43653|issue:43819" + ], + "prepared_review_unit_hash": "947c20dd0d9426dc8650995e1dd2dc8c1d239d72f977053ad0656fffc60f3474", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + 
}, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12469, + "estimated_input_tokens": 3118, + "estimated_eval_tokens": 6492 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is loosely related by broad regression themes (remote code/config loading, dtype/loading, model-specific generation), but the paired issues do not appear to describe the same concrete bug. No soft edge is a safe duplicate merge.", + "confidence": 0.29, + "canonical_issue_reason": "issue:45020 is the broadest umbrella and the only open issue; it best matches the cluster\u2019s general theme of recent Transformers versions breaking remote_code models.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45020 is the most suitable representative because it is the most general report and can absorb the narrower remote_code-related regressions better than the model-specific issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43550", + "right": "issue:43927", + "accept": false, + "reason": "Different models and different failure modes: torch.compile+SDPA on Bamba vs save/load token-ID loss on DiaConfig." + }, + { + "left": "issue:43643", + "right": "issue:44877", + "accept": false, + "reason": "Both involve config/remote_code loading, but one is missing fields from AutoConfig and the other is strict config blocking granite_speech; not the same bug." + }, + { + "left": "issue:43782", + "right": "issue:44977", + "accept": false, + "reason": "Different model families and symptoms: Qwen3VL weight_only load error vs Qwen3.5 flash-attention generation issue." + }, + { + "left": "issue:43577", + "right": "issue:43582", + "accept": false, + "reason": "Unrelated regressions: BLIP2 dtype propagation vs Apple Silicon allocator warmup TypeError." 
+ }, + { + "left": "issue:44561", + "right": "issue:45020", + "accept": false, + "reason": "45020 is an umbrella remote_code regression report, but 44561 is a specific missing-function breakage; too broad to treat as the same underlying bug." + }, + { + "left": "issue:44448", + "right": "issue:44671", + "accept": false, + "reason": "Both are output-quality regressions in v5, but they concern different models and likely different code paths." + }, + { + "left": "issue:43531", + "right": "issue:43643", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior is unrelated to AutoConfig remote_code field loss." + }, + { + "left": "issue:43644", + "right": "issue:43749", + "accept": false, + "reason": "Different subsystems and symptoms: non-persistent buffer initialization junk vs FSDP CPU RAM efficient loading failure." + }, + { + "left": "issue:43653", + "right": "issue:43819", + "accept": false, + "reason": "Tokenizer special-token registration bug and DAC STE mismatch are separate model/component issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 42, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:30064", + "issue:36331", + "issue:41720", + "issue:42175", + "issue:43550", + "issue:43650", + "issue:43819", + "issue:43825", + "issue:43827", + "issue:43906", + "issue:43994", + "issue:44186", + "issue:44442", + "issue:44805", + "issue:44936", + "issue:44991", + "issue:45081", + "issue:45406" + ], + "soft_pairs": [ + "issue:43825|issue:43994", + "issue:44805|issue:44936", + "issue:43550|issue:43819", + "issue:44186|issue:44442", + "issue:41720|issue:44991", + "issue:43825|issue:43827", + "issue:42175|issue:45081", + "issue:43650|issue:43906", + "issue:30064|issue:36331", + "issue:45081|issue:45406" + ], + "prepared_review_unit_hash": "f4f5610d3cc2060055e7349bc63f617fde5c8676696e951e70f5e0afc6ba7dc7", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13489, + "estimated_input_tokens": 3373, + "estimated_eval_tokens": 7002 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is noisy: the items span unrelated bugs in tokenizers, trainers, vision, serving, docs, and model-specific runtime failures. 
I don\u2019t see any true duplicate pairs to merge; the most actionable standalone report is the Mistral tokenizer crash issue.", + "confidence": 0.34, + "canonical_issue_reason": "Issue 45081 is the most concrete and actionable report in the set: it has a clear traceback, a narrow failure mode, and direct references to the affected tokenizer-loading path.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45081 is the best representative issue because it describes a specific, reproducible code-path failure rather than a broad symptom or a documentation-only mismatch.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43825", + "right": "issue:43994", + "accept": false, + "reason": "Both mention pipeline usage, but one is an error-message regression and the other is a model output correctness issue; different bugs and code paths." + }, + { + "left": "issue:44805", + "right": "issue:44936", + "accept": false, + "reason": "Both are generic trainer failures, but the reported symptoms and likely root causes differ: mask shape mismatch vs post-train evaluate breakage." + }, + { + "left": "issue:43550", + "right": "issue:43819", + "accept": false, + "reason": "Different model-specific failures: torch.compile with SDPA on Bamba vs DAC latent reconstruction mismatch missing STE." + }, + { + "left": "issue:44186", + "right": "issue:44442", + "accept": false, + "reason": "Both are tokenizer-related, but they affect different tokenizers and failure modes: NER/batched padding crash vs AutoTokenizer loading failure." + }, + { + "left": "issue:41720", + "right": "issue:44991", + "accept": false, + "reason": "Unrelated issues: Qwen3 auto device mapping CUDA assert vs tokenizer loading regression for a different model family." + }, + { + "left": "issue:43825", + "right": "issue:43827", + "accept": false, + "reason": "Related to v5 pipeline removals, but one is runtime behavior and the other is stale documentation; not the same underlying bug." 
+ }, + { + "left": "issue:42175", + "right": "issue:45081", + "accept": false, + "reason": "Package/backend installation issue versus a Mistral tokenizer attribute error; different subsystems and failure causes." + }, + { + "left": "issue:43650", + "right": "issue:43906", + "accept": false, + "reason": "No evidence of the same bug: one is an empty/nonsensical issue title, the other is an isolated reproduction of a separate referenced issue." + }, + { + "left": "issue:30064", + "right": "issue:36331", + "accept": false, + "reason": "Vision image-processing segmentation-map failure and trainer compute_loss signature mismatch are unrelated." + }, + { + "left": "issue:45081", + "right": "issue:45406", + "accept": false, + "reason": "Both are AttributeError crashes in tokenizer/processor loading, but they hit different objects and code paths (Mistral regex patch vs Gemma4Processor serve path)." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 43, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:34634", + "issue:36010", + "issue:43262", + "issue:43582", + "issue:43606", + "issue:43653", + "issue:43720", + "issue:43782", + "issue:43819", + "issue:44060", + "issue:44368", + "issue:44485", + "issue:44509", + "issue:44610", + "issue:44671", + "issue:44857", + "issue:45072", + "issue:45325" + ], + "soft_pairs": [ + "issue:43606|issue:45072", + "issue:43782|issue:45325", + "issue:44610|issue:45072", + "issue:44485|issue:44509", + "issue:34634|issue:36010", + "issue:43720|issue:45072", + "issue:43653|issue:44671", + "issue:43582|issue:44857", + "issue:44060|issue:44368", + 
"issue:43262|issue:43819" + ], + "prepared_review_unit_hash": "48ed4151d2cecb7fd43373e4d07441a733da4f3bc2a4718a9e5037e047e39123", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13614, + "estimated_input_tokens": 3404, + "estimated_eval_tokens": 7064 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No soft pair appears to describe the same underlying bug or change; all candidate duplicates are distinct model- or subsystem-specific issues, so no merge-worthy canonical item stands out.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43606", + "right": "issue:45072", + "accept": false, + "reason": "Different bugs in different areas: BarkProcessor CPU offload/device mismatch vs general dtype mismatch in SwitchTransformers/TimmWrapperModel." + }, + { + "left": "issue:43782", + "right": "issue:45325", + "accept": false, + "reason": "Both are Qwen-VL related, but one is a from_pretrained weight_only loading error and the other is a rope/index scaling bug in get_rope_index; not the same code path." + }, + { + "left": "issue:44610", + "right": "issue:45072", + "accept": false, + "reason": "OmDet processor input-size mismatch is unrelated to bfloat16 inference dtype mismatches." + }, + { + "left": "issue:44485", + "right": "issue:44509", + "accept": false, + "reason": "One concerns GLM-5 RoPE implementation; the other is a docs cleanup for removed pipeline tasks." 
+ }, + { + "left": "issue:34634", + "right": "issue:36010", + "accept": false, + "reason": "BarkProcessor voice_preset and a transformers GenerationMixin import error are unrelated failures." + }, + { + "left": "issue:43720", + "right": "issue:45072", + "accept": false, + "reason": "BitNet packed-weight loading during accelerate is a different bug than dtype mismatches in inference." + }, + { + "left": "issue:43653", + "right": "issue:44671", + "accept": false, + "reason": "Tokenizer special-token registration vs CamemBERT masked-LM prediction correctness are separate issues." + }, + { + "left": "issue:43582", + "right": "issue:44857", + "accept": false, + "reason": "Apple Silicon warmup TypeError and CUDA float16 loss crash are different platform-specific failures." + }, + { + "left": "issue:44060", + "right": "issue:44368", + "accept": false, + "reason": "Both mention tied-weights warnings, but they target different Qwen variants and different warning behavior; not the same concrete bug." + }, + { + "left": "issue:43262", + "right": "issue:43819", + "accept": false, + "reason": "Audio processor chat-template sampling-rate defaulting and DAC latent reconstruction mismatch are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 44, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:34634", + "issue:36331", + "issue:43452", + "issue:43454", + "issue:43526", + "issue:43577", + "issue:43638", + "issue:43653", + "issue:43697", + "issue:43749", + "issue:44291", + "issue:44554", + "issue:44568", + "issue:44912", + "issue:44936", + "issue:44991", + "issue:45488" + ], + "soft_pairs": [ + "issue:43749|issue:44291", + "issue:43526|issue:43697", + "issue:43526|issue:44554", + "issue:43638|issue:44936", + "issue:43653|issue:44912", + "issue:44568|issue:45488", + "issue:43454|issue:43577", + "issue:43452|issue:44991", + "issue:34634|issue:36331" + ], + "prepared_review_unit_hash": "8dee1c188b77feea4e287257927951851dbf052cce79880b29900d462d0688f8", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12795, + "estimated_input_tokens": 3199, + "estimated_eval_tokens": 6654 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is a grab bag of unrelated bugs across tokenizers, trainers, model loading, quantization, and vision models. 
None of the soft pairs look like the same underlying issue or a mergeable duplicate PR pair, so all soft edges should be rejected.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43749", + "right": "issue:44291", + "accept": false, + "reason": "Both are loading-related, but one is about FSDP_CPU_RAM_EFFICIENT_LOADING and the other about init_empty_weights/_is_hf_initialized; different code paths and failure modes." + }, + { + "left": "issue:43526", + "right": "issue:43697", + "accept": false, + "reason": "BeitImageProcessorFast label reduction and RTDetrV2 output differences are separate vision bugs in different components with no shared concrete defect." + }, + { + "left": "issue:43526", + "right": "issue:44554", + "accept": false, + "reason": "A preprocessing label bug and an MPS attention correctness issue are unrelated subsystems and cannot be the same bug." + }, + { + "left": "issue:43638", + "right": "issue:44936", + "accept": false, + "reason": "DeepSpeed ZeRO-3 index error during training with a non-pretrained Bert model is unrelated to trainer.evaluate() failing after trainer.train()." + }, + { + "left": "issue:43653", + "right": "issue:44912", + "accept": false, + "reason": "BigBirdTokenizer special-token registration/empty decode and MXFP4 quantization fallback to bf16 are different defects in different layers." + }, + { + "left": "issue:44568", + "right": "issue:45488", + "accept": false, + "reason": "Both concern tokenizers, but one is mdeberta-v3 add_special_tokens behavior and the other is LlamaTokenizer overriding tokenizer.json pre-tokenizer; not the same concrete bug." + }, + { + "left": "issue:43454", + "right": "issue:43577", + "accept": false, + "reason": "AyaVision weight-tying failure and BLIP2 dtype propagation are distinct model-loading/model-setup issues." 
+ }, + { + "left": "issue:43452", + "right": "issue:44991", + "accept": false, + "reason": "gguf_file breakage for AutoTokenizer/AutoModel loading and EMBEDDIA/est-roberta tokenizer loading in v5 are separate compatibility problems, not one duplicate fix." + }, + { + "left": "issue:34634", + "right": "issue:36331", + "accept": false, + "reason": "BarkProcessor voice_preset failure and CustomTrainer.compute_loss unexpected kwarg are entirely unrelated bugs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 45, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36010", + "issue:38175", + "issue:39692", + "issue:43257", + "issue:43329", + "issue:43645", + "issue:43696", + "issue:43746", + "issue:43854", + "issue:44568", + "issue:44589", + "issue:44610", + "issue:44811", + "issue:44912", + "issue:44977", + "issue:45042", + "issue:45356" + ], + "soft_pairs": [ + "issue:43746|issue:44811", + "issue:44589|issue:45042", + "issue:43257|issue:43329", + "issue:43854|issue:44977", + "issue:38175|issue:44610", + "issue:36010|issue:43645", + "issue:43257|issue:43696", + "issue:44568|issue:45356", + "issue:39692|issue:44912" + ], + "prepared_review_unit_hash": "5faa610dc76784bb629b324746edd0f4395d72bee462453215ffe8c29a361fa0", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12712, + "estimated_input_tokens": 3178, + "estimated_eval_tokens": 6612 + }, 
+ "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous and does not look like a true duplicate set. The soft-edge pairs are all distinct bugs or feature regressions, so they should be rejected.", + "confidence": 0.96, + "canonical_issue_reason": "No single canonical issue fits this cluster: it mixes unrelated reports across imports, tokenizers, model loading, quantization, processors, and documentation examples.", + "canonical_pr_reason": null, + "best_issue_reason": "No global best issue exists here. If forced, issue 45042 is the broadest/highest-impact processor bug, but it still does not represent the rest of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43746", + "right": "issue:44811", + "accept": false, + "reason": "Different code paths and symptoms: GraniteSpeech local checkpoint loading with PEFT adapters vs Whisper processor.batch_decode skip_special_tokens handling." + }, + { + "left": "issue:44589", + "right": "issue:45042", + "accept": false, + "reason": "Unrelated failures: Float8 storage lookup error vs PIL backend image processors incorrectly requiring torchvision." + }, + { + "left": "issue:43257", + "right": "issue:43329", + "accept": false, + "reason": "Different underlying bugs: Qwen3 MOE weight conversion under accelerate/deepspeed vs undefined variables in multimodal video token counting." + }, + { + "left": "issue:43854", + "right": "issue:44977", + "accept": false, + "reason": "Both involve model loading/inference, but they concern different models and different failure modes: unit-test loading vs flash-attention generation breakage." + }, + { + "left": "issue:38175", + "right": "issue:44610", + "accept": false, + "reason": "Both are image-model issues, but one is zero probabilities on SigLIP2 and the other is a processor/model input-size mismatch for OmDet-Turbo." 
+ }, + { + "left": "issue:36010", + "right": "issue:43645", + "accept": false, + "reason": "Distinct regressions: missing GenerationMixin import vs custom-model initialization in Jupyter notebooks on Transformers 5.0." + }, + { + "left": "issue:43257", + "right": "issue:43696", + "accept": false, + "reason": "Accelerate/deepspeed weight conversion bug is unrelated to an OOM when running GPT-oss-20b." + }, + { + "left": "issue:44568", + "right": "issue:45356", + "accept": false, + "reason": "Both are tokenizer-related, but they affect different models and different issues: missing BOS/EOS addition vs codec handling and warning behavior." + }, + { + "left": "issue:39692", + "right": "issue:44912", + "accept": false, + "reason": "Different models and different problems: SigLIP2 docs/model-processor mismatch and quantization failure vs git-oss-20b MXFP4 quantization fallback to bf16." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 46, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36010", + "issue:36331", + "issue:42175", + "issue:43262", + "issue:43329", + "issue:43531", + "issue:43550", + "issue:43720", + "issue:43749", + "issue:43819", + "issue:43927", + "issue:44479", + "issue:44610", + "issue:44756", + "issue:44811", + "issue:44912", + "issue:44938", + "issue:45042" + ], + "soft_pairs": [ + "issue:43819|issue:43927", + "issue:42175|issue:44938", + "issue:43720|issue:44610", + "issue:43550|issue:44912", + "issue:43262|issue:44811", + "issue:43749|issue:44756", + "issue:43531|issue:45042", + "issue:43329|issue:44479", + "issue:36010|issue:36331" + ], + 
"prepared_review_unit_hash": "b0bad7f846b1ff7788b65051c7f8e198c2b3d181680883052dcc8275b32beae3", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13199, + "estimated_input_tokens": 3300, + "estimated_eval_tokens": 6856 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These look like a bundle of unrelated Hugging Face/transformers issues rather than duplicates. The soft pairs share broad themes at most (audio, video, loading, quantization), but each reports a different concrete symptom, code path, or model-specific failure.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43819", + "right": "issue:43927", + "accept": false, + "reason": "Different bugs: DAC.from_latents forward mismatch vs DiaConfig custom token IDs being dropped on save/load and causing generation IndexError." + }, + { + "left": "issue:42175", + "right": "issue:44938", + "accept": false, + "reason": "One is missing TensorFlow in a torch extra install; the other is a Python 3.14 load/import failure. Different environments and failure modes." + }, + { + "left": "issue:43720", + "right": "issue:44610", + "accept": false, + "reason": "BitNet packed-weight loading under accelerate is unrelated to OmDet-Turbo processor output size mismatch (640px vs 224px)." + }, + { + "left": "issue:43550", + "right": "issue:44912", + "accept": false, + "reason": "Both are model/runtime inference problems, but they affect different models and different mechanisms: torch.compile+SDPA vs MXFP4 quantization fallback." 
+ }, + { + "left": "issue:43262", + "right": "issue:44811", + "accept": false, + "reason": "Both involve audio processors, but the concrete bugs differ: chat template sampling-rate default vs batch_decode ignoring skip_special_tokens." + }, + { + "left": "issue:43749", + "right": "issue:44756", + "accept": false, + "reason": "FSDP CPU RAM-efficient loading is a training/loading pipeline bug; disabling mmap on Strix Halo is a hardware-specific OOM workaround. Not the same issue." + }, + { + "left": "issue:43531", + "right": "issue:45042", + "accept": false, + "reason": "Completely different subsystems and symptoms: Qwen3-MoE sliding window behavior vs PIL image processors incorrectly requiring torchvision." + }, + { + "left": "issue:43329", + "right": "issue:44479", + "accept": false, + "reason": "Both mention video/VLM paths, but one is an undefined-function bug in multimodal token counting, while the other is a broader video-input regression for several models." + }, + { + "left": "issue:36010", + "right": "issue:36331", + "accept": false, + "reason": "Unrelated API regressions: GenerationMixin import path vs CustomTrainer.compute_loss signature change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 47, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41720", + "issue:43257", + "issue:43329", + "issue:43450", + "issue:43452", + "issue:43540", + "issue:43577", + "issue:43582", + "issue:43697", + "issue:43756", + "issue:43931", + "issue:44291", + "issue:44479", + "issue:45042", + "issue:45072", + "issue:45357", + "issue:45381" + ], + "soft_pairs": [ + "issue:43257|issue:43756", + "issue:43257|issue:43931", + "issue:43577|issue:45072", + "issue:41720|issue:43540", + "issue:43452|issue:43582", + "issue:43329|issue:43450", + "issue:43697|issue:45357", + "issue:44291|issue:45042", + "issue:44479|issue:45381" + ], + "prepared_review_unit_hash": "af5bd09ab418c408e4fc09d4b9d62de9e2b054e6876f0300e1139addc464a816", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12769, + "estimated_input_tokens": 3193, + "estimated_eval_tokens": 6642 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly a heterogeneous set of unrelated model-loading and multimodal bugs. 
The only strong duplicate-like relationship is the Qwen video regression pair: issue 44479 and issue 45381 both describe the same video-input / vision_position_ids regression in the Qwen VL family.", + "confidence": 0.89, + "canonical_issue_reason": "issue 44479 is the broadest and most representative issue in the only apparent duplicate family, covering the Qwen VL video-input regression across multiple affected models.", + "canonical_pr_reason": null, + "best_issue_reason": "issue 44479 is the best overall issue candidate because it describes the widest impact, the clearest regression, and subsumes the more specific Qwen2.5-VL video bug report.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43257", + "right": "issue:43756", + "accept": false, + "reason": "Different failures and code paths: Qwen3 MOE weight conversion under accelerate/deepspeed vs Smollm3 RoPE-layer dropping." + }, + { + "left": "issue:43257", + "right": "issue:43931", + "accept": false, + "reason": "Both involve Qwen3 model loading, but one is MOE weight conversion and the other is visual weight-shape mismatch; not the same bug." + }, + { + "left": "issue:43577", + "right": "issue:45072", + "accept": false, + "reason": "Both mention dtype mismatches, but they affect different architectures and likely different conversion/inference paths." + }, + { + "left": "issue:41720", + "right": "issue:43540", + "accept": false, + "reason": "Qwen3 auto device mapping CUDA assert is unrelated to the Qwen3OmniMoe video-input ValueError." + }, + { + "left": "issue:43452", + "right": "issue:43582", + "accept": false, + "reason": "Completely different subsystems: GGUF tokenizer/model loading vs Apple Silicon caching allocator warmup." + }, + { + "left": "issue:43329", + "right": "issue:43450", + "accept": false, + "reason": "Both are video-related, but one is a missing helper/merge-size bug in token counting and the other is a batched-shape bug in processors." 
+ }, + { + "left": "issue:43697", + "right": "issue:45357", + "accept": false, + "reason": "RTDetrV2 output drift in v5 is unrelated to Qwen3.5 save_pretrained visual encoder key serialization." + }, + { + "left": "issue:44291", + "right": "issue:45042", + "accept": false, + "reason": "init_empty_weights/_is_hf_initialized TypeError is unrelated to PIL backend processors requiring torchvision." + }, + { + "left": "issue:44479", + "right": "issue:45381", + "accept": true, + "reason": "Both describe the same Qwen VL video-input regression, centered on incorrect vision_position_ids for video processing; 45381 looks like a specific instance of the broader 44479 bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 48, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33357", + "issue:34567", + "issue:42175", + "issue:42915", + "issue:43526", + "issue:43540", + "issue:43742", + "issue:43824", + "issue:43927", + "issue:44062", + "issue:44479", + "issue:44488", + "issue:44560", + "issue:44821", + "issue:44912", + "issue:44991", + "issue:45479" + ], + "soft_pairs": [ + "issue:42175|issue:43824", + "issue:44062|issue:44991", + "issue:42915|issue:44821", + "issue:43540|issue:44560", + "issue:42175|issue:44488", + "issue:42915|issue:43742", + "issue:43927|issue:44912", + "issue:43540|issue:44479", + "issue:43526|issue:45479", + "issue:33357|issue:34567" + ], + "prepared_review_unit_hash": "bfc89bc5a278abd3ef614f331fbd46ae2e254388acddf4b7456faa65453b90f0", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + 
"estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12829, + "estimated_input_tokens": 3208, + "estimated_eval_tokens": 6672 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No convincing duplicate cluster here: the issues span unrelated subsystems (tokenizers, multimodal/video, quantization, training state, model loading). All soft pairs should be rejected.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:43824 is the most central user-facing report in this set by discussion and inbound references, but it is not a duplicate anchor for the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42175", + "right": "issue:43824", + "accept": false, + "reason": "Different failures: missing TensorFlow backend on pip install vs an import error for a specific Qwen2.5-VL class." + }, + { + "left": "issue:44062", + "right": "issue:44991", + "accept": false, + "reason": "Different root causes: AddedToken special-arg conflict vs tokenizer loading failure for a specific model." + }, + { + "left": "issue:42915", + "right": "issue:44821", + "accept": false, + "reason": "Unrelated areas: FP8 quantization failure for Qwen3Moe vs loading AutoImageProcessor from a URL." + }, + { + "left": "issue:43540", + "right": "issue:44560", + "accept": false, + "reason": "Both are video-related, but they concern different Qwen model families and different concrete errors; not enough to treat as the same bug." + }, + { + "left": "issue:42175", + "right": "issue:44488", + "accept": false, + "reason": "Different issue classes: backend dependency installation vs loading a particular model checkpoint." 
+ }, + { + "left": "issue:42915", + "right": "issue:43742", + "accept": false, + "reason": "Different code paths: quantization config handling vs key error while loading MobileLLM-125M." + }, + { + "left": "issue:43927", + "right": "issue:44912", + "accept": false, + "reason": "Config save/load custom-token-ID corruption is unrelated to MXFP4 quantization fallback behavior." + }, + { + "left": "issue:43540", + "right": "issue:44479", + "accept": false, + "reason": "Both mention video input, but one is a Qwen3OmniMoe processing error while the other is a broader regression across several Qwen VL variants; not the same concrete bug." + }, + { + "left": "issue:43526", + "right": "issue:45479", + "accept": false, + "reason": "Different semantics: BeitImageProcessorFast label reduction bug vs single-label classification with num_labels=1 causing zero loss." + }, + { + "left": "issue:33357", + "right": "issue:34567", + "accept": false, + "reason": "Completely unrelated: MacOS bus error with CLIP model vs TrainerState token counter not updating." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 49, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:28282", + "issue:34689", + "issue:36010", + "issue:38175", + "issue:42175", + "issue:43582", + "issue:43646", + "issue:43742", + "issue:43749", + "issue:43782", + "issue:43906", + "issue:44291", + "issue:44484", + "issue:44704", + "issue:44898", + "issue:45137", + "issue:45381", + "issue:45405" + ], + "soft_pairs": [ + "issue:43906|issue:45381", + "issue:45137|issue:45405", + "issue:44484|issue:44704", + "issue:36010|issue:42175", + "issue:38175|issue:44898", + "issue:43582|issue:43782", + "issue:43742|issue:44291", + "issue:43646|issue:43749", + "issue:28282|issue:34689" + ], + "prepared_review_unit_hash": "3d0a310e920f6902682fa43dc12d21df5e7df93fc199e6ca05c11cb79cf59fc0", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13089, + "estimated_input_tokens": 3273, + "estimated_eval_tokens": 6802 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set: the issues span unrelated bugs/regressions in loading, generation, vision models, backend packaging, and distributed/runtime paths. 
None of the soft pairs look like the same underlying defect or change.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:43749 is the strongest standalone report: detailed repro, active discussion, inbound references, and a concrete broken code path (FSDP CPU RAM efficient loading).", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43906", + "right": "issue:45381", + "accept": false, + "reason": "Different problems: one is an isolated reproduction of an earlier issue, the other is a qwen2.5-vl video vision_position_ids bug. No shared code path or concrete fix overlap is evident." + }, + { + "left": "issue:45137", + "right": "issue:45405", + "accept": false, + "reason": "Unrelated: DeepSpeed ZeRO3 deque IndexError versus a PEFT version bump/package release issue. Different layers and remedies." + }, + { + "left": "issue:44484", + "right": "issue:44704", + "accept": false, + "reason": "Both mention model-loading utilities, but one is about max_shard_size behavior in save_pretrained while the other is cached_file kwargs propagation in AutoProcessor.from_pretrained. Not the same bug." + }, + { + "left": "issue:36010", + "right": "issue:42175", + "accept": false, + "reason": "One is a GenerationMixin import error; the other is missing TensorFlow support in a torch-only install. Different failure modes and code paths." + }, + { + "left": "issue:38175", + "right": "issue:44898", + "accept": false, + "reason": "Both affect vision models, but the symptoms differ: zero probabilities in SigLIP2 versus Perceiver interpolation failure at non-default resolution. Not the same underlying defect." + }, + { + "left": "issue:43582", + "right": "issue:43782", + "accept": false, + "reason": "Apple Silicon caching_allocator_warmup TypeError is a runtime/backend issue; Qwen3VL weight_only=True loading error is a model-loading problem. No plausible single fix." 
+ }, + { + "left": "issue:43742", + "right": "issue:44291", + "accept": false, + "reason": "Both are loading-time errors, but one is a KeyError for facebook/MobileLLM-125M and the other is an unexpected _is_hf_initialized argument with init_empty_weights in 5.0.0rc0. Different root causes." + }, + { + "left": "issue:43646", + "right": "issue:43749", + "accept": false, + "reason": "Custom model initialization breakage in 5.0.0 is distinct from FSDP_CPU_RAM_EFFICIENT_LOADING regression. They affect different initialization/loading mechanisms." + }, + { + "left": "issue:28282", + "right": "issue:34689", + "accept": false, + "reason": "Missing PyTorch ImportError and Llama 3.2 Vision loading regression are separate issues with different environments and code paths." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 50, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:28282", + "issue:29127", + "issue:34567", + "issue:36331", + "issue:43452", + "issue:43493", + "issue:43643", + "issue:43688", + "issue:43749", + "issue:43756", + "issue:43994", + "issue:44062", + "issue:44291", + "issue:44488", + "issue:44821", + "issue:45081", + "issue:45325", + "issue:45375" + ], + "soft_pairs": [ + "issue:43643|issue:45375", + "issue:43756|issue:45325", + "issue:44291|issue:44821", + "issue:43493|issue:43994", + "issue:44062|issue:45081", + "issue:28282|issue:29127", + "issue:43452|issue:44488", + "issue:34567|issue:36331", + "issue:43688|issue:43749" + ], + "prepared_review_unit_hash": "1f62a6241e8dd9ad18b6ec9291599447695c0248ec88da37c507f0bf9d68de29", + "component_budget": { + "node_count": 359, + 
"item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13364, + "estimated_input_tokens": 3341, + "estimated_eval_tokens": 6938 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most items are unrelated standalone issues. The only likely duplicate pair is the SigLIP2 implementation mismatch reports; the rest differ by model, code path, or failure mode.", + "confidence": 0.74, + "canonical_issue_reason": "Issue #43493 is the best canonical issue: it describes the underlying SigLIP2 HF-vs-JAX implementation discrepancy, which also explains the user-facing bad outputs reported in #43994.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #43493 is the strongest representative for the cluster because it states the concrete root-cause-level bug rather than only the downstream symptom.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43643", + "right": "issue:45375", + "accept": false, + "reason": "Both involve missing config fields, but they affect different models and code paths; not the same concrete bug." + }, + { + "left": "issue:43756", + "right": "issue:45325", + "accept": false, + "reason": "Both are RoPE-related, but they describe different model-specific logic errors and are not the same fix." + }, + { + "left": "issue:44291", + "right": "issue:44821", + "accept": false, + "reason": "One is an unexpected argument during model loading, the other is an AutoImageProcessor URL loading issue; different failures." 
+ }, + { + "left": "issue:43493", + "right": "issue:43994", + "accept": true, + "reason": "Both point to the same SigLIP2 implementation problem causing incorrect outputs; #43994 looks like the user-visible symptom of the discrepancy in #43493." + }, + { + "left": "issue:44062", + "right": "issue:45081", + "accept": false, + "reason": "These are unrelated tokenizer/parser failures with different exceptions and code paths." + }, + { + "left": "issue:28282", + "right": "issue:29127", + "accept": false, + "reason": "Different subsystems and symptoms: missing PyTorch import versus LayoutLMv3 error handling." + }, + { + "left": "issue:43452", + "right": "issue:44488", + "accept": false, + "reason": "Both are loading-related, but the titles indicate different model/load paths and no clear shared concrete bug." + }, + { + "left": "issue:34567", + "right": "issue:36331", + "accept": false, + "reason": "TrainerState token accounting and custom compute_loss signature are separate trainer bugs." + }, + { + "left": "issue:43688", + "right": "issue:43749", + "accept": false, + "reason": "Auxiliary-loss normalization and FSDP CPU RAM efficient loading are unrelated issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 51, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43452", + "issue:43531", + "issue:43577", + "issue:43643", + "issue:43697", + "issue:43742", + "issue:44514", + "issue:44561", + "issue:44589", + "issue:44792", + "issue:44821", + "issue:44964", + "issue:45042", + "issue:45081", + "issue:45216", + "issue:45290", + "issue:45381" + ], + "soft_pairs": [ + "issue:44792|issue:44964", + "issue:43742|issue:44589", + "issue:43531|issue:44821", + "issue:44514|issue:45290", + "issue:45042|issue:45081", + "issue:43452|issue:43577", + "issue:43643|issue:44561", + "issue:44514|issue:45381", + "issue:43697|issue:45216" + ], + "prepared_review_unit_hash": "85cb7bcd886d44375c2f18f0ffd66dbde6ddab6879f4dc8ab0799e283368155b", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12662, + "estimated_input_tokens": 3166, + "estimated_eval_tokens": 6588 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set: the issues cover unrelated regressions across loading, tokenization, chat templating, vision/image processing, model dtype, and backend compatibility. 
No soft pair looks like the same underlying bug or change.", + "confidence": 0.95, + "canonical_issue_reason": "No canonical issue fits well because the cluster is heterogeneous; the reported bugs do not share one underlying root cause.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44821 is the broadest, most user-facing loading problem and is the least misleading representative, but it is still not a true cluster canonical.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44792", + "right": "issue:44964", + "accept": false, + "reason": "Different problems: a janus test failure versus a Phi-4 multimodal loading failure; same general area, not the same bug." + }, + { + "left": "issue:43742", + "right": "issue:44589", + "accept": false, + "reason": "Both are loading/runtime errors, but one is a key error for a specific model and the other is a Float8 storage lookup failure; different failure modes and causes." + }, + { + "left": "issue:43531", + "right": "issue:44821", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior is unrelated to AutoImageProcessor-from-URL loading." + }, + { + "left": "issue:44514", + "right": "issue:45290", + "accept": false, + "reason": "Both involve chat/template processing, but one crashes on batched padding=False input and the other on assistant tool-call messages with no content; distinct code paths." + }, + { + "left": "issue:45042", + "right": "issue:45081", + "accept": false, + "reason": "Image processor torchvision dependency issue versus Mistral tokenizer regex patch crash; different components and bugs." + }, + { + "left": "issue:43452", + "right": "issue:43577", + "accept": false, + "reason": "GGUF AutoTokenizer/AutoModel loading breakage is unrelated to BLIP2 dtype propagation remaining float32." 
+ }, + { + "left": "issue:43643", + "right": "issue:44561", + "accept": false, + "reason": "Different trust_remote_code-related regressions: missing fields in AutoConfig return object versus removal of is_torch_fx_available breaking remote-code models." + }, + { + "left": "issue:44514", + "right": "issue:45381", + "accept": false, + "reason": "Both touch Qwen2.5-VL chat/vision flows, but one is a processor chat-template crash and the other is incorrect vision_position_ids in video inputs; not the same bug." + }, + { + "left": "issue:43697", + "right": "issue:45216", + "accept": false, + "reason": "Different regressions: RTDetrV2 output divergence in v5 versus Qwen3.5 save_pretrained checkpoint correctness." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 52, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41762", + "issue:43295", + "issue:43452", + "issue:43653", + "issue:43688", + "issue:43720", + "issue:43819", + "issue:44464", + "issue:44568", + "issue:44625", + "issue:44743", + "issue:44811", + "issue:44843", + "issue:44877", + "issue:44898", + "issue:44936", + "issue:45356", + "issue:45440" + ], + "soft_pairs": [ + "issue:41762|issue:44936", + "issue:43452|issue:44843", + "issue:43688|issue:45440", + "issue:43653|issue:44568", + "issue:43720|issue:44898", + "issue:43819|issue:44811", + "issue:44625|issue:44877", + "issue:43295|issue:45356", + "issue:44464|issue:44743" + ], + "prepared_review_unit_hash": "2621b45f84bcfba5a272f59411c2c3a32885d398320be74ce4a9b0eeb047b388", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + 
"serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13368, + "estimated_input_tokens": 3342, + "estimated_eval_tokens": 6940 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set: the issues span unrelated bugs across tokenizers, configs, loading, generation, and model-specific behavior. No PRs are present.", + "confidence": 0.93, + "canonical_issue_reason": "No single canonical issue stands out; the items are largely unrelated, so picking one as the duplicate anchor would be arbitrary.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44843 is the best representative by triage value: it is open, narrowly scoped, and has a clear failure path (`AutoTokenizer.from_pretrained` unconditionally calling `model_info()` in `_patch_mistral_regex`, breaking offline mode).", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41762", + "right": "issue:44936", + "accept": false, + "reason": "Different failures: Gemma3 + DeepSpeed ZeRO-3 load-time IndexError vs trainer.evaluate() failing after training. No shared code path or concrete duplicate symptom." + }, + { + "left": "issue:43452", + "right": "issue:44843", + "accept": false, + "reason": "Both involve tokenizer/loading, but the bugs differ: gguf_file breaks AutoTokenizer/AutoModel loading, while 44843 is an offline-mode regression from an unconditional `model_info()` call." + }, + { + "left": "issue:43688", + "right": "issue:45440", + "accept": false, + "reason": "Unrelated model bugs: auxiliary-loss normalization in OLMoE/GPT Oss vs native DeepseekV3MoE diverging from remote implementation." 
+ }, + { + "left": "issue:43653", + "right": "issue:44568", + "accept": false, + "reason": "Both are tokenizer/special-token regressions, but the concrete bugs differ: BigBird mask token decode issue vs mdeberta-v3 BOS/EOS not added with `add_special_tokens=True`." + }, + { + "left": "issue:43720", + "right": "issue:44898", + "accept": false, + "reason": "Different subsystems and symptoms: BitNet packed-weight loading/unpacking vs Perceiver image classification failing at non-default resolution." + }, + { + "left": "issue:43819", + "right": "issue:44811", + "accept": false, + "reason": "Distinct model behaviors: DAC `from_latents` mismatch due to missing STE vs Whisper processor `batch_decode()` ignoring `skip_special_tokens`." + }, + { + "left": "issue:44625", + "right": "issue:44877", + "accept": false, + "reason": "Both concern config handling, but one is Qwen3.5 `num_labels` propagation and the other is strict config blocking `granite_speech` loading; not the same bug." + }, + { + "left": "issue:43295", + "right": "issue:45356", + "accept": false, + "reason": "Both are regressions, but they affect different tokenizer/processor paths: processor.tokenizer image handling vs Kimi-K2.5 codec handling and a misleading warning." + }, + { + "left": "issue:44464", + "right": "issue:44743", + "accept": false, + "reason": "Both touch generation/cache behavior, but one is chunked generation with compiled forward while the other is recurrent states resetting in modular_qwen3_5.py; not mergeable as one fix." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 53, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:24643", + "issue:29127", + "issue:38175", + "issue:40444", + "issue:41720", + "issue:43295", + "issue:43525", + "issue:43577", + "issue:43643", + "issue:43697", + "issue:43906", + "issue:44361", + "issue:44442", + "issue:44466", + "issue:44811", + "issue:44843", + "issue:44991", + "issue:45005" + ], + "soft_pairs": [ + "issue:40444|issue:44811", + "issue:44466|issue:45005", + "issue:41720|issue:43906", + "issue:24643|issue:29127", + "issue:43525|issue:43643", + "issue:44843|issue:44991", + "issue:44361|issue:44442", + "issue:43577|issue:43697", + "issue:38175|issue:43295" + ], + "prepared_review_unit_hash": "506103b25e88b2900291cb390e0194966a7a5a6e8820aca9a797a0f33a6dbde1", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13285, + "estimated_input_tokens": 3322, + "estimated_eval_tokens": 6900 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No convincing duplicate cluster emerged: the paired issues mostly share broad vocabulary (tokenizers, tied weights, v5 regressions, model-specific failures) but describe different code paths and symptoms.", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + 
"best_issue_reason": "Issue 45005 is the best umbrella-style issue here because it is the broadest current report around tied-weight regressions in v5 and could plausibly absorb related follow-ups, even though the other issues are not confirmed duplicates of it.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:40444", + "right": "issue:44811", + "accept": false, + "reason": "Different models and failures: Qwen2.5-VL multi-image iterable dataset handling vs Whisper processor.batch_decode skip_special_tokens behavior." + }, + { + "left": "issue:44466", + "right": "issue:45005", + "accept": false, + "reason": "Related theme but not the same bug: 44466 is device-dependent lm_head.weight serialization, while 45005 is a broader tied-weights issue on translation models." + }, + { + "left": "issue:41720", + "right": "issue:43906", + "accept": false, + "reason": "Qwen3 auto device-mapping cudaErrorAssert on A800 is not the same as an isolated reproduction of issue 38071; no concrete shared bug is evident." + }, + { + "left": "issue:24643", + "right": "issue:29127", + "accept": false, + "reason": "DeepSpeed 2-D weight runtime error and LayoutLMv3 box/clarity error are unrelated symptoms in different workflows." + }, + { + "left": "issue:43525", + "right": "issue:43643", + "accept": false, + "reason": "Missing pad_token_id on Llama4Config is a specific model/config bug, not the same as generic missing fields from trust_remote_code AutoConfig loading." + }, + { + "left": "issue:44843", + "right": "issue:44991", + "accept": false, + "reason": "HF_HUB_OFFLINE breakage from unconditional model_info() is a different failure mode than tokenizer loading failure for EMBEDDIA/est-roberta." + }, + { + "left": "issue:44361", + "right": "issue:44442", + "accept": false, + "reason": "MLukeTokenizer AttributeError and FastSpeech2ConformerTokenizer load failure involve different tokenizers and code paths." 
+ }, + { + "left": "issue:43577", + "right": "issue:43697", + "accept": false, + "reason": "BLIP2 dtype propagation remaining float32 is unrelated to RTDetrV2 output differences in Transformers v5." + }, + { + "left": "issue:38175", + "right": "issue:43295", + "accept": false, + "reason": "Siglip2 zero probabilities and processor.tokenizer regression are different model families and different symptoms; not the same underlying bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 54, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:28282", + "issue:30064", + "issue:32090", + "issue:33357", + "issue:33666", + "issue:35141", + "issue:43122", + "issue:43531", + "issue:43653", + "issue:43742", + "issue:43906", + "issue:44361", + "issue:44464", + "issue:44479", + "issue:44936", + "issue:44977", + "issue:45356" + ], + "soft_pairs": [ + "issue:43653|issue:44361", + "issue:44479|issue:44936", + "issue:30064|issue:33666", + "issue:44977|issue:45356", + "issue:43122|issue:43906", + "issue:28282|issue:33357", + "issue:28282|issue:32090", + "issue:28282|issue:35141", + "issue:44464|issue:44977", + "issue:43531|issue:43742" + ], + "prepared_review_unit_hash": "6fcb2d3c069c00b551f0f309dd8f854953d4e8bd36c6234c1999b245472cdd24", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12812, + "estimated_input_tokens": 3203, + "estimated_eval_tokens": 6662 
+ }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are mostly unrelated and span different bugs in tokenizers, training, generation, vision preprocessing, and model loading. I do not see a true duplicate cluster here, so all soft-edge pairs are rejected.", + "confidence": 0.97, + "canonical_issue_reason": "No single canonical issue stands out because the set is heterogeneous and the soft-similarity links do not point to the same underlying bug.", + "canonical_pr_reason": null, + "best_issue_reason": "No global best issue for duplicate-canonicalization: each item describes a distinct problem in a different code path or model family.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43653", + "right": "issue:44361", + "accept": false, + "reason": "Both are tokenizer-related, but one is a BigBird special-token registration/decode bug and the other is an MLuke task-time AttributeError; different failures and code paths." + }, + { + "left": "issue:44479", + "right": "issue:44936", + "accept": false, + "reason": "Video-input regression for specific Qwen models is unrelated to trainer.evaluate failing after train; one is a model/input bug, the other is a trainer lifecycle issue." + }, + { + "left": "issue:30064", + "right": "issue:33666", + "accept": false, + "reason": "Image processor void-segmentation handling and Qwen2-VL multi-GPU training are different subsystems with different symptoms." + }, + { + "left": "issue:44977", + "right": "issue:45356", + "accept": false, + "reason": "Flash-attention generation failure for Qwen3.5 is not the same as a Kimi-K2.5 tokenizer codec regression and warning mismatch." 
+ }, + { + "left": "issue:43122", + "right": "issue:43906", + "accept": false, + "reason": "The first reports tokenizer output changing across versions; the second is only an isolated reproduction of another issue, with no clear evidence it is the same bug." + }, + { + "left": "issue:28282", + "right": "issue:33357", + "accept": false, + "reason": "PyTorch-missing ImportError and a MacOS bus error on a CLIP model are unrelated failure modes." + }, + { + "left": "issue:28282", + "right": "issue:32090", + "accept": false, + "reason": "Missing PyTorch dependency vs. NoneType broadcast error in Trainer are distinct problems." + }, + { + "left": "issue:28282", + "right": "issue:35141", + "accept": false, + "reason": "ImportError on AutoModel has nothing to do with token embedding resizing reinitializing output embeddings." + }, + { + "left": "issue:44464", + "right": "issue:44977", + "accept": false, + "reason": "Chunked-generation inconsistency with compiled forward is a different generation-path bug than Qwen3.5 flash-attention output issues." + }, + { + "left": "issue:43531", + "right": "issue:43742", + "accept": false, + "reason": "Sliding-window behavior in Qwen3-MoE is unrelated to a key error when loading MobileLLM-125M." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 55, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41720", + "issue:43012", + "issue:43329", + "issue:43452", + "issue:43504", + "issue:43550", + "issue:43577", + "issue:43653", + "issue:43720", + "issue:43927", + "issue:44361", + "issue:44451", + "issue:44464", + "issue:44479", + "issue:44554", + "issue:44625", + "issue:45325" + ], + "soft_pairs": [ + "issue:43577|issue:44554", + "issue:43653|issue:43927", + "issue:43329|issue:45325", + "issue:43452|issue:44451", + "issue:43012|issue:43577", + "issue:41720|issue:44464", + "issue:44479|issue:44625", + "issue:43504|issue:43720", + "issue:43550|issue:44361" + ], + "prepared_review_unit_hash": "99ff388930faffba5b69383ee24aab16e54c2f358bdbdb8941d7b0a78bdf3bfa", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12750, + "estimated_input_tokens": 3188, + "estimated_eval_tokens": 6632 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the issues span unrelated bugs in Qwen/VL, tokenizers, config loading, dtype handling, compilation, and specific model loaders. 
I do not see a valid duplicate set or a single canonical issue/PR.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43577", + "right": "issue:44554", + "accept": false, + "reason": "Both mention dtype/precision, but one is a BLIP2 loading mismatch and the other is an MPS attention correctness bug; different models, code paths, and failure modes." + }, + { + "left": "issue:43653", + "right": "issue:43927", + "accept": false, + "reason": "Tokenizer special-token decoding vs DiaConfig save/load token-ID loss; both are token-related but the concrete bugs are unrelated." + }, + { + "left": "issue:43329", + "right": "issue:45325", + "accept": false, + "reason": "Both are multimodal/VL issues, but one is an undefined-variable bug in multimodal token counting and the other is a rope-index scaling regression; different functions and symptoms." + }, + { + "left": "issue:43452", + "right": "issue:44451", + "accept": false, + "reason": "Both are loading problems, but gguf_file/pretrained-loading breakage is unrelated to ScandiBERT model loading failure." + }, + { + "left": "issue:43012", + "right": "issue:43577", + "accept": false, + "reason": "These are both dtype-adjacent, but one is a PyTorch compile warning and the other is a model-loading bug leaving weights in float32; not the same underlying issue." + }, + { + "left": "issue:41720", + "right": "issue:44464", + "accept": false, + "reason": "Qwen3 auto device mapping CUDA assert vs chunked generation inconsistency under compiled forward are distinct runtime paths and bug classes." + }, + { + "left": "issue:44479", + "right": "issue:44625", + "accept": false, + "reason": "Both involve Qwen3.5 naming, but one is a video-input regression and the other is num_labels propagation in config; separate features and failures." 
+ }, + { + "left": "issue:43504", + "right": "issue:43720", + "accept": false, + "reason": "Legacy-field BEiT loading issue vs BitNet packed-weight unpacking during accelerate loading are different model-loading defects." + }, + { + "left": "issue:43550", + "right": "issue:44361", + "accept": false, + "reason": "torch.compile/SDPA failure in Bamba-9B-v2 and MLukeTokenizer AttributeError are unrelated subsystems and symptoms." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 56, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:32090", + "issue:33666", + "issue:34567", + "issue:43334", + "issue:43504", + "issue:43525", + "issue:43540", + "issue:43550", + "issue:43572", + "issue:43716", + "issue:43742", + "issue:44610", + "issue:44821", + "issue:44849", + "issue:45200", + "issue:45229", + "issue:45357" + ], + "soft_pairs": [ + "issue:44849|issue:45357", + "issue:43504|issue:43550", + "issue:45200|issue:45229", + "issue:43334|issue:43572", + "issue:43742|issue:44821", + "issue:43540|issue:43716", + "issue:32090|issue:34567", + "issue:32090|issue:33666", + "issue:43504|issue:44610", + "issue:43525|issue:43572" + ], + "prepared_review_unit_hash": "edf989bad2c3ef8633653c096c871df34a1cd2b8c4b93cabc31f15c3a8ad9260", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12728, + "estimated_input_tokens": 3182, + "estimated_eval_tokens": 6620 + }, + 
"overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items do not form a true duplicate cluster; they span unrelated model/config/processor/training bugs. All soft edges are superficial thematic matches and should be rejected.", + "confidence": 0.97, + "canonical_issue_reason": "No single issue is a convincing canonical duplicate target because the reports cover different models and different failure modes.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a strong global representative here; the set is too heterogeneous for one issue to stand in as a canonical duplicate.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44849", + "right": "issue:45357", + "accept": false, + "reason": "Both are Qwen3.5-related, but one is about output_hidden_states behavior and the other is about save_pretrained visual encoder keys; different bugs and different code paths." + }, + { + "left": "issue:43504", + "right": "issue:43550", + "accept": false, + "reason": "Unrelated problems: BEiT pretrained loading with a legacy field versus Bamba torch.compile/SDPA failure. Same broad ML area only." + }, + { + "left": "issue:45200", + "right": "issue:45229", + "accept": false, + "reason": "Gemma 4 text-only fine-tuning with missing mm_token_type_ids is unrelated to a multi-GPU inference CUDA OOM." + }, + { + "left": "issue:43334", + "right": "issue:43572", + "accept": false, + "reason": "Both mention missing pad-token config attributes, but they affect different models and fields (pad_token_id vs pad_token_idx) with no evidence of the same underlying bug." + }, + { + "left": "issue:43742", + "right": "issue:44821", + "accept": false, + "reason": "A MobileLLM load key error and an AutoImageProcessor-from-URL loading issue are different failure modes in different loading paths." 
+ }, + { + "left": "issue:43540", + "right": "issue:43716", + "accept": false, + "reason": "Different models and modalities: Qwen3OmniMoe video processing versus Mistral-3 image preprocessor dtype mismatch." + }, + { + "left": "issue:32090", + "right": "issue:34567", + "accept": false, + "reason": "Trainer broadcast TypeError and TrainerState num_input_tokens_seen not updating are separate trainer issues with different symptoms and fixes." + }, + { + "left": "issue:32090", + "right": "issue:33666", + "accept": false, + "reason": "One is a specific Trainer _gpu_broadcast_one TypeError; the other is a broad Qwen2-VL multi-GPU training request/problem statement, not the same bug." + }, + { + "left": "issue:43504", + "right": "issue:44610", + "accept": false, + "reason": "Both involve model/preprocessor loading, but the concrete bugs differ: legacy field handling in BEiT versus an input-size mismatch in OmDet-Turbo." + }, + { + "left": "issue:43525", + "right": "issue:43572", + "accept": false, + "reason": "Similar pad-token attribute theme, but different models and different missing fields; not enough to conclude the same bug or a mergeable shared fix." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 57, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33666", + "issue:36010", + "issue:43262", + "issue:43278", + "issue:43493", + "issue:43606", + "issue:43688", + "issue:43720", + "issue:43756", + "issue:43825", + "issue:43844", + "issue:43873", + "issue:43927", + "issue:44488", + "issue:44843", + "issue:44913", + "issue:45084" + ], + "soft_pairs": [ + "issue:43278|issue:43688", + "issue:43262|issue:45084", + "issue:43756|issue:44913", + "issue:43493|issue:43825", + "issue:33666|issue:36010", + "issue:43688|issue:43873", + "issue:43720|issue:43927", + "issue:43606|issue:43927", + "issue:44488|issue:44843", + "issue:43278|issue:43844" + ], + "prepared_review_unit_hash": "deba43ccfa4c370efd4a68724a734b58b4610211405642a76baf195d5e2191a0", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12901, + "estimated_input_tokens": 3226, + "estimated_eval_tokens": 6708 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are mostly unrelated bug reports that share only broad subsystem vocabulary (training, loading, offload, configs). 
I do not see any true duplicate pairs among the soft candidates.", + "confidence": 0.91, + "canonical_issue_reason": "No single canonical issue fits this set: the reports cover different bugs, different code paths, and different products/models.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a strong global representative because the cluster is not a duplicate set; at best, one could pick a broad open report, but it would not summarize the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43278", + "right": "issue:43688", + "accept": false, + "reason": "Both mention training behavior and numerics, but one is a BF16/FP32 dtype mismatch in evaluation and the other is auxiliary-loss normalization in MoE models; different bugs and code paths." + }, + { + "left": "issue:43262", + "right": "issue:45084", + "accept": false, + "reason": "One is an audio processor sampling-rate default issue, the other is a compile-time template-node TypeError; no shared underlying bug." + }, + { + "left": "issue:43756", + "right": "issue:44913", + "accept": false, + "reason": "Both involve rotary-related configuration, but one is about Smollm3 dropping RoPE layers and the other is config reload losing rotary_pct; distinct failure modes." + }, + { + "left": "issue:43493", + "right": "issue:43825", + "accept": false, + "reason": "SigLIP2 implementation discrepancy and pipeline translation-task messaging are unrelated; same broad quality umbrella only." + }, + { + "left": "issue:33666", + "right": "issue:36010", + "accept": false, + "reason": "Multi-GPU training for Qwen2-VL and a GenerationMixin import error are completely separate issues." + }, + { + "left": "issue:43688", + "right": "issue:43873", + "accept": false, + "reason": "Both involve offloading/quantization-adjacent behavior, but one is about auxiliary-loss normalization and the other about CPU offload with quantization; not the same bug." 
+ }, + { + "left": "issue:43720", + "right": "issue:43927", + "accept": false, + "reason": "BitNet accelerate-loading with packed weights and DiaConfig custom token IDs loss are different model/config-loading problems." + }, + { + "left": "issue:43606", + "right": "issue:43927", + "accept": false, + "reason": "CPU-offload device mismatch for bark-small and DiaConfig token-ID persistence are unrelated failures." + }, + { + "left": "issue:44488", + "right": "issue:44843", + "accept": false, + "reason": "Both concern model/tokenizer loading, but one is a specific model failing to load and the other is HF_HUB_OFFLINE being broken by an unconditional model_info call; different root causes." + }, + { + "left": "issue:43278", + "right": "issue:43844", + "accept": false, + "reason": "Both reference training instability, but one is a dtype change between train/eval and the other is gradient growth with ZeRO-3; not the same concrete defect." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 58, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33666", + "issue:36331", + "issue:39692", + "issue:43257", + "issue:43299", + "issue:43329", + "issue:43493", + "issue:43540", + "issue:43606", + "issue:43720", + "issue:43756", + "issue:43844", + "issue:44060", + "issue:44361", + "issue:44964", + "issue:44977", + "issue:45405" + ], + "soft_pairs": [ + "issue:43540|issue:44977", + "issue:43720|issue:44361", + "issue:43257|issue:43299", + "issue:43329|issue:43606", + "issue:39692|issue:43493", + "issue:33666|issue:36331", + "issue:43756|issue:44060", + "issue:44964|issue:45405", + 
"issue:43493|issue:43844" + ], + "prepared_review_unit_hash": "c85d51fb7c7a97f58c370989ed6c436b60985277c26234d0b872595bcf64614a", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12666, + "estimated_input_tokens": 3167, + "estimated_eval_tokens": 6590 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No soft pair looks like the same underlying bug/change; most are broad-family false positives with different models, code paths, or scopes. There are no PRs in the set. I picked the clearest concrete regression issue as the best anchor issue.", + "confidence": 0.66, + "canonical_issue_reason": "Issue 43299 is the clearest, most actionable regression report: it names a specific broken version boundary (5.0.0.dev0) and a concrete loading failure path for Qwen3VL MoE models.", + "canonical_pr_reason": null, + "best_issue_reason": "43299 is the strongest standalone issue because it is precise, reproducible, and regression-oriented; among these unrelated reports it is the best representative anchor.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43540", + "right": "issue:44977", + "accept": false, + "reason": "Both are Qwen-related, but one is a video-input ValueError in Qwen3OmniMoe and the other is a flash-attention generation bug in Qwen3.5; different models and code paths." + }, + { + "left": "issue:43720", + "right": "issue:44361", + "accept": false, + "reason": "Packed-weight loading in BitNet/accelerate is unrelated to a tokenizer AttributeError in MLukeTokenizer." 
+ }, + { + "left": "issue:43257", + "right": "issue:43299", + "accept": false, + "reason": "Both mention Qwen3 MoE loading, but one is an accelerate+deepspeed conversion issue and the other is a transformers 5.0.0.dev0 regression for Qwen3VL MoE; different failure modes." + }, + { + "left": "issue:43329", + "right": "issue:43606", + "accept": false, + "reason": "A multimodal token-counting video-branch bug is unrelated to a Bark CPU offload device-mismatch CI failure." + }, + { + "left": "issue:39692", + "right": "issue:43493", + "accept": false, + "reason": "Same SigLIP2 family, but one is documentation example errors and the other is an implementation discrepancy versus JAX; not the same bug." + }, + { + "left": "issue:33666", + "right": "issue:36331", + "accept": false, + "reason": "Multi-GPU training support for Qwen2-VL is unrelated to a CustomTrainer compute_loss signature incompatibility." + }, + { + "left": "issue:43756", + "right": "issue:44060", + "accept": false, + "reason": "Smollm3 RoPE-layer behavior and Qwen3-Next tied-weights warnings affect different models and different mechanisms." + }, + { + "left": "issue:44964", + "right": "issue:45405", + "accept": false, + "reason": "A Phi-4 multimodal load failure is unrelated to a PEFT dependency-version bump issue." + }, + { + "left": "issue:43493", + "right": "issue:43844", + "accept": false, + "reason": "SigLIP2 model discrepancy and ZeRO-3 gradient inflation are different bugs with different triggers and subsystems." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 59, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33357", + "issue:33666", + "issue:43504", + "issue:43572", + "issue:43606", + "issue:43646", + "issue:43688", + "issue:43746", + "issue:43824", + "issue:43994", + "issue:44291", + "issue:44423", + "issue:44466", + "issue:44493", + "issue:44521", + "issue:44589", + "issue:44734", + "issue:45290" + ], + "soft_pairs": [ + "issue:43824|issue:44589", + "issue:44423|issue:44734", + "issue:43688|issue:43746", + "issue:43572|issue:44291", + "issue:43646|issue:44466", + "issue:44521|issue:45290", + "issue:43994|issue:44493", + "issue:43504|issue:43606", + "issue:33357|issue:33666" + ], + "prepared_review_unit_hash": "8d0473d85cff5b240d60235378dc81e09965b98b814549cd952ceff4cfc9a72f", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13317, + "estimated_input_tokens": 3330, + "estimated_eval_tokens": 6916 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items do not form a duplicate cluster; they cover unrelated bugs, model families, and code paths. 
All soft similarity pairs should be rejected.", + "confidence": 0.94, + "canonical_issue_reason": "No single canonical issue: the issues span distinct failures (model loading, serving, serialization, training, chat templating, and config regressions) rather than one underlying bug.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a clear representative duplicate target because there is no coherent duplicate set to canonicalize.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43824", + "right": "issue:44589", + "accept": false, + "reason": "Different failures: Qwen2.5-VL import/name availability vs Float8 storage object lookup. No shared code-path bug." + }, + { + "left": "issue:44423", + "right": "issue:44734", + "accept": false, + "reason": "Both hit serving, but one is a multimodal `str.to` crash and the other is KV-cache continuation indexing. Different concrete bugs." + }, + { + "left": "issue:43688", + "right": "issue:43746", + "accept": false, + "reason": "Aux-loss normalization in MoE models vs GraniteSpeech PEFT/local checkpoint loading. Unrelated functionality." + }, + { + "left": "issue:43572", + "right": "issue:44291", + "accept": false, + "reason": "StableLmConfig missing field after v5 update vs `init_empty_weights` unexpected argument. Different initialization regressions." + }, + { + "left": "issue:43646", + "right": "issue:44466", + "accept": false, + "reason": "Custom model init breaks after v5 vs device-dependent `lm_head.weight` serialization. Not the same underlying code path." + }, + { + "left": "issue:44521", + "right": "issue:45290", + "accept": false, + "reason": "Both involve `apply_chat_template`, but one is all-zero assistant masks for multimodal inputs and the other crashes on tool-call messages without content. Different bugs." 
+ }, + { + "left": "issue:43994", + "right": "issue:44493", + "accept": false, + "reason": "Siglip2 AutoModel/pipeline wrong results vs widespread unexpected `position_id` keys. Separate issues." + }, + { + "left": "issue:43504", + "right": "issue:43606", + "accept": false, + "reason": "BEiT preset load failure due to legacy field vs bark-small CPU-offload device mismatch. No common fixable bug." + }, + { + "left": "issue:33357", + "right": "issue:33666", + "accept": false, + "reason": "MacOS bus error with pretrained CLIP model vs Qwen2-VL multi-GPU training. Completely different scenarios." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 60, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:15354", + "issue:38175", + "issue:39692", + "issue:43257", + "issue:43278", + "issue:43295", + "issue:43504", + "issue:43540", + "issue:43575", + "issue:43606", + "issue:43653", + "issue:43701", + "issue:43927", + "issue:44112", + "issue:44361", + "issue:44792", + "issue:45200", + "issue:45405" + ], + "soft_pairs": [ + "issue:39692|issue:43295", + "issue:43504|issue:43653", + "issue:43927|issue:44361", + "issue:43575|issue:45405", + "issue:43278|issue:43701", + "issue:43257|issue:45405", + "issue:43606|issue:44112", + "issue:44792|issue:45405", + "issue:15354|issue:38175", + "issue:43540|issue:45200" + ], + "prepared_review_unit_hash": "d8e574b85a8bfd0381af75907bfda2764460906aae1338dd50f01a308fdf1edb", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 
345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13465, + "estimated_input_tokens": 3367, + "estimated_eval_tokens": 6990 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The items are a collection of unrelated model-, tokenizer-, config-, and CI-specific issues; none of the proposed soft pairs appear to be the same underlying bug, so all soft edges are rejected.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue: the reports cover different subsystems and failure modes rather than one duplicated problem.", + "canonical_pr_reason": null, + "best_issue_reason": "No global best issue for deduping; there is no shared underlying defect strong enough to make one issue representative of the rest.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:39692", + "right": "issue:43295", + "accept": false, + "reason": "SigLIP2 docs/example errors with model/processor mismatch and quantization vs a v4.57.5 regression in processor.tokenizer/image handling; different bug classes." + }, + { + "left": "issue:43504", + "right": "issue:43653", + "accept": false, + "reason": "Beit preset load failure due to a legacy field vs BigBirdTokenizer mask token special-token registration/empty decode; unrelated components and symptoms." + }, + { + "left": "issue:43927", + "right": "issue:44361", + "accept": false, + "reason": "DiaConfig losing custom token IDs on save/load is a config serialization bug, while MLukeTokenizer failing on tasks is an AttributeError in task handling." + }, + { + "left": "issue:43575", + "right": "issue:45405", + "accept": false, + "reason": "Qwen2 TP loading OOM is a model-loading/memory issue; MIN_PEFT_VERSION being bumped to an unreleased version is a dependency/versioning issue." 
+ }, + { + "left": "issue:43278", + "right": "issue:43701", + "accept": false, + "reason": "Embedding dtype changing BF16->FP32 during evaluate is unrelated to resume_from_checkpoint key mismatch; different code paths and failure modes." + }, + { + "left": "issue:43257", + "right": "issue:45405", + "accept": false, + "reason": "Qwen3 MOE weights not converted with accelerate+deepspeed is a loading/conversion bug, not the PEFT version bump/release issue." + }, + { + "left": "issue:43606", + "right": "issue:44112", + "accept": false, + "reason": "Bark-small CPU offload device mismatch affects model execution, while the GraniteSpeech report is a stale CI test override; not the same underlying defect." + }, + { + "left": "issue:44792", + "right": "issue:45405", + "accept": false, + "reason": "janus image-generation test failure is a model-specific runtime/test issue, whereas 45405 is about an unreleased PEFT dependency version." + }, + { + "left": "issue:15354", + "right": "issue:38175", + "accept": false, + "reason": "torch.jit.script GeneratorExp export limitation for ViT is unrelated to zero probabilities in SigLIP2; different models and failure mechanisms." + }, + { + "left": "issue:43540", + "right": "issue:45200", + "accept": false, + "reason": "Qwen3OmniMoe video-processing ValueError is a multimodal input parsing bug, while Gemma 4 mm_token_type_ids defaulting is a training/fine-tuning configuration issue." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 61, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:37428", + "issue:39401", + "issue:43232", + "issue:43334", + "issue:43525", + "issue:43582", + "issue:43844", + "issue:43957", + "issue:44315", + "issue:44554", + "issue:44568", + "issue:44849", + "issue:44964", + "issue:44977", + "issue:45072", + "issue:45198", + "issue:45356", + "issue:45446" + ], + "soft_pairs": [ + "issue:44554|issue:45446", + "issue:43232|issue:44849", + "issue:43582|issue:44977", + "issue:39401|issue:44568", + "issue:45072|issue:45198", + "issue:37428|issue:43957", + "issue:43844|issue:44964", + "issue:44315|issue:44849", + "issue:43334|issue:43525", + "issue:44554|issue:45356" + ], + "prepared_review_unit_hash": "78facb9e88dcb7b3727d339beff85d7412c069cca5148dc2d5ab875cd50ce5bf", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13573, + "estimated_input_tokens": 3394, + "estimated_eval_tokens": 7044 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous. 
Most soft pairs are only superficially similar and do not look like the same underlying bug; the only near-match is the pair of missing `pad_token_id` config issues, but even that is model-specific and not clearly one fix.", + "confidence": 0.91, + "canonical_issue_reason": "No single issue cleanly subsumes the cluster; the items span unrelated tokenizer, loading, training, allocator, and attention bugs.", + "canonical_pr_reason": null, + "best_issue_reason": "No strong global representative issue exists. If forced, the closest thematic match is one of the `pad_token_id` config errors, but the cluster is too mixed for a meaningful canonical issue.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44554", + "right": "issue:45446", + "accept": false, + "reason": "Different bugs: an MPS attention correctness issue vs a flex_attention PyTorch version-check/import problem." + }, + { + "left": "issue:43232", + "right": "issue:44849", + "accept": false, + "reason": "Different code paths and symptoms: generation kwargs update after `sync_gpus` vs Qwen3.5 `output_hidden_states` behavior." + }, + { + "left": "issue:43582", + "right": "issue:44977", + "accept": false, + "reason": "Apple Silicon allocator warmup `TypeError` and Qwen3.5 flash-attention generation are unrelated failures." + }, + { + "left": "issue:39401", + "right": "issue:44568", + "accept": false, + "reason": "Both are tokenizer-related, but one is wrong `offset_mapping` and the other is missing BOS/EOS insertion for a different tokenizer." + }, + { + "left": "issue:45072", + "right": "issue:45198", + "accept": false, + "reason": "Different subsystems and failures: bfloat16 dtype mismatch in inference vs save/tokenization failure in Wav2Vec2." + }, + { + "left": "issue:37428", + "right": "issue:43957", + "accept": false, + "reason": "Both concern model loading, but one is a missing flash-attention symbol and the other is a `meta`-device loading regression." 
+ }, + { + "left": "issue:43844", + "right": "issue:44964", + "accept": false, + "reason": "Training instability under ZeRO-3 and inability to load Phi-4-multimodal-instruct are different issues." + }, + { + "left": "issue:44315", + "right": "issue:44849", + "accept": false, + "reason": "Different problems: Liger Kernel not applied with `model_init` vs Qwen3.5 `output_hidden_states` bug." + }, + { + "left": "issue:43334", + "right": "issue:43525", + "accept": false, + "reason": "Same broad symptom (`pad_token_id` missing), but on different model configs and not clearly the same concrete bug or fix." + }, + { + "left": "issue:44554", + "right": "issue:45356", + "accept": false, + "reason": "An MPS attention correctness bug and a Kimi-K2.5 tokenizer regression are unrelated." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 62, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:37428", + "issue:40990", + "issue:43295", + "issue:43388", + "issue:43493", + "issue:43526", + "issue:43701", + "issue:43824", + "issue:43873", + "issue:44464", + "issue:44466", + "issue:44610", + "issue:44625", + "issue:44898", + "issue:45127", + "issue:45200", + "issue:45325", + "issue:45412" + ], + "soft_pairs": [ + "issue:44898|issue:45412", + "issue:43295|issue:44610", + "issue:45200|issue:45325", + "issue:37428|issue:43824", + "issue:44466|issue:45127", + "issue:43493|issue:43701", + "issue:43873|issue:44464", + "issue:40990|issue:44625", + "issue:43388|issue:43526" + ], + "prepared_review_unit_hash": "3a29b6c169831f85f70d7e512df66e971d01fcb7c1825c95d3e428707759f627", + "component_budget": { + 
"node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13391, + "estimated_input_tokens": 3348, + "estimated_eval_tokens": 6952 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set: the items cover unrelated bugs across imports, vision processors, training metrics, generation, quantization, and model-specific regressions. None of the soft pairs look like the same underlying bug or change.", + "confidence": 0.97, + "canonical_issue_reason": "No true canonical issue: the cluster is heterogeneous. If one anchor is needed, issue 43824 is the strongest because it is a concrete, high-engagement regression report with a clear import failure, but it does not represent the rest of the set.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43824 is the best single representative only in the sense of being the most concrete and widely discussed bug report in the set. It is not a duplicate target for the other issues, but it is the strongest anchor if one issue must be chosen.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44898", + "right": "issue:45412", + "accept": false, + "reason": "Different models and failure modes: Perceiver image resolution/interpolation bug vs RT-DETR memory not being released on deletion." + }, + { + "left": "issue:43295", + "right": "issue:44610", + "accept": false, + "reason": "Both involve model/processor regressions, but the concrete bugs differ: processor.tokenizer/image-input handling vs OmDet-Turbo preprocessing size mismatch." 
+ }, + { + "left": "issue:45200", + "right": "issue:45325", + "accept": false, + "reason": "Unrelated code paths: Gemma 4 multimodal token-type defaults vs Qwen2.5-VL rope-index scaling for still images." + }, + { + "left": "issue:37428", + "right": "issue:43824", + "accept": false, + "reason": "Both are import errors, but for different symbols and different model areas; not the same underlying missing-export bug." + }, + { + "left": "issue:44466", + "right": "issue:45127", + "accept": false, + "reason": "Generation inconsistency with compiled forward is unrelated to LoRA merge/extended-vocab collapse and tied-weight serialization behavior." + }, + { + "left": "issue:43493", + "right": "issue:43701", + "accept": false, + "reason": "Different bugs: SigLIP2 implementation discrepancy vs checkpoint resume key mismatch." + }, + { + "left": "issue:43873", + "right": "issue:44464", + "accept": false, + "reason": "Quantization/offloading behavior is a different problem from chunked generation inconsistencies under compiled forward." + }, + { + "left": "issue:40990", + "right": "issue:44625", + "accept": false, + "reason": "Perplexity quality regression on a model is unrelated to num_labels propagation between configs." + }, + { + "left": "issue:43388", + "right": "issue:43526", + "accept": false, + "reason": "Both touch labels, but one is gather_for_metrics dropping tuple elements in the last batch while the other is BeitImageProcessorFast reduce_labels returning a single label; different layers and fixes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 63, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:38175", + "issue:42915", + "issue:43295", + "issue:43572", + "issue:43575", + "issue:43824", + "issue:43844", + "issue:43873", + "issue:43975", + "issue:44442", + "issue:44464", + "issue:44514", + "issue:44521", + "issue:44589", + "issue:44936", + "issue:44987", + "issue:45198", + "issue:45412" + ], + "soft_pairs": [ + "issue:43572|issue:44589", + "issue:43572|issue:44987", + "issue:44514|issue:44521", + "issue:43844|issue:44936", + "issue:43824|issue:43975", + "issue:43575|issue:44464", + "issue:44442|issue:45198", + "issue:38175|issue:45412", + "issue:42915|issue:43824", + "issue:43295|issue:43873" + ], + "prepared_review_unit_hash": "36c774ed1723220a40faed3e4c0c8d9bd05be0604d6aa717677374c21fd1b14c", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13306, + "estimated_input_tokens": 3327, + "estimated_eval_tokens": 6910 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "All proposed soft pairs look like false positives: the issues span unrelated models, tokenizer/processor regressions, training/eval bugs, quantization/offload failures, and memory-management problems. 
There is no coherent duplicate cluster here.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43572", + "right": "issue:44589", + "accept": false, + "reason": "Different failure classes: missing config field vs missing Float8 storage type; no shared code-path bug." + }, + { + "left": "issue:43572", + "right": "issue:44987", + "accept": false, + "reason": "Unrelated symptoms and models; one is a StableLm config regression, the other a model-load failure on a specific repo." + }, + { + "left": "issue:44514", + "right": "issue:44521", + "accept": false, + "reason": "Both involve Qwen2.5 VL chat/template code, but one is a batched-input crash and the other is incorrect zero assistant masks; not the same concrete bug." + }, + { + "left": "issue:43844", + "right": "issue:44936", + "accept": false, + "reason": "Training instability with ZeRO-3 is unrelated to evaluate() failing after train(); different lifecycle and failure mode." + }, + { + "left": "issue:43824", + "right": "issue:43975", + "accept": false, + "reason": "Different models and bugs: import/export of Qwen2.5 VL class vs DeepSeek detokenization regression." + }, + { + "left": "issue:43575", + "right": "issue:44464", + "accept": false, + "reason": "TP OOM on loading a Qwen model is unrelated to chunked generation inconsistencies under compiled forward." + }, + { + "left": "issue:44442", + "right": "issue:45198", + "accept": false, + "reason": "Both are tokenizer-related but for different speech models and different problems (load failure vs save_pretrained/tokenization failure)." + }, + { + "left": "issue:38175", + "right": "issue:45412", + "accept": false, + "reason": "Siglip zero-probabilities and RT-DETR memory not released are unrelated model/runtime issues." 
+ }, + { + "left": "issue:42915", + "right": "issue:43824", + "accept": false, + "reason": "Different subsystems and root causes: FineGrainedFP8Config failure vs missing Qwen2.5 VL import." + }, + { + "left": "issue:43295", + "right": "issue:43873", + "accept": false, + "reason": "A processor/tokenizer regression is not the same as quantization offloading behavior; no shared concrete bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 64, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:38175", + "issue:43278", + "issue:43295", + "issue:43450", + "issue:43493", + "issue:43653", + "issue:43827", + "issue:43856", + "issue:43901", + "issue:43994", + "issue:44112", + "issue:44360", + "issue:44561", + "issue:44568", + "issue:44849", + "issue:44991", + "issue:45003" + ], + "soft_pairs": [ + "issue:43295|issue:43450", + "issue:43278|issue:43856", + "issue:44849|issue:45003", + "issue:44360|issue:44561", + "issue:43295|issue:43994", + "issue:43827|issue:43901", + "issue:38175|issue:43493", + "issue:43653|issue:44112", + "issue:44568|issue:44991" + ], + "prepared_review_unit_hash": "1a4a6bbb4b72a18858430099ffabdc734cfa055a8a1fb625ba4bf541a853c049", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12540, + "estimated_input_tokens": 3135, + "estimated_eval_tokens": 6526 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + 
"aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "One likely duplicate pair stands out in the SigLIP2 area: the zero-probabilities report and the HF-vs-JAX discrepancy report. The remaining soft pairs look related only at a broad subsystem level or are plainly different bugs.", + "confidence": 0.78, + "canonical_issue_reason": "issue:43493 is the better canonical issue for the SigLIP2 problem because it states the broader root discrepancy against the JAX implementation, while issue:38175 reads like a downstream symptom (unexpected zero probabilities).", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43493 is the strongest overall issue here: it is specific, root-cause oriented, and likely subsumes the narrower user symptom reported in issue:38175.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43295", + "right": "issue:43450", + "accept": false, + "reason": "Different bugs: one is a processor/tokenizer regression, the other is a batched video processor shape issue." + }, + { + "left": "issue:43278", + "right": "issue:43856", + "accept": false, + "reason": "Unrelated symptoms and code paths: dtype mismatch in evaluation vs MoE training memory usage." + }, + { + "left": "issue:44849", + "right": "issue:45003", + "accept": false, + "reason": "Distinct problems: Qwen3.5 hidden-state behavior vs unsafe sys.modules access in modeling_utils." + }, + { + "left": "issue:44360", + "right": "issue:44561", + "accept": false, + "reason": "No concrete overlap: a DSA indexer ReLU discussion is unrelated to v5 trust_remote_code breakage." + }, + { + "left": "issue:43295", + "right": "issue:43994", + "accept": false, + "reason": "Both mention SigLIP2/processor usage, but the reports describe different failure modes and are not clearly the same underlying bug." 
+ }, + { + "left": "issue:43827", + "right": "issue:43901", + "accept": false, + "reason": "Both are docs regressions, but they cover different API removals and are not the same change request." + }, + { + "left": "issue:38175", + "right": "issue:43493", + "accept": true, + "reason": "Same underlying SigLIP2 model discrepancy: zero probabilities is a likely symptom of the HF-vs-JAX implementation mismatch." + }, + { + "left": "issue:43653", + "right": "issue:44112", + "accept": false, + "reason": "Completely different areas: BigBird tokenizer special-token registration vs a GraniteSpeech CI device test." + }, + { + "left": "issue:44568", + "right": "issue:44991", + "accept": false, + "reason": "Both are tokenizer-related v5 regressions, but they concern different models and different breakages." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 65, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:37428", + "issue:43425", + "issue:43450", + "issue:43493", + "issue:43575", + "issue:43819", + "issue:44112", + "issue:44351", + "issue:44462", + "issue:44561", + "issue:44610", + "issue:44779", + "issue:44857", + "issue:45083", + "issue:45127", + "issue:45245", + "issue:45290", + "issue:45381" + ], + "soft_pairs": [ + "issue:45127|issue:45245", + "issue:43450|issue:43493", + "issue:37428|issue:43425", + "issue:44462|issue:44561", + "issue:43819|issue:45083", + "issue:45290|issue:45381", + "issue:44561|issue:44779", + "issue:43450|issue:43575", + "issue:44610|issue:44857", + "issue:37428|issue:44351", + "issue:43819|issue:44112" + ], + "prepared_review_unit_hash": 
"95b8bcdf55636a4f8150dd37ac6eb6b35e45a2b2731af3e1831a87070c1f3e7a", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13638, + "estimated_input_tokens": 3410, + "estimated_eval_tokens": 7076 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is mostly heterogeneous; none of the suggested soft pairs look like true duplicates or the same fix. The strongest standalone issue is the v5 trust_remote_code regression, but there is no evidence for merging any pair.", + "confidence": 0.83, + "canonical_issue_reason": "issue:44561 is the best representative issue: it describes a broad, user-facing regression tied to a specific version change, has inbound references, and is more central than the narrower bug reports.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44561 has the clearest impact and widest relevance among the listed issues; it is a concrete compatibility break with a clear upgrade-related root cause.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45127", + "right": "issue:45245", + "accept": false, + "reason": "Different failures: LoRA merge collapse with extended vocab vs a category-cardinality RuntimeError. Same general model-ops area, but not the same bug." + }, + { + "left": "issue:43450", + "right": "issue:43493", + "accept": false, + "reason": "Both involve vision processing, but one is batched video processor shape handling and the other is a SigLIP2 HF-vs-JAX discrepancy. Distinct code paths and symptoms." 
+ }, + { + "left": "issue:37428", + "right": "issue:43425", + "accept": false, + "reason": "Both are compatibility/import problems, but one is a missing flash-attention symbol import and the other is Torch 2.10 incompatibility. Not the same underlying issue." + }, + { + "left": "issue:44462", + "right": "issue:44561", + "accept": false, + "reason": "Both are version-related regressions, but one is AutoTokenizer ignoring tokenizer.json and the other is trust_remote_code breaking after is_torch_fx_available removal. Different mechanisms." + }, + { + "left": "issue:43819", + "right": "issue:45083", + "accept": false, + "reason": "Different models and different bugs: DAC.from_latents mismatch vs qwen3_omni_moe feature-length helper behavior." + }, + { + "left": "issue:45290", + "right": "issue:45381", + "accept": false, + "reason": "Chat-template crash on tool-call assistant messages is unrelated to qwen2.5-vl video vision_position_ids being wrong." + }, + { + "left": "issue:44561", + "right": "issue:44779", + "accept": false, + "reason": "Both are v5 regressions, but they affect different tokenizer/model paths and have different root causes." + }, + { + "left": "issue:43450", + "right": "issue:43575", + "accept": false, + "reason": "Video processor shape mismatch is unrelated to tensor-parallel OOM when loading Qwen2-57B-A14B-Instruct." + }, + { + "left": "issue:44610", + "right": "issue:44857", + "accept": false, + "reason": "OmDet-Turbo processor output size mismatch and LwDetrImageLoss float16 CUDA crash are different bugs in different components." + }, + { + "left": "issue:37428", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but they concern different missing symbols and likely different compatibility breakages." + }, + { + "left": "issue:43819", + "right": "issue:44112", + "accept": false, + "reason": "DAC latent conversion mismatch and a stale CI test failure in GraniteSpeech are unrelated issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 66, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:29942", + "issue:32090", + "issue:39692", + "issue:43262", + "issue:43278", + "issue:43450", + "issue:43526", + "issue:43606", + "issue:43611", + "issue:43819", + "issue:43825", + "issue:43931", + "issue:44448", + "issue:44493", + "issue:44743", + "issue:45083", + "issue:45310", + "issue:45478" + ], + "soft_pairs": [ + "issue:44448|issue:44743", + "issue:43262|issue:43450", + "issue:43606|issue:45083", + "issue:45310|issue:45478", + "issue:43819|issue:44493", + "issue:43278|issue:43825", + "issue:29942|issue:32090", + "issue:43611|issue:43931", + "issue:39692|issue:43526" + ], + "prepared_review_unit_hash": "6a70ba71d7a14731f59fd45ebc8e58eb2835ea79d1b553ecb6c189a10d0583d8", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13182, + "estimated_input_tokens": 3296, + "estimated_eval_tokens": 6848 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most items are unrelated issue reports. The only clear duplicate pair is the Qwen3.5 MoE `from_pretrained` error: issues 45310 and 45478 describe the same Transformers>=5.4.0 failure. 
No PRs are present.", + "confidence": 0.95, + "canonical_issue_reason": "Issue 45310 is the better canonical issue because it is the earlier report of the same Qwen3.5 MoE `from_pretrained` failure and already has inbound reference activity, while 45478 is an open duplicate with the same title.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45310 is the strongest cluster representative: it matches 45478 exactly on the underlying bug and is the more established ticket. The rest of the items are separate bug reports in different areas.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44448", + "right": "issue:44743", + "accept": false, + "reason": "Different model families and failures: Pegasus v4/v5 output drift vs Qwen3 recurrent-state reset in cache handling." + }, + { + "left": "issue:43262", + "right": "issue:43450", + "accept": false, + "reason": "Different subsystems and symptoms: audio chat-template sample-rate defaulting vs batched video processor shape errors." + }, + { + "left": "issue:43606", + "right": "issue:45083", + "accept": false, + "reason": "Unrelated bugs: bark-small CPU offload device mismatch vs qwen3_omni_moe feature-length helper behavior." + }, + { + "left": "issue:45310", + "right": "issue:45478", + "accept": true, + "reason": "Same underlying bug and same error class: Transformers>=5.4.0 `from_pretrained` failure for Qwen3.5 MoE." + }, + { + "left": "issue:43819", + "right": "issue:44493", + "accept": false, + "reason": "Different code paths: DAC latent reconstruction mismatch vs broad position-id warning/key issue across many models." + }, + { + "left": "issue:43278", + "right": "issue:43825", + "accept": false, + "reason": "Different problems: dtype regression in evaluation vs an incorrect pipeline translation-support error message." 
+ }, + { + "left": "issue:29942", + "right": "issue:32090", + "accept": false, + "reason": "Unrelated failures: Flash Attention 2 test breakage vs Trainer `_gpu_broadcast_one` NoneType error." + }, + { + "left": "issue:43611", + "right": "issue:43931", + "accept": false, + "reason": "Different model-loading failures: `base_model_prefix` regression vs a specific Qwen3-VL weight-shape mismatch." + }, + { + "left": "issue:39692", + "right": "issue:43526", + "accept": false, + "reason": "Different topics: SigLIP2 documentation example issues vs BEiT fast image processor label reduction bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 67, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:38175", + "issue:39692", + "issue:43335", + "issue:43404", + "issue:43450", + "issue:43526", + "issue:43550", + "issue:43653", + "issue:44448", + "issue:44493", + "issue:44779", + "issue:44855", + "issue:44977", + "issue:45083", + "issue:45200", + "issue:45229", + "issue:45381" + ], + "soft_pairs": [ + "issue:45200|issue:45381", + "issue:43335|issue:43550", + "issue:44855|issue:45083", + "issue:43404|issue:45200", + "issue:39692|issue:44493", + "issue:43450|issue:43526", + "issue:38175|issue:44493", + "issue:44977|issue:45229", + "issue:44448|issue:44779", + "issue:43335|issue:43653" + ], + "prepared_review_unit_hash": "a3a07f6ce4ca7d59063892585a16a1d060c45d819319d3529d7be7c5bf118573", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + 
"node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12961, + "estimated_input_tokens": 3241, + "estimated_eval_tokens": 6738 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a loose collection of unrelated closed issues across different models, tokenizers, processors, configs, and version regressions. I do not see a single duplicate root cause or a viable canonical artifact for the whole cluster.", + "confidence": 0.97, + "canonical_issue_reason": "No single canonical issue fits: the items span distinct models and subsystems with different failure modes, so none can represent the cluster as a duplicate target.", + "canonical_pr_reason": null, + "best_issue_reason": "No global best issue: there is no dominant or umbrella bug here, just several unrelated reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45200", + "right": "issue:45381", + "accept": false, + "reason": "Different models and symptoms: Gemma 4 text-only fine-tuning needing mm_token_type_ids defaults is unrelated to Qwen2.5-VL video vision_position_ids being wrong." + }, + { + "left": "issue:43335", + "right": "issue:43550", + "accept": false, + "reason": "Unrelated bugs: SwitchTransformers config incorrectly creating a sparse layer vs Bamba-9B-v2 torch.compile + SDPA failure." + }, + { + "left": "issue:44855", + "right": "issue:45083", + "accept": false, + "reason": "Different code paths and failure types: Python 3.13 JIT parsing/IndentationError in DebertaV2 vs qwen3_omni_moe feature-length helper behavior." + }, + { + "left": "issue:43404", + "right": "issue:45200", + "accept": false, + "reason": "Different model families and issues: tied lm_head weights in Mistral3 vs missing/default mm_token_type_ids in Gemma 4." 
+ }, + { + "left": "issue:39692", + "right": "issue:44493", + "accept": false, + "reason": "Doc-example/model-processor mismatch and quantization failure in SigLIP2 is not the same as a broad unexpected position-id key issue." + }, + { + "left": "issue:43450", + "right": "issue:43526", + "accept": false, + "reason": "Both are processor bugs, but they affect different processors and different outputs: batched video shape vs BeitImageProcessorFast label reduction." + }, + { + "left": "issue:38175", + "right": "issue:44493", + "accept": false, + "reason": "Different models and symptoms: SigLIP2 zero probabilities vs unexpected position-id key handling across models." + }, + { + "left": "issue:44977", + "right": "issue:45229", + "accept": false, + "reason": "Different runtime problems: Qwen3.5 flash-attention generation bug vs Gemma4 multi-GPU CUDA OOM." + }, + { + "left": "issue:44448", + "right": "issue:44779", + "accept": false, + "reason": "Version-related regressions in different subsystems, but not the same bug: Pegasus output drift vs Deepseek tokenizer behavior change." + }, + { + "left": "issue:43335", + "right": "issue:43653", + "accept": false, + "reason": "No shared root cause: SwitchTransformers sparse-layer construction is unrelated to BigBirdTokenizer special-token registration." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 68, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:29127", + "issue:32090", + "issue:40444", + "issue:43335", + "issue:43504", + "issue:43526", + "issue:43550", + "issue:43720", + "issue:43723", + "issue:44112", + "issue:44448", + "issue:44554", + "issue:44568", + "issue:44977", + "issue:45072", + "issue:45083", + "issue:45491" + ], + "soft_pairs": [ + "issue:29127|issue:32090", + "issue:43335|issue:45083", + "issue:43335|issue:43504", + "issue:43550|issue:44112", + "issue:44568|issue:45491", + "issue:44448|issue:44977", + "issue:40444|issue:43526", + "issue:43335|issue:45072", + "issue:43723|issue:44554", + "issue:43720|issue:44112" + ], + "prepared_review_unit_hash": "99f175079323c5e2a8ae63d577345f1ee983c33a0a4b4067c6d7f89860c8e6b5", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 13088, + "estimated_input_tokens": 3272, + "estimated_eval_tokens": 6800 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are not duplicates of one another; they span unrelated bugs across different models, tokenizers, training paths, and CI failures. 
All soft edges should be rejected.", + "confidence": 0.97, + "canonical_issue_reason": "No single canonical issue: the cluster is heterogeneous and the candidate pairs do not describe the same underlying bug.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a good global representative because there is no shared root cause across the set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:29127", + "right": "issue:32090", + "accept": false, + "reason": "Different subsystems and failure modes: LayoutLMv3 box validation messaging vs Trainer/_gpu_broadcast_one TypeError." + }, + { + "left": "issue:43335", + "right": "issue:45083", + "accept": false, + "reason": "Both are model-internal bugs, but they concern different architectures and code paths (SwitchTransformers config vs qwen3_omni_moe length helper)." + }, + { + "left": "issue:43335", + "right": "issue:43504", + "accept": false, + "reason": "Unrelated BEIT pretrained-loading/legacy-field issue versus SwitchTransformers sparse-layer construction bug." + }, + { + "left": "issue:43550", + "right": "issue:44112", + "accept": false, + "reason": "Different models and bugs: Bamba torch.compile/SDPA failure vs GraniteSpeech CI test flakiness." + }, + { + "left": "issue:44568", + "right": "issue:45491", + "accept": false, + "reason": "Tokenizer special-token regression is unrelated to Gemma3 NaN embeddings from sliding-window attention on GPU." + }, + { + "left": "issue:44448", + "right": "issue:44977", + "accept": false, + "reason": "Different model families and failure modes: Pegasus v4/v5 output drift vs Qwen3.5 flash-attention generation bug." + }, + { + "left": "issue:40444", + "right": "issue:43526", + "accept": false, + "reason": "Both involve vision preprocessing, but one is Qwen2.5-VL multi-image dataset handling and the other is BEiT Fast processor label reduction." 
+ }, + { + "left": "issue:43335", + "right": "issue:45072", + "accept": false, + "reason": "Different bugs in different models: SwitchTransformers sparse-layer config versus dtype mismatch in TimmWrapperModel/CI inference." + }, + { + "left": "issue:43723", + "right": "issue:44554", + "accept": false, + "reason": "Tokenizer loading in v5 is unrelated to an MPS attention correctness issue with mismatched value/query head dimensions." + }, + { + "left": "issue:43720", + "right": "issue:44112", + "accept": false, + "reason": "Different causes and areas: BitNet packed-weight loading under accelerate vs a stale GraniteSpeech device-override CI test." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 69, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:34689", + "issue:35141", + "issue:37428", + "issue:43066", + "issue:43278", + "issue:43329", + "issue:43335", + "issue:43606", + "issue:43873", + "issue:44112", + "issue:44462", + "issue:44493", + "issue:44857", + "issue:44987", + "issue:45072", + "issue:45229", + "issue:45372", + "issue:45406" + ], + "soft_pairs": [ + "issue:45372|issue:45406", + "issue:43066|issue:44462", + "issue:43335|issue:44112", + "issue:37428|issue:44987", + "issue:43606|issue:44857", + "issue:43873|issue:45229", + "issue:44857|issue:45072", + "issue:34689|issue:35141", + "issue:43873|issue:44493", + "issue:43278|issue:43329" + ], + "prepared_review_unit_hash": "aa1148ea0420ca8ff86d2da450a24b61853d297944d213bb6dbfa181c2c097c2", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + 
"estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13564, + "estimated_input_tokens": 3391, + "estimated_eval_tokens": 7038 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of unrelated bug reports across different subsystems. None of the soft pairs look like the same underlying defect, so all soft edges are rejected. If forced to choose an anchor, issue #43873 is the best representative because it is the broadest active report with the most inbound references, but there is no true canonical duplicate here.", + "confidence": 0.84, + "canonical_issue_reason": "Issue #43873 is the most central/active item in the set (open, 4 inbound references) and has the broadest framing around offloading/quantization, so it is the best anchor if one must be chosen.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #43873 is the strongest general representative for the cluster, but only as an anchor; the cluster does not contain a clear duplicate set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45372", + "right": "issue:45406", + "accept": false, + "reason": "Both involve Gemma 4 processor loading, but one is an import error from mistral_common while the other is a missing _tokenizer attribute in serve; different failure modes and code paths." + }, + { + "left": "issue:43066", + "right": "issue:44462", + "accept": false, + "reason": "Both mention tokenizer behavior, but one is about decoder type in Transformers v5 and the other about AutoTokenizer ignoring tokenizer.json; not the same bug." 
+ }, + { + "left": "issue:43335", + "right": "issue:44112", + "accept": false, + "reason": "Sparse-layer construction in SwitchTransformers config is unrelated to a stale device-override CI failure in GraniteSpeech." + }, + { + "left": "issue:37428", + "right": "issue:44987", + "accept": false, + "reason": "The first is an internal import error for flash attention utilities; the second is a model-loading failure for a specific repository. Same broad area, different issue." + }, + { + "left": "issue:43606", + "right": "issue:44857", + "accept": false, + "reason": "One is a Bark CPU offload device mismatch, the other is an AMP/CUDA crash in LwDetrImageLoss; different models and different root causes." + }, + { + "left": "issue:43873", + "right": "issue:45229", + "accept": false, + "reason": "Offloading/quantization problems are not the same as multi-GPU CUDA OOM during Gemma4 inference." + }, + { + "left": "issue:44857", + "right": "issue:45072", + "accept": false, + "reason": "Both involve dtype issues, but one is a loss crash under AMP and the other is a bfloat16 inference mismatch in different models/components." + }, + { + "left": "issue:34689", + "right": "issue:35141", + "accept": false, + "reason": "Model loading for Llama 3.2 Vision and embedding reinitialization after resize are distinct bugs with different triggers." + }, + { + "left": "issue:43873", + "right": "issue:44493", + "accept": false, + "reason": "Offloading with quantization and unexpected position-id keys are unrelated symptoms in different code paths." + }, + { + "left": "issue:43278", + "right": "issue:43329", + "accept": false, + "reason": "Embedding dtype drift in evaluate is unrelated to an undefined-variable bug in multimodal video token counting." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 70, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:29942", + "issue:32090", + "issue:33357", + "issue:35141", + "issue:41720", + "issue:43065", + "issue:43550", + "issue:43701", + "issue:43976", + "issue:43994", + "issue:44448", + "issue:44466", + "issue:45083", + "issue:45229", + "issue:45372", + "issue:45412", + "issue:45440", + "issue:45459" + ], + "soft_pairs": [ + "issue:43701|issue:45083", + "issue:41720|issue:45229", + "issue:45372|issue:45459", + "issue:29942|issue:35141", + "issue:43550|issue:45083", + "issue:43976|issue:45440", + "issue:32090|issue:33357", + "issue:44448|issue:44466", + "issue:43065|issue:45412", + "issue:43065|issue:43994" + ], + "prepared_review_unit_hash": "930a4b679a1d3f73102763ce3cd8ea47a956c05a70835cf0a929b9f7ee1a671d", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13459, + "estimated_input_tokens": 3365, + "estimated_eval_tokens": 6986 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is not a duplicate set; the items cover unrelated bugs across training, model inference, serialization, tokenizer/import handling, and platform-specific failures. 
All soft-similarity pairs should be rejected.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43701", + "right": "issue:45083", + "accept": false, + "reason": "Different failure modes and components: checkpoint key mismatch vs a Qwen3 Omni MoE helper-length bug." + }, + { + "left": "issue:41720", + "right": "issue:45229", + "accept": false, + "reason": "Both are GPU inference problems, but one is a cudaErrorAssert in Qwen3 auto device mapping and the other is an OOM on Gemma4 multi-GPU; not the same bug." + }, + { + "left": "issue:45372", + "right": "issue:45459", + "accept": false, + "reason": "Different root causes: Gemma 4 processor import dependency breakage vs tokenizer error masking when protobuf is missing." + }, + { + "left": "issue:29942", + "right": "issue:35141", + "accept": false, + "reason": "Flash Attention test failures and embedding reinitialization on resize are unrelated code paths." + }, + { + "left": "issue:43550", + "right": "issue:45083", + "accept": false, + "reason": "torch.compile/SDPA on Bamba-9B-v2 is unrelated to the qwen3_omni_moe feature-length helper issue." + }, + { + "left": "issue:43976", + "right": "issue:45440", + "accept": false, + "reason": "Python version compatibility and DeepseekV3MoE divergence are distinct issues with no shared concrete bug." + }, + { + "left": "issue:32090", + "right": "issue:33357", + "accept": false, + "reason": "Trainer NoneType broadcast error and MacOS bus error for a CLIP model are different failures in different environments." + }, + { + "left": "issue:44448", + "right": "issue:44466", + "accept": false, + "reason": "Both concern v5 behavior changes, but one is output parity for Pegasus while the other is serialization of lm_head.weight; not the same underlying defect." 
+ }, + { + "left": "issue:43065", + "right": "issue:45412", + "accept": false, + "reason": "A dummy Conv2d in Sam3PixelDecoder is unrelated to RT-DETR memory not being released." + }, + { + "left": "issue:43065", + "right": "issue:43994", + "accept": false, + "reason": "Dummy layer implementation in Sam3PixelDecoder is unrelated to SIGLIP2 producing nonsensical results." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 71, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:25251", + "issue:32090", + "issue:42915", + "issue:42994", + "issue:43257", + "issue:43278", + "issue:43381", + "issue:43421", + "issue:43653", + "issue:43994", + "issue:44186", + "issue:44265", + "issue:44361", + "issue:44743", + "issue:45198", + "issue:45412", + "issue:45491" + ], + "soft_pairs": [ + "issue:44186|issue:45198", + "issue:43653|issue:44743", + "issue:43994|issue:45412", + "issue:25251|issue:32090", + "issue:44361|issue:45198", + "issue:44265|issue:45198", + "issue:43278|issue:43381", + "issue:42915|issue:42994", + "issue:43257|issue:43278", + "issue:43421|issue:45491" + ], + "prepared_review_unit_hash": "63cd821b68f1bee09e8bcdb47b1aae118ffbd62d21056d9902a65dbd3d945434", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12882, + "estimated_input_tokens": 3221, + "estimated_eval_tokens": 6698 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": 
false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: it mixes unrelated tokenizer, training, quantization, export, evaluation, and memory-management bugs. None of the soft pairs look like the same underlying issue/change, so I would not merge them as duplicates.", + "confidence": 0.91, + "canonical_issue_reason": "No strong canonical issue stands out because the cluster is not a true duplicate set; the reports cover different models, different code paths, and different failure modes.", + "canonical_pr_reason": null, + "best_issue_reason": "If one issue must be kept as the most representative standalone report, issue 45491 is the most recent and technically specific, but it does not meaningfully cover the rest of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44186", + "right": "issue:45198", + "accept": false, + "reason": "Both are tokenizer-related failures, but they affect different models and different behaviors (NER/padding crash vs save_pretrained/tokenization failure)." + }, + { + "left": "issue:43653", + "right": "issue:44743", + "accept": false, + "reason": "These describe unrelated problems: BigBirdTokenizer special-token registration vs Qwen3 recurrent-state reset in cached generation." + }, + { + "left": "issue:43994", + "right": "issue:45412", + "accept": false, + "reason": "Different subsystems and symptoms: SigLIP2 wrong inference outputs vs RT-DETR memory not released on deletion." + }, + { + "left": "issue:25251", + "right": "issue:32090", + "accept": false, + "reason": "Pipeline top_k nesting bug and Trainer GPU broadcast NoneType error are unrelated code paths and failure modes." + }, + { + "left": "issue:44361", + "right": "issue:45198", + "accept": false, + "reason": "Both involve tokenizers, but the concrete bugs differ: MLukeTokenizer task AttributeError vs Wav2Vec2 save/tokenization failure." 
+ }, + { + "left": "issue:44265", + "right": "issue:45198", + "accept": false, + "reason": "torch.export with torch_compilable_check is a model export problem, not the same as Wav2Vec2 tokenization/save_pretrained failure." + }, + { + "left": "issue:43278", + "right": "issue:43381", + "accept": false, + "reason": "Eval-mode behavior is the only overlap; one is an embedding dtype regression, the other is gradient checkpointing being unsupported in eval mode." + }, + { + "left": "issue:42915", + "right": "issue:42994", + "accept": false, + "reason": "Both touch quantization, but one is FineGrainedFP8Config failure on Qwen3Moe and the other is quantized model saving failure." + }, + { + "left": "issue:43257", + "right": "issue:43278", + "accept": false, + "reason": "These are different Qwen3MOE loading/conversion and embedding dtype issues; no shared concrete bug path." + }, + { + "left": "issue:43421", + "right": "issue:45491", + "accept": false, + "reason": "TokenizersBackend runtime special-token updates and Gemma3 NaN embeddings with mixed-length sliding windows are unrelated failures." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 72, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:29942", + "issue:30064", + "issue:43504", + "issue:43756", + "issue:43824", + "issue:44206", + "issue:44279", + "issue:44479", + "issue:44610", + "issue:44857", + "issue:44871", + "issue:44977", + "issue:45072", + "issue:45103", + "issue:45229", + "issue:45372", + "issue:45412", + "issue:45440" + ], + "soft_pairs": [ + "issue:44871|issue:44977", + "issue:44206|issue:44479", + "issue:43756|issue:45440", + "issue:44610|issue:45412", + "issue:45103|issue:45372", + "issue:29942|issue:30064", + "issue:43504|issue:44857", + "issue:43824|issue:44279", + "issue:45072|issue:45229" + ], + "prepared_review_unit_hash": "f096b0a1e6f185d7d94715c412b72000a6efadcfe83e40407c7d8b9ad66017e0", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13134, + "estimated_input_tokens": 3284, + "estimated_eval_tokens": 6824 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is mostly a loose similarity cluster rather than true duplicates. 
All soft edges should be rejected: they pair different models, different failure modes, or different subsystems.", + "confidence": 0.88, + "canonical_issue_reason": "issue:44279 is the most umbrella-like of the set (\u201cDependency issue with transformers\u201d), so it is the closest thing to a cluster anchor, though it is still too generic to be a true duplicate target for the more specific bugs here.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44279 is the broadest and most reusable issue title in the group, making it the best representative if one must be chosen.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44871", + "right": "issue:44977", + "accept": false, + "reason": "Different model families and different bugs: Gemma-3 EOS-token config mismatch vs Qwen3.5 flash-attention generation failure." + }, + { + "left": "issue:44206", + "right": "issue:44479", + "accept": false, + "reason": "Both are regressions, but they affect different models and code paths: a feature extractor arg crash vs video-input regression in Qwen VLMs." + }, + { + "left": "issue:43756", + "right": "issue:45440", + "accept": false, + "reason": "Both mention model-implementation divergence, but they concern different architectures and distinct defects, so they are not the same underlying bug." + }, + { + "left": "issue:44610", + "right": "issue:45412", + "accept": false, + "reason": "Processor input-size mismatch and model memory-release leak are unrelated failure modes." + }, + { + "left": "issue:45103", + "right": "issue:45372", + "accept": false, + "reason": "Both are loading/import problems, but one is a docstring-annotation crash and the other is a missing dependency symbol during Gemma 4 processor loading." + }, + { + "left": "issue:29942", + "right": "issue:30064", + "accept": false, + "reason": "Flash Attention test failures and void segmentation-map processing are unrelated issues." 
+ }, + { + "left": "issue:43504", + "right": "issue:44857", + "accept": false, + "reason": "A pretrained preset legacy-field loading bug is not the same as an AMP/CUDA loss crash." + }, + { + "left": "issue:43824", + "right": "issue:44279", + "accept": false, + "reason": "A specific missing import for Qwen2_5_VL is not the same as a vague general dependency issue." + }, + { + "left": "issue:45072", + "right": "issue:45229", + "accept": false, + "reason": "Bfloat16 dtype mismatches and multi-GPU CUDA OOM are different concrete problems." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 73, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:29127", + "issue:32090", + "issue:33290", + "issue:33357", + "issue:34567", + "issue:39692", + "issue:43262", + "issue:43334", + "issue:43526", + "issue:43531", + "issue:43653", + "issue:44186", + "issue:44206", + "issue:44442", + "issue:44464", + "issue:44977", + "issue:45245", + "issue:45491" + ], + "soft_pairs": [ + "issue:33357|issue:39692", + "issue:29127|issue:34567", + "issue:32090|issue:33290", + "issue:43334|issue:43531", + "issue:43653|issue:44442", + "issue:44977|issue:45245", + "issue:43526|issue:44186", + "issue:44464|issue:45491", + "issue:43262|issue:44206" + ], + "prepared_review_unit_hash": "9954cd716c46eee52c4c5dfd20d1941c64897272011e068323d43d4c6d9de9fe", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + 
"serialized_chars": 13241, + "estimated_input_tokens": 3311, + "estimated_eval_tokens": 6878 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster appears to be a loose thematic grouping of unrelated Hugging Face issues; none of the soft pairs look like the same underlying bug or change, so no duplicates should be merged.", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:33357", + "right": "issue:39692", + "accept": false, + "reason": "Both are multimodal model issues, but one is a MacOS bus error for CLIP and the other is a SigLIP2 docs/model-processor mismatch with quantization errors; different failure modes and code paths." + }, + { + "left": "issue:29127", + "right": "issue:34567", + "accept": false, + "reason": "LayoutLMv3 error-message clarity and TrainerState token-count tracking are unrelated areas with different symptoms and fixes." + }, + { + "left": "issue:32090", + "right": "issue:33290", + "accept": false, + "reason": "Trainer NoneType broadcast failure and DeepSpeed Adafactor OOM are distinct training/runtime problems; not the same bug." + }, + { + "left": "issue:43334", + "right": "issue:43531", + "accept": false, + "reason": "Qwen3-VL config loading pad_token_id issue and Qwen3-MoE sliding_window behavior are separate model-specific bugs." + }, + { + "left": "issue:43653", + "right": "issue:44442", + "accept": false, + "reason": "BigBirdTokenizer special-token registration/empty decode and AutoTokenizer failing to load FastSpeech2ConformerTokenizer are different tokenizer-loading defects." + }, + { + "left": "issue:44977", + "right": "issue:45245", + "accept": false, + "reason": "Flash-attention generation bug for Qwen3.5 and a category-cardinality runtime error are unrelated." 
+ }, + { + "left": "issue:43526", + "right": "issue:44186", + "accept": false, + "reason": "BEiT image processor reduce_labels behavior and LayoutLMv2 NER/batching crashes involve different preprocessing components and failures." + }, + { + "left": "issue:44464", + "right": "issue:45491", + "accept": false, + "reason": "Compiled-forward chunked generation inconsistency and Gemma3 mixed-length sliding-window NaNs are both generation-related but arise from different code paths and symptoms." + }, + { + "left": "issue:43262", + "right": "issue:44206", + "accept": false, + "reason": "Audio chat-template sampling-rate default mismatch and LasrFeatureExtractor unsupported center arg are separate audio-processing bugs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 74, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:29127", + "issue:29942", + "issue:33357", + "issue:35141", + "issue:36010", + "issue:42915", + "issue:43232", + "issue:43381", + "issue:43388", + "issue:43526", + "issue:43701", + "issue:43761", + "issue:43931", + "issue:44079", + "issue:44442", + "issue:44857", + "issue:45072" + ], + "soft_pairs": [ + "issue:43381|issue:43701", + "issue:43761|issue:44079", + "issue:29942|issue:36010", + "issue:29127|issue:33357", + "issue:43232|issue:43388", + "issue:29127|issue:35141", + "issue:43526|issue:44857", + "issue:44442|issue:45072", + "issue:42915|issue:43931" + ], + "prepared_review_unit_hash": "6a02a9e666c3ac078836337cdea0cf2585a162a2f47db93c02290901581df315", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 
689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12553, + "estimated_input_tokens": 3139, + "estimated_eval_tokens": 6534 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are a bundle of unrelated closed issues that only match superficially at the text-similarity level; none of the soft pairs look like the same underlying bug or fix.", + "confidence": 0.91, + "canonical_issue_reason": "No true duplicate cluster is evident. If one issue has to serve as the anchor, issue #43761 is the most concrete, self-contained regression report with a clearly stated model behavior mismatch.", + "canonical_pr_reason": null, + "best_issue_reason": "#43761 is the strongest standalone issue report: specific symptom, clear expected/actual behavior, and a narrow code path. The others are similarly valid but not better representatives of a duplicate set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43381", + "right": "issue:43701", + "accept": false, + "reason": "Eval-mode gradient checkpointing and resume_from_checkpoint key mismatch are different failure modes in different parts of training workflow." + }, + { + "left": "issue:43761", + "right": "issue:44079", + "accept": false, + "reason": "One is a CLIPVisionModel hidden-states regression; the other is ModelOutput key assignment logic. Related area, but not the same bug." + }, + { + "left": "issue:29942", + "right": "issue:36010", + "accept": false, + "reason": "Flash Attention 2 test failures and a GenerationMixin import error are unrelated subsystems and symptoms." + }, + { + "left": "issue:29127", + "right": "issue:33357", + "accept": false, + "reason": "Error-message clarity for LayoutLMv3 boxes vs. a MacOS bus error in CLIP are completely different issues." 
+ }, + { + "left": "issue:43232", + "right": "issue:43388", + "accept": false, + "reason": "Generation kwargs sync logic and gather_for_metrics label truncation are separate code paths and defects." + }, + { + "left": "issue:29127", + "right": "issue:35141", + "accept": false, + "reason": "LayoutLMv3 validation messaging and token-embedding reinitialization are unrelated bugs." + }, + { + "left": "issue:43526", + "right": "issue:44857", + "accept": false, + "reason": "BeitImageProcessorFast label reduction and LwDetrImageLoss AMP/CUDA crashing are different model/component failures." + }, + { + "left": "issue:44442", + "right": "issue:45072", + "accept": false, + "reason": "Tokenizer loading failure and bfloat16 dtype mismatches in inference are not the same underlying change or bug." + }, + { + "left": "issue:42915", + "right": "issue:43931", + "accept": false, + "reason": "Qwen3Moe FineGrainedFP8Config failure and Qwen3-VL weight-shape mismatch are distinct model-loading/runtime issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 75, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43316", + "issue:43525", + "issue:43526", + "issue:43723", + "issue:43761", + "issue:43931", + "issue:43994", + "issue:44188", + "issue:44206", + "issue:44479", + "issue:44743", + "issue:44811", + "issue:45003", + "issue:45216", + "issue:45290", + "issue:45440", + "issue:45479" + ], + "soft_pairs": [ + "issue:43723|issue:45479", + "issue:43526|issue:43994", + "issue:45003|issue:45440", + "issue:44743|issue:44811", + "issue:44479|issue:45290", + "issue:43316|issue:43525", + "issue:43761|issue:44188", + "issue:43994|issue:44206", + "issue:43931|issue:45216" + ], + "prepared_review_unit_hash": "ba868f1870e437298f38de5ded15d6fd76bc44ec7c0f174814e5299b1d456d70", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12706, + "estimated_input_tokens": 3177, + "estimated_eval_tokens": 6610 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the soft-similarity pairs are all false positives, covering unrelated regressions in configs, tokenizers, vision/audio processors, model loading/saving, and attention behavior. 
No duplicate/canonical issue emerges.", + "confidence": 0.97, + "canonical_issue_reason": "No canonical issue: the items do not describe the same underlying bug, and the strongest-looking pairs still differ by model/component and failure mode.", + "canonical_pr_reason": "No PRs are present in this cluster.", + "best_issue_reason": "No single issue is a good representative; the cluster spans several unrelated topics, so selecting one would be arbitrary.", + "best_pr_reason": "No PRs are present in this cluster.", + "soft_edge_verdicts": [ + { + "left": "issue:43723", + "right": "issue:45479", + "accept": false, + "reason": "Tokenizer loading in v5 vs. degenerate classification loss; unrelated components and symptoms." + }, + { + "left": "issue:43526", + "right": "issue:43994", + "accept": false, + "reason": "BeitImageProcessorFast label reduction bug vs. SigLIP2 model inference issue; different code paths." + }, + { + "left": "issue:45003", + "right": "issue:45440", + "accept": false, + "reason": "Unsafe sys.modules access vs. DeepSeekV3MoE implementation divergence; not the same bug." + }, + { + "left": "issue:44743", + "right": "issue:44811", + "accept": false, + "reason": "Recurrent cache-state reset bug vs. Whisper batch_decode skip_special_tokens handling; unrelated functionality." + }, + { + "left": "issue:44479", + "right": "issue:45290", + "accept": false, + "reason": "Video-input regression in Qwen VL models vs. chat-template crash on tool-call messages; different failure modes." + }, + { + "left": "issue:43316", + "right": "issue:43525", + "accept": false, + "reason": "Both are config/API issues, but for different models and missing fields; not a shared underlying defect." + }, + { + "left": "issue:43761", + "right": "issue:44188", + "accept": false, + "reason": "CLIPVision hidden_states regression vs. attention-kernel divergence under torch.compile; distinct code paths." 
+ }, + { + "left": "issue:43994", + "right": "issue:44206", + "accept": false, + "reason": "SigLIP2 nonsensical outputs vs. LasrFeatureExtractor center-arg crash; different subsystems and symptoms." + }, + { + "left": "issue:43931", + "right": "issue:45216", + "accept": false, + "reason": "Model weight-shape mismatch on load vs. incorrect checkpoint save_pretrained regression; loading and saving are different problems." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 76, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:29942", + "issue:33290", + "issue:33666", + "issue:34567", + "issue:39692", + "issue:43575", + "issue:43646", + "issue:43673", + "issue:43701", + "issue:44077", + "issue:44387", + "issue:44442", + "issue:44743", + "issue:44898", + "issue:44964", + "issue:45440", + "issue:45491" + ], + "soft_pairs": [ + "issue:43575|issue:43701", + "issue:43646|issue:44077", + "issue:44964|issue:45440", + "issue:39692|issue:45491", + "issue:43575|issue:44387", + "issue:43673|issue:44743", + "issue:29942|issue:33666", + "issue:44442|issue:44898", + "issue:33290|issue:34567" + ], + "prepared_review_unit_hash": "a57ca131894aa5529f488f07d2d0b7fba399a89722dc101d14b1297088eb674d", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12408, + "estimated_input_tokens": 3102, + "estimated_eval_tokens": 6460 + }, + "overflow_policy": 
"truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are mostly unrelated; several share broad themes like OOMs, cache/state handling, or Transformers v5 regressions, but none look like the same underlying bug/change closely enough to merge.", + "confidence": 0.93, + "canonical_issue_reason": "No pair looks like a true duplicate cluster. If one issue must serve as the representative triage anchor, issue 45440 is the most active and concretely described open bug report, with explicit inbound references and a clear model-specific divergence.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45440 is the strongest standalone issue to keep as the lead reference: it is open, has inbound references, and describes a specific reproducible correctness gap in a native model implementation.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43575", + "right": "issue:43701", + "accept": false, + "reason": "Both mention failure during loading/resume, but one is a tensor-parallel OOM for a large Qwen2 model and the other is a checkpoint key mismatch; different failure modes and code paths." + }, + { + "left": "issue:43646", + "right": "issue:44077", + "accept": false, + "reason": "Both relate to initialization changes in Transformers 5.0.0, but one is a broad custom-model init regression and the other is a specific patchtsmixer post_init policy issue; not the same bug." + }, + { + "left": "issue:44964", + "right": "issue:45440", + "accept": false, + "reason": "Different models and different problems: Phi-4 multimodal loading failure versus DeepSeekV3 implementation divergence." + }, + { + "left": "issue:39692", + "right": "issue:45491", + "accept": false, + "reason": "One is a documentation example error for SigLIP2; the other is a runtime NaN bug in Gemma3 sliding-window attention. No shared underlying change." 
+ }, + { + "left": "issue:43575", + "right": "issue:44387", + "accept": false, + "reason": "Both are OOM reports, but one is caused by tensor-parallel loading of a specific model and the other by increased reserved memory under int4 quantization in Transformers 5.x; too different to be duplicates." + }, + { + "left": "issue:43673", + "right": "issue:44743", + "accept": false, + "reason": "Both involve cache/state semantics, but one is a missing GenerationMixin cache in chunked prefill and the other is recurrent states resetting in modular_qwen3_5; different implementations and symptoms." + }, + { + "left": "issue:29942", + "right": "issue:33666", + "accept": false, + "reason": "Flash Attention 2 test failures and Qwen2-VL multi-GPU training are unrelated beyond both touching training/inference performance areas." + }, + { + "left": "issue:44442", + "right": "issue:44898", + "accept": false, + "reason": "Tokenizer loading failure for FastSpeech2ConformerTokenizer is unrelated to Perceiver interpolation failure on non-default image sizes." + }, + { + "left": "issue:33290", + "right": "issue:34567", + "accept": false, + "reason": "AdaFactor/DeepSpeed OOM and TrainerState token-count tracking are completely different issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 77, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33290", + "issue:34689", + "issue:35141", + "issue:39692", + "issue:41628", + "issue:43381", + "issue:43493", + "issue:43611", + "issue:43749", + "issue:43827", + "issue:43873", + "issue:43931", + "issue:44077", + "issue:44206", + "issue:44479", + "issue:44871", + "issue:45372" + ], + "soft_pairs": [ + "issue:41628|issue:45372", + "issue:34689|issue:43611", + "issue:43493|issue:43827", + "issue:43381|issue:43873", + "issue:44077|issue:44479", + "issue:39692|issue:44206", + "issue:33290|issue:35141", + "issue:33290|issue:34689", + "issue:43873|issue:44871", + "issue:43749|issue:43931" + ], + "prepared_review_unit_hash": "6ff20feea06782d44303a48c2c251a07c0ba81bbc85ebfd0546a8393ede63363", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12916, + "estimated_input_tokens": 3229, + "estimated_eval_tokens": 6714 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a grab bag of unrelated Transformers issues: model-loading regressions, docs breakage, quantization/offloading, and a few model-specific bugs. 
None of the proposed soft pairs look like true duplicates of the same underlying change or bug.", + "confidence": 0.93, + "canonical_issue_reason": "issue:43873 is the broadest and still-open item, with inbound references and a general quantization/offloading failure description, so it is the best single representative if one must be chosen.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43873 is the most central representative of the cluster\u2019s only broad infrastructure-style problem; the others are narrower, model- or version-specific regressions and should not be collapsed into it.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41628", + "right": "issue:45372", + "accept": false, + "reason": "Both are import errors, but they concern different symbols, different upstream causes, and different model flows; not the same bug." + }, + { + "left": "issue:34689", + "right": "issue:43611", + "accept": false, + "reason": "Both are model-loading regressions, but one is Llama 3.2 Vision-specific and the other is a base_model_prefix loading change in v5; too different to merge." + }, + { + "left": "issue:43493", + "right": "issue:43827", + "accept": false, + "reason": "SigLIP2 implementation mismatch and docs still using pipeline() after v5 removals are unrelated problem types." + }, + { + "left": "issue:43381", + "right": "issue:43873", + "accept": false, + "reason": "Gradient checkpointing in eval mode and quantization/offloading failures affect different code paths and fixes." + }, + { + "left": "issue:44077", + "right": "issue:44479", + "accept": false, + "reason": "A patchtsmixer post_init restriction is unrelated to the video-input regression in Qwen multimodal models." + }, + { + "left": "issue:39692", + "right": "issue:44206", + "accept": false, + "reason": "SigLIP2 docs/example errors are not the same bug as the LasrFeatureExtractor center-argument regression." 
+ }, + { + "left": "issue:33290", + "right": "issue:35141", + "accept": false, + "reason": "Adafactor OOM in DeepSpeed and token embedding reinitialization in post_init are different mechanisms and code paths." + }, + { + "left": "issue:33290", + "right": "issue:34689", + "accept": false, + "reason": "These share only a broad theme of model loading/runtime failure; the concrete bugs and affected features are unrelated." + }, + { + "left": "issue:43873", + "right": "issue:44871", + "accept": false, + "reason": "Quantization/offloading behavior and Gemma eos_token_id inconsistency are separate configuration issues." + }, + { + "left": "issue:43749", + "right": "issue:43931", + "accept": false, + "reason": "FSDP CPU RAM efficient loading and Qwen3-VL shape mismatch are different loading failures with different root causes." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 78, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33290", + "issue:36331", + "issue:42175", + "issue:43316", + "issue:43381", + "issue:43540", + "issue:43643", + "issue:43856", + "issue:44162", + "issue:44190", + "issue:44496", + "issue:44514", + "issue:44792", + "issue:44855", + "issue:44877", + "issue:44913", + "issue:45137" + ], + "soft_pairs": [ + "issue:44190|issue:44496", + "issue:43643|issue:44913", + "issue:43540|issue:44514", + "issue:43316|issue:44877", + "issue:42175|issue:43316", + "issue:44162|issue:44792", + "issue:43381|issue:43856", + "issue:44855|issue:45137", + "issue:33290|issue:36331" + ], + "prepared_review_unit_hash": "8d454e6be231daa2dfc949a1f9402eb4d9f3b91153afa8c7aef214af00e4f27a", + 
"component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12510, + "estimated_input_tokens": 3128, + "estimated_eval_tokens": 6512 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are largely unrelated single issues across different subsystems; none of the soft pairs look like true duplicates. The clearest standalone bug report is the `compute_loss()` keyword regression.", + "confidence": 0.18, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "Issue 36331 is the most concrete and actionable: it has a precise traceback and a clear behavioral regression to fix.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44190", + "right": "issue:44496", + "accept": false, + "reason": "Different bugs: one is a local dataset loading failure in an example script, the other is an unrecognized model/config loading error." + }, + { + "left": "issue:43643", + "right": "issue:44913", + "accept": false, + "reason": "Both involve config loading, but the failures differ: missing fields from `trust_remote_code=True` vs. a `rotary_pct` value not persisting on reload." + }, + { + "left": "issue:43540", + "right": "issue:44514", + "accept": false, + "reason": "Both are Qwen multimodal issues, but they hit different code paths and failure modes: video-input processing vs. batched chat template padding." + }, + { + "left": "issue:43316", + "right": "issue:44877", + "accept": false, + "reason": "Both are config/API-related, but they concern different models and different problems (`Gemma3TextConfig` API mismatch vs. strict config blocking `granite_speech`)." 
+ }, + { + "left": "issue:42175", + "right": "issue:43316", + "accept": false, + "reason": "Unrelated: packaging/backend dependency omission versus a config API discrepancy." + }, + { + "left": "issue:44162", + "right": "issue:44792", + "accept": false, + "reason": "Different model/test failures with no shared concrete code-path or bug." + }, + { + "left": "issue:43381", + "right": "issue:43856", + "accept": false, + "reason": "One is about gradient checkpointing in eval mode; the other is MoE training memory inefficiency. Related themes, but not the same bug." + }, + { + "left": "issue:44855", + "right": "issue:45137", + "accept": false, + "reason": "Different failures: Python 3.13 parsing/`@torch.jit.script` indentation issue versus DeepSpeed ZeRO3 deque underflow." + }, + { + "left": "issue:33290", + "right": "issue:36331", + "accept": false, + "reason": "Completely different problems: DeepSpeed/Adafactor OOM versus a `compute_loss()` signature mismatch." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 79, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:16998", + "issue:22355", + "issue:25251", + "issue:28282", + "issue:34689", + "issue:36010", + "issue:41950", + "issue:43065", + "issue:43316", + "issue:43388", + "issue:43404", + "issue:43441", + "issue:43475", + "issue:43688", + "issue:43716", + "issue:44617", + "issue:45127", + "issue:45200" + ], + "soft_pairs": [ + "issue:43388|issue:43688", + "issue:34689|issue:36010", + "issue:43316|issue:43716", + "issue:43404|issue:43441", + "issue:43065|issue:44617", + "issue:41950|issue:43475", + "issue:45127|issue:45200", + 
"issue:16998|issue:22355", + "issue:25251|issue:28282" + ], + "prepared_review_unit_hash": "aa35ac623b81766c52209284a77d0d9034b44e2a312610d40b3e1ffe42f3b597", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13052, + "estimated_input_tokens": 3263, + "estimated_eval_tokens": 6782 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the items cover unrelated transformers bugs, import errors, model-specific regressions, and pipeline quirks. None of the soft pairs look like the same underlying issue, so there is no good canonical item for the cluster.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43388", + "right": "issue:43688", + "accept": false, + "reason": "Both mention metrics/loss, but one is a dataloader/metric-gathering bug with tuple labels and the other is an auxiliary-loss normalization issue in different models. Different code paths and fixes." + }, + { + "left": "issue:34689", + "right": "issue:36010", + "accept": false, + "reason": "Both are import/load failures, but one is a Transformers 4.46.2 model-loading regression for Llama 3.2 Vision and the other is a missing `transformers.generation.GenerationMixin` import. Not the same bug." + }, + { + "left": "issue:43316", + "right": "issue:43716", + "accept": false, + "reason": "Different model families and failures: Gemma3TextConfig API inconsistency versus Mistral-3 image-preprocessor dtype mismatch. No shared underlying code-path problem." 
+ }, + { + "left": "issue:43404", + "right": "issue:43441", + "accept": false, + "reason": "Both concern Mistral-family models, but one is about tied `lm_head` weights in `Mistral3ForConditionalGeneration` and the other is a FlashAttention failure in Ministral-3. Distinct issues." + }, + { + "left": "issue:43065", + "right": "issue:44617", + "accept": false, + "reason": "Both are about Sam3Video, but one reports a dummy `nn.Conv2d` in `Sam3PixelDecoder` while the other is CUDA OOM. Different root causes." + }, + { + "left": "issue:41950", + "right": "issue:43475", + "accept": false, + "reason": "Both touch video/SAM3 code, but one is a pipeline selecting image processors and the other is a missing `fpn_position_embeddings` attribute on an output object. Separate bugs." + }, + { + "left": "issue:45127", + "right": "issue:45200", + "accept": false, + "reason": "Both involve token/embedding-related behavior, but one is LoRA merging with extended vocabulary causing collapse and the other is `mm_token_type_ids` defaulting for Gemma 4 text-only fine-tuning. Not the same change." + }, + { + "left": "issue:16998", + "right": "issue:22355", + "accept": false, + "reason": "Completely unrelated: a `model_max_length` question for DeBERTa-V3 versus a missing `transformers.onnx` module import." + }, + { + "left": "issue:25251", + "right": "issue:28282", + "accept": false, + "reason": "Different subsystems and symptoms: pipeline `top_k` output nesting versus `AutoModel` failing because PyTorch is absent. No shared underlying bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 80, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43295", + "issue:43452", + "issue:43550", + "issue:43606", + "issue:43646", + "issue:43701", + "issue:43761", + "issue:43825", + "issue:44077", + "issue:44242", + "issue:44479", + "issue:44496", + "issue:44534", + "issue:44734", + "issue:44871", + "issue:44913", + "issue:45406" + ], + "soft_pairs": [ + "issue:43761|issue:44479", + "issue:43295|issue:43825", + "issue:44734|issue:45406", + "issue:43295|issue:44871", + "issue:44077|issue:44534", + "issue:43550|issue:43701", + "issue:43452|issue:44496", + "issue:43606|issue:43701", + "issue:43646|issue:44913", + "issue:43761|issue:44242" + ], + "prepared_review_unit_hash": "d11b8e71f3b074bfa5d6c3da796298ff2480343b7b7734606707897a77536596", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12957, + "estimated_input_tokens": 3240, + "estimated_eval_tokens": 6736 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are not a duplicate cluster; they span unrelated regressions in processor/tokenizer handling, model loading, serving, config reload, training, and video/model-specific failures. 
None of the soft edges look like the same underlying bug or change.", + "confidence": 0.97, + "canonical_issue_reason": "No single canonical issue fits: the reports describe distinct failures in different code paths and subsystems.", + "canonical_pr_reason": null, + "best_issue_reason": "If forced to pick a representative, issue 43646 is the broadest user-facing regression, but it still does not unify the rest of the set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43761", + "right": "issue:44479", + "accept": false, + "reason": "Both are regressions, but one is CLIPVision hidden_states output and the other is video-input handling for Qwen VL models; different models and failure modes." + }, + { + "left": "issue:43295", + "right": "issue:43825", + "accept": false, + "reason": "Processor/tokenizer access regression vs pipeline task-support error message; unrelated symptoms and code paths." + }, + { + "left": "issue:44734", + "right": "issue:45406", + "accept": false, + "reason": "Both involve transformers serve, but one is KV-cache continuation indexing and the other is Gemma4Processor missing _tokenizer; different bugs." + }, + { + "left": "issue:43295", + "right": "issue:44871", + "accept": false, + "reason": "processor.tokenizer breakage vs Gemma-3 eos_token_id mismatch; separate model/config issues." + }, + { + "left": "issue:44077", + "right": "issue:44534", + "accept": false, + "reason": "Optional post_init validation for patchtsmixer is not the same as non-persistent buffer initialization junk in v5." + }, + { + "left": "issue:43550", + "right": "issue:43701", + "accept": false, + "reason": "torch.compile + SDPA failure for Bamba is unrelated to resume_from_checkpoint key mismatch." 
+ }, + { + "left": "issue:43452", + "right": "issue:44496", + "accept": false, + "reason": "Both are loading failures, but one is gguf_file support breaking from_pretrained paths and the other is an unrecognized model config lacking model_type; different root causes." + }, + { + "left": "issue:43606", + "right": "issue:43701", + "accept": false, + "reason": "CPU offload device mismatch for Bark is unrelated to checkpoint resume key mapping." + }, + { + "left": "issue:43646", + "right": "issue:44913", + "accept": false, + "reason": "Custom model initialization breakage in v5 is not the same as GPTNeoXConfig rotary_pct not persisting on reload." + }, + { + "left": "issue:43761", + "right": "issue:44242", + "accept": false, + "reason": "Hidden_states regression in CLIPVisionModel is unrelated to load-balancing loss omission when output_router_logits=False." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 81, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42617", + "issue:43065", + "issue:43525", + "issue:43531", + "issue:43575", + "issue:43742", + "issue:43873", + "issue:43906", + "issue:43931", + "issue:44060", + "issue:44479", + "issue:44521", + "issue:44610", + "issue:44734", + "issue:44792", + "issue:44977", + "issue:45290", + "issue:45381" + ], + "soft_pairs": [ + "issue:43906|issue:45290", + "issue:43931|issue:44060", + "issue:42617|issue:43575", + "issue:43575|issue:43742", + "issue:44521|issue:45381", + "issue:43525|issue:43531", + "issue:44479|issue:44734", + "issue:44792|issue:44977", + "issue:43065|issue:44610", + "issue:43575|issue:43873" + ], + 
"prepared_review_unit_hash": "bb5280220eadea6a0f7118bb43d958b1e926fad19f54fb0202f0d309c682bf5d", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13274, + "estimated_input_tokens": 3319, + "estimated_eval_tokens": 6894 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The items are mostly unrelated issue reports spanning different models, errors, and subsystems. None of the proposed soft pairs look like true duplicates or the same fix target, so all soft-edge merges should be rejected.", + "confidence": 0.93, + "canonical_issue_reason": "issue:44521 is the strongest representative by activity and breadth: it is open, has the most inbound references/discussion, and describes a concrete multimodal/template bug with clear reproduction and impact.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44521 is the best issue candidate because it is active, well-scoped, and central enough to represent the cluster better than the many one-off model-specific reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43906", + "right": "issue:45290", + "accept": false, + "reason": "Both involve chat/template behavior, but one is a reproduction of an older issue and the other is a tokenizer crash on assistant tool-call messages. Different failure modes and likely different code paths." + }, + { + "left": "issue:43931", + "right": "issue:44060", + "accept": false, + "reason": "Different models and different bugs: weight-shape mismatch in Qwen3-VL vs a tied-weights warning in Qwen3-Next. Not the same underlying defect." 
+ }, + { + "left": "issue:42617", + "right": "issue:43575", + "accept": false, + "reason": "A 3d_parallel.py runtime problem is unrelated to Qwen2-57B tensor-parallel OOM. Same broad runtime area, but not the same bug." + }, + { + "left": "issue:43575", + "right": "issue:43742", + "accept": false, + "reason": "Both are model-loading issues, but one is OOM during TP loading of Qwen2-57B and the other is a key error for MobileLLM-125M. Different symptoms, models, and fixes." + }, + { + "left": "issue:44521", + "right": "issue:45381", + "accept": false, + "reason": "Both concern multimodal/video preprocessing, but one is all-zero assistant masks from apply_chat_template and the other is wrong vision_position_ids. Similar area, not the same concrete bug." + }, + { + "left": "issue:43525", + "right": "issue:43531", + "accept": false, + "reason": "Different models and unrelated problems: missing pad_token_id on Llama4Config versus a sliding_window issue on Qwen3-MoE." + }, + { + "left": "issue:44479", + "right": "issue:44734", + "accept": false, + "reason": "Video-input regression in several Qwen VL variants versus a serve-side KV-cache continuation crash from wrong tensor indexing. Distinct code paths and failure modes." + }, + { + "left": "issue:44792", + "right": "issue:44977", + "accept": false, + "reason": "Janus image-generation test failure is not the same issue as Qwen3.5 flash-attention generation breakage. Different models and symptoms." + }, + { + "left": "issue:43065", + "right": "issue:44610", + "accept": false, + "reason": "Both touch vision models, but one is a dummy Conv2d in Sam3PixelDecoder and the other is an OmDet-Turbo processor/model input-size mismatch. Not the same defect." + }, + { + "left": "issue:43575", + "right": "issue:43873", + "accept": false, + "reason": "OOM from tensor-parallel model loading is not the same as offloading failing under quantization. Related resource-management theme, but different underlying bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 82, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:32090", + "issue:35141", + "issue:41720", + "issue:43388", + "issue:43582", + "issue:43650", + "issue:43827", + "issue:43994", + "issue:44190", + "issue:44220", + "issue:44464", + "issue:44534", + "issue:44871", + "issue:44913", + "issue:44933", + "issue:44964", + "issue:45245", + "issue:45362" + ], + "soft_pairs": [ + "issue:44933|issue:45362", + "issue:44464|issue:44871", + "issue:32090|issue:35141", + "issue:41720|issue:45245", + "issue:43827|issue:43994", + "issue:44534|issue:44913", + "issue:43388|issue:43582", + "issue:43650|issue:44220", + "issue:44190|issue:44964" + ], + "prepared_review_unit_hash": "50ff33f9c7c06ce96e196fa703ef6252fa0cb070e0bc5e311ac9de7ca6c0b645", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13089, + "estimated_input_tokens": 3273, + "estimated_eval_tokens": 6802 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the issues cover unrelated areas (trainer/distributed, embedding resize, device mapping, docs, model loading, configs, generation, etc.). 
None of the soft pairs appear to be the same underlying bug or change, so all candidate duplicate edges should be rejected.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44933", + "right": "issue:45362", + "accept": false, + "reason": "Different problems: an import/API issue in image_utils vs a Qwen3.5 chat crash." + }, + { + "left": "issue:44464", + "right": "issue:44871", + "accept": false, + "reason": "Different failure modes: compiled-forward chunked generation mismatch vs Gemma-3 eos_token_id config inconsistency." + }, + { + "left": "issue:32090", + "right": "issue:35141", + "accept": false, + "reason": "Trainer broadcast TypeError from NoneType is unrelated to embedding reinitialization after resize." + }, + { + "left": "issue:41720", + "right": "issue:45245", + "accept": false, + "reason": "Qwen3 auto device-mapping cuda assert is unrelated to the 2^24 category-count limit error." + }, + { + "left": "issue:43827", + "right": "issue:43994", + "accept": false, + "reason": "One is a docs update for removed pipeline() examples; the other is a model/pipeline runtime correctness bug." + }, + { + "left": "issue:44534", + "right": "issue:44913", + "accept": false, + "reason": "Non-persistent buffer junk on v5 is not the same as GPTNeoXConfig rotary_pct not persisting on reload." + }, + { + "left": "issue:43388", + "right": "issue:43582", + "accept": false, + "reason": "Metric-gather label truncation and Apple Silicon allocator warmup TypeError are unrelated." + }, + { + "left": "issue:43650", + "right": "issue:44220", + "accept": false, + "reason": "The placeholder 'ADD THE DATA' issue is unrelated to the fbank feature extraction bug." + }, + { + "left": "issue:44190", + "right": "issue:44964", + "accept": false, + "reason": "Both are loading-related, but they concern different scripts/models and different root causes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 83, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43335", + "issue:43540", + "issue:43550", + "issue:43644", + "issue:43761", + "issue:43976", + "issue:44077", + "issue:44190", + "issue:44355", + "issue:44496", + "issue:44734", + "issue:44743", + "issue:44855", + "issue:44877", + "issue:44913", + "issue:45127", + "issue:45357", + "issue:45372" + ], + "soft_pairs": [ + "issue:43761|issue:44743", + "issue:45127|issue:45357", + "issue:44355|issue:44855", + "issue:43540|issue:44734", + "issue:44496|issue:45372", + "issue:43976|issue:44855", + "issue:43644|issue:44913", + "issue:43335|issue:44855", + "issue:44077|issue:44877", + "issue:43550|issue:44190" + ], + "prepared_review_unit_hash": "ea394f716f1284436f1ba8c5811653863838379df5916da9f907ce7ae7258dbf", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13623, + "estimated_input_tokens": 3406, + "estimated_eval_tokens": 7068 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These reports cover unrelated bugs across different models, configs, loaders, and runtime paths. 
None of the soft pairs look like the same underlying defect, so they should remain separate.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "If one issue must be highlighted, issue 45372 is the broadest runtime regression with active references, but it is not a duplicate anchor for the rest of the set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43761", + "right": "issue:44743", + "accept": false, + "reason": "Different model families and failure modes: CLIPVisionModel hidden_states regression vs Qwen3 recurrent state reset in modular code." + }, + { + "left": "issue:45127", + "right": "issue:45357", + "accept": false, + "reason": "Both involve model save/load behavior, but one is LoRA merging with tied embeddings and the other is incorrect visual encoder keys for Qwen3.5; not the same bug." + }, + { + "left": "issue:44355", + "right": "issue:44855", + "accept": false, + "reason": "Unrelated import/runtime errors: compiled Python files vs a Python 3.13 TorchScript decorator parsing problem in DebertaV2Model." + }, + { + "left": "issue:43540", + "right": "issue:44734", + "accept": false, + "reason": "Different subsystems and symptoms: Qwen3OmniMoe video input ValueError vs transformers serve KV-cache continuation indexing crash." + }, + { + "left": "issue:44496", + "right": "issue:45372", + "accept": false, + "reason": "Separate loading failures: unrecognized model/config.json handling vs a mistral_common ReasoningEffort import break in Gemma 4 processor loading." + }, + { + "left": "issue:43976", + "right": "issue:44855", + "accept": false, + "reason": "Version/platform incompatibility in Transformers 5.1.0 vs a specific Python 3.13 TorchScript parsing regression; distinct causes." 
+ }, + { + "left": "issue:43644", + "right": "issue:44913", + "accept": false, + "reason": "Different configuration persistence bugs: non-persistent buffer initialization junk vs GPTNeoX rotary_pct not preserved on reload." + }, + { + "left": "issue:43335", + "right": "issue:44855", + "accept": false, + "reason": "SwitchTransformers sparse layer creation on single-layer models is unrelated to the DebertaV2 Python 3.13 import/parsing issue." + }, + { + "left": "issue:44077", + "right": "issue:44877", + "accept": false, + "reason": "PatchTSMixer post_init acceptance and granite_speech strict config loading are separate config-validation issues with different targets." + }, + { + "left": "issue:43550", + "right": "issue:44190", + "accept": false, + "reason": "Bamba-9B-v2 torch.compile/SDPA failure is unrelated to local dataset loading in the image classification no-trainer script." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 84, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36246", + "issue:38175", + "issue:38617", + "issue:39692", + "issue:41628", + "issue:41950", + "issue:43065", + "issue:43116", + "issue:43295", + "issue:43404", + "issue:44016", + "issue:44186", + "issue:44190", + "issue:44734", + "issue:44871", + "issue:45216", + "issue:45245", + "issue:45276" + ], + "soft_pairs": [ + "issue:43116|issue:44190", + "issue:43295|issue:45276", + "issue:41950|issue:44016", + "issue:36246|issue:41628", + "issue:43404|issue:45216", + "issue:36246|issue:38617", + "issue:39692|issue:44190", + "issue:44186|issue:44734", + "issue:44871|issue:45245", + "issue:38175|issue:43065" + 
], + "prepared_review_unit_hash": "62aa38d2d7efa0020b46243abc095e253fd2fa9c61b2004e2810663418bbc1c1", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13648, + "estimated_input_tokens": 3412, + "estimated_eval_tokens": 7080 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous and does not form a true duplicate set. The soft pairs are only superficial theme matches (mostly around import errors or example failures), so none should be merged as duplicates and no single canonical issue stands out.", + "confidence": 0.93, + "canonical_issue_reason": "No clear canonical issue: the items span unrelated bugs in imports, docs/examples, tokenizer behavior, model save/load, and runtime errors.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a strong cluster-wide representative because there is no single underlying bug or change tying these items together.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43116", + "right": "issue:44190", + "accept": false, + "reason": "Different example-script failures: one is multi-label classification output handling, the other is local dataset loading for image classification." + }, + { + "left": "issue:43295", + "right": "issue:45276", + "accept": false, + "reason": "Different concrete bugs in different model paths: processor/tokenizer regression vs. resize_token_embeddings not updating Gemma4 embeddings." + }, + { + "left": "issue:41950", + "right": "issue:44016", + "accept": false, + "reason": "Unrelated failures: a video-classification pipeline bug vs. a notebook syntax error." 
+ }, + { + "left": "issue:36246", + "right": "issue:41628", + "accept": false, + "reason": "Both are import errors, but for different symbols and modules; they do not look like the same underlying code-path bug." + }, + { + "left": "issue:43404", + "right": "issue:45216", + "accept": false, + "reason": "Different model-specific issues: Mistral3 weight tying vs. Qwen3.5 save_pretrained checkpoint corruption." + }, + { + "left": "issue:36246", + "right": "issue:38617", + "accept": false, + "reason": "Distinct import failures for different names in different modules, not one shared missing export bug." + }, + { + "left": "issue:39692", + "right": "issue:44190", + "accept": false, + "reason": "Different problems in different examples: SigLIP2 docs/model mismatch and quantization failure vs. local dataset loading." + }, + { + "left": "issue:44186", + "right": "issue:44734", + "accept": false, + "reason": "Different subsystems and symptoms: tokenizer padding/NER crashes vs. server KV-cache indexing during responses." + }, + { + "left": "issue:44871", + "right": "issue:45245", + "accept": false, + "reason": "Completely different bugs: Gemma-3 eos token configuration vs. a category cardinality runtime limit." + }, + { + "left": "issue:38175", + "right": "issue:43065", + "accept": false, + "reason": "No shared underlying bug: SigLIP2 zero probabilities vs. a dummy Conv2d in Sam3PixelDecoder." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 85, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42175", + "issue:43295", + "issue:43479", + "issue:43854", + "issue:44016", + "issue:44112", + "issue:44220", + "issue:44246", + "issue:44279", + "issue:44466", + "issue:44683", + "issue:44792", + "issue:44913", + "issue:45003", + "issue:45335", + "issue:45375", + "issue:45446" + ], + "soft_pairs": [ + "issue:44016|issue:44220", + "issue:43479|issue:45375", + "issue:44246|issue:45003", + "issue:42175|issue:44279", + "issue:44683|issue:45446", + "issue:44466|issue:44913", + "issue:44112|issue:44792", + "issue:43854|issue:44792", + "issue:43295|issue:45335" + ], + "prepared_review_unit_hash": "ac0f8879a40388954a0f36df5b9bc35030042d950f5d133a3fb6cca404189c19", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12456, + "estimated_input_tokens": 3114, + "estimated_eval_tokens": 6484 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set; the items span unrelated bugs across configs, CI, notebooks, dependency issues, multimodal models, and attention/version checks. 
No single canonical issue or PR emerges.", + "confidence": 0.96, + "canonical_issue_reason": "No canonical issue: the issues describe distinct failures in different subsystems and code paths, with no strong evidence they are the same underlying bug.", + "canonical_pr_reason": null, + "best_issue_reason": "No best issue: none is a good representative of the whole set because there is no coherent duplicate cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44016", + "right": "issue:44220", + "accept": false, + "reason": "Notebook syntax error and `_torch_extract_fbank_features()` are unrelated failure modes in different areas; not the same bug." + }, + { + "left": "issue:43479", + "right": "issue:45375", + "accept": false, + "reason": "Both involve config handling, but they concern different model families and different fields/default-init behavior; too different to merge." + }, + { + "left": "issue:44246", + "right": "issue:45003", + "accept": false, + "reason": "`import transformers` slowness is a performance/import-path issue, while `sys.modules[]` access is a correctness/safety issue; not the same underlying problem." + }, + { + "left": "issue:42175", + "right": "issue:44279", + "accept": false, + "reason": "Both mention dependencies, but one is specifically missing TensorFlow in an extra install and the other is a generic dependency issue; no same code-path bug." + }, + { + "left": "issue:44683", + "right": "issue:45446", + "accept": false, + "reason": "Both touch flex_attention/version checks, but one is a torch>=2.9 compile failure and the other is an AuxRequest import guard problem; different fixes." + }, + { + "left": "issue:44466", + "right": "issue:44913", + "accept": false, + "reason": "Inconsistent `lm_head.weight` serialization and `rotary_pct` reload behavior are separate configuration/serialization bugs in different model code." 
+ }, + { + "left": "issue:44112", + "right": "issue:44792", + "accept": false, + "reason": "These are different CI test failures in different models; no shared concrete bug or mergeable fix path." + }, + { + "left": "issue:43854", + "right": "issue:44792", + "accept": false, + "reason": "GLM-4.7-Flash loading in tests and Janus image generation test failure are distinct model/test issues." + }, + { + "left": "issue:43295", + "right": "issue:45335", + "accept": false, + "reason": "Regression in `processor.tokenizer`/image passing and T5Gemma embedding resize behavior are different multimodal/modeling bugs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 86, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42907", + "issue:43299", + "issue:43441", + "issue:43643", + "issue:43673", + "issue:43824", + "issue:43976", + "issue:44016", + "issue:44062", + "issue:44075", + "issue:44112", + "issue:44336", + "issue:44368", + "issue:44496", + "issue:44933", + "issue:44964", + "issue:44987", + "issue:45216" + ], + "soft_pairs": [ + "issue:42907|issue:43441", + "issue:43299|issue:45216", + "issue:44016|issue:44062", + "issue:43643|issue:44496", + "issue:44112|issue:44964", + "issue:43673|issue:44336", + "issue:43976|issue:44987", + "issue:44075|issue:44368", + "issue:43824|issue:44933" + ], + "prepared_review_unit_hash": "1f18f01515425d291642756af22e7de7f31cd92a5e16711e0e4d15825a0d0f6d", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + 
"budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13174, + "estimated_input_tokens": 3294, + "estimated_eval_tokens": 6844 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "All candidate pairs look like separate regressions or unrelated bugs; none appear to be the same underlying issue/change. The closest pairs are only superficially similar by model family or import/config context, but the code paths differ.", + "confidence": 0.94, + "canonical_issue_reason": "issue:43824 is the strongest issue candidate overall: it is concrete, well-scoped, and has the most obvious regression signal and community traction among the set.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43824 is the best representative issue in this set because it states a specific failure mode and is easier to triage than the broader or more ambiguous reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42907", + "right": "issue:43441", + "accept": false, + "reason": "Both mention Ministral/Ministral-3, but one is about saving dequantized models and the other is a FlashAttention load failure. Different failure paths and likely different fixes." + }, + { + "left": "issue:43299", + "right": "issue:45216", + "accept": false, + "reason": "Qwen3VL MoE loading and Qwen3.5 save_pretrained regression are different model families and opposite operations (load vs save). Not the same bug." + }, + { + "left": "issue:44016", + "right": "issue:44062", + "accept": false, + "reason": "A notebook syntax error and an AddedToken argument TypeError are unrelated issues with no shared code path." 
+ }, + { + "left": "issue:43643", + "right": "issue:44496", + "accept": false, + "reason": "Both involve config/model loading, but one is trust_remote_code returning incomplete objects and the other is an unrecognized model lacking model_type. Related area, but not the same concrete bug." + }, + { + "left": "issue:44112", + "right": "issue:44964", + "accept": false, + "reason": "A stale GraniteSpeech CI test failure is unrelated to a Phi-4-multimodal model loading failure." + }, + { + "left": "issue:43673", + "right": "issue:44336", + "accept": false, + "reason": "Chunked_prefill GenerationMixin cache regression and ANSI color codes in loading_report are unrelated subsystems and symptoms." + }, + { + "left": "issue:43976", + "right": "issue:44987", + "accept": false, + "reason": "A Python version compatibility regression and a specific model-loading failure in physical-intelligence/fast are not the same underlying issue." + }, + { + "left": "issue:44075", + "right": "issue:44368", + "accept": false, + "reason": "SGD optimizer args being ignored and a tie_word_embeddings warning during Qwen3.5 LoRA fine-tuning are unrelated." + }, + { + "left": "issue:43824", + "right": "issue:44933", + "accept": false, + "reason": "Both are import errors, but they reference different missing symbols in different modules; too generic to treat as the same bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 87, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43299", + "issue:43526", + "issue:43577", + "issue:43854", + "issue:43901", + "issue:43976", + "issue:44242", + "issue:44279", + "issue:44351", + "issue:44496", + "issue:44521", + "issue:44964", + "issue:45042", + "issue:45200", + "issue:45216", + "issue:45278", + "issue:45325", + "issue:45357" + ], + "soft_pairs": [ + "issue:44496|issue:45042", + "issue:43976|issue:44279", + "issue:43299|issue:45357", + "issue:43526|issue:43901", + "issue:44242|issue:45200", + "issue:44521|issue:45325", + "issue:43577|issue:45216", + "issue:43901|issue:44496", + "issue:43854|issue:44964", + "issue:44351|issue:45278" + ], + "prepared_review_unit_hash": "57349bcb717d7a461a203672805cca492d6577ed2b41dfc521e0e14b020293e9", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13475, + "estimated_input_tokens": 3369, + "estimated_eval_tokens": 6994 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These look like a collection of distinct Transformers regression reports rather than one duplicate cluster. 
The soft-linked pairs share only broad topical similarity (model loading, import errors, multimodal behavior, or version regressions), not the same concrete bug or change.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:44521 is the most detailed and actively referenced report, so it is the best representative issue in this set, but it is still a distinct multimodal chat-template bug rather than a true canonical match for the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44496", + "right": "issue:45042", + "accept": false, + "reason": "Different problems: unrecognized model loading for Olmo-Hybrid vs PIL image processors incorrectly requiring torchvision." + }, + { + "left": "issue:43976", + "right": "issue:44279", + "accept": false, + "reason": "One is a Python-version compatibility regression; the other is a vague dependency issue with no clear shared code path." + }, + { + "left": "issue:43299", + "right": "issue:45357", + "accept": false, + "reason": "Different Qwen regressions: loading Qwen3VL MoE models vs incorrect save_pretrained visual encoder keys for Qwen3.5." + }, + { + "left": "issue:43526", + "right": "issue:43901", + "accept": false, + "reason": "BEiT image processor label reduction bug is unrelated to TextClassificationPipeline documentation/behavior mismatch." + }, + { + "left": "issue:44242", + "right": "issue:45200", + "accept": false, + "reason": "Load-balancing loss omission in MoE routing is unrelated to Gemma 4 mm_token_type_ids defaulting for fine-tuning." + }, + { + "left": "issue:44521", + "right": "issue:45325", + "accept": false, + "reason": "apply_chat_template assistant mask failure is unrelated to Qwen2.5-VL rope-index scaling for still-image positions." 
+ }, + { + "left": "issue:43577", + "right": "issue:45216", + "accept": false, + "reason": "BLIP2 dtype loading issue is unrelated to the Qwen3.5 save_pretrained checkpoint regression." + }, + { + "left": "issue:43901", + "right": "issue:44496", + "accept": false, + "reason": "Docs/behavior mismatch for return_all_scores is not the same bug as failing to load an unrecognized model." + }, + { + "left": "issue:43854", + "right": "issue:44964", + "accept": false, + "reason": "Both are model-loading failures, but they involve different model families and likely different root causes." + }, + { + "left": "issue:44351", + "right": "issue:45278", + "accept": false, + "reason": "Missing HybridCache import is a specific symbol regression; the other is a broad import-error report and not the same concrete issue." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 88, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:38175", + "issue:41950", + "issue:43479", + "issue:43525", + "issue:43650", + "issue:43976", + "issue:44279", + "issue:44297", + "issue:44462", + "issue:44464", + "issue:44496", + "issue:44617", + "issue:44779", + "issue:44877", + "issue:45020", + "issue:45081", + "issue:45245", + "issue:45405" + ], + "soft_pairs": [ + "issue:44464|issue:45245", + "issue:44462|issue:45081", + "issue:41950|issue:45020", + "issue:41950|issue:43650", + "issue:43976|issue:45405", + "issue:44297|issue:44462", + "issue:44279|issue:45405", + "issue:43479|issue:43525", + "issue:44496|issue:44877", + "issue:44462|issue:44779", + "issue:38175|issue:44617" + ], + "prepared_review_unit_hash": 
"924cc7077fe6decc1f88e15eebd1b6c553a4b14223e2bcdef62c05a9649b2354", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13621, + "estimated_input_tokens": 3406, + "estimated_eval_tokens": 7068 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is highly heterogeneous: the soft-similarity pairs share broad themes (tokenizers, configs, regressions) but not the same concrete bug or code path. I do not see any true duplicate pairs. If forced to pick a representative issue, #45020 is the broadest and most substantive report, but it is not a real canonical duplicate anchor.", + "confidence": 0.91, + "canonical_issue_reason": "No single issue cleanly represents a duplicate set here; the items span unrelated bugs.", + "canonical_pr_reason": null, + "best_issue_reason": "#45020 is the most broadly scoped and actionable issue in the group, though the cluster is too mixed for a true canonical duplicate.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44464", + "right": "issue:45245", + "accept": false, + "reason": "Different bugs: chunked generation/compiled forward versus a categories-cardinality runtime error." + }, + { + "left": "issue:44462", + "right": "issue:45081", + "accept": false, + "reason": "Both involve tokenizer loading, but one is about repository tokenizer.json selection and the other is a Mistral regex patch crash." + }, + { + "left": "issue:41950", + "right": "issue:45020", + "accept": false, + "reason": "Different subsystems and failures: video-classification processor lookup versus remote_code model loading regressions." 
+ }, + { + "left": "issue:41950", + "right": "issue:43650", + "accept": false, + "reason": "No shared bug path; pipeline processor selection is unrelated to the vague data issue." + }, + { + "left": "issue:43976", + "right": "issue:45405", + "accept": false, + "reason": "A Python version compatibility problem is unrelated to a PEFT minimum-version bump." + }, + { + "left": "issue:44297", + "right": "issue:44462", + "accept": false, + "reason": "Both are tokenizer-related, but one is save_pretrained metadata mismatch and the other is AutoTokenizer ignoring tokenizer.json." + }, + { + "left": "issue:44279", + "right": "issue:45405", + "accept": false, + "reason": "General dependency trouble does not match the specific unreleased MIN_PEFT_VERSION issue." + }, + { + "left": "issue:43479", + "right": "issue:43525", + "accept": false, + "reason": "Different config classes and failures: default multimodal config initialization versus missing pad_token_id on Llama4Config." + }, + { + "left": "issue:44496", + "right": "issue:44877", + "accept": false, + "reason": "Both are config-loading problems, but they stem from different validation paths and model families." + }, + { + "left": "issue:44462", + "right": "issue:44779", + "accept": false, + "reason": "Both mention tokenizers, but the symptoms and likely code paths differ: repo tokenizer selection versus Deepseek v5 behavior regression." + }, + { + "left": "issue:38175", + "right": "issue:44617", + "accept": false, + "reason": "Unrelated model inference issues: zero probabilities on SigLIP2 versus CUDA OOM in Sam3Video." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 89, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:39692", + "issue:42994", + "issue:43295", + "issue:43299", + "issue:43317", + "issue:43441", + "issue:43450", + "issue:43550", + "issue:43723", + "issue:43854", + "issue:43901", + "issue:44016", + "issue:44162", + "issue:44279", + "issue:44291", + "issue:44683", + "issue:44779", + "issue:44964" + ], + "soft_pairs": [ + "issue:43550|issue:44964", + "issue:43299|issue:43854", + "issue:43295|issue:44779", + "issue:39692|issue:43901", + "issue:43441|issue:44683", + "issue:43450|issue:44279", + "issue:44016|issue:44291", + "issue:42994|issue:43317", + "issue:43723|issue:44162" + ], + "prepared_review_unit_hash": "f0616d117b8a08c5b08c9dd1525b99108154a429e02d0e6a5e03a6ebf01e15ba", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13028, + "estimated_input_tokens": 3257, + "estimated_eval_tokens": 6770 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are not duplicates; they span separate regressions and unrelated subsystems (model loading, tokenizer behavior, attention kernels, quantization, docs, and notebook syntax). 
The cluster should be split rather than merged.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue: the set covers distinct bugs across different models and code paths, so none can represent the whole cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "No global best issue for deduplication; each item is a separate problem with no clear superseding report.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43550", + "right": "issue:44964", + "accept": false, + "reason": "Both are model-loading failures, but they involve different models and different failure modes (torch.compile+SDPA vs latest-transformers loadability)." + }, + { + "left": "issue:43299", + "right": "issue:43854", + "accept": false, + "reason": "These are separate model-loading bugs for different architectures and likely different code paths; similarity is only at the broad 'load fails' level." + }, + { + "left": "issue:43295", + "right": "issue:44779", + "accept": false, + "reason": "One is a processor/tokenizer API regression, the other is incorrect tokenizer output for Deepseek; related area, but not the same bug." + }, + { + "left": "issue:39692", + "right": "issue:43901", + "accept": false, + "reason": "Both mention docs, but one is a SigLIP2 example with model/processor and quantization errors, while the other is a pipeline docs mismatch about return_all_scores." + }, + { + "left": "issue:43441", + "right": "issue:44683", + "accept": false, + "reason": "Different attention implementations and different failure conditions; not the same concrete code-path problem." + }, + { + "left": "issue:43450", + "right": "issue:44279", + "accept": false, + "reason": "Video processor batching shape bug is unrelated to a generic dependency issue." + }, + { + "left": "issue:44016", + "right": "issue:44291", + "accept": false, + "reason": "A notebook syntax error and an init_empty_weights TypeError are unrelated defects." 
+ }, + { + "left": "issue:42994", + "right": "issue:43317", + "accept": false, + "reason": "Quantized model saving failure and dequantized model loading with device_map=auto are different quantization lifecycle problems." + }, + { + "left": "issue:43723", + "right": "issue:44162", + "accept": false, + "reason": "Tokenizer loading regression and ESM2 model breakage are different issues; no shared concrete bug is evident." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 90, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43065", + "issue:43122", + "issue:43262", + "issue:43650", + "issue:43673", + "issue:43761", + "issue:44261", + "issue:44263", + "issue:44466", + "issue:44521", + "issue:44623", + "issue:44671", + "issue:44749", + "issue:44779", + "issue:44898", + "issue:45276", + "issue:45290", + "issue:45397" + ], + "soft_pairs": [ + "issue:43650|issue:45290", + "issue:43065|issue:44898", + "issue:44623|issue:45397", + "issue:44261|issue:44263", + "issue:44466|issue:45276", + "issue:43262|issue:44521", + "issue:44623|issue:44749", + "issue:43122|issue:43650", + "issue:44671|issue:44779", + "issue:43673|issue:43761" + ], + "prepared_review_unit_hash": "815fdafaa49d424efe92dec924cffafd4552143132586b4f13d93a010ceb30f1", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13452, + "estimated_input_tokens": 3363, + "estimated_eval_tokens": 
6982 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No convincing duplicate pairs here: the soft links mostly share broad keywords (e.g. apply_chat_template, v5 regressions, serialization) but point to different bugs and code paths. I\u2019d anchor the issue side on the most concrete, active bug report.", + "confidence": 0.86, + "canonical_issue_reason": "Issue 44521 is the strongest representative: it is a specific, reproducible bug in apply_chat_template for multimodal inputs, is still open, and has the most cluster activity/inbound references among the candidates.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44521 looks best suited as the canonical issue because it is the most concrete and well-scoped report with active discussion and references; the other issues are either closed, narrower, or clearly unrelated.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43650", + "right": "issue:45290", + "accept": false, + "reason": "Completely different problems: a vague data request vs a crash in apply_chat_template with tool-call assistant messages. No shared code path." + }, + { + "left": "issue:43065", + "right": "issue:44898", + "accept": false, + "reason": "Different models and failures: dummy Conv2d in Sam3PixelDecoder vs Perceiver image classification at non-default resolution." + }, + { + "left": "issue:44623", + "right": "issue:45397", + "accept": false, + "reason": "processor.save_pretrained missing files is unrelated to gemma-4 Zero3 from_pretrained behavior." + }, + { + "left": "issue:44261", + "right": "issue:44263", + "accept": false, + "reason": "MLA q_a_layernorm epsilon handling and GlmMoeDsaIndexer torch.split return values are unrelated bugs." 
+ }, + { + "left": "issue:44466", + "right": "issue:45276", + "accept": false, + "reason": "One is lm_head.weight serialization/tied weights behavior; the other is resize_token_embeddings not updating Gemma4 embeddings. Different fixes." + }, + { + "left": "issue:43262", + "right": "issue:44521", + "accept": false, + "reason": "Both involve apply_chat_template, but one is about audio sampling-rate defaults and the other about assistant_masks for multimodal inputs. Not the same bug." + }, + { + "left": "issue:44623", + "right": "issue:44749", + "accept": false, + "reason": "save_pretrained file output vs tokenizer/filtering performance regression are unrelated." + }, + { + "left": "issue:43122", + "right": "issue:43650", + "accept": false, + "reason": "Different tokenizer regression vs a vague request to add data; no shared underlying issue." + }, + { + "left": "issue:44671", + "right": "issue:44779", + "accept": false, + "reason": "CamemBERT masked-LM prediction regression and Deepseek tokenizer incorrect results are separate model/tokenizer bugs." + }, + { + "left": "issue:43673", + "right": "issue:43761", + "accept": false, + "reason": "GenerationMixin chunked_prefill cache handling and CLIPVisionModel hidden_states output are unrelated code paths and behaviors." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 91, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42175", + "issue:43295", + "issue:43352", + "issue:43475", + "issue:43525", + "issue:43824", + "issue:43881", + "issue:43901", + "issue:43976", + "issue:43994", + "issue:44016", + "issue:44246", + "issue:44297", + "issue:44589", + "issue:44617", + "issue:45250", + "issue:45362", + "issue:45397" + ], + "soft_pairs": [ + "issue:44297|issue:45397", + "issue:43352|issue:45250", + "issue:42175|issue:43976", + "issue:43901|issue:43994", + "issue:43295|issue:44617", + "issue:43475|issue:44016", + "issue:43824|issue:43976", + "issue:44246|issue:44617", + "issue:43525|issue:43881", + "issue:44589|issue:45362", + "issue:43901|issue:44246" + ], + "prepared_review_unit_hash": "928221e4dec0d3644de1140fc7ce213208583deed3fc79e7f4d3a86e80627bdc", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13481, + "estimated_input_tokens": 3371, + "estimated_eval_tokens": 6998 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The items are mostly unrelated individual bugs/regressions that only share broad Transformer/model-loading vocabulary. None of the soft-edge pairs looks like the same concrete underlying issue, so all should be rejected. 
If a representative issue is needed, issue 44246 is the broadest/highest-engagement one, but this is not a strong duplicate cluster.", + "confidence": 0.92, + "canonical_issue_reason": "Issue 44246 is the most general and highest-engagement item in the set, so it is the closest thing to a cluster anchor, though the cluster itself is heterogeneous.", + "canonical_pr_reason": null, + "best_issue_reason": "44246 is the best single representative by breadth and discussion activity, but it does not genuinely subsume the other issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44297", + "right": "issue:45397", + "accept": false, + "reason": "Tokenizer save_pretrained metadata mismatch and Gemma-4 zero3 from_pretrained are unrelated failure modes." + }, + { + "left": "issue:43352", + "right": "issue:45250", + "accept": false, + "reason": "Both mention Flash Attention 2.0, but one is a specific model-not-supported error and the other is a generic FA2 issue; not the same bug." + }, + { + "left": "issue:42175", + "right": "issue:43976", + "accept": false, + "reason": "Backend dependency installation for TensorFlow and Python version compatibility are different problems." + }, + { + "left": "issue:43901", + "right": "issue:43994", + "accept": false, + "reason": "Docs mismatch for return_all_scores and incorrect SigLIP2 inference behavior are distinct pipeline issues." + }, + { + "left": "issue:43295", + "right": "issue:44617", + "accept": false, + "reason": "Processor/tokenizer regression and CUDA out-of-memory in Sam3Video are unrelated." + }, + { + "left": "issue:43475", + "right": "issue:44016", + "accept": false, + "reason": "Missing SAM3 encoder attribute and notebook syntax error are clearly different issues." + }, + { + "left": "issue:43824", + "right": "issue:43976", + "accept": false, + "reason": "Missing Qwen2.5-VL import and Python version incompatibility do not share the same underlying fix." 
+ }, + { + "left": "issue:44246", + "right": "issue:44617", + "accept": false, + "reason": "Slow import performance and video model OOM are not the same code-path problem." + }, + { + "left": "issue:43525", + "right": "issue:43881", + "accept": false, + "reason": "Llama4Config missing pad_token_id and glm-4v-9b loading failure are separate model/config errors." + }, + { + "left": "issue:44589", + "right": "issue:45362", + "accept": false, + "reason": "Float8 storage deserialization error and Qwen3.5 chat crash are unrelated runtime failures." + }, + { + "left": "issue:43901", + "right": "issue:44246", + "accept": false, + "reason": "Documentation mismatch for pipeline output and slow package import are different concerns." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 92, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36246", + "issue:41628", + "issue:41950", + "issue:42491", + "issue:43299", + "issue:43976", + "issue:44188", + "issue:44220", + "issue:44246", + "issue:44295", + "issue:44336", + "issue:44351", + "issue:44373", + "issue:44462", + "issue:44496", + "issue:45356", + "issue:45397" + ], + "soft_pairs": [ + "issue:41950|issue:44220", + "issue:41628|issue:43976", + "issue:44188|issue:44336", + "issue:44295|issue:45397", + "issue:44462|issue:45356", + "issue:42491|issue:43299", + "issue:44336|issue:44373", + "issue:44246|issue:44496", + "issue:36246|issue:44351" + ], + "prepared_review_unit_hash": "7eda0fbcdb3cf428ddf84c786185af19a06ab171e1ab7bd86e9258d0a5111c36", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + 
"serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12539, + "estimated_input_tokens": 3135, + "estimated_eval_tokens": 6526 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the soft pairs mostly share only broad themes (imports, tokenizers, model loading, docs) but not the same concrete bug. I would not merge any of the suggested pairs as duplicates.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue fits the cluster; the items cover unrelated failures across import/export, tokenizer behavior, model loading, attention kernels, and documentation.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a good cluster-wide representative because there is no clear duplicate set here.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41950", + "right": "issue:44220", + "accept": false, + "reason": "Different failures: video-classification pipeline image-processor lookup vs an error in _torch_extract_fbank_features(). Same general library area, but not the same bug." + }, + { + "left": "issue:41628", + "right": "issue:43976", + "accept": false, + "reason": "ImportError for AutoImageProcessor is unrelated to the Python 3.9/3.10 compatibility break." + }, + { + "left": "issue:44188", + "right": "issue:44336", + "accept": false, + "reason": "Attention-kernel divergence under torch.compile is unrelated to ANSI codes emitted by loading_report." + }, + { + "left": "issue:44295", + "right": "issue:45397", + "accept": false, + "reason": "Position_ids buffer handling and gemma-4 zero3 from_pretrained are different model-loading problems." 
+ }, + { + "left": "issue:44462", + "right": "issue:45356", + "accept": false, + "reason": "Both involve tokenizers, but one is about ignoring tokenizer.json and the other about a specific Kimi-K2.5 codec/regression; not the same underlying change." + }, + { + "left": "issue:42491", + "right": "issue:43299", + "accept": false, + "reason": "Both mention Qwen/MoE loading, but one is a LoRA compatibility issue across branches and the other is a Qwen3VL MoE loading regression; too different to treat as one bug." + }, + { + "left": "issue:44336", + "right": "issue:44373", + "accept": false, + "reason": "Loading_report ANSI output and a wrong docstring for position_ids are unrelated." + }, + { + "left": "issue:44246", + "right": "issue:44496", + "accept": false, + "reason": "Slow import timing is unrelated to the Olmo-Hybrid model config/model_type ValueError." + }, + { + "left": "issue:36246", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but for different exported symbols and likely different causes; not the same concrete failure." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 93, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36246", + "issue:43526", + "issue:43531", + "issue:43596", + "issue:43673", + "issue:43994", + "issue:44162", + "issue:44242", + "issue:44295", + "issue:44336", + "issue:44485", + "issue:44561", + "issue:44749", + "issue:44857", + "issue:45003", + "issue:45200", + "issue:45276", + "issue:45362" + ], + "soft_pairs": [ + "issue:43531|issue:45362", + "issue:43526|issue:45003", + "issue:43994|issue:45276", + "issue:43673|issue:44561", + "issue:44242|issue:44857", + "issue:44336|issue:44485", + "issue:36246|issue:43596", + "issue:44162|issue:45200", + "issue:44295|issue:44749" + ], + "prepared_review_unit_hash": "252a0dd444b87ca5868ca436884bb6de676ac84ca413e79c586e5423d41f78ad", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13195, + "estimated_input_tokens": 3299, + "estimated_eval_tokens": 6854 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster does not look like a true duplicate set: the issues span unrelated models, subsystems, and failure modes. 
None of the soft pairs appear to share the same underlying bug or change.", + "confidence": 0.97, + "canonical_issue_reason": "issue:45003 is the best single representative if one must be chosen: it is open, broadly applicable, and describes a concrete framework-level bug in modeling_utils rather than a narrow model-specific report.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45003 is the strongest issue to anchor the cluster because it is actionable and systemic, but the cluster overall is too heterogeneous to justify a real duplicate canonical.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43531", + "right": "issue:45362", + "accept": false, + "reason": "Both mention Qwen, but one is a sliding_window behavior report for Qwen3-MoE and the other is a chat crash for Qwen3.5-35B; different symptom and likely different code paths." + }, + { + "left": "issue:43526", + "right": "issue:45003", + "accept": false, + "reason": "BEiT image label reduction and unsafe sys.modules access in modeling_utils are unrelated bugs in different components." + }, + { + "left": "issue:43994", + "right": "issue:45276", + "accept": false, + "reason": "SigLIP2 AutoModel/pipeline bad outputs and Gemma 4 embedding resize behavior are different model families and different failure modes." + }, + { + "left": "issue:43673", + "right": "issue:44561", + "accept": false, + "reason": "Chunked_prefill cache handling and removal of is_torch_fx_available causing trust_remote_code breakage are distinct issues with no shared concrete root cause." + }, + { + "left": "issue:44242", + "right": "issue:44857", + "accept": false, + "reason": "MoE load-balancing loss omission and LwDetrImageLoss AMP/CUDA crash are unrelated training/runtime bugs." + }, + { + "left": "issue:44336", + "right": "issue:44485", + "accept": false, + "reason": "ANSI output in loading_report and GLM-5 RoPE implementation are different subsystems and different bug classes." 
+ }, + { + "left": "issue:36246", + "right": "issue:43596", + "accept": false, + "reason": "Qwen2.5-VL import failure and BertModel deepspeed zero3 IndexError are unrelated; one is an import/export problem, the other a distributed-init runtime error." + }, + { + "left": "issue:44162", + "right": "issue:45200", + "accept": false, + "reason": "ESM2 broken workflows and Gemma 4 mm_token_type_ids defaults are separate model-specific issues with no shared code path." + }, + { + "left": "issue:44295", + "right": "issue:44749", + "accept": false, + "reason": "position_ids buffer access error and a 10x slowdown after upgrade are different regressions affecting different mechanics." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 94, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41950", + "issue:42886", + "issue:43066", + "issue:43441", + "issue:43540", + "issue:43596", + "issue:44246", + "issue:44779", + "issue:44871", + "issue:44877", + "issue:44987", + "issue:44995", + "issue:45200", + "issue:45229", + "issue:45292", + "issue:45375", + "issue:45459" + ], + "soft_pairs": [ + "issue:44987|issue:45459", + "issue:44877|issue:45375", + "issue:44246|issue:44871", + "issue:44779|issue:45292", + "issue:41950|issue:43596", + "issue:43066|issue:44779", + "issue:43441|issue:43540", + "issue:44995|issue:45200", + "issue:42886|issue:45229" + ], + "prepared_review_unit_hash": "0fb544a8d16362a2568146bd4a3a4708caaea49846965693c93ec33b28241771", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 
172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12417, + "estimated_input_tokens": 3105, + "estimated_eval_tokens": 6466 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the issues cover unrelated transformer bugs in tokenizers, configs, vision/video models, FlashAttention, cache/offline loading, embedding resizing, and OOM behavior. None of the soft pairs look like the same underlying bug/change, so no duplicates are accepted.", + "confidence": 0.97, + "canonical_issue_reason": "No clear canonical issue: the set spans unrelated bugs and code paths, so there is no single issue that reasonably represents the cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "No strong best-issue anchor either; none of the reports subsumes the others. If forced, 44779 is a concrete regression report, but it is not representative of the whole set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44987", + "right": "issue:45459", + "accept": false, + "reason": "Different failures: one is a loading failure for a specific model, the other is tokenizer error masking when protobuf is missing. Same broad area, but not the same bug." + }, + { + "left": "issue:44877", + "right": "issue:45375", + "accept": false, + "reason": "Both mention strict config handling, but they concern different models and different missing fields. Not enough evidence of one shared underlying bug." + }, + { + "left": "issue:44246", + "right": "issue:44871", + "accept": false, + "reason": "Import slowdown is unrelated to Gemma eos_token_id inconsistency; different symptom, path, and fix scope." 
+ }, + { + "left": "issue:44779", + "right": "issue:45292", + "accept": false, + "reason": "Tokenizer regression in v5 and resize_token_embeddings not updating output embeddings are separate code paths and bugs." + }, + { + "left": "issue:41950", + "right": "issue:43596", + "accept": false, + "reason": "Video-classification processor lookup and deepspeed zero3/BertModel index error are unrelated issues." + }, + { + "left": "issue:43066", + "right": "issue:44779", + "accept": false, + "reason": "Both are tokenizer-related v5 regressions, but one is a decoder-type mismatch and the other is Deepseek output corruption; not clearly the same bug." + }, + { + "left": "issue:43441", + "right": "issue:43540", + "accept": false, + "reason": "Ministral FlashAttention failure and Qwen3OmniMoe video ValueError affect different models and code paths." + }, + { + "left": "issue:44995", + "right": "issue:45200", + "accept": false, + "reason": "Stale indexer cache on second forward pass is unrelated to Gemma 4 mm_token_type_ids defaults for text-only fine-tuning." + }, + { + "left": "issue:42886", + "right": "issue:45229", + "accept": false, + "reason": "Offline tokenizer cache loading and Gemma4 multi-GPU CUDA OOM are different classes of failure with no shared underlying bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 95, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43065", + "issue:43232", + "issue:43257", + "issue:43262", + "issue:43295", + "issue:43408", + "issue:43475", + "issue:43540", + "issue:43653", + "issue:43901", + "issue:44162", + "issue:44246", + "issue:44336", + "issue:44496", + "issue:45003", + "issue:45137", + "issue:45292" + ], + "soft_pairs": [ + "issue:43901|issue:45003", + "issue:43232|issue:44336", + "issue:43065|issue:43408", + "issue:43257|issue:45137", + "issue:43262|issue:44246", + "issue:44496|issue:45003", + "issue:43295|issue:45292", + "issue:43475|issue:43540", + "issue:43653|issue:44162" + ], + "prepared_review_unit_hash": "34c847b83244fa2f7658d320dd1044aea2cb05fc9d603a27b27ac40049897f36", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12552, + "estimated_input_tokens": 3138, + "estimated_eval_tokens": 6532 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are largely unrelated and should not be deduplicated. 
The apparent overlaps are only at the subsystem level (SAM3, DeepSpeed, tokenizer/modeling internals), not the same concrete bug.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43901", + "right": "issue:45003", + "accept": false, + "reason": "Docs mismatch for TextClassificationPipeline vs unsafe sys.modules access in modeling_utils; different code paths and fixes." + }, + { + "left": "issue:43232", + "right": "issue:44336", + "accept": false, + "reason": "Generation kwarg update after sync_gpus is unrelated to ANSI output in loading_report." + }, + { + "left": "issue:43065", + "right": "issue:43408", + "accept": false, + "reason": "Both mention SAM3, but one is about a dummy Conv2d in the pixel decoder and the other about a model-type mismatch warning; not the same bug." + }, + { + "left": "issue:43257", + "right": "issue:45137", + "accept": false, + "reason": "Both involve DeepSpeed, but one is about Qwen3 MOE weight conversion and the other an empty-deque IndexError in ZeRO3; different failures." + }, + { + "left": "issue:43262", + "right": "issue:44246", + "accept": false, + "reason": "Audio sampling-rate defaulting in apply_chat_template is unrelated to intermittent slow import time." + }, + { + "left": "issue:44496", + "right": "issue:45003", + "accept": false, + "reason": "Unrecognized model/model_type handling is a distinct issue from unsafe sys.modules access." + }, + { + "left": "issue:43295", + "right": "issue:45292", + "accept": false, + "reason": "Processor tokenizer regression with images is unrelated to resize_token_embeddings not updating output embeddings." + }, + { + "left": "issue:43475", + "right": "issue:43540", + "accept": false, + "reason": "SAM3 vision encoder missing attribute and Qwen3OmniMoe video processing error are different models and different failure modes." 
+ }, + { + "left": "issue:43653", + "right": "issue:44162", + "accept": false, + "reason": "BigBirdTokenizer special-token registration bug is unrelated to ESM2 breakage." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 96, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43232", + "issue:43441", + "issue:43606", + "issue:43673", + "issue:43931", + "issue:44060", + "issue:44077", + "issue:44297", + "issue:44368", + "issue:44466", + "issue:44623", + "issue:44734", + "issue:44869", + "issue:44933", + "issue:44977", + "issue:45335", + "issue:45356", + "issue:45459" + ], + "soft_pairs": [ + "issue:44060|issue:44077", + "issue:44466|issue:45335", + "issue:44933|issue:45459", + "issue:44368|issue:44977", + "issue:43673|issue:44734", + "issue:43441|issue:43606", + "issue:44869|issue:45356", + "issue:43232|issue:43673", + "issue:43931|issue:44368", + "issue:44297|issue:44623" + ], + "prepared_review_unit_hash": "c97f423679e41ab7ab934f76ee4cb3d70d37205fcf4c5b12c31a5fdf092d4004", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13604, + "estimated_input_tokens": 3401, + "estimated_eval_tokens": 7058 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No soft pair looks like the same underlying bug/change; the issues span unrelated models, 
subsystems, and failure modes. I\u2019d avoid merging them into a single duplicate cluster.", + "confidence": 0.84, + "canonical_issue_reason": "No single issue cleanly covers the whole set. If one must be chosen as an anchor, issue 44466 is the broadest, most clearly scoped v5 regression report with a concrete serialization symptom and version context.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44466 is the best representative issue overall because it is specific, reproducible, and framed as a regression, whereas the others are narrower model- or edge-case reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44060", + "right": "issue:44077", + "accept": false, + "reason": "Different models and different bugs: one is a tied-weights warning on Qwen3-Next, the other is an invalid optional post_init allowance for patchtsmixer." + }, + { + "left": "issue:44466", + "right": "issue:45335", + "accept": false, + "reason": "Both involve weight tying/serialization, but the concrete problems differ: inconsistent lm_head serialization by device vs resize_token_embeddings not updating decoder.embed_tokens." + }, + { + "left": "issue:44933", + "right": "issue:45459", + "accept": false, + "reason": "Both are tokenizer-related, but one is a nonexistent import from image_utils while the other is swallowed protobuf decode errors; different failure paths." + }, + { + "left": "issue:44368", + "right": "issue:44977", + "accept": false, + "reason": "Same model family and flash-attention context, but one is a tied-embeddings warning during LoRA fine-tuning and the other is a generation failure; not the same bug." + }, + { + "left": "issue:43673", + "right": "issue:44734", + "accept": false, + "reason": "Both mention generation cache, but one reports missing cache in chunked_prefill while the other is a KV-cache continuation crash from wrong tensor indexing in serve." 
+ }, + { + "left": "issue:43441", + "right": "issue:43606", + "accept": false, + "reason": "Both are model-specific runtime failures, but the concrete causes are unrelated: FlashAttention failure on Ministral-3 vs CPU offload device mismatch on bark-small." + }, + { + "left": "issue:44869", + "right": "issue:45356", + "accept": false, + "reason": "Both touch tokenizer/decoding behavior, but one is a Whisper timestamp decode crash on a trailing replacement character and the other is a Kimi-K2.5 codec regression with a misleading warning." + }, + { + "left": "issue:43232", + "right": "issue:43673", + "accept": false, + "reason": "Both are generation-related, but the reported bugs differ: after sync_gpus model kwargs update vs missing cache during chunked_prefill." + }, + { + "left": "issue:43931", + "right": "issue:44368", + "accept": false, + "reason": "Different symptoms and code paths: Qwen3-VL weight-shape mismatch on load versus a tie_word_embeddings warning during Qwen3.5 LoRA fine-tuning." + }, + { + "left": "issue:44297", + "right": "issue:44623", + "accept": false, + "reason": "Both concern save_pretrained, but tokenizer_class mismatch in tokenizer_config.json is not the same as processor.save_pretrained omitting files." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 97, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:28282", + "issue:30990", + "issue:42673", + "issue:43381", + "issue:43646", + "issue:43673", + "issue:43704", + "issue:44038", + "issue:44162", + "issue:44361", + "issue:44623", + "issue:44749", + "issue:45230", + "issue:45278", + "issue:45292", + "issue:45310", + "issue:45335", + "issue:45397" + ], + "soft_pairs": [ + "issue:43673|issue:45278", + "issue:43646|issue:44162", + "issue:43381|issue:45292", + "issue:42673|issue:43704", + "issue:44361|issue:44749", + "issue:43381|issue:45335", + "issue:28282|issue:30990", + "issue:45230|issue:45397", + "issue:44749|issue:45310", + "issue:44038|issue:44623" + ], + "prepared_review_unit_hash": "6d99a3b5e15591286041229acfd807ce70e83945549fdee3444261ec0817ad85", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13091, + "estimated_input_tokens": 3273, + "estimated_eval_tokens": 6802 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Only one soft duplicate pair is convincing: the two Qwen3ForCausalLM VRAM leak reports are the same bug with the same title. 
The rest are different failure modes, models, or performance/regression reports and should stay separate.", + "confidence": 0.93, + "canonical_issue_reason": "issue:42673 is the better canonical issue because it is earlier and has the same specific title as its duplicate, making it the cleanest representative of that bug.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:42673 is the strongest issue choice overall: it is specific, clearly describes the underlying bug, and has a direct duplicate with matching wording.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43673", + "right": "issue:45278", + "accept": false, + "reason": "Both mention v5-era breakage, but one is a chunked_prefill cache issue and the other is broad import errors after upgrade; not the same bug." + }, + { + "left": "issue:43646", + "right": "issue:44162", + "accept": false, + "reason": "Custom model initialization regression and ESM2 breakage are different problem reports; no shared concrete failure path is evident." + }, + { + "left": "issue:43381", + "right": "issue:45292", + "accept": false, + "reason": "Gradient checkpointing in eval mode is unrelated to resize_token_embeddings not updating output embeddings." + }, + { + "left": "issue:42673", + "right": "issue:43704", + "accept": true, + "reason": "Exact same title and same VRAM leak scenario for Qwen3ForCausalLM in multiple dataloader threads; these are duplicates." + }, + { + "left": "issue:44361", + "right": "issue:44749", + "accept": false, + "reason": "MLukeTokenizer AttributeError and a transformer upgrade performance slowdown are unrelated issues." + }, + { + "left": "issue:43381", + "right": "issue:45335", + "accept": false, + "reason": "Eval-mode gradient checkpointing limitation is a different bug from t5gemma embedding resize propagation." 
+ }, + { + "left": "issue:28282", + "right": "issue:30990", + "accept": false, + "reason": "PyTorch import-missing error and Sentence Transformers loading hang are distinct runtime problems." + }, + { + "left": "issue:45230", + "right": "issue:45397", + "accept": false, + "reason": "A generic bug report and a gemma-4 zero3 from_pretrained issue do not indicate the same concrete bug." + }, + { + "left": "issue:44749", + "right": "issue:45310", + "accept": false, + "reason": "Data filtering slowdown and Qwen3.5-Moe from_pretrained error are different symptoms and code paths." + }, + { + "left": "issue:44038", + "right": "issue:44623", + "accept": false, + "reason": "Qwen3-VL-Moe/transformers 5.0 compatibility and processor.save_pretrained missing files are separate issues." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 98, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36246", + "issue:41628", + "issue:42371", + "issue:42994", + "issue:43475", + "issue:43525", + "issue:43643", + "issue:43701", + "issue:43824", + "issue:44077", + "issue:44749", + "issue:44933", + "issue:45003", + "issue:45081", + "issue:45092", + "issue:45103", + "issue:45381" + ], + "soft_pairs": [ + "issue:36246|issue:43824", + "issue:44077|issue:45003", + "issue:42371|issue:45092", + "issue:43475|issue:45381", + "issue:43701|issue:44749", + "issue:45081|issue:45103", + "issue:41628|issue:44933", + "issue:43475|issue:43643", + "issue:42994|issue:43525" + ], + "prepared_review_unit_hash": "9fcb8a7887327f74bc5c2f6eb0717c7c86ea31bdd364339b7650628cf56850f9", + "component_budget": { + "node_count": 359, + 
"item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12731, + "estimated_input_tokens": 3183, + "estimated_eval_tokens": 6622 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is mostly heterogeneous; the only clear duplicate-like pair is the two Qwen2.5-VL import/export errors. The rest appear to be unrelated bugs across different subsystems.", + "confidence": 0.68, + "canonical_issue_reason": "No single canonical issue cleanly covers the whole set. Among the related subset, issue 43824 is the strongest representative because it is concrete, widely referenced, and matches the same Qwen2.5-VL import/export failure family as 36246.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43824 is the best overall issue candidate: it has the most inbound references, a specific failure mode, and the clearest duplicate-family match within the set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:36246", + "right": "issue:43824", + "accept": true, + "reason": "Both report import errors for Qwen2.5-VL symbols from the same transformers module family, suggesting the same underlying export/re-export bug." + }, + { + "left": "issue:44077", + "right": "issue:45003", + "accept": false, + "reason": "Different problems: one is a patchtsmixer API/post_init validation issue, the other is unsafe sys.modules access in modeling_utils." + }, + { + "left": "issue:42371", + "right": "issue:45092", + "accept": false, + "reason": "Unrelated: TF32 settings behavior vs remote-code checkpoint/meta-init compatibility." 
+ }, + { + "left": "issue:43475", + "right": "issue:45381", + "accept": false, + "reason": "Different model-specific bugs: missing fpn_position_embeddings in SAM 3 video vs wrong vision_position_ids in qwen2.5-vl video input." + }, + { + "left": "issue:43701", + "right": "issue:44749", + "accept": false, + "reason": "Unrelated: resume_from_checkpoint key mismatch vs performance regression in filtering after a Transformers upgrade." + }, + { + "left": "issue:45081", + "right": "issue:45103", + "accept": false, + "reason": "Different failure modes: tokenizer backend_tokenizer crash vs auto_docstring annotation handling crash." + }, + { + "left": "issue:41628", + "right": "issue:44933", + "accept": false, + "reason": "Both are import-related, but the titles point to different symbols and likely different root causes; not enough evidence they are the same bug." + }, + { + "left": "issue:43475", + "right": "issue:43643", + "accept": false, + "reason": "Different issues: missing SAM 3 video output attribute vs trust_remote_code returning incomplete config fields." + }, + { + "left": "issue:42994", + "right": "issue:43525", + "accept": false, + "reason": "Different bugs: quantized model saving failure vs Llama4Config missing pad_token_id." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 99, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41628", + "issue:42222", + "issue:42831", + "issue:43232", + "issue:43475", + "issue:43873", + "issue:44188", + "issue:44222", + "issue:44351", + "issue:44661", + "issue:44945", + "issue:45200", + "issue:45310", + "issue:45362", + "issue:45375", + "issue:45397", + "issue:45478" + ], + "soft_pairs": [ + "issue:42222|issue:45200", + "issue:45375|issue:45478", + "issue:45310|issue:45375", + "issue:43873|issue:44945", + "issue:43475|issue:44661", + "issue:45310|issue:45397", + "issue:42831|issue:44222", + "issue:41628|issue:44351", + "issue:43232|issue:44188", + "issue:45362|issue:45375", + "issue:44222|issue:45310" + ], + "prepared_review_unit_hash": "9a01b7fbed3d8145b11e8c07415a230ebf72c7a9fab604ea7e694f5f54ae2d06", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 11, + "serialized_chars": 12890, + "estimated_input_tokens": 3223, + "estimated_eval_tokens": 6702 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly unrelated issue titles; the only clear duplicate-like thread is the Qwen3.5 MoE from_pretrained error, with 45478 the best current tracker. 
The rest are separate bugs in different subsystems and should not be merged by title similarity alone.", + "confidence": 0.69, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:45478 is the freshest open report for the Qwen3.5 MoE from_pretrained regression and matches the exact title of the recurring bug thread better than the closed duplicate-style report.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42222", + "right": "issue:45200", + "accept": false, + "reason": "Different models and failure modes: ViTPose import breakage vs Gemma 4 mm_token_type_ids defaulting. Same broad area only at a very high level." + }, + { + "left": "issue:45375", + "right": "issue:45478", + "accept": false, + "reason": "Both mention Qwen3.5, but one is a missing vision-config field under strict parsing and the other is a generic from_pretrained error; not enough evidence they are the same bug." + }, + { + "left": "issue:45310", + "right": "issue:45375", + "accept": false, + "reason": "Possible relation through Qwen3.5/MoE config handling, but the reports describe different concrete failures and code paths." + }, + { + "left": "issue:43873", + "right": "issue:44945", + "accept": false, + "reason": "Offloading/quantization behavior and pipeline-parallel output divergence are different runtime issues." + }, + { + "left": "issue:43475", + "right": "issue:44661", + "accept": false, + "reason": "SAM 3 Vision encoder attribute error and add-new-model-like tokenizer mapping failure are unrelated code paths." + }, + { + "left": "issue:45310", + "right": "issue:45397", + "accept": false, + "reason": "Same general family of Transformers model loading errors, but Qwen3.5 MoE and Gemma 4 zero3 are distinct bugs." + }, + { + "left": "issue:42831", + "right": "issue:44222", + "accept": false, + "reason": "Both involve FP8, but one is an accuracy regression and the other is a save_pretrained bug; different concrete problems." 
+ }, + { + "left": "issue:41628", + "right": "issue:44351", + "accept": false, + "reason": "Both are import-name errors from transformers, but they concern different symbols and likely different missing exports." + }, + { + "left": "issue:43232", + "right": "issue:44188", + "accept": false, + "reason": "Generation kwargs update after sync_gpus and attention-kernel divergence under torch.compile are separate issues." + }, + { + "left": "issue:45362", + "right": "issue:45375", + "accept": false, + "reason": "Qwen3.5 chat crash and Qwen3.5MoEVisionConfig strict-field handling do not describe the same failure." + }, + { + "left": "issue:44222", + "right": "issue:45310", + "accept": false, + "reason": "FP8 MoE save_pretrained and Qwen3.5 MoE from_pretrained are different operations with different failure surfaces." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 100, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:30990", + "issue:35141", + "issue:36010", + "issue:41628", + "issue:42994", + "issue:43232", + "issue:43377", + "issue:43479", + "issue:43673", + "issue:43824", + "issue:44038", + "issue:44079", + "issue:44188", + "issue:44242", + "issue:44792", + "issue:44869", + "issue:45310" + ], + "soft_pairs": [ + "issue:42994|issue:43824", + "issue:44038|issue:45310", + "issue:36010|issue:41628", + "issue:43377|issue:44242", + "issue:43232|issue:44792", + "issue:30990|issue:35141", + "issue:43673|issue:44188", + "issue:43479|issue:44079", + "issue:43479|issue:44869" + ], + "prepared_review_unit_hash": "28753463ae417617de59f57dbbbe132ac52de8d54a224897a4e8fab8929cb185", + 
"component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12482, + "estimated_input_tokens": 3121, + "estimated_eval_tokens": 6498 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No soft duplicate edges are convincing here; the items are mostly unrelated issues across different subsystems and failure modes. The cluster should not be merged into a single canonical bug.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:43824 is the clearest standalone report: a specific import failure with a concrete symbol and module path, making it the strongest representative issue in this otherwise heterogeneous set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42994", + "right": "issue:43824", + "accept": false, + "reason": "Both are import/save-related regressions, but one is about quantized model saving and the other is an ImportError for a missing model class; different code paths and fixes." + }, + { + "left": "issue:44038", + "right": "issue:45310", + "accept": false, + "reason": "Both mention Qwen MoE and from_pretrained/import problems, but they target different model variants and failure modes; not the same concrete bug." + }, + { + "left": "issue:36010", + "right": "issue:41628", + "accept": false, + "reason": "Both are import errors, but for different symbols (`GenerationMixin` vs `AutoImageProcessor`) and different modules; too broad to be duplicates." 
+ }, + { + "left": "issue:43377", + "right": "issue:44242", + "accept": false, + "reason": "One is a MIMI encoder batching/padding-mask correctness bug; the other is an MoE load-balancing loss omission. Different functionality and symptoms." + }, + { + "left": "issue:43232", + "right": "issue:44792", + "accept": false, + "reason": "Generation cache/update behavior and a Janus image-generation test failure are not the same underlying defect." + }, + { + "left": "issue:30990", + "right": "issue:35141", + "accept": false, + "reason": "Sentence-transformers loading hang is unrelated to embedding resize/post-init reinitialization." + }, + { + "left": "issue:43673", + "right": "issue:44188", + "accept": false, + "reason": "Both touch generation behavior, but one is about missing cache support in chunked prefill and the other is a torch.compile attention-kernel branching divergence; different bugs." + }, + { + "left": "issue:43479", + "right": "issue:44079", + "accept": false, + "reason": "A config initialization bug for multimodal defaults is unrelated to ModelOutput key assignment behavior." + }, + { + "left": "issue:43479", + "right": "issue:44869", + "accept": false, + "reason": "Multimodal config defaulting and Whisper timestamp decoding crash are different components with no shared concrete failure path." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 101, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42175", + "issue:42898", + "issue:43278", + "issue:43784", + "issue:43824", + "issue:43976", + "issue:44060", + "issue:44188", + "issue:44373", + "issue:44568", + "issue:44623", + "issue:44734", + "issue:44933", + "issue:44964", + "issue:45103", + "issue:45125", + "issue:45137", + "issue:45310" + ], + "soft_pairs": [ + "issue:42898|issue:44568", + "issue:42175|issue:44933", + "issue:43784|issue:43824", + "issue:43976|issue:44933", + "issue:44964|issue:45137", + "issue:44188|issue:44373", + "issue:44623|issue:45310", + "issue:43278|issue:45137", + "issue:44734|issue:45103", + "issue:44060|issue:45125" + ], + "prepared_review_unit_hash": "a22d403a284a88c297969817e132ff9ffea506ad68b945dc57753057001be682", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13423, + "estimated_input_tokens": 3356, + "estimated_eval_tokens": 6968 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a true duplicate set: it mixes unrelated tokenizer, packaging, import, model-loading, serving, training, docs, and tensor-parallelism issues. 
All soft-similarity pairs are superficial and should be rejected.", + "confidence": 0.93, + "canonical_issue_reason": "No canonical issue: the items describe different bugs and feature regressions across unrelated code paths, so there is no single representative issue for the cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "No clear best issue either; at most some items are thematically similar (e.g. tokenizer/model-version regressions), but none is a safe global representative for the whole cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42898", + "right": "issue:44568", + "accept": false, + "reason": "Both involve tokenizer behavior in v5, but one is about clean_up_tokenization_spaces while the other is about add_special_tokens not adding BOS/EOS; different code paths and symptoms." + }, + { + "left": "issue:42175", + "right": "issue:44933", + "accept": false, + "reason": "Packaging/backend dependency selection vs a missing image_utils import; no shared underlying bug." + }, + { + "left": "issue:43784", + "right": "issue:43824", + "accept": false, + "reason": "Both are import failures, but for different packages/classes and different causes; not the same defect." + }, + { + "left": "issue:43976", + "right": "issue:44933", + "accept": false, + "reason": "Python version compatibility in transformers 5.1.0 is unrelated to a missing image_utils import." + }, + { + "left": "issue:44964", + "right": "issue:45137", + "accept": false, + "reason": "Different failure modes in different subsystems: multimodal model loading vs DeepSpeed ZeRO3 deque handling." + }, + { + "left": "issue:44188", + "right": "issue:44373", + "accept": false, + "reason": "Runtime kernel divergence under torch.compile is unrelated to a wrong docstring." 
+ }, + { + "left": "issue:44623", + "right": "issue:45310", + "accept": false, + "reason": "Missing files on processor save and Qwen3.5 MoE from_pretrained errors involve different save/load paths and are not the same bug." + }, + { + "left": "issue:43278", + "right": "issue:45137", + "accept": false, + "reason": "Embedding dtype drift during evaluate and a ZeRO3 deque IndexError are unrelated training/runtime issues." + }, + { + "left": "issue:44734", + "right": "issue:45103", + "accept": false, + "reason": "Serving KV-cache tensor indexing crash and auto-docstring AttributeError with future annotations are different subsystems and different root causes." + }, + { + "left": "issue:44060", + "right": "issue:45125", + "accept": false, + "reason": "Both concern Qwen models, but one is a tied-weights warning bug and the other is missing _tp_plan for tensor parallelism; they are distinct changes/fixes." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 102, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41628", + "issue:42898", + "issue:43295", + "issue:43723", + "issue:43874", + "issue:44077", + "issue:44222", + "issue:44242", + "issue:44297", + "issue:44442", + "issue:44448", + "issue:44568", + "issue:44779", + "issue:45092", + "issue:45137", + "issue:45276", + "issue:45356", + "issue:45459" + ], + "soft_pairs": [ + "issue:42898|issue:45356", + "issue:43723|issue:44568", + "issue:43874|issue:45092", + "issue:42898|issue:44779", + "issue:45137|issue:45276", + "issue:44077|issue:44242", + "issue:42898|issue:44448", + "issue:41628|issue:45459", + "issue:44222|issue:44297", + 
"issue:44442|issue:45459", + "issue:42898|issue:43295" + ], + "prepared_review_unit_hash": "4e68d080e1ff610fb60cb2e16d619595ddc5661fcab5d15ba564ad274f18fd18", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13825, + "estimated_input_tokens": 3457, + "estimated_eval_tokens": 7170 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly distinct tokenizer/processor regressions in v5 rather than true duplicates. The cluster is thematically related, but the concrete failure modes and affected code paths differ too much to merge any soft pair.", + "confidence": 0.86, + "canonical_issue_reason": "Issue 42898 is the broadest, most umbrella-like tokenizer-v5 regression report in the set, so it best serves as the cluster\u2019s anchor issue.", + "canonical_pr_reason": null, + "best_issue_reason": "42898 is the most representative issue for the general v5 tokenizer behavior-change theme; the others are narrower model-specific or API-specific regressions.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42898", + "right": "issue:45356", + "accept": false, + "reason": "Both concern tokenizer regressions in v5, but 42898 is about clean_up_tokenization_spaces behavior while 45356 is Kimi-K2.5 codec handling and a misleading warning; different bugs and fixes." + }, + { + "left": "issue:43723", + "right": "issue:44568", + "accept": false, + "reason": "43723 is an AutoTokenizer loading failure for a custom tokenizer, whereas 44568 is about add_special_tokens not adding BOS/EOS for a specific tokenizer; not the same code-path problem." 
+ }, + { + "left": "issue:43874", + "right": "issue:45092", + "accept": false, + "reason": "43874 is a missing image-processor method causing multimodal token counting failure, while 45092 is remote-code checkpoint incompatibility with meta initialization; separate issues." + }, + { + "left": "issue:42898", + "right": "issue:44779", + "accept": false, + "reason": "Both are v5 tokenizer regressions, but 42898 targets tokenization-space cleanup and 44779 targets Deepseek tokenizer output correctness; different underlying defects." + }, + { + "left": "issue:45137", + "right": "issue:45276", + "accept": false, + "reason": "45137 is a DeepSpeed ZeRO3 deque error, while 45276 is resize_token_embeddings not propagating to gemma4 embeddings; unrelated failures." + }, + { + "left": "issue:44077", + "right": "issue:44242", + "accept": false, + "reason": "44077 concerns a model config/post_init allowance, whereas 44242 is about load-balancing loss when output_router_logits=False; not the same bug." + }, + { + "left": "issue:42898", + "right": "issue:44448", + "accept": false, + "reason": "44448 reports Pegasus output differences between v4 and v5, but it is a model-specific regression and not clearly the same issue as clean_up_tokenization_spaces behavior in 42898." + }, + { + "left": "issue:41628", + "right": "issue:45459", + "accept": false, + "reason": "41628 is an AutoImageProcessor import error; 45459 is tokenizer error masking when protobuf is absent. Different subsystems and symptoms." + }, + { + "left": "issue:44222", + "right": "issue:44297", + "accept": false, + "reason": "44222 is an FP8 save_pretrained/moe issue, while 44297 is tokenizer_class mismatch in tokenizer_config.json; both involve saving but not the same concrete bug." 
+ }, + { + "left": "issue:44442", + "right": "issue:45459", + "accept": false, + "reason": "44442 is AutoTokenizer failing to load a specific tokenizer class; 45459 is exception handling that hides tokenizer errors when protobuf is missing. Distinct problems." + }, + { + "left": "issue:42898", + "right": "issue:43295", + "accept": false, + "reason": "43295 is about processor.tokenizer access and passing images to tokenizer in a regression, not the same behavior change as clean_up_tokenization_spaces in 42898." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 103, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:22355", + "issue:28282", + "issue:42831", + "issue:43479", + "issue:43644", + "issue:43723", + "issue:44206", + "issue:44222", + "issue:44297", + "issue:44483", + "issue:44484", + "issue:44514", + "issue:44623", + "issue:44977", + "issue:45103", + "issue:45310", + "issue:45406" + ], + "soft_pairs": [ + "issue:43479|issue:43644", + "issue:44222|issue:44623", + "issue:44206|issue:45103", + "issue:42831|issue:44623", + "issue:44297|issue:45310", + "issue:22355|issue:28282", + "issue:43723|issue:44977", + "issue:44483|issue:45406", + "issue:44222|issue:44484", + "issue:44514|issue:45103" + ], + "prepared_review_unit_hash": "f20840d91cce7b28fae42997363cabd1eedfc1eed4d54ed236e816c94e3049fa", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + 
"serialized_chars": 12771, + "estimated_input_tokens": 3193, + "estimated_eval_tokens": 6642 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is heterogeneous: the items cover unrelated bugs and regressions across config loading, save_pretrained, tokenizers, chat serving, FP8, and docstring tooling. None of the soft pairs look like the same underlying issue, so all should be kept separate.", + "confidence": 0.88, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43644 is the best anchor only in a loose sense: it describes a concrete, high-impact regression with a clear symptom and broader relevance, but it is not a duplicate of the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43479", + "right": "issue:43644", + "accept": false, + "reason": "Different bugs: Phi4MultimodalConfig defaulting logic vs non-persistent buffer initialization in v5. No shared code path or fix." + }, + { + "left": "issue:44222", + "right": "issue:44623", + "accept": false, + "reason": "Both mention save_pretrained, but one is about FP8 MoE saving and the other about processor files missing. Different artifact types and failure modes." + }, + { + "left": "issue:44206", + "right": "issue:45103", + "accept": false, + "reason": "LasrFeatureExtractor center-arg crash is unrelated to auto_docstring failing on postponed annotations." + }, + { + "left": "issue:42831", + "right": "issue:44623", + "accept": false, + "reason": "FineGrainedFP8 accuracy regression is unrelated to processor.save_pretrained missing files." + }, + { + "left": "issue:44297", + "right": "issue:45310", + "accept": false, + "reason": "Tokenizer config mismatch on save is a different problem from Qwen3.5 MoE from_pretrained failing." 
+ }, + { + "left": "issue:22355", + "right": "issue:28282", + "accept": false, + "reason": "Missing transformers.onnx module and missing PyTorch are distinct import/dependency errors." + }, + { + "left": "issue:43723", + "right": "issue:44977", + "accept": false, + "reason": "Tokenizer loading in v5 and Qwen3.5 flash-attention generation failure affect different paths and symptoms." + }, + { + "left": "issue:44483", + "right": "issue:45406", + "accept": false, + "reason": "Chat/completions request rejection is not the same as Gemma4Processor missing _tokenizer in serve." + }, + { + "left": "issue:44222", + "right": "issue:44484", + "accept": false, + "reason": "FP8 MoE save_pretrained bug is unrelated to the max_shard_size default question." + }, + { + "left": "issue:44514", + "right": "issue:45103", + "accept": false, + "reason": "Qwen2_5_VLProcessor batched chat template crash is unrelated to auto_docstring's AttributeError on future annotations." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 104, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:30990", + "issue:38175", + "issue:43479", + "issue:43723", + "issue:44261", + "issue:44263", + "issue:44295", + "issue:44360", + "issue:44373", + "issue:44442", + "issue:44485", + "issue:44623", + "issue:44871", + "issue:44913", + "issue:44991", + "issue:45092", + "issue:45357" + ], + "soft_pairs": [ + "issue:44263|issue:44360", + "issue:44263|issue:45092", + "issue:44295|issue:44623", + "issue:43479|issue:44913", + "issue:44261|issue:45092", + "issue:43723|issue:44871", + "issue:43723|issue:44442", + "issue:43723|issue:44991", + 
"issue:44373|issue:44485", + "issue:30990|issue:38175", + "issue:45092|issue:45357" + ], + "prepared_review_unit_hash": "cf73c0537e666975ac270ef35faf12e14cba39464d095c060e72675000db28c9", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 11, + "serialized_chars": 13019, + "estimated_input_tokens": 3255, + "estimated_eval_tokens": 6766 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: most items are unrelated bug reports that only share broad wording like loading, tokenizer, config, or save_pretrained. I do not see any soft pair that is the same underlying bug/change, so all soft edges should be rejected.", + "confidence": 0.91, + "canonical_issue_reason": "Issue 43723 is the broadest representative of the cluster\u2019s recurring tokenizer/loading theme, so it is the least-bad canonical issue choice even though the cluster is not a true duplicate set.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43723 is the best overall representative because it has the clearest, most general failure mode among the issues here; it is also the closest to several superficially related tokenizer-loading reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44263", + "right": "issue:44360", + "accept": false, + "reason": "Both mention DSA/indexer internals, but one is about torch.split return values and the other about a missing ReLU; different code-path problems." + }, + { + "left": "issue:44263", + "right": "issue:45092", + "accept": false, + "reason": "Completely different subsystems: GLM/DSA indexing versus multimodal meta-initialization compatibility." 
+ }, + { + "left": "issue:44295", + "right": "issue:44623", + "accept": false, + "reason": "Position_ids buffer access and processor.save_pretrained missing files are unrelated bugs." + }, + { + "left": "issue:43479", + "right": "issue:44913", + "accept": false, + "reason": "Phi4Multimodal default vision/audio config initialization is unrelated to GPTNeoX rotary_pct persistence on reload." + }, + { + "left": "issue:44261", + "right": "issue:45092", + "accept": false, + "reason": "Different failure modes and targets: missing rms_norm_eps in MLA q_a_layernorm versus old InternVL2 meta-init incompatibility." + }, + { + "left": "issue:43723", + "right": "issue:44871", + "accept": false, + "reason": "Both involve model loading, but one is AutoTokenizer loading failure and the other is an eos_token_id config mismatch in Gemma-3; not the same bug." + }, + { + "left": "issue:43723", + "right": "issue:44442", + "accept": false, + "reason": "Both are tokenizer-loading complaints, but they concern different tokenizers and likely different root causes." + }, + { + "left": "issue:43723", + "right": "issue:44991", + "accept": false, + "reason": "Different model/tokenizer targets and different loading failures; too broad to treat as one bug." + }, + { + "left": "issue:44373", + "right": "issue:44485", + "accept": false, + "reason": "Wrong docstring for position_ids is documentation-only; GLM-5 RoPE implementation is a code semantics issue." + }, + { + "left": "issue:30990", + "right": "issue:38175", + "accept": false, + "reason": "Sentence Transformers loading hang and zero probabilities in siglip2-base-patch16-224 are unrelated model-specific issues." + }, + { + "left": "issue:45092", + "right": "issue:45357", + "accept": false, + "reason": "Both touch multimodal checkpoints, but one is a load-time meta-init incompatibility and the other is a save_pretrained regression with visual encoder keys; different code paths." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 105, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41628", + "issue:42831", + "issue:42886", + "issue:42994", + "issue:43066", + "issue:43122", + "issue:43576", + "issue:43784", + "issue:43824", + "issue:44038", + "issue:44117", + "issue:44263", + "issue:44373", + "issue:44514", + "issue:44521", + "issue:44704", + "issue:45310" + ], + "soft_pairs": [ + "issue:41628|issue:43824", + "issue:42886|issue:42994", + "issue:42831|issue:45310", + "issue:43576|issue:43784", + "issue:42831|issue:44038", + "issue:43122|issue:44521", + "issue:44263|issue:44373", + "issue:42886|issue:43066", + "issue:44117|issue:44704", + "issue:43122|issue:44514" + ], + "prepared_review_unit_hash": "bdc0891a5d0a5436b8977f0bca29cf610535f04e84bf0cee672d6d22b39e1135", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12604, + "estimated_input_tokens": 3151, + "estimated_eval_tokens": 6558 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is mostly heterogeneous; the soft-similarity pairs share broad themes (tokenizers, imports, multimodal processors) but not the same concrete bug or change, so none should be merged as duplicates.", + "confidence": 0.86, + "canonical_issue_reason": "If one issue must anchor 
the cluster, #44521 is the clearest standalone bug report: a specific, reproducible failure in `apply_chat_template` for multimodal inputs with active discussion.", + "canonical_pr_reason": null, + "best_issue_reason": "#44521 is the strongest issue to keep as the representative because it has a concrete symptom, clear scope, and ongoing user impact.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41628", + "right": "issue:43824", + "accept": false, + "reason": "Both are import errors, but they concern different missing symbols (`AutoImageProcessor` vs `Qwen2_5_VLForConditionalGeneration`) and likely separate export/regression paths." + }, + { + "left": "issue:42886", + "right": "issue:42994", + "accept": false, + "reason": "Tokenizer offline cache loading and quantized model saving are unrelated failure modes in different code paths." + }, + { + "left": "issue:42831", + "right": "issue:45310", + "accept": false, + "reason": "One is a FineGrainedFP8 accuracy regression; the other is a Qwen3.5-MoE `from_pretrained` loading error. Same broad area only." + }, + { + "left": "issue:43576", + "right": "issue:43784", + "accept": false, + "reason": "A broken `transformers env` CLI command is unrelated to a `NameError` during sentence-transformers import." + }, + { + "left": "issue:42831", + "right": "issue:44038", + "accept": false, + "reason": "These are different model-specific problems: FP8 accuracy vs Qwen3-VL-MoE behavior in v5." + }, + { + "left": "issue:43122", + "right": "issue:44521", + "accept": false, + "reason": "Both involve tokenization/chat templating, but one reports version-to-version tokenization differences while the other is an all-zero assistant-mask bug for multimodal inputs." + }, + { + "left": "issue:44263", + "right": "issue:44373", + "accept": false, + "reason": "A `torch.split()` behavior issue and a wrong docstring for `position_ids` are not the same bug; one is code, the other documentation." 
+ }, + { + "left": "issue:42886", + "right": "issue:43066", + "accept": false, + "reason": "Offline tokenizer cache loading and wrong tokenizer decoder type in v5 are distinct defects affecting different mechanisms." + }, + { + "left": "issue:44117", + "right": "issue:44704", + "accept": false, + "reason": "`TOKENIZER_MAPPING_NAMES` returning `None` is a mapping/assumption bug; `AutoProcessor.from_pretrained` not forwarding kwargs to `cached_file` is a separate API plumbing issue." + }, + { + "left": "issue:43122", + "right": "issue:44514", + "accept": false, + "reason": "Both are tokenizer/processor related, but one is a general tokenization change across versions and the other is a batched `apply_chat_template` crash with `padding=False`." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 106, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:30990", + "issue:33357", + "issue:39401", + "issue:42371", + "issue:42907", + "issue:43232", + "issue:43937", + "issue:43976", + "issue:44263", + "issue:44295", + "issue:44297", + "issue:44488", + "issue:44704", + "issue:44743", + "issue:44945", + "issue:45245", + "issue:45290" + ], + "soft_pairs": [ + "issue:30990|issue:33357", + "issue:42371|issue:44263", + "issue:44704|issue:45290", + "issue:43232|issue:45245", + "issue:42907|issue:44945", + "issue:44295|issue:44297", + "issue:43232|issue:44743", + "issue:43976|issue:44945", + "issue:39401|issue:44488", + "issue:42907|issue:43937" + ], + "prepared_review_unit_hash": "fcf496f54045955e1554132065d050ad00aa65ffa65f52e01c3db89ba9c9028d", + "component_budget": { + "node_count": 359, + 
"item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12620, + "estimated_input_tokens": 3155, + "estimated_eval_tokens": 6566 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is heterogeneous: the issues cover unrelated bugs in loading, tokenizers, generation, serialization, TF32 settings, and model-specific behavior. None of the soft pairs look like the same underlying defect, so they should remain separate.", + "confidence": 0.97, + "canonical_issue_reason": "No canonical issue stands out; the items are not duplicates and do not share a single underlying bug or code path.", + "canonical_pr_reason": null, + "best_issue_reason": "No single issue is a strong global dedupe representative for this cluster; the closest matches are still different problems.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:30990", + "right": "issue:33357", + "accept": false, + "reason": "Different symptoms and models: Sentence Transformers loading hangs vs a MacOS bus error with a community CLIP model." + }, + { + "left": "issue:42371", + "right": "issue:44263", + "accept": false, + "reason": "Unrelated topics: TF32 behavior settings vs a GlmMoeDsaIndexer torch.split return-value issue." + }, + { + "left": "issue:44704", + "right": "issue:45290", + "accept": false, + "reason": "Different code paths: AutoProcessor kwargs passthrough to cached_file vs apply_chat_template crashing on tool-call assistant messages." + }, + { + "left": "issue:43232", + "right": "issue:45245", + "accept": false, + "reason": "Different failures: generation kwargs handling after sync_gpus vs a categorical cardinality limit error." 
+ }, + { + "left": "issue:42907", + "right": "issue:44945", + "accept": false, + "reason": "Serialization of dequantized Ministral/Devstral models is unrelated to incorrect outputs under pipeline parallelism." + }, + { + "left": "issue:44295", + "right": "issue:44297", + "accept": false, + "reason": "Separate tokenizer/model bugs: reading position_ids after buffer registration vs tokenizer_class mismatch in tokenizer_config.json." + }, + { + "left": "issue:43232", + "right": "issue:44743", + "accept": false, + "reason": "Both involve generation, but one is sync_gpus kwargs handling and the other is recurrent-state reset in modular_qwen3_5; not the same bug." + }, + { + "left": "issue:43976", + "right": "issue:44945", + "accept": false, + "reason": "Unrelated: Python version compatibility for Transformers 5.1.0 vs incorrect LLM output with pipeline parallelism." + }, + { + "left": "issue:39401", + "right": "issue:44488", + "accept": false, + "reason": "Both are tokenizer-related, but one is wrong offset_mapping for Qwen3 and the other is failing to load a specific community model." + }, + { + "left": "issue:42907", + "right": "issue:43937", + "accept": false, + "reason": "Different GLM-related issues: saving dequantized Ministral/Devstral vs invalid GenerationConfig for GLM-5." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 107, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:29127", + "issue:30990", + "issue:41628", + "issue:43116", + "issue:43122", + "issue:43408", + "issue:43475", + "issue:43992", + "issue:44360", + "issue:44485", + "issue:44625", + "issue:44704", + "issue:44779", + "issue:44829", + "issue:44987", + "issue:45200", + "issue:45278" + ], + "soft_pairs": [ + "issue:41628|issue:45278", + "issue:43116|issue:45200", + "issue:43408|issue:43475", + "issue:44625|issue:44829", + "issue:29127|issue:30990", + "issue:44360|issue:44485", + "issue:43992|issue:44704", + "issue:41628|issue:44987", + "issue:43122|issue:44779" + ], + "prepared_review_unit_hash": "da8812b3e1a2af01fce835fc0c248f8f95bf3c181899a5b4f6950fd4b8d4fc02", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12496, + "estimated_input_tokens": 3124, + "estimated_eval_tokens": 6504 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is a heterogeneous mix of unrelated transformer bugs, regressions, and feature issues; the soft links look like broad topical similarity rather than true duplicates. 
I would not collapse the cluster into a single canonical item.", + "confidence": 0.92, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41628", + "right": "issue:45278", + "accept": false, + "reason": "Both mention import problems, but one is a specific AutoImageProcessor import failure and the other is a broad upgrade-to-5.5.0 import breakage report; not the same concrete bug." + }, + { + "left": "issue:43116", + "right": "issue:45200", + "accept": false, + "reason": "Different symptoms and code paths: empty multi-label classification results in an example script versus Gemma 4 mm_token_type_ids defaulting/propagation." + }, + { + "left": "issue:43408", + "right": "issue:43475", + "accept": false, + "reason": "Both are SAM 3 video-adjacent, but one is a model-type mismatch warning and the other is a missing attribute error in vision encoder output; separate failures." + }, + { + "left": "issue:44625", + "right": "issue:44829", + "accept": false, + "reason": "Qwen3.5 num_labels propagation and flash_attention_3 degenerate training are unrelated bugs in different parts of the stack." + }, + { + "left": "issue:29127", + "right": "issue:30990", + "accept": false, + "reason": "LayoutLMv3 error-message clarity and Sentence Transformers loading hang are entirely different issues." + }, + { + "left": "issue:44360", + "right": "issue:44485", + "accept": false, + "reason": "DSA indexer ReLU discussion and GLM-5 RoPE implementation are different model/component concerns, not one underlying defect." + }, + { + "left": "issue:43992", + "right": "issue:44704", + "accept": false, + "reason": "UMT5Encoder missing embed_tokens.weight and AutoProcessor kwargs not forwarded to cached_file are separate loading bugs." 
+ }, + { + "left": "issue:41628", + "right": "issue:44987", + "accept": false, + "reason": "An AutoImageProcessor import error is not the same as failing to load a specific model repo in transformers>=5.1.0." + }, + { + "left": "issue:43122", + "right": "issue:44779", + "accept": false, + "reason": "Both are tokenizer regressions, but they affect different models and likely different tokenizer code paths; too broad to treat as the same bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 108, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36296", + "issue:39401", + "issue:42371", + "issue:42831", + "issue:42907", + "issue:43116", + "issue:43122", + "issue:43452", + "issue:43824", + "issue:44038", + "issue:44222", + "issue:44261", + "issue:44295", + "issue:44297", + "issue:44351", + "issue:44373", + "issue:44521", + "issue:44977" + ], + "soft_pairs": [ + "issue:44038|issue:44297", + "issue:39401|issue:44521", + "issue:44038|issue:44295", + "issue:44261|issue:44373", + "issue:42371|issue:42831", + "issue:36296|issue:43116", + "issue:42907|issue:43452", + "issue:44038|issue:44222", + "issue:43824|issue:44351", + "issue:43122|issue:44977" + ], + "prepared_review_unit_hash": "758d62a24e2d8766fc27355e76b5ee7183fbac16906d353610170a7f08f90296", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13152, + "estimated_input_tokens": 3288, + 
"estimated_eval_tokens": 6832 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly unrelated issues across tokenization, model import/export, FP8/save_pretrained, training, and docs/API behavior. The soft-similarity pairs share only broad theme or model family, not the same concrete bug, so none should be merged as duplicates.", + "confidence": 0.94, + "canonical_issue_reason": "If a single representative is needed, issue 44038 is the broadest user-facing regression in the Qwen3-VL-Moe / transformers 5.0 area. But the set is not a true duplicate cluster, so this is only a loose anchor, not a strong canonical.", + "canonical_pr_reason": null, + "best_issue_reason": "44038 is the most central of the listed issues because it describes a concrete runtime regression and is thematically closest to the few Qwen-related reports. Still, the cluster is heterogeneous enough that no issue is a clean global canonical.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44038", + "right": "issue:44297", + "accept": false, + "reason": "Both involve Qwen/transformers model handling, but one is a Qwen3-VL-Moe bug and the other is tokenizer_config save_pretrained metadata mismatch; different code paths and symptoms." + }, + { + "left": "issue:39401", + "right": "issue:44521", + "accept": false, + "reason": "Both mention Qwen/tokenization, but offset_mapping errors and all-zero assistant masks come from different preprocessing stages and likely different fixes." + }, + { + "left": "issue:44038", + "right": "issue:44295", + "accept": false, + "reason": "Qwen3-VL-Moe regression and position_ids buffer reading error are unrelated bugs; no shared failure mode is evident." 
+ }, + { + "left": "issue:44261", + "right": "issue:44373", + "accept": false, + "reason": "An MLA q_a_layernorm precision issue is not the same as a wrong docstring for position_ids; one is a functional bug, the other is documentation." + }, + { + "left": "issue:42371", + "right": "issue:42831", + "accept": false, + "reason": "TF32 API guidance and FineGrainedFP8 accuracy problems are different numerical/performance topics with different fixes." + }, + { + "left": "issue:36296", + "right": "issue:43116", + "accept": false, + "reason": "Tensor parallel training failure and multi-label classification returning empty results are distinct feature paths with no concrete overlap." + }, + { + "left": "issue:42907", + "right": "issue:43452", + "accept": false, + "reason": "Saving dequantized Ministral/Devstral models and gguf_file loader failures affect different model serialization/loading paths." + }, + { + "left": "issue:44038", + "right": "issue:44222", + "accept": false, + "reason": "Both touch Qwen3-VL-Moe/FP8, but one is a general transformers 5.0 bug and the other is specifically FP8 save_pretrained behavior; not the same underlying defect." + }, + { + "left": "issue:43824", + "right": "issue:44351", + "accept": false, + "reason": "These are separate import errors for different symbols: Qwen2_5_VLForConditionalGeneration versus HybridCache." + }, + { + "left": "issue:43122", + "right": "issue:44977", + "accept": false, + "reason": "Tokenizer-version drift and flash-attention generation failure are different regressions; same broad Qwen3.5 area is not enough to duplicate them." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 109, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:38617", + "issue:42222", + "issue:42673", + "issue:42907", + "issue:43064", + "issue:43278", + "issue:43441", + "issue:43673", + "issue:43784", + "issue:44222", + "issue:44912", + "issue:44933", + "issue:44945", + "issue:45072", + "issue:45216", + "issue:45278", + "issue:45446", + "issue:45478" + ], + "soft_pairs": [ + "issue:43278|issue:44945", + "issue:45216|issue:45478", + "issue:42907|issue:44222", + "issue:43784|issue:45446", + "issue:42222|issue:43441", + "issue:42673|issue:45072", + "issue:42907|issue:44912", + "issue:38617|issue:45278", + "issue:43784|issue:44933", + "issue:43064|issue:43673" + ], + "prepared_review_unit_hash": "675bb7060e4418a39c4a647e0764f857a7a968e41788264bdf8ca502889968ec", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13308, + "estimated_input_tokens": 3327, + "estimated_eval_tokens": 6910 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are mostly unrelated bug reports; none of the soft pairs look like the same underlying issue, so I rejected all duplicate edges. 
The broadest representative issue in the set is the import-regression report (#45278), but this cluster is not a true duplicate set.", + "confidence": 0.86, + "canonical_issue_reason": "#45278 is the broadest report here (multiple import errors after upgrading to 5.5.0) and is the closest thing to a cluster representative, though the set is heterogeneous.", + "canonical_pr_reason": null, + "best_issue_reason": "#45278 is the most general and central issue among these reports, making it the best representative if one must be chosen.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43278", + "right": "issue:44945", + "accept": false, + "reason": "Different bugs: one is a dtype mismatch between train/eval, the other is incorrect output under pipeline parallelism." + }, + { + "left": "issue:45216", + "right": "issue:45478", + "accept": false, + "reason": "Both involve Qwen3.5, but one is a save_pretrained regression and the other is a from_pretrained/MoE loading error; not the same concrete fix." + }, + { + "left": "issue:42907", + "right": "issue:44222", + "accept": false, + "reason": "Both are save/load related, but they concern different quantization paths and model families (dequantized Ministral/Devstral vs FP8 MoE)." + }, + { + "left": "issue:43784", + "right": "issue:45446", + "accept": false, + "reason": "Both are import-related, but they are separate missing/guarded imports in different modules with different root causes." + }, + { + "left": "issue:42222", + "right": "issue:43441", + "accept": false, + "reason": "Completely different model/runtime failures: vitpose import breakage vs Ministral-3 FlashAttention failure." + }, + { + "left": "issue:42673", + "right": "issue:45072", + "accept": false, + "reason": "Different symptoms and code paths: VRAM leak in threaded Qwen3ForCausalLM use vs bfloat16 dtype mismatches in CI." 
+ }, + { + "left": "issue:42907", + "right": "issue:44912", + "accept": false, + "reason": "Different quantization/save-load problems: saving dequantized weights is not the same as MXFP4 load fallback to bf16." + }, + { + "left": "issue:38617", + "right": "issue:45278", + "accept": false, + "reason": "#45278 is a broad import-regression report, but #38617 is a specific missing symbol import; not the same bug." + }, + { + "left": "issue:43784", + "right": "issue:44933", + "accept": false, + "reason": "Both mention imports, but they involve different missing symbols/modules and different failures." + }, + { + "left": "issue:43064", + "right": "issue:43673", + "accept": false, + "reason": "Different training/distributed-state bug versus GenerationMixin cache issue; no shared concrete code path." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 110, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:22355", + "issue:29127", + "issue:42371", + "issue:43116", + "issue:43493", + "issue:43550", + "issue:43784", + "issue:43825", + "issue:43874", + "issue:43937", + "issue:43976", + "issue:44038", + "issue:44075", + "issue:44222", + "issue:44485", + "issue:44936", + "issue:45446", + "issue:45478" + ], + "soft_pairs": [ + "issue:43550|issue:43784", + "issue:43937|issue:44485", + "issue:44075|issue:44936", + "issue:22355|issue:29127", + "issue:42371|issue:43874", + "issue:43116|issue:43493", + "issue:43116|issue:43825", + "issue:44222|issue:45478", + "issue:43976|issue:45446", + "issue:44038|issue:45478" + ], + "prepared_review_unit_hash": 
"b05b955e3d30a08f17646b8ce2cff75707bedd259d4629d2bb4d3137b7c91913", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13207, + "estimated_input_tokens": 3302, + "estimated_eval_tokens": 6860 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are a loose thematic cluster of unrelated Transformer issues, not a duplicate set. All soft pairs are false positives based on broad keyword overlap only.", + "confidence": 0.97, + "canonical_issue_reason": "No single canonical issue stands out because the items cover distinct bugs across different models, features, and code paths rather than one duplicated report.", + "canonical_pr_reason": "No PRs are present in this input, so there is no canonical PR candidate.", + "best_issue_reason": "No issue is a clear global representative for duplication triage; the closest title matches still describe different failures or different subsystems.", + "best_pr_reason": "No PRs are present in this input, so there is no best PR candidate.", + "soft_edge_verdicts": [ + { + "left": "issue:43550", + "right": "issue:43784", + "accept": false, + "reason": "Different bugs: Bamba torch.compile/SDPA failure vs sentence-transformers import NameError. Same broad library, not the same code-path." + }, + { + "left": "issue:43937", + "right": "issue:44485", + "accept": false, + "reason": "Both mention GLM-5, but one is a GenerationConfig validation error and the other is a RoPE implementation discussion. Different underlying problems." 
+ }, + { + "left": "issue:44075", + "right": "issue:44936", + "accept": false, + "reason": "SGD optimizer arguments being ignored is unrelated to trainer.evaluate() failing after trainer.train(). Different components and symptoms." + }, + { + "left": "issue:22355", + "right": "issue:29127", + "accept": false, + "reason": "No module named transformers.onnx is an import/package issue; layoutlmv3 box clarity is an error-message/validation issue." + }, + { + "left": "issue:42371", + "right": "issue:43874", + "accept": false, + "reason": "TF32 API/settings guidance is unrelated to a missing get_number_of_image_patches method in an image processor." + }, + { + "left": "issue:43116", + "right": "issue:43493", + "accept": false, + "reason": "Multi-label classification returning empty results is not the same as a SigLIP2 HF-vs-JAX discrepancy. Different model/task and failure mode." + }, + { + "left": "issue:43116", + "right": "issue:43825", + "accept": false, + "reason": "Empty multi-label classification results and a pipeline() translation-task error message are unrelated issues." + }, + { + "left": "issue:44222", + "right": "issue:45478", + "accept": false, + "reason": "Both involve MoE/pretrained wording, but one is FP8 save_pretrained and the other is from_pretrained loading. Not the same concrete bug." + }, + { + "left": "issue:43976", + "right": "issue:45446", + "accept": false, + "reason": "Python version compatibility for Transformers 5.1.0 is unrelated to an incorrect PyTorch version check in flex_attention." + }, + { + "left": "issue:44038", + "right": "issue:45478", + "accept": false, + "reason": "Qwen3-VL-Moe on transformers 5.0 and Qwen3.5 MoE from_pretrained on transformers>=5.4.0 are different model/version bugs, not one merged fix." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 111, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33453", + "issue:39401", + "issue:42673", + "issue:43421", + "issue:43450", + "issue:43827", + "issue:43906", + "issue:43931", + "issue:43937", + "issue:43986", + "issue:44291", + "issue:44779", + "issue:44945", + "issue:44977", + "issue:45071", + "issue:45083", + "issue:45405", + "issue:45446" + ], + "soft_pairs": [ + "issue:39401|issue:44779", + "issue:33453|issue:39401", + "issue:43931|issue:44945", + "issue:45083|issue:45446", + "issue:43937|issue:44977", + "issue:42673|issue:43906", + "issue:43827|issue:45446", + "issue:43450|issue:43986", + "issue:45405|issue:45446", + "issue:44291|issue:45071", + "issue:39401|issue:43421" + ], + "prepared_review_unit_hash": "202bd1dacc6e86cd8be6f02995a2425ba899d1180b98a4194f7ed99fc5479b3a", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13617, + "estimated_input_tokens": 3405, + "estimated_eval_tokens": 7066 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This is a noisy, heterogeneous cluster: the soft pairs share broad subsystem terms, but they are not the same concrete bug or change, so none should be merged as duplicates.", + "confidence": 0.82, + "canonical_issue_reason": "No 
single canonical issue cleanly covers the set; the items span unrelated tokenizer, model-loading, video, docs, and version-check problems.", + "canonical_pr_reason": null, + "best_issue_reason": "If one issue must anchor the cluster, 39401 is the most concrete and self-contained tokenizer report, but it still only fits a narrow subset of the items.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:39401", + "right": "issue:44779", + "accept": false, + "reason": "Both are tokenizer-related, but one is a Qwen3 offset_mapping bug and the other is a broader Deepseek v5 regression in outputs; not the same defect." + }, + { + "left": "issue:33453", + "right": "issue:39401", + "accept": false, + "reason": "Tokenizer loading regression vs wrong offset_mapping; different symptoms and likely different code paths." + }, + { + "left": "issue:43931", + "right": "issue:44945", + "accept": false, + "reason": "Model weight-shape loading failure vs incorrect LLM output under pipeline parallelism; these are distinct bugs." + }, + { + "left": "issue:45083", + "right": "issue:45446", + "accept": false, + "reason": "Qwen3 Omni feature helper behavior is unrelated to the flex_attention PyTorch version check." + }, + { + "left": "issue:43937", + "right": "issue:44977", + "accept": false, + "reason": "GenerationConfig validation failure and flash-attention generation breakage are different issues." + }, + { + "left": "issue:42673", + "right": "issue:43906", + "accept": false, + "reason": "VRAM leak in multi-threaded Qwen3ForCausalLM is not the same as an isolated reproduction of another issue." + }, + { + "left": "issue:43827", + "right": "issue:45446", + "accept": false, + "reason": "Docs still referencing removed pipeline() is unrelated to a flex_attention import/version-check bug." 
+ }, + { + "left": "issue:43450", + "right": "issue:43986", + "accept": false, + "reason": "Batched video processor shape bug vs AutoProcessor crash without torchvision; related area, but not the same bug." + }, + { + "left": "issue:45405", + "right": "issue:45446", + "accept": false, + "reason": "PEFT minimum-version bump issue is unrelated to the flex_attention PyTorch version check." + }, + { + "left": "issue:44291", + "right": "issue:45071", + "accept": false, + "reason": "Unexpected _is_hf_initialized argument during init_empty_weights is a different failure mode from PretrainedConfig type checking." + }, + { + "left": "issue:39401", + "right": "issue:43421", + "accept": false, + "reason": "Offset_mapping regression and runtime post-processor update for special tokens are separate tokenizer-backend problems." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 112, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:38617", + "issue:41628", + "issue:42175", + "issue:42222", + "issue:42673", + "issue:43408", + "issue:43577", + "issue:43582", + "issue:43742", + "issue:44117", + "issue:44220", + "issue:44410", + "issue:44484", + "issue:44829", + "issue:45081", + "issue:45216", + "issue:45310" + ], + "soft_pairs": [ + "issue:38617|issue:41628", + "issue:44484|issue:45310", + "issue:44829|issue:45216", + "issue:42673|issue:43577", + "issue:44117|issue:45081", + "issue:42673|issue:43582", + "issue:38617|issue:44220", + "issue:42175|issue:42222", + "issue:44410|issue:45310", + "issue:43408|issue:43742" + ], + "prepared_review_unit_hash": 
"455512cbb1a82e031fb6bb52fe45225e8ef20b817d4254aa7e7f0ddcc99ae1c5", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12966, + "estimated_input_tokens": 3242, + "estimated_eval_tokens": 6740 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous and does not look like a single duplicate set. The only plausible duplicate pair is the two Qwen3.5 regression reports around save/load behavior in transformers>=5.4.0; the rest point to distinct subsystems and failure modes.", + "confidence": 0.74, + "canonical_issue_reason": "issue:45216 is the best issue anchor because it is the more specific regression report, names the version boundary, and directly describes the save_pretrained checkpoint corruption that the follow-up from_pretrained failure appears to stem from.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45216 is the strongest representative of the only apparent duplicate bug in the cluster and is more actionable than the broader follow-up report.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:38617", + "right": "issue:41628", + "accept": false, + "reason": "Both are import errors, but they involve different missing symbols (`layer_type_validation` vs `AutoImageProcessor`) and different code paths." + }, + { + "left": "issue:44484", + "right": "issue:45310", + "accept": false, + "reason": "These are unrelated: one asks about shard-size defaults in `save_pretrained`, the other is a Qwen3.5 MoE load regression." 
+ }, + { + "left": "issue:44829", + "right": "issue:45216", + "accept": false, + "reason": "Different bugs in different areas: flash-attention training behavior vs Qwen3.5 checkpoint serialization." + }, + { + "left": "issue:42673", + "right": "issue:43577", + "accept": false, + "reason": "One is a VRAM leak in Qwen3ForCausalLM with dataloader threads; the other is a dtype propagation issue in BLIP2 loading." + }, + { + "left": "issue:44117", + "right": "issue:45081", + "accept": false, + "reason": "`TOKENIZER_MAPPING_NAMES` returning None and a Mistral regex patch crash are separate tokenizer-loading failures." + }, + { + "left": "issue:42673", + "right": "issue:43582", + "accept": false, + "reason": "Different runtime problems: a Qwen3 VRAM leak vs an Apple Silicon TypeError in `caching_allocator_warmup`." + }, + { + "left": "issue:38617", + "right": "issue:44220", + "accept": false, + "reason": "ImportError for a missing config symbol is not the same underlying problem as `_torch_extract_fbank_features()` behavior." + }, + { + "left": "issue:42175", + "right": "issue:42222", + "accept": false, + "reason": "Package/backend dependency omission and broken VitPose model files are unrelated." + }, + { + "left": "issue:44410", + "right": "issue:45310", + "accept": false, + "reason": "A missing projection layer in qwen3next is a model architecture bug, not the Qwen3.5 save/load regression." + }, + { + "left": "issue:43408", + "right": "issue:43742", + "accept": false, + "reason": "Model-type warning for sam3_tracker/sam3_video and a key error loading MobileLLM-125M are distinct model-loading issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 113, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42222", + "issue:42371", + "issue:42617", + "issue:43066", + "issue:43122", + "issue:43673", + "issue:43937", + "issue:44261", + "issue:44483", + "issue:44484", + "issue:44496", + "issue:44933", + "issue:44964", + "issue:45278", + "issue:45341", + "issue:45459", + "issue:45464", + "issue:45478" + ], + "soft_pairs": [ + "issue:42371|issue:44261", + "issue:43937|issue:44496", + "issue:44933|issue:45278", + "issue:44483|issue:45464", + "issue:42617|issue:45341", + "issue:43066|issue:43122", + "issue:44484|issue:45478", + "issue:44964|issue:45278", + "issue:42222|issue:45459", + "issue:43673|issue:45341" + ], + "prepared_review_unit_hash": "0cf0c9ed82cd7e60ca5b685be14b6a40d06d702ee95c25247bc8bc59d70bafe4", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13184, + "estimated_input_tokens": 3296, + "estimated_eval_tokens": 6848 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of separate Transformers v5 regression reports, not true duplicates. 
A few items are loosely similar within import/tokenization/chat-completions breakage, but the exact failure modes and code paths differ enough that none of the soft pairs should be merged.", + "confidence": 0.63, + "canonical_issue_reason": "No single canonical issue cleanly covers the whole cluster. If forced to pick a representative, issue 45278 is the broadest umbrella for the import-regression subset, but the cluster spans multiple unrelated bugs and breakages.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45278 is the best overall anchor because it is open, broad, and directly describes a widespread post-upgrade import problem. It is still not a true duplicate target for the rest of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42371", + "right": "issue:44261", + "accept": false, + "reason": "TF32 settings guidance vs. MLA q_a_layernorm precision bug; different subsystems and root causes." + }, + { + "left": "issue:43937", + "right": "issue:44496", + "accept": false, + "reason": "Both involve v5 config/model loading, but one is invalid GenerationConfig and the other is an unrecognized model config; not the same bug." + }, + { + "left": "issue:44933", + "right": "issue:45278", + "accept": false, + "reason": "45278 is a broad import-error report, but 44933 is a specific missing image_utils import; related theme, not the same underlying issue." + }, + { + "left": "issue:44483", + "right": "issue:45464", + "accept": false, + "reason": "Both mention chat/completions, but one is a general v5.3 request rejection and the other is a streaming-specific Qwen3.5 failure." + }, + { + "left": "issue:42617", + "right": "issue:45341", + "accept": false, + "reason": "A runtime script failure and a testing_utils bug are unrelated code paths." 
+ }, + { + "left": "issue:43066", + "right": "issue:43122", + "accept": false, + "reason": "Both are tokenizer-related v5 regressions, but one is decoder-type resolution and the other is changed tokenization output; not the same defect." + }, + { + "left": "issue:44484", + "right": "issue:45478", + "accept": false, + "reason": "save_pretrained shard-size behavior and Qwen3.5 MoE from_pretrained errors are unrelated." + }, + { + "left": "issue:44964", + "right": "issue:45278", + "accept": false, + "reason": "Phi-4 multimodal loading failure is model-specific; 45278 is a generic import-error report, so they are not duplicates." + }, + { + "left": "issue:42222", + "right": "issue:45459", + "accept": false, + "reason": "VitPose model breakage and protobuf-related tokenizer error masking are different failure modes." + }, + { + "left": "issue:43673", + "right": "issue:45341", + "accept": false, + "reason": "GenerationMixin cache missing during chunked_prefill is unrelated to a testing_utils bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 114, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:41628", + "issue:42222", + "issue:42994", + "issue:43012", + "issue:43097", + "issue:43408", + "issue:43441", + "issue:43874", + "issue:43986", + "issue:44261", + "issue:44263", + "issue:44410", + "issue:44484", + "issue:44485", + "issue:44493", + "issue:44829", + "issue:45071", + "issue:45478" + ], + "soft_pairs": [ + "issue:43097|issue:44493", + "issue:43874|issue:44261", + "issue:43986|issue:44484", + "issue:43441|issue:44829", + "issue:44410|issue:45478", + "issue:42222|issue:42994", + "issue:43012|issue:43408", + "issue:44263|issue:44485", + "issue:41628|issue:45071" + ], + "prepared_review_unit_hash": "f23fb8f8981b45537b4cfc123e4026de63d19701e7cb35be2383452f3a16de6a", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13036, + "estimated_input_tokens": 3259, + "estimated_eval_tokens": 6774 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are thematically similar in being Transformers bug reports, but they do not describe the same underlying defect. 
They span unrelated areas like imports, model-specific architecture bugs, quantization/saving, attention backends, config regressions, and warning changes, so none of the soft pairs should be merged as duplicates.", + "confidence": 0.92, + "canonical_issue_reason": "No single issue is a good canonical representative: the cluster is too heterogeneous and the soft pairs only share broad subsystem similarity, not the same concrete bug.", + "canonical_pr_reason": null, + "best_issue_reason": "If one had to pick a broad representative, issue 43097 is the most regression-like and general, but it still does not subsume the other reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43097", + "right": "issue:44493", + "accept": false, + "reason": "Different failures: one is about `tie_embeddings_and_encoder_decoder` removal in v5, the other about unexpected `position_id` keys across many models." + }, + { + "left": "issue:43874", + "right": "issue:44261", + "accept": false, + "reason": "Both are model-specific errors, but they involve different models and different root causes: missing image-processor method vs missing `rms_norm_eps` config." + }, + { + "left": "issue:43986", + "right": "issue:44484", + "accept": false, + "reason": "Unrelated problems: video AutoProcessor loading with missing torchvision vs `save_pretrained()` shard-size default behavior." + }, + { + "left": "issue:43441", + "right": "issue:44829", + "accept": false, + "reason": "Both mention FlashAttention 3, but the concrete bugs differ: one is a Ministral-3 inference failure, the other is degenerate training in sequence classification." + }, + { + "left": "issue:44410", + "right": "issue:45478", + "accept": false, + "reason": "Same model family, but different code paths and symptoms: missing projections in layer 0 vs a `from_pretrained` failure for Qwen3.5 MoE." 
+ }, + { + "left": "issue:42222", + "right": "issue:42994", + "accept": false, + "reason": "Completely different issues: broken ViT-Pose models vs quantized model saving failure." + }, + { + "left": "issue:43012", + "right": "issue:43408", + "accept": false, + "reason": "Different concerns: a PyTorch warning during bfloat16 compilation vs a model-type mismatch warning for sam3_video/sam3_tracker." + }, + { + "left": "issue:44263", + "right": "issue:44485", + "accept": false, + "reason": "Same broad GLM area, but different bugs: `torch.split()` handling in MoE indexing vs RoPE implementation issues." + }, + { + "left": "issue:41628", + "right": "issue:45071", + "accept": false, + "reason": "Different API regressions: missing `AutoImageProcessor` import vs `PretrainedConfig` type-checking breakage." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 115, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:43097", + "issue:43335", + "issue:43408", + "issue:43452", + "issue:43874", + "issue:43931", + "issue:43986", + "issue:44038", + "issue:44117", + "issue:44263", + "issue:44368", + "issue:44410", + "issue:44987", + "issue:45003", + "issue:45071", + "issue:45341", + "issue:45372" + ], + "soft_pairs": [ + "issue:45003|issue:45341", + "issue:43097|issue:43452", + "issue:44117|issue:44987", + "issue:43986|issue:45341", + "issue:43931|issue:44410", + "issue:43874|issue:44263", + "issue:43335|issue:45071", + "issue:43408|issue:44368", + "issue:43931|issue:44038", + "issue:43986|issue:45372" + ], + "prepared_review_unit_hash": 
"12a8d8451cd326f89e5531a029fd4e01018210e7a32b924a1f0f1372e63973fc", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12928, + "estimated_input_tokens": 3232, + "estimated_eval_tokens": 6720 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly unrelated bug reports across different models/subsystems. I do not see any soft pair that clearly describes the same underlying defect, so all soft edges should be rejected.", + "confidence": 0.44, + "canonical_issue_reason": "issue:45003 is the broadest core-library bug report here, centered on a generic modeling_utils failure path and likely to be the most reusable anchor for duplicates.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45003 is the best global representative because it targets a central code path, is broadly phrased, and has active discussion.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45003", + "right": "issue:45341", + "accept": false, + "reason": "Different subsystems and failures: sys.modules access in modeling_utils versus a testing_utils bug." + }, + { + "left": "issue:43097", + "right": "issue:43452", + "accept": false, + "reason": "One is about a removed config option; the other is a gguf_file loading regression in tokenizer/model loading." + }, + { + "left": "issue:44117", + "right": "issue:44987", + "accept": false, + "reason": "Both involve from_pretrained-style loading, but the concrete bugs differ: tokenizer mapping None-handling vs a specific model load failure for physical-intelligence/fast." 
+ }, + { + "left": "issue:43986", + "right": "issue:45341", + "accept": false, + "reason": "Video AutoProcessor without torchvision is a model-loading dependency issue, not the same as a testing utility bug." + }, + { + "left": "issue:43931", + "right": "issue:44410", + "accept": false, + "reason": "Different Qwen3-VL/Qwen3next model-specific architecture problems with no clear shared code-path defect." + }, + { + "left": "issue:43874", + "right": "issue:44263", + "accept": false, + "reason": "An image-processor missing method causing multimodal token counting failure is unrelated to a torch.split return-value issue." + }, + { + "left": "issue:43335", + "right": "issue:45071", + "accept": false, + "reason": "One is a SwitchTransformers sparse-layer config bug; the other is a PretrainedConfig type-checking regression." + }, + { + "left": "issue:43408", + "right": "issue:44368", + "accept": false, + "reason": "A sam3_video/sam3_tracker config warning and a Qwen3.5 tie_word_embeddings warning are different model/config problems." + }, + { + "left": "issue:43931", + "right": "issue:44038", + "accept": false, + "reason": "Both mention Qwen3-VL, but one is a weight-shape mismatch and the other is a transformers 5.0 loading issue for Qwen3-VL-Moe." + }, + { + "left": "issue:43986", + "right": "issue:45372", + "accept": false, + "reason": "Video AutoProcessor torchvision dependency crash is unrelated to a mistral_common ReasoningEffort import error." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 116, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:38617", + "issue:43064", + "issue:43531", + "issue:43976", + "issue:43986", + "issue:44062", + "issue:44117", + "issue:44261", + "issue:44315", + "issue:44351", + "issue:44464", + "issue:44485", + "issue:44704", + "issue:44829", + "issue:45071", + "issue:45103", + "issue:45245", + "issue:45341" + ], + "soft_pairs": [ + "issue:44829|issue:45245", + "issue:43064|issue:44315", + "issue:43976|issue:45071", + "issue:44464|issue:45071", + "issue:43531|issue:45341", + "issue:44704|issue:45103", + "issue:44261|issue:44485", + "issue:38617|issue:44351", + "issue:44062|issue:44117", + "issue:43986|issue:44117" + ], + "prepared_review_unit_hash": "3432e3bf5cf93f7ffc9d47c8cbaba0a39cf2cea97a57fc672a48af92c3395263", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13455, + "estimated_input_tokens": 3364, + "estimated_eval_tokens": 6984 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is noisy: these issues cover unrelated bugs across imports, training, processors, tokenizers, configs, and model-specific behavior. 
None of the soft pairs look like true duplicates or the same underlying change.", + "confidence": 0.97, + "canonical_issue_reason": "issue:43064 is the most concrete and actionable report in the set, with a specific multi-component training failure mode and clear rank-dependent behavior.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43064 is the strongest standalone issue to keep: it is detailed, reproducible, and describes a high-impact training correctness bug.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44829", + "right": "issue:45245", + "accept": false, + "reason": "Different bugs: flash_attention_3 training degeneration vs a category-count runtime limit. No shared code-path or fix." + }, + { + "left": "issue:43064", + "right": "issue:44315", + "accept": false, + "reason": "Both involve training, but one is optimizer-state corruption under FSDP2/PEFT and the other is Liger Kernel not being applied with model_init. Different mechanisms and fixes." + }, + { + "left": "issue:43976", + "right": "issue:45071", + "accept": false, + "reason": "Separate compatibility issues: Python 3.9/v5.1.0 failure vs v5.4.0 PretrainedConfig type-checking breakage. Not the same underlying bug." + }, + { + "left": "issue:44464", + "right": "issue:45071", + "accept": false, + "reason": "Chunked generation with compiled forward is unrelated to PretrainedConfig type checking. Different subsystem and failure mode." + }, + { + "left": "issue:43531", + "right": "issue:45341", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior is unrelated to a testing_utils bug." + }, + { + "left": "issue:44704", + "right": "issue:45103", + "accept": false, + "reason": "AutoProcessor kwargs forwarding to cached_file is a processor loading bug; the other is an auto_docstring crash with future annotations. Distinct code paths." 
+ }, + { + "left": "issue:44261", + "right": "issue:44485", + "accept": false, + "reason": "Different model-specific architecture concerns: missing rms_norm_eps in MLA q_a_layernorm vs GLM-5 RoPE implementation. Not mergeable as one fix." + }, + { + "left": "issue:38617", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but for different missing exports ('layer_type_validation' vs 'HybridCache') and likely different release regressions." + }, + { + "left": "issue:44062", + "right": "issue:44117", + "accept": false, + "reason": "One is AddedToken construction with duplicate 'special' kwarg; the other is tokenizer mapping returning None during from_pretrained. Different root causes." + }, + { + "left": "issue:43986", + "right": "issue:44117", + "accept": false, + "reason": "AutoProcessor video loading without torchvision and tokenizer mapping None are unrelated loading paths with different fixes." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 117, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:38617", + "issue:43066", + "issue:43097", + "issue:43502", + "issue:43582", + "issue:43824", + "issue:43873", + "issue:43906", + "issue:43957", + "issue:43986", + "issue:43992", + "issue:44038", + "issue:44351", + "issue:44410", + "issue:44484", + "issue:45341", + "issue:45375" + ], + "soft_pairs": [ + "issue:44410|issue:45375", + "issue:43502|issue:43992", + "issue:43582|issue:45341", + "issue:38617|issue:43097", + "issue:44038|issue:44410", + "issue:43957|issue:44351", + "issue:43992|issue:44484", + "issue:43824|issue:43986", + 
"issue:43066|issue:43906", + "issue:43097|issue:43873" + ], + "prepared_review_unit_hash": "552f1152506ec9c20b864327721e37b816e9752c2b57b867ab48cad21ada4d5d", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12709, + "estimated_input_tokens": 3178, + "estimated_eval_tokens": 6612 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is heterogeneous: the soft pairs share broad Transformers/v5 or Qwen-themed wording, but each describes a different concrete failure mode or code path. None look like safe duplicate merges.", + "confidence": 0.93, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44410", + "right": "issue:45375", + "accept": false, + "reason": "Both are Qwen3-related, but one reports missing attention projections in layer 0 while the other reports a missing config field dropped by strict parsing. Different underlying bugs." + }, + { + "left": "issue:43502", + "right": "issue:43992", + "accept": false, + "reason": "One is about unwanted network requests with local_files_only=True; the other is missing embed_tokens.weight when loading UMT5Encoder. Different load-time failures." + }, + { + "left": "issue:43582", + "right": "issue:45341", + "accept": false, + "reason": "Apple Silicon TypeError in caching_allocator_warmup is unrelated to a testing_utils bug. No shared code path." 
+ }, + { + "left": "issue:38617", + "right": "issue:43097", + "accept": false, + "reason": "Both mention Transformers v5 API breakage, but the missing import of layer_type_validation and removal of tie_embeddings_and_encoder_decoder are distinct API changes." + }, + { + "left": "issue:44038", + "right": "issue:44410", + "accept": false, + "reason": "Both involve Qwen model support, but the reported symptoms differ: a general Qwen3-VL-Moe bug versus missing Qwen3Next projections. Not the same concrete fix." + }, + { + "left": "issue:43957", + "right": "issue:44351", + "accept": false, + "reason": "Meta-device loading failures and missing HybridCache import are unrelated failures." + }, + { + "left": "issue:43992", + "right": "issue:44484", + "accept": false, + "reason": "Loading missing weights versus save_pretrained shard-size behavior are different mechanisms and different code paths." + }, + { + "left": "issue:43824", + "right": "issue:43986", + "accept": false, + "reason": "An import error for Qwen2_5_VLForConditionalGeneration and a crash in AutoProcessor without torchvision are separate issues." + }, + { + "left": "issue:43066", + "right": "issue:43906", + "accept": false, + "reason": "The titles suggest different bugs; one is a tokenizer decoder type regression, the other is a reproduction of another issue with no clear shared failure mode." + }, + { + "left": "issue:43097", + "right": "issue:43873", + "accept": false, + "reason": "Removal of tie_embeddings_and_encoder_decoder and offloading problems with quantization are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 118, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42886", + "issue:42907", + "issue:43012", + "issue:43097", + "issue:43208", + "issue:43352", + "issue:43381", + "issue:43525", + "issue:43550", + "issue:43825", + "issue:43901", + "issue:43937", + "issue:44117", + "issue:44368", + "issue:44380", + "issue:44655", + "issue:44792", + "issue:45479" + ], + "soft_pairs": [ + "issue:43352|issue:44380", + "issue:43825|issue:44655", + "issue:43097|issue:43901", + "issue:43012|issue:43550", + "issue:43012|issue:44368", + "issue:42886|issue:43381", + "issue:43208|issue:43550", + "issue:43937|issue:44792", + "issue:44380|issue:45479", + "issue:42907|issue:44655", + "issue:43525|issue:44117" + ], + "prepared_review_unit_hash": "0dddcd192daadb0bd172fd4cc1d1cef74af2a82156cdffaacbb66e1c3425cd45", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13716, + "estimated_input_tokens": 3429, + "estimated_eval_tokens": 7114 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are not duplicates of one another; they span unrelated tokenizer, pipeline, config, attention, saving, and model-specific bugs. 
None of the soft pairs looks like the same underlying change or fix.", + "confidence": 0.97, + "canonical_issue_reason": "No suitable canonical issue: the cluster is heterogeneous and does not represent one underlying bug or feature request.", + "canonical_pr_reason": null, + "best_issue_reason": "No clear global best issue: the items are too unrelated for one issue to serve as a representative duplicate target.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43352", + "right": "issue:44380", + "accept": false, + "reason": "Both mention attention backends, but one is a model-specific FlashAttention support error and the other is GPT-2 attention scaling behavior; different code paths and fixes." + }, + { + "left": "issue:43825", + "right": "issue:44655", + "accept": false, + "reason": "One is about a misleading pipeline() error message, the other about save_pretrained for Pipeline objects; unrelated behavior." + }, + { + "left": "issue:43097", + "right": "issue:43901", + "accept": false, + "reason": "Both touch docs/API messaging, but they concern different deprecated behaviors in different pipelines and are not the same bug." + }, + { + "left": "issue:43012", + "right": "issue:43550", + "accept": false, + "reason": "A PyTorch warning during bfloat16 compilation is not the same as a torch.compile failure on Bamba with SDPA." + }, + { + "left": "issue:43012", + "right": "issue:44368", + "accept": false, + "reason": "These are distinct warning reports affecting different models and triggers; not the same underlying issue." + }, + { + "left": "issue:42886", + "right": "issue:43381", + "accept": false, + "reason": "Tokenizer offline cache loading and gradient checkpointing in eval mode are unrelated failures." + }, + { + "left": "issue:43208", + "right": "issue:43550", + "accept": false, + "reason": "xLSTM training bugs and Bamba torch.compile/SDPA failure are separate model-specific problems." 
+ }, + { + "left": "issue:43937", + "right": "issue:44792", + "accept": false, + "reason": "Invalid GenerationConfig for GLM-5 and a Janus image-generation test failure are different subsystems and symptoms." + }, + { + "left": "issue:44380", + "right": "issue:45479", + "accept": false, + "reason": "GPT-2 attention scaling under SDPA/FlashAttention is unrelated to the sequence-classification zero-loss bug." + }, + { + "left": "issue:42907", + "right": "issue:44655", + "accept": false, + "reason": "Saving dequantized models and saving Pipeline objects both mention save behavior, but they are different save paths with different fixes." + }, + { + "left": "issue:43525", + "right": "issue:44117", + "accept": false, + "reason": "A missing Llama4Config attribute and a tokenizer mapping None-assumption bug are different initialization issues." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 119, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36683", + "issue:38617", + "issue:43012", + "issue:43064", + "issue:43208", + "issue:43352", + "issue:43502", + "issue:43526", + "issue:43582", + "issue:43618", + "issue:43761", + "issue:43976", + "issue:44380", + "issue:44483", + "issue:44514", + "issue:44683", + "issue:45290" + ], + "soft_pairs": [ + "issue:43352|issue:43976", + "issue:43064|issue:44380", + "issue:43208|issue:43526", + "issue:43618|issue:44380", + "issue:44483|issue:44514", + "issue:36683|issue:43582", + "issue:43618|issue:43761", + "issue:43012|issue:44683", + "issue:38617|issue:43502", + "issue:44483|issue:45290" + ], + "prepared_review_unit_hash": 
"1db2210f17bc0fd987faeeb5ab386a6df94c877ff9f25a56c33dcb59fb254f4b", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12884, + "estimated_input_tokens": 3221, + "estimated_eval_tokens": 6698 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the issues span unrelated models, APIs, runtime errors, and backend-specific regressions. I do not see a duplicate set or a single canonical representative.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43352", + "right": "issue:43976", + "accept": false, + "reason": "Different problems: Flash Attention support for Nemotron vs a Python/version import-compatibility issue." + }, + { + "left": "issue:43064", + "right": "issue:44380", + "accept": false, + "reason": "Distinct code paths: FSDP2/PEFT optimizer-state corruption vs GPT-2 attention scaling being ignored in SDPA/FlashAttention." + }, + { + "left": "issue:43208", + "right": "issue:43526", + "accept": false, + "reason": "Unrelated components and symptoms: xLSTM training bugs vs BeitImageProcessorFast label reduction behavior." + }, + { + "left": "issue:43618", + "right": "issue:44380", + "accept": false, + "reason": "Both mention attention, but one is CLIP output fields missing and the other is GPT-2 backend-specific attention scaling; not the same bug." 
+ }, + { + "left": "issue:44483", + "right": "issue:44514", + "accept": false, + "reason": "Both are chat/template-related, but one is an API request rejection and the other is a processor crash on batched input with padding disabled." + }, + { + "left": "issue:36683", + "right": "issue:43582", + "accept": false, + "reason": "Completely different areas: Gemma3 config attribute error vs Apple Silicon allocator warmup TypeError." + }, + { + "left": "issue:43618", + "right": "issue:43761", + "accept": false, + "reason": "Related CLIP family, but one bug is missing attentions and the other is hidden_states not returned; these are separate output regressions." + }, + { + "left": "issue:43012", + "right": "issue:44683", + "accept": false, + "reason": "Both involve attention/precision context, but one is a PyTorch warning from bfloat16 compilation and the other is a compiled flex_attention failure on newer torch." + }, + { + "left": "issue:38617", + "right": "issue:43502", + "accept": false, + "reason": "Different failure modes: missing import from configuration_utils vs unexpected network requests despite local_files_only=True." + }, + { + "left": "issue:44483", + "right": "issue:45290", + "accept": false, + "reason": "Both concern apply_chat_template, but the failures are different: request handling in /v1/chat/completions vs tool-call assistant messages with no content." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 120, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36683", + "issue:42994", + "issue:43066", + "issue:43299", + "issue:43352", + "issue:43475", + "issue:43756", + "issue:43867", + "issue:43901", + "issue:43937", + "issue:44038", + "issue:44220", + "issue:44230", + "issue:44483", + "issue:44485", + "issue:44655", + "issue:45216", + "issue:45381" + ], + "soft_pairs": [ + "issue:44038|issue:44230", + "issue:44483|issue:45381", + "issue:43299|issue:43867", + "issue:43756|issue:44485", + "issue:43901|issue:44655", + "issue:43867|issue:45216", + "issue:42994|issue:44655", + "issue:36683|issue:43475", + "issue:43352|issue:43937", + "issue:44220|issue:44655", + "issue:43066|issue:45381" + ], + "prepared_review_unit_hash": "ef01d4af9a076acecd33fdb0b182fd8ba269c4ee4a4cd7bdbdb3510d5c11172e", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13443, + "estimated_input_tokens": 3361, + "estimated_eval_tokens": 6978 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: it mixes unrelated bugs around model loading/saving, RoPE, tokenizer/docs, flash-attn support, and API validation. 
The soft links are mostly superficial (shared model families or shared serialization terminology), not the same underlying bug or change.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44038", + "right": "issue:44230", + "accept": false, + "reason": "Both mention Qwen3-VL-Moe/fp8, but one is a broad Transformers 5 breakage/loading report and the other is a specific fp8 inference support request; different underlying problems." + }, + { + "left": "issue:44483", + "right": "issue:45381", + "accept": false, + "reason": "One is chat/completions request rejection in v5.3, the other is a qwen2.5-vl video position-id bug; different code paths and symptoms." + }, + { + "left": "issue:43299", + "right": "issue:43867", + "accept": false, + "reason": "Both are model-loading failures, but one is Qwen3-VL-Moe regression and the other is state_dict ordering causing load errors; not the same bug." + }, + { + "left": "issue:43756", + "right": "issue:44485", + "accept": false, + "reason": "Both reference RoPE, but one is a Smollm3 layer-count mismatch and the other is a GLM-5 RoPE implementation discussion; not mergeable as one fix." + }, + { + "left": "issue:43901", + "right": "issue:44655", + "accept": false, + "reason": "Docs mentioning return_all_scores is a documentation mismatch, while the pipeline issue is a save_pretrained serialization failure; unrelated." + }, + { + "left": "issue:43867", + "right": "issue:45216", + "accept": false, + "reason": "Both involve checkpoint/load/save behavior, but one is sorted state_dict loading and the other is Qwen3.5 save_pretrained producing a bad checkpoint; different failure modes." 
+ }, + { + "left": "issue:42994", + "right": "issue:44655", + "accept": false, + "reason": "Quantized model saving and pipeline object saving are separate serialization paths; same broad theme, but not the same concrete bug." + }, + { + "left": "issue:36683", + "right": "issue:43475", + "accept": false, + "reason": "Both are attribute errors on model-related objects, but they concern different missing fields on different model families." + }, + { + "left": "issue:43352", + "right": "issue:43937", + "accept": false, + "reason": "Flash Attention 2.0 unsupported is a capability/runtime guard, while GenerationConfig invalid is a config-validation issue; unrelated." + }, + { + "left": "issue:44220", + "right": "issue:44655", + "accept": false, + "reason": "Audio feature extraction and pipeline saving are unrelated code paths; no evidence of the same defect." + }, + { + "left": "issue:43066", + "right": "issue:45381", + "accept": false, + "reason": "Tokenizer decoder-type documentation/regression and qwen2.5-vl video vision_position_ids are different issues with different fixes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 121, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36683", + "issue:43010", + "issue:43064", + "issue:43525", + "issue:43618", + "issue:43976", + "issue:43992", + "issue:44077", + "issue:44188", + "issue:44242", + "issue:44263", + "issue:44380", + "issue:44492", + "issue:44938", + "issue:45216", + "issue:45278", + "issue:45310", + "issue:45440" + ], + "soft_pairs": [ + "issue:43064|issue:44492", + "issue:44077|issue:44380", + "issue:36683|issue:44077", + "issue:44263|issue:45440", + "issue:43976|issue:44938", + "issue:36683|issue:43525", + "issue:44188|issue:44380", + "issue:43992|issue:45310", + "issue:43618|issue:45216", + "issue:36683|issue:44242", + "issue:43010|issue:45278" + ], + "prepared_review_unit_hash": "f1c0781a821c537cdbf52a73173ce8202ea13717b925a974d67bf79c72b1a4bb", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13575, + "estimated_input_tokens": 3394, + "estimated_eval_tokens": 7044 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are mostly unrelated issues spanning different models, loading paths, attention backends, platform compatibility, and one training/FSDP bug. 
No PRs are present, and all soft-similarity pairs should be rejected as duplicates.", + "confidence": 0.93, + "canonical_issue_reason": "No clear canonical issue: the set does not form one duplicate cluster, and the reports describe distinct failures in different code paths and model families.", + "canonical_pr_reason": null, + "best_issue_reason": "No single issue is a good global representative. The closest broad report is still too general and does not subsume the other bugs without conflating unrelated problems.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43064", + "right": "issue:44492", + "accept": false, + "reason": "Completely different bugs: FSDP/PEFT optimizer-state corruption vs a typo in cache strategies." + }, + { + "left": "issue:44077", + "right": "issue:44380", + "accept": false, + "reason": "Different concrete problems: optional post_init handling vs GPT2 attention scaling being ignored under SDPA/FlashAttention." + }, + { + "left": "issue:36683", + "right": "issue:44077", + "accept": false, + "reason": "Different failures and code paths: missing Gemma3Config.vocab_size vs patchtsmixer post_init policy." + }, + { + "left": "issue:44263", + "right": "issue:45440", + "accept": false, + "reason": "Unrelated: a torch.split return-value issue in an indexer vs DeepseekV3MoE behavioral divergence." + }, + { + "left": "issue:43976", + "right": "issue:44938", + "accept": false, + "reason": "Both are runtime compatibility reports, but they are not the same bug: Python version support failure vs Python 3.14 import/load failure." + }, + { + "left": "issue:36683", + "right": "issue:43525", + "accept": false, + "reason": "Similar symptom class (missing config attribute), but different model attributes and fixes: Gemma3 vocab_size vs Llama4 pad_token_id." 
+ }, + { + "left": "issue:44188", + "right": "issue:44380", + "accept": false, + "reason": "Both touch attention behavior, but one is a torch.compile branching divergence and the other is backend-specific scaling config handling." + }, + { + "left": "issue:43992", + "right": "issue:45310", + "accept": false, + "reason": "Both concern model loading, but they affect different model families and failure modes; not the same underlying bug." + }, + { + "left": "issue:43618", + "right": "issue:45216", + "accept": false, + "reason": "Different model/output regressions: CLIPOutput attentions assignment vs Qwen3.5 save_pretrained checkpoint corruption." + }, + { + "left": "issue:36683", + "right": "issue:44242", + "accept": false, + "reason": "Gemma3Config missing vocab_size is unrelated to MoE load-balancing loss being skipped when output_router_logits=False." + }, + { + "left": "issue:43010", + "right": "issue:45278", + "accept": false, + "reason": "Generic similarity only: a no_grad decoration request is unrelated to broad import errors after upgrading Transformers." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 122, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36683", + "issue:41669", + "issue:43010", + "issue:43064", + "issue:43352", + "issue:43867", + "issue:43931", + "issue:43976", + "issue:43992", + "issue:44246", + "issue:44360", + "issue:44938", + "issue:45071", + "issue:45357", + "issue:45375", + "issue:45381", + "issue:45440", + "issue:45478" + ], + "soft_pairs": [ + "issue:41669|issue:44246", + "issue:36683|issue:43352", + "issue:43352|issue:43931", + "issue:43992|issue:45478", + "issue:43867|issue:45375", + "issue:36683|issue:43064", + "issue:44938|issue:45071", + "issue:43867|issue:45357", + "issue:43010|issue:43976", + "issue:44938|issue:45381", + "issue:44360|issue:45440" + ], + "prepared_review_unit_hash": "abc9509323b6ef70af94ef79c459d1c5f5265f605200dbb09c60f43f883d4c8a", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13577, + "estimated_input_tokens": 3395, + "estimated_eval_tokens": 7046 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a single duplicate set; the soft pairs are mostly false positives across unrelated bugs, regressions, and performance issues. 
Several pairs only share a broad subsystem or wording, not the same concrete failure mode.", + "confidence": 0.93, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41669", + "right": "issue:44246", + "accept": false, + "reason": "Both are about import performance, but one is specifically removing `import *` from models while the other is an intermittent slow `import transformers` complaint. No concrete same fix path is evident." + }, + { + "left": "issue:36683", + "right": "issue:43352", + "accept": false, + "reason": "Different model/runtime failures: Gemma3 config missing `vocab_size` vs NemotronHForCausalLM rejecting Flash Attention 2.0. Not the same bug." + }, + { + "left": "issue:43352", + "right": "issue:43931", + "accept": false, + "reason": "Unrelated model-specific loading/support issues: Flash Attention 2.0 support gating vs Qwen3-VL weight shape mismatch." + }, + { + "left": "issue:43992", + "right": "issue:45478", + "accept": false, + "reason": "Both mention Qwen3.5/MoE loading, but one is a missing `embed_tokens.weight` in UMT5Encoder and the other is a `from_pretrained` error for Qwen3.5 MoE. Too different to treat as the same bug." + }, + { + "left": "issue:43867", + "right": "issue:45375", + "accept": false, + "reason": "Both concern loading/configuration, but one is a state_dict ordering load error and the other is a missing `deepstack_visual_indexes` config field. Different root causes." + }, + { + "left": "issue:36683", + "right": "issue:43064", + "accept": false, + "reason": "No overlap beyond general training/model code: Gemma3 config attribute error vs FSDP2/PEFT optimizer-state corruption. Different code paths and symptoms." 
+ }, + { + "left": "issue:44938", + "right": "issue:45071", + "accept": false, + "reason": "Both are Python 3.14 / v5-related compatibility problems, but one is import/load failure and the other is `PretrainedConfig` type-checking regression. Not the same change." + }, + { + "left": "issue:43867", + "right": "issue:45357", + "accept": false, + "reason": "One is a generic state_dict load failure; the other is a Qwen3.5 save_pretrained visual-encoder key regression. Different direction and failure point." + }, + { + "left": "issue:43010", + "right": "issue:43976", + "accept": false, + "reason": "`@torch.no_grad` on cache/layer update methods is unrelated to the Python 3.9+/5.1.0 compatibility failure. No shared bug." + }, + { + "left": "issue:44938", + "right": "issue:45381", + "accept": false, + "reason": "Different versioned Python/Qwen-VL issues: general Python 3.14 load failure vs Qwen2.5-VL video `vision_position_ids` mismatch." + }, + { + "left": "issue:44360", + "right": "issue:45440", + "accept": false, + "reason": "The DSA indexer discussion is unrelated to DeepseekV3MoE divergence from remote implementation." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 123, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:28282", + "issue:33453", + "issue:36296", + "issue:42890", + "issue:43502", + "issue:43575", + "issue:43618", + "issue:43644", + "issue:43867", + "issue:44062", + "issue:44077", + "issue:44112", + "issue:44484", + "issue:44857", + "issue:44938", + "issue:44964", + "issue:44977" + ], + "soft_pairs": [ + "issue:43618|issue:44077", + "issue:43644|issue:44938", + "issue:36296|issue:43575", + "issue:43867|issue:44484", + "issue:42890|issue:44112", + "issue:42890|issue:44964", + "issue:43618|issue:44857", + "issue:28282|issue:33453", + "issue:43618|issue:44977", + "issue:43502|issue:44062" + ], + "prepared_review_unit_hash": "c3bfe4103107eeb8be315646ccf7e06c203fd2dd0859e24315d4cb839e7e34de", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12610, + "estimated_input_tokens": 3153, + "estimated_eval_tokens": 6562 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is highly heterogeneous: the issues cover unrelated bugs in model loading, tensor parallelism, CI flakiness, buffer initialization, tokenizer construction, and config/runtime behavior. 
The shared wording around \"load\" or \"bug\" is superficial; there is no strong duplicate group here.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue stands out because the items describe different concrete failures in different code paths and models.", + "canonical_pr_reason": null, + "best_issue_reason": "If forced to pick the most self-contained report, issue 43502 is a clear reproducible bug description, but it is not representative of the rest of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43618", + "right": "issue:44077", + "accept": false, + "reason": "Different problems: CLIPOutput attentions missing vs PatchTSMixer post_init allowance. No shared code-path bug." + }, + { + "left": "issue:43644", + "right": "issue:44938", + "accept": false, + "reason": "One is buffer initialization junk in Transformers 5.0.0; the other is a Python 3.14 import/load failure. Unrelated." + }, + { + "left": "issue:36296", + "right": "issue:43575", + "accept": false, + "reason": "Both mention tensor parallelism, but one is a generic training bug and the other is an OOM when loading a specific Qwen model. Not the same bug." + }, + { + "left": "issue:43867", + "right": "issue:44484", + "accept": false, + "reason": "State-dict loading error and a question about save_pretrained shard size are different behaviors and code paths." + }, + { + "left": "issue:42890", + "right": "issue:44112", + "accept": false, + "reason": "Both are flaky tests, but for different models and causes; not the same underlying failure." + }, + { + "left": "issue:42890", + "right": "issue:44964", + "accept": false, + "reason": "Test seed flakiness vs model loading failure for Phi-4 multimodal. Different issues." + }, + { + "left": "issue:43618", + "right": "issue:44857", + "accept": false, + "reason": "CLIPOutput attentions regression and LwDetrImageLoss AMP/CUDA crash are unrelated." 
+ }, + { + "left": "issue:28282", + "right": "issue:33453", + "accept": false, + "reason": "PyTorch missing ImportError vs tokenizer-loading regression. Different subsystems and failure modes." + }, + { + "left": "issue:43618", + "right": "issue:44977", + "accept": false, + "reason": "Missing CLIPOutput attentions is unrelated to Qwen3.5 generation problems with flash-attention." + }, + { + "left": "issue:43502", + "right": "issue:44062", + "accept": false, + "reason": "Local-files-only API leakage and AddedToken keyword-argument error are unrelated bugs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 124, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42548", + "issue:42890", + "issue:43010", + "issue:43065", + "issue:43232", + "issue:43723", + "issue:44075", + "issue:44222", + "issue:44351", + "issue:44355", + "issue:44387", + "issue:44556", + "issue:44749", + "issue:45125", + "issue:45341", + "issue:45375", + "issue:45478" + ], + "soft_pairs": [ + "issue:43010|issue:44075", + "issue:42890|issue:45341", + "issue:43723|issue:44556", + "issue:42548|issue:44351", + "issue:44387|issue:44556", + "issue:43010|issue:43065", + "issue:43010|issue:43232", + "issue:44222|issue:45125", + "issue:44355|issue:44749", + "issue:45125|issue:45478", + "issue:45125|issue:45375" + ], + "prepared_review_unit_hash": "f766ccf5eababff706548df5c6cd1782d7f07389ffed24e7090e643593dcf589", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + 
"node_count": 17, + "item_count": 17, + "soft_pair_count": 11, + "serialized_chars": 12995, + "estimated_input_tokens": 3249, + "estimated_eval_tokens": 6754 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: it contains several small thematic groups (Qwen3.5 MoE config/tensor-parallel issues, v5 loading/reload regressions, import/export failures, and unrelated test/cache/generation bugs), but the soft pairs are not close enough to be true duplicates.", + "confidence": 0.78, + "canonical_issue_reason": "issue:45478 is the strongest representative of a concrete, user-facing regression: a specific from_pretrained failure for Qwen3.5.5 MoE on transformers>=5.4.0, with a clear runtime symptom and current relevance.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45478 is the best global issue candidate because it is specific, reproducible, and broad enough to matter to affected users, while the other issues are either narrower, older migration problems, or clearly different defects.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43010", + "right": "issue:44075", + "accept": false, + "reason": "Both mention model/optimizer behavior, but one is about cache/layer update under no_grad and the other is about SGD args being ignored; different code paths and fixes." + }, + { + "left": "issue:42890", + "right": "issue:45341", + "accept": false, + "reason": "These are unrelated test flakiness reports in different utilities/models; no shared bug mechanism is evident." + }, + { + "left": "issue:43723", + "right": "issue:44556", + "accept": false, + "reason": "Both are v5 loading-related, but one is tokenizer import/loading and the other is checkpoint reload after upgrading versions; related theme, but not the same concrete failure." 
+ }, + { + "left": "issue:42548", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but for different symbols (PreTrainedModel vs HybridCache) and likely different missing exports or package issues." + }, + { + "left": "issue:44387", + "right": "issue:44556", + "accept": false, + "reason": "One is an int4 quantization memory/OOM regression, the other is a model reload compatibility issue; different symptoms and fixes." + }, + { + "left": "issue:43010", + "right": "issue:43065", + "accept": false, + "reason": "Cache update no_grad and Sam3PixelDecoder dummy Conv2d are unrelated implementation concerns." + }, + { + "left": "issue:43010", + "right": "issue:43232", + "accept": false, + "reason": "Generation kwargs syncing and cache update decoration are different generation internals, but not the same bug." + }, + { + "left": "issue:44222", + "right": "issue:45125", + "accept": false, + "reason": "FP8 save_pretrained for MoE and missing _tp_plan for tensor parallelism affect different features and failure modes." + }, + { + "left": "issue:44355", + "right": "issue:44749", + "accept": false, + "reason": "Both concern a slowdown/behavior change after upgrading versions, but one is compiled Python file errors and the other is data filtering becoming much slower; distinct issues." + }, + { + "left": "issue:45125", + "right": "issue:45478", + "accept": false, + "reason": "Both involve Qwen3.5 MoE, but one is missing tensor-parallel planning and the other is a from_pretrained error in newer transformers; not the same concrete bug." + }, + { + "left": "issue:45125", + "right": "issue:45375", + "accept": false, + "reason": "Missing _tp_plan and missing deepstack_visual_indexes are different config/model metadata omissions." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 125, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:30990", + "issue:31795", + "issue:36296", + "issue:38175", + "issue:42890", + "issue:43010", + "issue:43299", + "issue:43502", + "issue:44075", + "issue:44315", + "issue:44355", + "issue:44360", + "issue:44368", + "issue:44373", + "issue:44485", + "issue:44556", + "issue:44655", + "issue:44961" + ], + "soft_pairs": [ + "issue:30990|issue:31795", + "issue:43502|issue:44485", + "issue:44315|issue:44360", + "issue:36296|issue:44075", + "issue:43010|issue:44368", + "issue:38175|issue:43502", + "issue:42890|issue:44373", + "issue:43299|issue:44556", + "issue:43502|issue:44961", + "issue:44355|issue:44655" + ], + "prepared_review_unit_hash": "810fee4be05e9bd413beb97dc672a6d77e4ed201a36c5d76fb6c4ba3592f9eb0", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13169, + "estimated_input_tokens": 3293, + "estimated_eval_tokens": 6842 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are largely unrelated issues spanning docs, model loading, training, tests, and optimization. No soft pair looks like the same underlying bug, so none should be merged as duplicates. 
If a single issue must be chosen as the best representative, the clearest concrete user-facing bug is #43502.", + "confidence": 0.96, + "canonical_issue_reason": "#43502 is the most concrete and reproducible runtime bug in the set; the rest are mostly unrelated or narrower follow-ups.", + "canonical_pr_reason": null, + "best_issue_reason": "#43502 describes a specific, actionable failure mode with clear user impact ('local_files_only=True' still triggering network access).", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:30990", + "right": "issue:31795", + "accept": false, + "reason": "Different problems: model loading hangs vs confusing forward() docs for inputs/caching." + }, + { + "left": "issue:43502", + "right": "issue:44485", + "accept": false, + "reason": "Different subsystems and symptoms: offline file loading/network requests vs GLM-5 RoPE implementation discussion." + }, + { + "left": "issue:44315", + "right": "issue:44360", + "accept": false, + "reason": "Unrelated bugs: Liger kernel not applied with model_init vs DSA indexer ReLU discussion." + }, + { + "left": "issue:36296", + "right": "issue:44075", + "accept": false, + "reason": "Different training/optimization issues: tensor parallel bug vs SGD arguments not being used." + }, + { + "left": "issue:43010", + "right": "issue:44368", + "accept": false, + "reason": "Different concerns: no_grad decoration for cache/layer updates vs a tie_word_embeddings warning during LoRA fine-tuning." + }, + { + "left": "issue:38175", + "right": "issue:43502", + "accept": false, + "reason": "Different model behavior: zero probabilities in SigLIP2 output vs offline loading still making API requests." + }, + { + "left": "issue:42890", + "right": "issue:44373", + "accept": false, + "reason": "Different scope: flaky integration test due to missing seeds vs a documentation typo for position_ids." 
+ }, + { + "left": "issue:43299", + "right": "issue:44556", + "accept": false, + "reason": "Both are loading/version-related, but the concrete failures differ: Qwen3VL MoE load breakage vs checkpoint reload incompatibility across versions." + }, + { + "left": "issue:43502", + "right": "issue:44961", + "accept": false, + "reason": "No substantive similarity; 'racoon' is not evidence of the same bug as offline network requests." + }, + { + "left": "issue:44355", + "right": "issue:44655", + "accept": false, + "reason": "Different failure modes: compiled Python file errors vs inability to save Pipeline objects." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 126, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:33453", + "issue:34689", + "issue:36296", + "issue:38617", + "issue:42831", + "issue:42890", + "issue:43232", + "issue:43576", + "issue:43673", + "issue:43824", + "issue:43827", + "issue:43874", + "issue:44222", + "issue:44230", + "issue:44355", + "issue:44492", + "issue:44961", + "issue:45071" + ], + "soft_pairs": [ + "issue:44222|issue:44230", + "issue:44355|issue:45071", + "issue:42831|issue:43874", + "issue:42890|issue:43232", + "issue:33453|issue:34689", + "issue:33453|issue:36296", + "issue:43827|issue:44961", + "issue:43576|issue:45071", + "issue:43576|issue:43824", + "issue:43673|issue:44492", + "issue:38617|issue:43576" + ], + "prepared_review_unit_hash": "f5a87a3c372b1121e6940665d64bcec4d9cf63706e0266d57149f6faa67a2277", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + 
"estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13420, + "estimated_input_tokens": 3355, + "estimated_eval_tokens": 6966 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a loose semantic grouping of unrelated Transformers issues (v5 regressions, loading/import failures, generation/cache bugs, FP8, docs, tests). None of the soft pairs look like the same underlying bug or change, so I would not merge any of them as duplicates.", + "confidence": 0.23, + "canonical_issue_reason": "Issue 43576 is the closest thing to a thematic anchor because it is a broad v5 breakage report and has the most cluster-like linkage, but the overall set is too heterogeneous to treat as a true duplicate family.", + "canonical_pr_reason": null, + "best_issue_reason": "43576 is the best available representative only by breadth and centrality, not because it matches the other reports; the remaining issues each point to different code paths or failure modes.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44222", + "right": "issue:44230", + "accept": false, + "reason": "Both mention FP8/MoE, but one is about save_pretrained and the other about inference support; different behaviors and likely different fixes." + }, + { + "left": "issue:44355", + "right": "issue:45071", + "accept": false, + "reason": "Compiled Python file errors and PretrainedConfig type-checking breakage are unrelated problems." + }, + { + "left": "issue:42831", + "right": "issue:43874", + "accept": false, + "reason": "Accuracy regression in FineGrainedFP8 is not the same as a missing method causing an AttributeError in GLM46V image token counting." 
+ }, + { + "left": "issue:42890", + "right": "issue:43232", + "accept": false, + "reason": "A flaky integration test due to missing set_seed is separate from a generation kwargs bug after sync_gpus." + }, + { + "left": "issue:33453", + "right": "issue:34689", + "accept": false, + "reason": "Both are loading-related, but tokenizer loading regression and a specific model-loading failure are different failure modes." + }, + { + "left": "issue:33453", + "right": "issue:36296", + "accept": false, + "reason": "Tokenizer loading regression and tensor-parallel training bug do not look like the same defect." + }, + { + "left": "issue:43827", + "right": "issue:44961", + "accept": false, + "reason": "Docs referencing removed pipeline() is unrelated to the racoon issue." + }, + { + "left": "issue:43576", + "right": "issue:45071", + "accept": false, + "reason": "Both are v5 regressions, but one is an env CLI command issue and the other is a type-checking breakage in PretrainedConfig." + }, + { + "left": "issue:43576", + "right": "issue:43824", + "accept": false, + "reason": "Broken env command and a missing model import are separate v5 breakages in different code paths." + }, + { + "left": "issue:43673", + "right": "issue:44492", + "accept": false, + "reason": "Generation cache missing during chunked prefill is unrelated to a typo in cache strategy docs/code." + }, + { + "left": "issue:38617", + "right": "issue:43576", + "accept": false, + "reason": "An import error from configuration_utils is not the same as the broken env command." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 127, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:31795", + "issue:33357", + "issue:33453", + "issue:36296", + "issue:38617", + "issue:41628", + "issue:42548", + "issue:43526", + "issue:43576", + "issue:43901", + "issue:44273", + "issue:44351", + "issue:44485", + "issue:44908", + "issue:44938", + "issue:44961", + "issue:45003", + "issue:45468" + ], + "soft_pairs": [ + "issue:33357|issue:33453", + "issue:41628|issue:42548", + "issue:44961|issue:45468", + "issue:31795|issue:38617", + "issue:43576|issue:44351", + "issue:44908|issue:44961", + "issue:43576|issue:44938", + "issue:44485|issue:44961", + "issue:44273|issue:45003", + "issue:36296|issue:43901", + "issue:36296|issue:43526" + ], + "prepared_review_unit_hash": "db88692f20a49d561f0739affc22e457a50cf8ff6b6615504ebeaa044057e72c", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13329, + "estimated_input_tokens": 3333, + "estimated_eval_tokens": 6922 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are a grab bag of unrelated issues: import errors, docs regressions, scheduler behavior, model-specific bugs, and lazy-loading problems. 
None of the soft-edge pairs look like the same underlying bug or change, so I rejected all of them. If a single anchor is needed, issue #45003 is the most concrete standalone bug report in the set.", + "confidence": 0.95, + "canonical_issue_reason": "Issue #45003 is the most concrete, actionable bug report here: it names a specific unsafe sys.modules access path and describes a clear runtime failure mode.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #45003 is the best representative issue in this set because it is specific, technical, and actionable, unlike several broader or documentation-only reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:33357", + "right": "issue:33453", + "accept": false, + "reason": "MacOS bus error in a CLIP model load is not the same bug as a tokenizer-loading regression; the failure modes and likely code paths differ." + }, + { + "left": "issue:41628", + "right": "issue:42548", + "accept": false, + "reason": "Both are import errors, but for different symbols with different causes; this is too broad to treat as the same underlying issue." + }, + { + "left": "issue:44961", + "right": "issue:45468", + "accept": false, + "reason": "'racoon' is unrelated to Gemma-4 audio positional encoding; no shared code path or failure signature is evident." + }, + { + "left": "issue:31795", + "right": "issue:38617", + "accept": false, + "reason": "Documentation confusion about forward arguments is unrelated to an ImportError for layer_type_validation." + }, + { + "left": "issue:43576", + "right": "issue:44351", + "accept": false, + "reason": "A broken env CLI command and a HybridCache import error are different problems affecting different surfaces." + }, + { + "left": "issue:44908", + "right": "issue:44961", + "accept": false, + "reason": "Scheduler kwargs being ignored is unrelated to the 'racoon' issue; there is no evidence of a shared bug." 
+ }, + { + "left": "issue:43576", + "right": "issue:44938", + "accept": false, + "reason": "A v5 env command regression and a Python 3.14 load failure are different failures, not one concrete bug." + }, + { + "left": "issue:44485", + "right": "issue:44961", + "accept": false, + "reason": "GLM-5 RoPE implementation discussion does not match the unrelated 'racoon' issue." + }, + { + "left": "issue:44273", + "right": "issue:45003", + "accept": false, + "reason": "Both mention loading/lazy behavior, but one is a broad lazy-loading report and the other is a specific sys.modules access bug; not the same underlying issue." + }, + { + "left": "issue:36296", + "right": "issue:43901", + "accept": false, + "reason": "A tensor-parallel training bug is unrelated to a docs/behavior mismatch in TextClassificationPipeline." + }, + { + "left": "issue:36296", + "right": "issue:43526", + "accept": false, + "reason": "Tensor parallel training and BeitImageProcessorFast label reduction are different subsystems and different bug types." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 128, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:36296", + "issue:41628", + "issue:41669", + "issue:43012", + "issue:43576", + "issue:43673", + "issue:44038", + "issue:44261", + "issue:44355", + "issue:44360", + "issue:44393", + "issue:44556", + "issue:44623", + "issue:45071", + "issue:45092", + "issue:45125", + "issue:45230" + ], + "soft_pairs": [ + "issue:41628|issue:44355", + "issue:36296|issue:43012", + "issue:44623|issue:45230", + "issue:44261|issue:44360", + "issue:43576|issue:44556", + "issue:44393|issue:45071", + "issue:41669|issue:43673", + "issue:44038|issue:45125", + "issue:44556|issue:45092" + ], + "prepared_review_unit_hash": "643abf8bdf78d0808345718941c351412ecf2514c861503511964270023ae973", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12216, + "estimated_input_tokens": 3054, + "estimated_eval_tokens": 6364 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the items cover unrelated bugs in import/export, compilation, config typing, generation, tensor parallelism, and model-specific compatibility. 
None of the soft pairs look like the same underlying issue, so there is no strong duplicate canonicalization here.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:44623 is the clearest, most actionable report in the set: it describes a specific save_pretrained() missing-files failure with a concrete symptom, even though it does not unify the rest of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41628", + "right": "issue:44355", + "accept": false, + "reason": "Different failure modes: AutoImageProcessor import failure vs errors running compiled Python files; no shared code path or fix." + }, + { + "left": "issue:36296", + "right": "issue:43012", + "accept": false, + "reason": "Tensor-parallel training bug and bf16 precision warning are unrelated symptoms in different parts of the stack." + }, + { + "left": "issue:44623", + "right": "issue:45230", + "accept": false, + "reason": "44623 is a specific processor.save_pretrained file-loss bug; 45230 is a generic, underspecified bug report with no evidence of the same issue." + }, + { + "left": "issue:44261", + "right": "issue:44360", + "accept": false, + "reason": "MLA q_a_layernorm rms_norm_eps precision issue and DSA indexer missing ReLU are distinct model/kernel bugs." + }, + { + "left": "issue:43576", + "right": "issue:44556", + "accept": false, + "reason": "transformers env CLI breakage and checkpoint reload incompatibility after upgrade are different user-facing problems." + }, + { + "left": "issue:44393", + "right": "issue:45071", + "accept": false, + "reason": "Qwen3-VL 2D bounding-box output issues are model-specific generation problems, while PretrainedConfig type checking is a separate API/type-system regression." 
+ }, + { + "left": "issue:41669", + "right": "issue:43673", + "accept": false, + "reason": "Import-* cleanup in models and missing GenerationMixin cache in chunked_prefill are not the same bug and would not be merged as one fix." + }, + { + "left": "issue:44038", + "right": "issue:45125", + "accept": false, + "reason": "Both mention Qwen3 MoE, but one is a general v5 bug and the other is a missing _tp_plan for tensor parallelism; different concrete code-path failures." + }, + { + "left": "issue:44556", + "right": "issue:45092", + "accept": false, + "reason": "Both concern v5 compatibility, but one is checkpoint reload after upgrade and the other is remote-code/meta-init incompatibility for InternVL2; too different to be duplicates." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 129, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42371", + "issue:42890", + "issue:43352", + "issue:43408", + "issue:43576", + "issue:43976", + "issue:44246", + "issue:44273", + "issue:44295", + "issue:44297", + "issue:44315", + "issue:44393", + "issue:44492", + "issue:44704", + "issue:44908", + "issue:45071", + "issue:45230" + ], + "soft_pairs": [ + "issue:42371|issue:44492", + "issue:42890|issue:44908", + "issue:44704|issue:44908", + "issue:44246|issue:44273", + "issue:44297|issue:45230", + "issue:43408|issue:44315", + "issue:43576|issue:44908", + "issue:43352|issue:44393", + "issue:43976|issue:44393", + "issue:44295|issue:45230", + "issue:44908|issue:45071" + ], + "prepared_review_unit_hash": "e64fdc118c70d4f58579559ed04d5cc0533116fc97f677a4125ed8917966b9a0", + "component_budget": { + 
"node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 11, + "serialized_chars": 12906, + "estimated_input_tokens": 3227, + "estimated_eval_tokens": 6710 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a grab bag of unrelated issue reports; there are no PRs. The only plausible duplicate pair is the lazy-loading/import regression pair (#44246, #44273).", + "confidence": 0.89, + "canonical_issue_reason": "#44273 is the most direct and specific report of the shared lazy-loading problem; #44246 reads like a symptom of the same import/lazy-loading regression.", + "canonical_pr_reason": null, + "best_issue_reason": "#44273 is the best representative issue because it names the underlying mechanism (lazy loading) rather than just the downstream symptom (slow import).", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42371", + "right": "issue:44492", + "accept": false, + "reason": "TF32 settings and a cache-strategy typo are unrelated topics with no shared bug path." + }, + { + "left": "issue:42890", + "right": "issue:44908", + "accept": false, + "reason": "Missing set_seed in integration tests is unrelated to scheduler kwargs handling." + }, + { + "left": "issue:44704", + "right": "issue:44908", + "accept": false, + "reason": "Both involve ignored kwargs, but in different code paths and components; not the same bug." + }, + { + "left": "issue:44246", + "right": "issue:44273", + "accept": true, + "reason": "Both point to the same lazy-loading/import regression: one reports slow imports, the other says lazy loading is not working properly." 
+ }, + { + "left": "issue:44297", + "right": "issue:45230", + "accept": false, + "reason": "Tokenizer save_pretrained metadata mismatch is unrelated to an unspecified generic bug report." + }, + { + "left": "issue:43408", + "right": "issue:44315", + "accept": false, + "reason": "SAM tracker/model-type warning and Liger Kernel application in model_init are separate issues." + }, + { + "left": "issue:43576", + "right": "issue:44908", + "accept": false, + "reason": "A broken env command in v5 and a scheduler kwarg bug are different features with different failure modes." + }, + { + "left": "issue:43352", + "right": "issue:44393", + "accept": false, + "reason": "Flash Attention support for Nemotron and Qwen3-VL 2D bbox errors are unrelated model-specific bugs." + }, + { + "left": "issue:43976", + "right": "issue:44393", + "accept": false, + "reason": "Python version compatibility and Qwen3-VL output errors are unrelated." + }, + { + "left": "issue:44295", + "right": "issue:45230", + "accept": false, + "reason": "Position_ids buffer access error is too specific and unrelated to an unspecified generic bug report." + }, + { + "left": "issue:44908", + "right": "issue:45071", + "accept": false, + "reason": "Scheduler kwargs being ignored and PretrainedConfig type-checking regressions are different problems." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 130, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:31795", + "issue:33453", + "issue:41669", + "issue:43723", + "issue:43827", + "issue:44077", + "issue:44315", + "issue:44351", + "issue:44393", + "issue:44492", + "issue:44655", + "issue:44829", + "issue:44861", + "issue:44908", + "issue:44945", + "issue:45341", + "issue:45478" + ], + "soft_pairs": [ + "issue:41669|issue:44351", + "issue:41669|issue:45341", + "issue:44315|issue:44829", + "issue:44492|issue:44945", + "issue:41669|issue:44492", + "issue:44393|issue:45478", + "issue:44077|issue:44861", + "issue:31795|issue:33453", + "issue:43827|issue:44908", + "issue:41669|issue:44908", + "issue:43723|issue:44393", + "issue:44655|issue:44861" + ], + "prepared_review_unit_hash": "aafcbf85e8a880f2b557e502c1b7e5b1c2b4c0b13f03717cd06ecf5d0e4ddd55", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 12, + "serialized_chars": 13239, + "estimated_input_tokens": 3310, + "estimated_eval_tokens": 6876 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a mix of unrelated issues: docs, tokenizer/model loading, v5 migration fallout, cache/type errors, training bugs, and model-specific failures. 
I do not see a single underlying bug or change linking them as duplicates.", + "confidence": 0.93, + "canonical_issue_reason": "No exact canonical issue: the items span several distinct problems rather than one shared defect.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 41669 is the closest cluster hub by discussion/activity and broad loader/import-related scope, but it is still not a true representative for the rest of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41669", + "right": "issue:44351", + "accept": false, + "reason": "Import cleanup/perf regression and a HybridCache import failure are different problems with different failure modes." + }, + { + "left": "issue:41669", + "right": "issue:45341", + "accept": false, + "reason": "A model import/perf issue and a testing_utils bug are unrelated." + }, + { + "left": "issue:44315", + "right": "issue:44829", + "accept": false, + "reason": "Both involve model creation/training, but one is about Liger Kernel application with model_init and the other is a flash_attention_3 training degeneration bug; not the same code-path failure." + }, + { + "left": "issue:44492", + "right": "issue:44945", + "accept": false, + "reason": "A cache-strategy typo and incorrect pipeline-parallel output are unrelated." + }, + { + "left": "issue:41669", + "right": "issue:44492", + "accept": false, + "reason": "Import-* cleanup and a cache-strategy typo are not the same underlying issue." + }, + { + "left": "issue:44393", + "right": "issue:45478", + "accept": false, + "reason": "Both are Qwen-related, but one is a vision/VL bounding-box output problem and the other is a from_pretrained failure in Qwen3.5 MoE; different bugs." + }, + { + "left": "issue:44077", + "right": "issue:44861", + "accept": false, + "reason": "Optional post_init validation for patchtsmixer and an AttributeError in tied-weight key handling are unrelated." 
+ }, + { + "left": "issue:31795", + "right": "issue:33453", + "accept": false, + "reason": "Documentation confusion around forward args is not a duplicate of a tokenizer loading regression." + }, + { + "left": "issue:43827", + "right": "issue:44908", + "accept": false, + "reason": "Pipeline-removal documentation drift and inverse_sqrt scheduler kwargs handling are different issues." + }, + { + "left": "issue:41669", + "right": "issue:44908", + "accept": false, + "reason": "A model import/perf issue does not match a scheduler kwargs bug." + }, + { + "left": "issue:43723", + "right": "issue:44393", + "accept": false, + "reason": "Tokenizer loading in v5 and Qwen3-VL output errors affect different subsystems and behaviors." + }, + { + "left": "issue:44655", + "right": "issue:44861", + "accept": false, + "reason": "Saving Pipeline objects and tied-weight-key attribute handling are separate code paths and bugs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 131, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:31795", + "issue:34689", + "issue:37428", + "issue:41669", + "issue:42371", + "issue:43519", + "issue:43576", + "issue:43704", + "issue:44273", + "issue:44315", + "issue:44360", + "issue:44393", + "issue:44485", + "issue:44908", + "issue:45071", + "issue:45230", + "issue:45310", + "issue:45468" + ], + "soft_pairs": [ + "issue:43576|issue:44273", + "issue:44360|issue:45468", + "issue:45230|issue:45310", + "issue:44273|issue:44908", + "issue:41669|issue:43704", + "issue:43519|issue:44485", + "issue:43519|issue:44908", + "issue:31795|issue:34689", + "issue:31795|issue:37428", + 
"issue:44315|issue:44393", + "issue:44273|issue:45071", + "issue:43704|issue:44485", + "issue:44485|issue:45468", + "issue:42371|issue:43704" + ], + "prepared_review_unit_hash": "e27e06600d74effacb209489a79ab138e0906942517d08dd3361f55cade42f14", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 14, + "serialized_chars": 14276, + "estimated_input_tokens": 3569, + "estimated_eval_tokens": 7394 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No soft pair looks like a true duplicate; the matches are broad topical similarities at best, not the same underlying bug or change. The strongest standalone issue is the Qwen3.5 MoE from_pretrained regression because it is concrete and version-scoped.", + "confidence": 0.91, + "canonical_issue_reason": "issue:45310 is specific, reproducible, and version-scoped, making it a better canonical issue than the vague or overly broad reports in this set.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45310 has a clear model, API path, and failing version range, so it is the most actionable and representative issue here.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43576", + "right": "issue:44273", + "accept": false, + "reason": "Different problems: one is a broken CLI command in v5, the other is a generic lazy-loading complaint. Same release area, but not the same bug." + }, + { + "left": "issue:44360", + "right": "issue:45468", + "accept": false, + "reason": "Unrelated model/component issues: DSA indexer ReLU discussion vs Gemma-4 audio positional encoding bug." 
+ }, + { + "left": "issue:45230", + "right": "issue:45310", + "accept": false, + "reason": "issue:45230 is too generic to prove identity; 45310 is a specific Qwen3.5 MoE from_pretrained regression. No evidence they are the same bug." + }, + { + "left": "issue:44273", + "right": "issue:44908", + "accept": false, + "reason": "Lazy loading and inverse_sqrt scheduler kwargs are unrelated subsystems and failure modes." + }, + { + "left": "issue:41669", + "right": "issue:43704", + "accept": false, + "reason": "One is about import-star usage in models, the other about VRAM leakage in Qwen3ForCausalLM across dataloader threads. Different code paths and symptoms." + }, + { + "left": "issue:43519", + "right": "issue:44485", + "accept": false, + "reason": "Timestamp calculation in Qwen3VL Processor and GLM-5 RoPE implementation are different model-specific bugs." + }, + { + "left": "issue:43519", + "right": "issue:44908", + "accept": false, + "reason": "Vision timestamp math and scheduler kwarg handling are unrelated issues." + }, + { + "left": "issue:31795", + "right": "issue:34689", + "accept": false, + "reason": "Documentation confusion versus a model loading regression are not the same underlying problem." + }, + { + "left": "issue:31795", + "right": "issue:37428", + "accept": false, + "reason": "Docs wording issue and flash-attention import error are distinct; no shared failing code path is evident." + }, + { + "left": "issue:44315", + "right": "issue:44393", + "accept": false, + "reason": "Liger Kernel application during model_init and Qwen3-VL 2D bbox output errors are different behaviors in different components." + }, + { + "left": "issue:44273", + "right": "issue:45071", + "accept": false, + "reason": "Lazy loading complaint and PretrainedConfig type-checking breakage are separate issues, even if both relate to v5 changes." 
+ }, + { + "left": "issue:43704", + "right": "issue:44485", + "accept": false, + "reason": "VRAM leak in multi-threaded loading and RoPE implementation are unrelated." + }, + { + "left": "issue:44485", + "right": "issue:45468", + "accept": false, + "reason": "GLM-5 RoPE discussion and Gemma-4 audio positional encoding are different model-specific implementations." + }, + { + "left": "issue:42371", + "right": "issue:43704", + "accept": false, + "reason": "TF32 behavior settings and Qwen3ForCausalLM VRAM leakage are unrelated; same project area is not enough." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 132, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:42890", + "issue:42913", + "issue:43066", + "issue:43502", + "issue:43519", + "issue:43606", + "issue:43618", + "issue:43704", + "issue:44261", + "issue:44263", + "issue:44492", + "issue:44623", + "issue:44861", + "issue:44908", + "issue:44998", + "issue:45290", + "issue:45341", + "issue:45468" + ], + "soft_pairs": [ + "issue:43502|issue:43519", + "issue:43519|issue:44623", + "issue:43519|issue:43704", + "issue:44263|issue:45468", + "issue:44261|issue:45468", + "issue:43606|issue:45468", + "issue:42913|issue:43066", + "issue:42890|issue:43704", + "issue:44908|issue:44998", + "issue:44861|issue:45290", + "issue:44998|issue:45341", + "issue:44492|issue:44998", + "issue:43618|issue:44998" + ], + "prepared_review_unit_hash": "34a816c11f9bfd7e098f5f80c2cbfc74bc1596a2c10c31a3ea6b6e828569ddfd", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + 
"estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 13, + "serialized_chars": 14035, + "estimated_input_tokens": 3509, + "estimated_eval_tokens": 7274 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues do not form a true duplicate cluster: they span unrelated tokenizer, processor, model, scheduler, cache, and test bugs. No soft edge should be merged.", + "confidence": 0.98, + "canonical_issue_reason": "No single canonical issue exists here because the items are not the same underlying bug; they are mostly unrelated standalone reports.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45290 is the strongest standalone report: it describes a specific crash condition in apply_chat_template with a clear edge case. Even so, it is not a canonical duplicate representative for the whole set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43502", + "right": "issue:43519", + "accept": false, + "reason": "Both are API/processor-related, but one is about local_files_only network calls and the other about Qwen3VL timestamp math; different code paths and bugs." + }, + { + "left": "issue:43519", + "right": "issue:44623", + "accept": false, + "reason": "Processor timestamp calculation vs save_pretrained missing files are unrelated failures in different subsystems." + }, + { + "left": "issue:43519", + "right": "issue:43704", + "accept": false, + "reason": "Qwen3VL timestamp computation and VRAM leakage in Qwen3ForCausalLM are distinct issues with different symptoms and fixes." + }, + { + "left": "issue:44263", + "right": "issue:45468", + "accept": false, + "reason": "torch.split return handling in GlmMoeDsaIndexer is unrelated to Gemma4AudioRelPositionalEncoding." 
+ }, + { + "left": "issue:44261", + "right": "issue:45468", + "accept": false, + "reason": "Missing rms_norm_eps in MLA q_a_layernorm is a precision/config bug, not the same as Gemma-4 audio positional encoding." + }, + { + "left": "issue:43606", + "right": "issue:45468", + "accept": false, + "reason": "CPU offload device mismatch for bark-small is unrelated to Gemma-4 audio positional encoding." + }, + { + "left": "issue:42913", + "right": "issue:43066", + "accept": false, + "reason": "Both mention tokenizer v5, but one reports a broad behavior change from v4 and the other a specific decoder-type bug; not the same concrete defect." + }, + { + "left": "issue:42890", + "right": "issue:43704", + "accept": false, + "reason": "A flaky integration test due to missing set_seed is unrelated to VRAM leakage in Qwen3ForCausalLM." + }, + { + "left": "issue:44908", + "right": "issue:44998", + "accept": false, + "reason": "inverse_sqrt scheduler kwargs are unrelated to an issue titled 'Unemployment'." + }, + { + "left": "issue:44861", + "right": "issue:45290", + "accept": false, + "reason": "_get_tied_weight_keys AttributeError and apply_chat_template crashing on tool-call assistant messages are separate code paths." + }, + { + "left": "issue:44998", + "right": "issue:45341", + "accept": false, + "reason": "The titles are unrelated; neither indicates the same bug or subsystem." + }, + { + "left": "issue:44492", + "right": "issue:44998", + "accept": false, + "reason": "A cache-strategy typo does not match the unrelated 'Unemployment' issue." + }, + { + "left": "issue:43618", + "right": "issue:44998", + "accept": false, + "reason": "CLIPOutput attentions assignment regression and 'Unemployment' are clearly unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 13, + "review_unit_index": 133, + "review_unit_count": 133, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-359", + "nodes": [ + "issue:31515", + "issue:31795", + "issue:33453", + "issue:36296", + "issue:36683", + "issue:38617", + "issue:43298", + "issue:43525", + "issue:43572", + "issue:44230", + "issue:44556", + "issue:44623", + "issue:44861", + "issue:44998", + "issue:45310" + ], + "soft_pairs": [ + "issue:44556|issue:44998", + "issue:43525|issue:44861", + "issue:43572|issue:44861", + "issue:31515|issue:38617", + "issue:44230|issue:44623", + "issue:31515|issue:36296", + "issue:44230|issue:45310", + "issue:31515|issue:31795", + "issue:31515|issue:33453", + "issue:43298|issue:44230", + "issue:31515|issue:36683" + ], + "prepared_review_unit_hash": "85e71e9da3a939a76a2813c467648668026ec55dc0cc018e9dbaac84131acbdc", + "component_budget": { + "node_count": 359, + "item_count": 359, + "soft_pair_count": 1335, + "serialized_chars": 689788, + "estimated_input_tokens": 172447, + "estimated_eval_tokens": 345150 + }, + "budget": { + "node_count": 15, + "item_count": 15, + "soft_pair_count": 11, + "serialized_chars": 11714, + "estimated_input_tokens": 2929, + "estimated_eval_tokens": 6114 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are mostly unrelated. 
A few revolve around loading/from_pretrained, but they describe different symptoms or code paths (performance, tokenizer regressions, config attributes, version-specific reload failures), so none of the soft candidates should be merged as duplicates.", + "confidence": 0.88, + "canonical_issue_reason": "issue:44556 is the most representative concrete loading bug in the set: it targets from_pretrained/reload breakage after a version upgrade, which is closer to the core loading/reload theme than the performance-only or model-specific reports.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44556 is the best single issue to anchor this cluster because it is specific, actionable, and about an actual checkpoint reload failure rather than a docs or performance complaint.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44556", + "right": "issue:44998", + "accept": false, + "reason": "Completely unrelated: checkpoint reload failure vs an off-topic 'Unemployment' issue." + }, + { + "left": "issue:43525", + "right": "issue:44861", + "accept": false, + "reason": "Different bugs: missing config attribute on Llama4Config vs _tied_weights_keys list handling." + }, + { + "left": "issue:43572", + "right": "issue:44861", + "accept": false, + "reason": "Unrelated code paths: StableLmConfig pad_token_idx regression vs tied-weight key extraction crash." + }, + { + "left": "issue:31515", + "right": "issue:38617", + "accept": false, + "reason": "Different symptoms and subsystems: slow from_pretrained checkpoint loading vs import error for layer_type_validation." + }, + { + "left": "issue:44230", + "right": "issue:44623", + "accept": false, + "reason": "Feature/support request for fp8 inference is not the same as processor.save_pretrained missing files." + }, + { + "left": "issue:31515", + "right": "issue:36296", + "accept": false, + "reason": "Checkpoint loading performance issue is unrelated to tensor-parallel training behavior." 
+ }, + { + "left": "issue:44230", + "right": "issue:45310", + "accept": false, + "reason": "Both mention Qwen models, but one is fp8 inference support and the other is a from_pretrained failure; not the same bug." + }, + { + "left": "issue:31515", + "right": "issue:31795", + "accept": false, + "reason": "Loading speed problem and forward-pass documentation confusion are unrelated." + }, + { + "left": "issue:31515", + "right": "issue:33453", + "accept": false, + "reason": "Both involve loading, but one is a speed complaint and the other is a tokenizer-loading regression; different concrete failures." + }, + { + "left": "issue:43298", + "right": "issue:44230", + "accept": false, + "reason": "Continuous batching audio-model support is a different feature gap from fp8 inference support." + }, + { + "left": "issue:31515", + "right": "issue:36683", + "accept": false, + "reason": "from_pretrained slowness is unrelated to Gemma3Config missing vocab_size." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 3, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-26350-2", + "nodes": [ + "issue:26350", + "issue:33260" + ], + "soft_pairs": [ + "issue:26350|issue:33260" + ], + "prepared_review_unit_hash": "aaf6c048daf322a888ee8aa4206847c55185a3f69683e0795283f8b25dc47495", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1589, + "estimated_input_tokens": 398, + "estimated_eval_tokens": 1052 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1589, + "estimated_input_tokens": 398, + "estimated_eval_tokens": 1052 + }, + 
"overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are two separate feature requests about extending support to different backends/features; they are not duplicates.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "If one must be surfaced, issue 26350 is the older and more discussed tracker, but it is not a duplicate of 33260.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:26350", + "right": "issue:33260", + "accept": false, + "reason": "Different requested capabilities (Flash Attention 2 support vs GGUF support) and likely different implementation paths; the shared theme of broad architecture support is too general to merge them." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 4, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-27088-4", + "nodes": [ + "issue:27088", + "issue:43834", + "issue:44365", + "issue:44370" + ], + "soft_pairs": [ + "issue:27088|issue:44370", + "issue:27088|issue:44365", + "issue:27088|issue:43834", + "issue:43834|issue:44370", + "issue:43834|issue:44365", + "issue:44365|issue:44370" + ], + "prepared_review_unit_hash": "cc91f282234355fa2b4aaa9604c184bfd8f6e8e836d922aa67b3b39faf4d4a00", + "component_budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4455, + "estimated_input_tokens": 1114, + "estimated_eval_tokens": 2484 + }, + "budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4455, + 
"estimated_input_tokens": 1114, + "estimated_eval_tokens": 2484 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These look like separate documentation-translation issue templates/requests rather than duplicates of one concrete task. The Turkish issue is the only specific one; the others are generic language placeholders and should not be merged just on template similarity.", + "confidence": 0.89, + "canonical_issue_reason": "issue:27088 is the only concrete, language-specific request ([i18n-TR] Turkish) and is the best issue to anchor this cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:27088 is the clearest and most specific issue in the set; the others are generic placeholders with no concrete target language visible.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:27088", + "right": "issue:44370", + "accept": false, + "reason": "Both are translation-request issues, but the target language/topic is not the same concrete task; this is not enough to treat them as the same underlying issue." + }, + { + "left": "issue:27088", + "right": "issue:44365", + "accept": false, + "reason": "The Turkish-specific request is distinct from another language translation request; shared template wording does not indicate a duplicate." + }, + { + "left": "issue:27088", + "right": "issue:43834", + "accept": false, + "reason": "This appears to be a different language translation request, not the same bug/change as the Turkish docs translation issue." + }, + { + "left": "issue:43834", + "right": "issue:44370", + "accept": false, + "reason": "These are generic translation-template issues, but there is no evidence they refer to the same language or same concrete task." 
+ }, + { + "left": "issue:43834", + "right": "issue:44365", + "accept": false, + "reason": "High text similarity alone is insufficient; these look like separate translation requests, not one duplicate issue." + }, + { + "left": "issue:44365", + "right": "issue:44370", + "accept": false, + "reason": "The available evidence only shows similar template text, not the same underlying translation request." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 5, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-36817-13", + "nodes": [ + "issue:36817", + "issue:37015", + "issue:41211", + "issue:42503", + "issue:42738", + "issue:43671", + "issue:43837", + "issue:43909", + "issue:44121", + "issue:44205", + "issue:45027", + "issue:45177", + "issue:45322" + ], + "soft_pairs": [ + "issue:43909|issue:44205", + "issue:43671|issue:43837", + "issue:44121|issue:45322", + "issue:43837|issue:43909", + "issue:43837|issue:45322", + "issue:37015|issue:43909", + "issue:43671|issue:45177", + "issue:37015|issue:43837", + "issue:43671|issue:45027", + "issue:37015|issue:44121", + "issue:43837|issue:44121", + "issue:43837|issue:45177", + "issue:43671|issue:43909", + "issue:43671|issue:44121", + "issue:41211|issue:43671", + "issue:36817|issue:41211", + "issue:37015|issue:42503", + "issue:37015|issue:45027", + "issue:36817|issue:43671", + "issue:42738|issue:45027", + "issue:43837|issue:45027", + "issue:42503|issue:43837", + "issue:42503|issue:42738", + "issue:42503|issue:43671" + ], + "prepared_review_unit_hash": "ff22bed71b64b670ea2b2f680c1d17496b90585069ffa734aa19436190181c4c", + "component_budget": { + "node_count": 13, + "item_count": 13, + 
"soft_pair_count": 25, + "serialized_chars": 16006, + "estimated_input_tokens": 4002, + "estimated_eval_tokens": 8260 + }, + "budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 24, + "serialized_chars": 14121, + "estimated_input_tokens": 3531, + "estimated_eval_tokens": 7318 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set; it is a loose collection of unrelated model-add/support requests across text, audio/ASR/TTS, OCR, vision, and architecture-specific models. None of the soft edges look like the same underlying change.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "No single issue is a true canonical representative because the requests cover different models and modalities rather than one shared bug/change.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43909", + "right": "issue:44205", + "accept": false, + "reason": "Different targets: LFM2.5 Audio support vs SAM3-LiteText. Separate model requests." + }, + { + "left": "issue:43671", + "right": "issue:43837", + "accept": false, + "reason": "Same vendor family but different tasks/modality: Qwen3-TTS vs Qwen3-ASR. Not the same underlying change." + }, + { + "left": "issue:44121", + "right": "issue:45322", + "accept": false, + "reason": "Unrelated architectures and use cases: weight-sparse transformer vs EUPE encoder." + }, + { + "left": "issue:43837", + "right": "issue:43909", + "accept": false, + "reason": "Different audio models and support paths: Qwen3-ASR vs LFM2.5 Audio." + }, + { + "left": "issue:43837", + "right": "issue:45322", + "accept": false, + "reason": "ASR support request vs a perception encoder model request; unrelated." 
+ }, + { + "left": "issue:37015", + "right": "issue:43909", + "accept": false, + "reason": "NeoBERT text model request is unrelated to LFM2.5 Audio." + }, + { + "left": "issue:43671", + "right": "issue:45177", + "accept": false, + "reason": "TTS support request vs DeepSeek-OCR2; different modalities and code paths." + }, + { + "left": "issue:37015", + "right": "issue:43837", + "accept": false, + "reason": "NeoBERT addition vs Qwen3-ASR support; distinct models." + }, + { + "left": "issue:43671", + "right": "issue:45027", + "accept": false, + "reason": "Both are audio-related but different concrete models (Qwen3-TTS vs Voxtral TTS) and likely separate integration work." + }, + { + "left": "issue:37015", + "right": "issue:44121", + "accept": false, + "reason": "NeoBERT model addition is not the same as adding a weight-sparse transformer." + }, + { + "left": "issue:43837", + "right": "issue:44121", + "accept": false, + "reason": "ASR model support and sparse transformer support are unrelated changes." + }, + { + "left": "issue:43837", + "right": "issue:45177", + "accept": false, + "reason": "ASR support vs OCR model support; different modalities." + }, + { + "left": "issue:43671", + "right": "issue:43909", + "accept": false, + "reason": "Qwen3-TTS and LFM2.5 Audio are different models; not a duplicate." + }, + { + "left": "issue:43671", + "right": "issue:44121", + "accept": false, + "reason": "TTS support request vs sparse transformer architecture request." + }, + { + "left": "issue:41211", + "right": "issue:43671", + "accept": false, + "reason": "DEIMv2 addition is unrelated to Qwen3-TTS support." + }, + { + "left": "issue:36817", + "right": "issue:41211", + "accept": false, + "reason": "EuroBert config addition vs DEIMv2; separate model requests." + }, + { + "left": "issue:37015", + "right": "issue:42503", + "accept": false, + "reason": "Different model families: NeoBERT vs ModernVBERT." 
+ }, + { + "left": "issue:37015", + "right": "issue:45027", + "accept": false, + "reason": "NeoBERT text model request is unrelated to Voxtral TTS support." + }, + { + "left": "issue:36817", + "right": "issue:43671", + "accept": false, + "reason": "EuroBert config request is unrelated to Qwen3-TTS support." + }, + { + "left": "issue:42738", + "right": "issue:45027", + "accept": false, + "reason": "Generic BERT-like/RoPE discussion vs a specific TTS model support request." + }, + { + "left": "issue:43837", + "right": "issue:45027", + "accept": false, + "reason": "Both are audio-related, but ASR vs TTS for different model families; not the same concrete change." + }, + { + "left": "issue:42503", + "right": "issue:43837", + "accept": false, + "reason": "ModernVBERT model addition vs Qwen3-ASR support; different targets." + }, + { + "left": "issue:42503", + "right": "issue:42738", + "accept": false, + "reason": "ModernVBERT models and BERT-like models with RoPE are not the same request; one is specific, the other is a broad architecture discussion." + }, + { + "left": "issue:42503", + "right": "issue:43671", + "accept": false, + "reason": "ModernVBERT models are unrelated to Qwen3-TTS support." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 5, + "component_count": 13, + "review_unit_index": 2, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-36817-13", + "nodes": [ + "issue:42503", + "issue:45027" + ], + "soft_pairs": [ + "issue:42503|issue:45027" + ], + "prepared_review_unit_hash": "fcf04445269dcf344adc9b86a47051d33e0ecd82f77ee6760afeca428a5efc5e", + "component_budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 25, + "serialized_chars": 16006, + "estimated_input_tokens": 4002, + "estimated_eval_tokens": 8260 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1433, + "estimated_input_tokens": 359, + "estimated_eval_tokens": 974 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are not duplicates: one requests adding ModernVBERT models, while the other requests support for Voxtral-4B-TTS-2603. 
They target different model families and different functionality (generic model addition vs TTS support).", + "confidence": 0.97, + "canonical_issue_reason": "No canonical issue: the two tickets describe different requests for different models and code paths, so neither should subsume the other.", + "canonical_pr_reason": null, + "best_issue_reason": "Neither issue is a good cluster canonical because they are unrelated; if forced to choose a representative, issue #45027 is the more concrete feature request, but it should not absorb #42503.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42503", + "right": "issue:45027", + "accept": false, + "reason": "Different underlying requests: ModernVBERT model addition vs Voxtral-4B-TTS-2603 support. They do not describe the same bug or change and should not be merged." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 6, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-37934-6", + "nodes": [ + "issue:37934", + "issue:42936", + "issue:43240", + "issue:43668", + "issue:43813", + "issue:44031" + ], + "soft_pairs": [ + "issue:37934|issue:43668", + "issue:42936|issue:43240", + "issue:42936|issue:44031", + "issue:42936|issue:43813", + "issue:42936|issue:43668", + "issue:43240|issue:43668", + "issue:43668|issue:44031", + "issue:43668|issue:43813" + ], + "prepared_review_unit_hash": "adfee87fd10a6f609a0cf5f5dd8b7fccee791790523526507cf7bc368d336366", + "component_budget": { + "node_count": 6, + "item_count": 6, + "soft_pair_count": 8, + "serialized_chars": 6292, + "estimated_input_tokens": 1573, + "estimated_eval_tokens": 3402 + }, + "budget": { + 
"node_count": 6, + "item_count": 6, + "soft_pair_count": 8, + "serialized_chars": 6292, + "estimated_input_tokens": 1573, + "estimated_eval_tokens": 3402 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These items are not duplicates; they are a loose cluster of unrelated model/config/tokenizer issues that only share generic ML-library similarity.", + "confidence": 0.96, + "canonical_issue_reason": "No canonical issue: the titles point to different bugs in different subsystems/models, not the same underlying problem.", + "canonical_pr_reason": null, + "best_issue_reason": "No strong representative issue; 42936 is the most discussed, but it does not cover the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:37934", + "right": "issue:43668", + "accept": false, + "reason": "Different model components and bug types: Llama4TextL2Norm vs ModernBERT config type hint." + }, + { + "left": "issue:42936", + "right": "issue:43240", + "accept": false, + "reason": "Mask2Former ignore_value handling is unrelated to kwargs not being passed to a loss function." + }, + { + "left": "issue:42936", + "right": "issue:44031", + "accept": false, + "reason": "Mask2Former ignore_value bug is unrelated to tokenizer regex warning behavior." + }, + { + "left": "issue:42936", + "right": "issue:43813", + "accept": false, + "reason": "Different subsystems: Mask2Former loss masking vs a typo in a quantization attribute name." + }, + { + "left": "issue:42936", + "right": "issue:43668", + "accept": false, + "reason": "Different issues in different models: Mask2Former ignore_value vs ModernBERT norm_eps typing." + }, + { + "left": "issue:43240", + "right": "issue:43668", + "accept": false, + "reason": "Loss kwargs plumbing and config type-hint correctness are unrelated bugs." 
+ }, + { + "left": "issue:43668", + "right": "issue:44031", + "accept": false, + "reason": "ModernBERT config typing issue and tokenizer regex warning are separate problems." + }, + { + "left": "issue:43668", + "right": "issue:43813", + "accept": false, + "reason": "A config type-hint typo is unrelated to a quantization-operation attribute typo." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 7, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41084-2", + "nodes": [ + "issue:41084", + "issue:43037" + ], + "soft_pairs": [ + "issue:41084|issue:43037" + ], + "prepared_review_unit_hash": "2c3661ea485823452c8fda7c4dc3cc85d434aed57fd1b4466414ff0aadf1e892", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1479, + "estimated_input_tokens": 370, + "estimated_eval_tokens": 996 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1479, + "estimated_input_tokens": 370, + "estimated_eval_tokens": 996 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The two issues are not duplicates: one requests a generic \"Set Block Decoding\" capability, while the other asks for \"DeepSeek v3.2 support.\" They describe different features and do not point to the same underlying bug or change.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:43037 is the better representative issue because it is more specific and actionable, naming a concrete model/version 
support request.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41084", + "right": "issue:43037", + "accept": false, + "reason": "Different requested features with no shared code path, target, or explicit linkage; not the same underlying issue." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 8, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43598-2", + "nodes": [ + "issue:43598", + "issue:43600" + ], + "soft_pairs": [ + "issue:43598|issue:43600" + ], + "prepared_review_unit_hash": "06fca2f379985d728236dd16de09c917f28e1c9b3756c5a128045ccccbbeb088", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1584, + "estimated_input_tokens": 396, + "estimated_eval_tokens": 1048 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1584, + "estimated_input_tokens": 396, + "estimated_eval_tokens": 1048 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are two separate Trainer customizability issues: one about `remove_unused_column` behavior, the other about FLOPS estimation for custom trainers. 
They share a broad theme but not the same underlying bug or fix.", + "confidence": 0.94, + "canonical_issue_reason": "Issue 43598 is the more concrete/feature-specific request, focused on revisiting `remove_unused_column` in `Trainer` to improve customizability.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43598 is the best issue anchor because it is narrowly scoped and describes a specific Trainer behavior change request.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43598", + "right": "issue:43600", + "accept": false, + "reason": "Rejected: the titles and likely problems are different. One concerns `remove_unused_column` customization, while the other concerns FLOPS estimation for custom trainers; they do not describe the same bug or change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 9, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43684-2", + "nodes": [ + "issue:43684", + "issue:44405" + ], + "soft_pairs": [ + "issue:43684|issue:44405" + ], + "prepared_review_unit_hash": "c228be1521990ed314b34580fe1c9670fddaf80e7c0c2225989e6d136e529a82", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1612, + "estimated_input_tokens": 403, + "estimated_eval_tokens": 1062 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1612, + "estimated_input_tokens": 403, + "estimated_eval_tokens": 1062 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are both 
model-registration support issues, but they target different model families and different AutoModel entry points: Qwen3-Omni for AutoModel/AutoModelForConditionalGeneration vs. Qwen3.5 for AutoModelForSequenceClassification. They do not look like duplicates of the same underlying change.", + "confidence": 0.95, + "canonical_issue_reason": "Issue #43684 is the better canonical issue because it is broader and more directly about adding model registration support, with more discussion activity.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #43684 is the best representative of the cluster since it covers the general registration gap and is the broader, more canonical support request.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43684", + "right": "issue:44405", + "accept": false, + "reason": "Different models and different AutoModel heads; this is related support work, not the same bug/change. They would not plausibly be merged into one fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 10, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43835-4", + "nodes": [ + "issue:43835", + "issue:43845", + "issue:43846", + "issue:43859" + ], + "soft_pairs": [ + "issue:43835|issue:43845", + "issue:43835|issue:43846", + "issue:43835|issue:43859", + "issue:43845|issue:43846", + "issue:43845|issue:43859", + "issue:43846|issue:43859" + ], + "prepared_review_unit_hash": "326262ddbfa7659213b0f549b61914c101946f82c629fc23b7c2f69999ecb1bc", + "component_budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4266, + "estimated_input_tokens": 1067, + 
"estimated_eval_tokens": 2390 + }, + "budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4266, + "estimated_input_tokens": 1067, + "estimated_eval_tokens": 2390 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Three issues titled \"huggingface\" look like probable duplicates of each other, while \"Fett\" appears unrelated and should not be merged into that cluster. With no body details or explicit targets, only very weak triage is possible.", + "confidence": 0.39, + "canonical_issue_reason": "Issue 43845 is the earliest of the near-identical \"huggingface\" reports, so it is the safest placeholder canonical issue among the likely duplicates.", + "canonical_pr_reason": null, + "best_issue_reason": "43845 is the best issue candidate because it is one of the clearly similar reports and was created first; however, the lack of body content makes this only a tentative choice.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43835", + "right": "issue:43845", + "accept": false, + "reason": "Titles are unrelated (\"Fett\" vs \"huggingface\"), and there is no body or target evidence tying them to the same underlying issue." + }, + { + "left": "issue:43835", + "right": "issue:43846", + "accept": false, + "reason": "No concrete overlap beyond weak similarity; the titles do not indicate the same bug or request." + }, + { + "left": "issue:43835", + "right": "issue:43859", + "accept": false, + "reason": "\"Fett\" does not plausibly refer to the same underlying issue as the \"huggingface\" reports." + }, + { + "left": "issue:43845", + "right": "issue:43846", + "accept": false, + "reason": "These are both titled \"huggingface\", but there is no description/target evidence to confirm they are the same underlying issue rather than separate reports." 
+ }, + { + "left": "issue:43845", + "right": "issue:43859", + "accept": false, + "reason": "Same broad title is not enough to safely deduplicate without body details or explicit links." + }, + { + "left": "issue:43846", + "right": "issue:43859", + "accept": false, + "reason": "The available evidence is only title similarity; that's too weak to accept as the same issue." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 11, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44371-5", + "nodes": [ + "issue:44371", + "issue:44418", + "issue:44572", + "issue:44573", + "issue:44937" + ], + "soft_pairs": [ + "issue:44371|issue:44572", + "issue:44371|issue:44573", + "issue:44371|issue:44418", + "issue:44371|issue:44937", + "issue:44418|issue:44572", + "issue:44418|issue:44573", + "issue:44572|issue:44573", + "issue:44572|issue:44937", + "issue:44573|issue:44937" + ], + "prepared_review_unit_hash": "3c9726f83b8d85e5cb7d0cd87c486f7412e87becc21ffc47a643fdac9a8dd38a", + "component_budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 9, + "serialized_chars": 5791, + "estimated_input_tokens": 1448, + "estimated_eval_tokens": 3152 + }, + "budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 9, + "serialized_chars": 5791, + "estimated_input_tokens": 1448, + "estimated_eval_tokens": 3152 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All items look like unrelated low-information/spam issues rather than duplicates of one concrete bug or request. 
The identical \"\" titles are not enough to treat them as the same underlying issue.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44371", + "right": "issue:44572", + "accept": false, + "reason": "Same spammy title, but no body detail, targets, or evidence of a shared underlying bug/change." + }, + { + "left": "issue:44371", + "right": "issue:44573", + "accept": false, + "reason": "Same spammy title, but these look like separate junk reports rather than one concrete issue." + }, + { + "left": "issue:44371", + "right": "issue:44418", + "accept": false, + "reason": "One is spam and the other is a generic documentation suggestion; no common problem statement." + }, + { + "left": "issue:44371", + "right": "issue:44937", + "accept": false, + "reason": "Spam title versus unrelated branded text; no indication they describe the same issue." + }, + { + "left": "issue:44418", + "right": "issue:44572", + "accept": false, + "reason": "Generic docs suggestion and spam are unrelated; no shared technical content." + }, + { + "left": "issue:44418", + "right": "issue:44573", + "accept": false, + "reason": "Generic docs suggestion and spam are unrelated; not the same underlying request." + }, + { + "left": "issue:44572", + "right": "issue:44573", + "accept": false, + "reason": "Identical spam titles are not sufficient to merge as duplicates without any concrete shared bug or change description." + }, + { + "left": "issue:44572", + "right": "issue:44937", + "accept": false, + "reason": "Spam issue and unrelated marketing-style title; no common subject matter." + }, + { + "left": "issue:44573", + "right": "issue:44937", + "accept": false, + "reason": "Spam issue and unrelated marketing-style title; no evidence of the same underlying issue." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 12, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45145-2", + "nodes": [ + "issue:45145", + "issue:45151" + ], + "soft_pairs": [ + "issue:45145|issue:45151" + ], + "prepared_review_unit_hash": "b0f2278ebda48cfb39cce3644c42d5871fcb0958bfdb4225404c4ac67b84ef31", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1646, + "estimated_input_tokens": 412, + "estimated_eval_tokens": 1080 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1646, + "estimated_input_tokens": 412, + "estimated_eval_tokens": 1080 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These issues are very likely duplicates: they have identical titles, similar size, and a soft-similarity signal. #45145 is the better canonical issue because it is earlier and already has an inbound reference. 
", + "confidence": 0.93, + "canonical_issue_reason": "#45145 is earlier and already has an inbound reference, so it is the best canonical thread for consolidating this likely duplicate report.", + "canonical_pr_reason": null, + "best_issue_reason": "#45145 is the best issue candidate because it is the earliest matching report and already has inbound discussion.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45145", + "right": "issue:45151", + "accept": true, + "reason": "The titles are identical and the soft-similarity signal is strong, so these appear to be likely duplicates even though the packet does not expose the full bodies." + } + ] + }, + "evaluator_result": { + "accept": false, + "feedback": "The canonical choice is grounded (earlier issue with an inbound reference and identical title), but the duplicate judgment is overstated. The packet only shows exact title match plus a soft-similarity signal; it does not provide the bodies, so \"near-identical content/body\" and \"straightforward duplicate\" are stronger than the evidence supports. A more conservative wording would be that they are likely duplicates based on identical titles and similarity signals." 
+ }, + "evaluator_used": true, + "retried": true, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 13, + "component_count": 13, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "cache", + "cache_hit": true, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45295-2", + "nodes": [ + "issue:45295", + "issue:45373" + ], + "soft_pairs": [], + "prepared_review_unit_hash": "f5fd95fada2f530ab0999e343c17c16266cde0c7182bb22509972ca44827cd49", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 0, + "serialized_chars": 1286, + "estimated_input_tokens": 322, + "estimated_eval_tokens": 900 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 0, + "serialized_chars": 1286, + "estimated_input_tokens": 322, + "estimated_eval_tokens": 900 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both items are the same missing feature request for Gemma 4 sequence classification support; the newer issue is more specific and better documented.", + "confidence": 0.97, + "canonical_issue_reason": "Issue #45373 is the clearest canonical issue because it precisely names the missing class (`Gemma4ForSequenceClassification`) and states the gap in the `gemma4` module.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #45373 is the best representative: it is open, more detailed, and directly describes the exact API missing from Gemma 4.", + "best_pr_reason": null, + "soft_edge_verdicts": [] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 1, + "component_count": 24, + "review_unit_index": 1, + 
"review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-39895-14", + "nodes": [ + "pull_request:39895", + "pull_request:42668", + "pull_request:43251", + "pull_request:43254", + "pull_request:43424", + "pull_request:43823", + "pull_request:43995", + "pull_request:44827", + "pull_request:45078", + "pull_request:45170", + "pull_request:45213", + "pull_request:45435", + "pull_request:45454", + "pull_request:45489" + ], + "soft_pairs": [ + "pull_request:43251|pull_request:43254", + "pull_request:45454|pull_request:45489", + "pull_request:44827|pull_request:45489", + "pull_request:42668|pull_request:43823", + "pull_request:42668|pull_request:45078", + "pull_request:45078|pull_request:45454", + "pull_request:43823|pull_request:45489", + "pull_request:45213|pull_request:45435", + "pull_request:43823|pull_request:45454", + "pull_request:43823|pull_request:45435", + "pull_request:44827|pull_request:45170", + "pull_request:43424|pull_request:44827", + "pull_request:43823|pull_request:43995", + "pull_request:43251|pull_request:43823", + "pull_request:43995|pull_request:45078", + "pull_request:45078|pull_request:45435", + "pull_request:43995|pull_request:45435", + "pull_request:43424|pull_request:45435", + "pull_request:43251|pull_request:44827", + "pull_request:43424|pull_request:43823", + "pull_request:43995|pull_request:45213", + "pull_request:39895|pull_request:43424", + "pull_request:44827|pull_request:45213", + "pull_request:43995|pull_request:44827" + ], + "prepared_review_unit_hash": "0e3c5f39a143fc8545c980985ff4c6ceac70c4fa2dac26d4446a9ee58f303c0a", + "component_budget": { + "node_count": 14, + "item_count": 14, + "soft_pair_count": 28, + "serialized_chars": 41382, + "estimated_input_tokens": 10346, + "estimated_eval_tokens": 20948 + }, + "budget": { + "node_count": 14, + "item_count": 14, + "soft_pair_count": 24, + "serialized_chars": 37962, + 
"estimated_input_tokens": 9491, + "estimated_eval_tokens": 19238 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These PRs are mostly unrelated. The only clear duplicate pair is the two `fixed_cross_entropy` patches, which both add `weight` and `label_smoothing` support and forward them to `nn.functional.cross_entropy` in the same helper.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 43251 is the best canonical representative for the loss fix: it has the explicit issue link, the more complete patch, and the broader discussion around passing kwargs through `fixed_cross_entropy`.", + "best_issue_reason": null, + "best_pr_reason": "PR 43251 is the strongest global canonical PR among these because it most directly addresses a specific bug, has the linked target issue, and cleanly represents the duplicate loss-helper change.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43251", + "right": "pull_request:43254", + "accept": true, + "reason": "Same underlying change in `src/transformers/loss/loss_utils.py`: both add `weight` and `label_smoothing` support to `fixed_cross_entropy` and pass them into `nn.functional.cross_entropy`." + }, + { + "left": "pull_request:45454", + "right": "pull_request:45489", + "accept": false, + "reason": "Both touch Gemma model code, but one is about text-only training samples in Gemma4; the other is about Gemma3n cache-sharing/attention behavior. Different bugs/changes." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45489", + "accept": false, + "reason": "Mistral4 test fixes and Gemma3n/Gemma4 cache-sharing refactoring are unrelated changes; shared ecosystem and model files are not enough to make them duplicates." 
+ }, + { + "left": "pull_request:42668", + "right": "pull_request:43823", + "accept": false, + "reason": "Processor auto-loading robustness vs adding a new MobileLLM model are unrelated PRs, even though both involve model infrastructure." + }, + { + "left": "pull_request:42668", + "right": "pull_request:45078", + "accept": false, + "reason": "One broadens processor/tokenizer loading behavior; the other changes tokenizer auto-selection error handling. Different code paths and bugs." + }, + { + "left": "pull_request:45078", + "right": "pull_request:45454", + "accept": false, + "reason": "Tokenizer auto-selection logic and Gemma4 training-mask behavior are completely different areas of the codebase." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45489", + "accept": false, + "reason": "New MobileLLM model support and Gemma3n/Gemma4 cache-sharing alignment are unrelated model additions/refactors." + }, + { + "left": "pull_request:45213", + "right": "pull_request:45435", + "accept": false, + "reason": "A model-creation skill PR and a Whisper tokenizer bounds fix are unrelated." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45454", + "accept": false, + "reason": "MobileLLM model addition and Gemma4 training-only sample handling are unrelated." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45435", + "accept": false, + "reason": "MobileLLM model addition does not overlap with the Whisper special-token indexing bug fix." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45170", + "accept": false, + "reason": "Mistral4 test fixes and a `layrnorm`/`layernorm` rename across CLIP-like models are different changes." + }, + { + "left": "pull_request:43424", + "right": "pull_request:44827", + "accept": false, + "reason": "Executorch dynamic-shape export tests and Mistral4 test fixes are unrelated test additions." 
+ }, + { + "left": "pull_request:43823", + "right": "pull_request:43995", + "accept": false, + "reason": "Adding MobileLLM and refactoring Falcon output collection are unrelated model changes." + }, + { + "left": "pull_request:43251", + "right": "pull_request:43823", + "accept": false, + "reason": "Loss helper kwargs support and MobileLLM model addition have no shared bug or code path." + }, + { + "left": "pull_request:43995", + "right": "pull_request:45078", + "accept": false, + "reason": "Falcon output-collection refactoring and tokenizer auto-selection error handling are unrelated." + }, + { + "left": "pull_request:45078", + "right": "pull_request:45435", + "accept": false, + "reason": "Tokenizer auto-selection behavior and Whisper tokenization bounds checking are distinct tokenizer bugs." + }, + { + "left": "pull_request:43995", + "right": "pull_request:45435", + "accept": false, + "reason": "Falcon forward/output refactoring and Whisper special-token indexing are unrelated." + }, + { + "left": "pull_request:43424", + "right": "pull_request:45435", + "accept": false, + "reason": "Executorch export tests and Whisper tokenization fix do not address the same underlying issue." + }, + { + "left": "pull_request:43251", + "right": "pull_request:44827", + "accept": false, + "reason": "Cross-entropy kwargs support and Mistral4 test fixes are unrelated." + }, + { + "left": "pull_request:43424", + "right": "pull_request:43823", + "accept": false, + "reason": "Executorch dynamic-shape test coverage and MobileLLM model addition are unrelated." + }, + { + "left": "pull_request:43995", + "right": "pull_request:45213", + "accept": false, + "reason": "Falcon standardized-output refactor and the model-creation skill draft are unrelated." + }, + { + "left": "pull_request:39895", + "right": "pull_request:43424", + "accept": false, + "reason": "VideoPrism model/docs addition and Executorch export tests are unrelated additions." 
+ }, + { + "left": "pull_request:44827", + "right": "pull_request:45213", + "accept": false, + "reason": "Mistral4 test fixes and the model-creation skill draft are unrelated." + }, + { + "left": "pull_request:43995", + "right": "pull_request:44827", + "accept": false, + "reason": "Falcon output-collection refactor and Mistral4 test fixes are different model-specific changes." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 1, + "component_count": 24, + "review_unit_index": 2, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-39895-14", + "nodes": [ + "pull_request:43823", + "pull_request:44827", + "pull_request:45078", + "pull_request:45213" + ], + "soft_pairs": [ + "pull_request:43823|pull_request:44827", + "pull_request:43823|pull_request:45213", + "pull_request:45078|pull_request:45213", + "pull_request:43823|pull_request:45078" + ], + "prepared_review_unit_hash": "9550ced32705c66e6f2fd580ac700e34c207e21ac6d896f0fe4fa9351bf50e4e", + "component_budget": { + "node_count": 14, + "item_count": 14, + "soft_pair_count": 28, + "serialized_chars": 41382, + "estimated_input_tokens": 10346, + "estimated_eval_tokens": 20948 + }, + "budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 4, + "serialized_chars": 10285, + "estimated_input_tokens": 2572, + "estimated_eval_tokens": 5400 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These four PRs are not duplicates: one adds MobileLLM, one fixes Mistral4 tests and grouped-mm behavior, one changes tokenizer auto-selection/error handling, and one is a large draft model-creation skill/tooling PR.", + 
"confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": "PR 43823 is the most self-contained substantive code addition with model files and tests, but it is unrelated to the other PRs.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43823", + "right": "pull_request:44827", + "accept": false, + "reason": "Different changes and code paths: new MobileLLM model vs Mistral4 test fix and grouped_mm contiguity tweak." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45213", + "accept": false, + "reason": "MobileLLM model implementation is unrelated to the model-creation skill/tooling and docs workflow PR." + }, + { + "left": "pull_request:45078", + "right": "pull_request:45213", + "accept": false, + "reason": "Tokenizer auto-loading logic change is a separate runtime behavior fix, not the model-creation skill or scaffolding PR." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45078", + "accept": false, + "reason": "New model implementation vs tokenizer_auto conversion/error handling; they do not fix the same underlying bug or change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 2, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-40546-2", + "nodes": [ + "pull_request:40546", + "pull_request:42781" + ], + "soft_pairs": [ + "pull_request:40546|pull_request:42781" + ], + "prepared_review_unit_hash": "6258f3819f6acfa2fdab78d7e7a3247eaeb585d87fb5e74413ff5b199f334567", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 7225, + "estimated_input_tokens": 1807, + "estimated_eval_tokens": 3870 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 7225, + "estimated_input_tokens": 1807, + "estimated_eval_tokens": 3870 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs are VibeVoice-related, but they are not the same change: #40546 is the base VibeVoice implementation, while #42781 adds a separate realtime extension and acoustic tokenizer work. 
The overlap is mostly shared subsystem files, not a single concrete fix/change.", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": "#40546 is the primary, non-draft VibeVoice implementation with substantial discussion and broader initial coverage; it is the better anchor PR for this cluster.", + "best_issue_reason": null, + "best_pr_reason": "#40546 best represents the cluster because it is the main implementation PR, whereas #42781 is a draft follow-up for realtime support and is narrower/specialized.", + "soft_edge_verdicts": [ + { + "left": "pull_request:40546", + "right": "pull_request:42781", + "accept": false, + "reason": "Shared VibeVoice files and auto-registry edits indicate related area, but #42781 adds realtime/acoustic-tokenizer functionality and generation changes rather than fixing or implementing the same concrete code path as #40546. They would not plausibly be merged as one PR." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 3, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-40755-2", + "nodes": [ + "pull_request:40755", + "pull_request:41224" + ], + "soft_pairs": [ + "pull_request:40755|pull_request:41224" + ], + "prepared_review_unit_hash": "ca08cfafffb81953b0a8dd69f72b038e36d33eb0ee0c1977f4bceb0c4fd8a2ea", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4674, + "estimated_input_tokens": 1169, + "estimated_eval_tokens": 2594 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4674, + "estimated_input_tokens": 1169, + "estimated_eval_tokens": 
2594 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The two PRs are unrelated: one adds TimesFM covariate forecasting support, while the other adds DINOv3 image classification support. They do not fix the same bug or implement the same change, so this is not a duplicate cluster.", + "confidence": 0.99, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": "No single best PR for deduplication: the items target different model families, different files, and different user-facing capabilities.", + "soft_edge_verdicts": [ + { + "left": "pull_request:40755", + "right": "pull_request:41224", + "accept": false, + "reason": "Different features in different subsystems: TimesFM covariate forecasting vs. DINOv3 image classification. No shared code-path problem or mergeable overlap." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 4, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41356-2", + "nodes": [ + "pull_request:41356", + "pull_request:44339" + ], + "soft_pairs": [ + "pull_request:41356|pull_request:44339" + ], + "prepared_review_unit_hash": "bf853edcac36b9d46727ab7091e6d4edc2525d28b28d3cc612346441fafe02a8", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5934, + "estimated_input_tokens": 1484, + "estimated_eval_tokens": 3224 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5934, + "estimated_input_tokens": 1484, + 
"estimated_eval_tokens": 3224 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both pull requests are the same feature family: adding DEIMv2 support to Transformers. PR 44339 is the more complete and later integration, while PR 41356 is an earlier/basic scaffold for the same model addition.", + "confidence": 0.84, + "canonical_issue_reason": null, + "canonical_pr_reason": "pull_request:44339 is the better canonical PR because it is the fuller DEIMv2 integration (model, loss, auto mappings, conversion script, modular code, and tests) and appears to supersede the earlier partial implementation.", + "best_issue_reason": null, + "best_pr_reason": "pull_request:44339 is the strongest representative for the cluster since it contains the most complete implementation path for adding DEIMv2 and covers the same underlying change as the earlier PR.", + "soft_edge_verdicts": [ + { + "left": "pull_request:41356", + "right": "pull_request:44339", + "accept": true, + "reason": "Yes: both PRs implement DEIMv2 support for the same model addition; 44339 is a superset/follow-up of the earlier 41356 scaffold and could plausibly be merged as one feature PR." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet: both PRs target the same issue (41211) and add DEIMv2 support, with 44339 clearly covering a broader implementation surface than 41356. The soft-edge verdict is conservative enough, since the overlap is real and the two changes appear to be successive parts of the same feature rather than unrelated work." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 5, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41797-2", + "nodes": [ + "pull_request:41797", + "pull_request:45186" + ], + "soft_pairs": [ + "pull_request:41797|pull_request:45186" + ], + "prepared_review_unit_hash": "73cb19002c31055ec111d538563a22c523e9281928f2d85f19cf165b9e871102", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6723, + "estimated_input_tokens": 1681, + "estimated_eval_tokens": 3618 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6723, + "estimated_input_tokens": 1681, + "estimated_eval_tokens": 3618 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The two PRs are both new-model integration patches, but they add different models (DeepSeek OCR vs Isaac) and do not fix the same underlying bug/change. 
The shared files are mostly generic Transformers scaffolding, so this is not a duplicate pair.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": "PR 41797 is the better representative artifact for the cluster because it is a complete model-addition PR with broad integration coverage and substantial review context, but it is still unrelated to PR 45186.", + "soft_edge_verdicts": [ + { + "left": "pull_request:41797", + "right": "pull_request:45186", + "accept": false, + "reason": "Different model additions with distinct codepaths and artifacts; overlap is limited to shared framework registration/docs boilerplate, so they are not the same change and should not be merged as duplicates." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 6, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42166-2", + "nodes": [ + "pull_request:42166", + "pull_request:45426" + ], + "soft_pairs": [ + "pull_request:42166|pull_request:45426" + ], + "prepared_review_unit_hash": "c44458b8a8a9be4fe66685cd762a1528157fa896c200c396e7af829c061a9514", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5407, + "estimated_input_tokens": 1352, + "estimated_eval_tokens": 2960 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5407, + "estimated_input_tokens": 1352, + "estimated_eval_tokens": 2960 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": 
"These are two unrelated model-addition PRs that only overlap in shared auto-registration plumbing. They do not appear to fix the same bug or implement the same change, so the soft duplicate link should be rejected.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "No canonical PR: #42166 adds InternVLFlash, while #45426 adds AXK1; they are distinct feature PRs with different model implementations and docs.", + "best_issue_reason": null, + "best_pr_reason": "#42166 is the better representative PR for the cluster only because it is larger and more mature, but it is not a duplicate of #45426.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42166", + "right": "pull_request:45426", + "accept": false, + "reason": "Shared transformer registry files are incidental; the PRs add different models (InternVLFlash vs AXK1) with different code paths and cannot plausibly be merged as one fix/change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 7, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42227-4", + "nodes": [ + "pull_request:42227", + "pull_request:43291", + "pull_request:43665", + "pull_request:44815" + ], + "soft_pairs": [ + "pull_request:42227|pull_request:43291", + "pull_request:42227|pull_request:43665", + "pull_request:42227|pull_request:44815", + "pull_request:43291|pull_request:43665", + "pull_request:43291|pull_request:44815", + "pull_request:43665|pull_request:44815" + ], + "prepared_review_unit_hash": "af869772927016408959bba30c7230e0e7560e2048c770e9a1f3ae5f753d376b", + "component_budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + 
"serialized_chars": 10167, + "estimated_input_tokens": 2542, + "estimated_eval_tokens": 5340 + }, + "budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 10167, + "estimated_input_tokens": 2542, + "estimated_eval_tokens": 5340 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "This cluster is not a duplicate set: it mixes a Whisper ASR bugfix, a Whisper tokenizer/test change, a CLIP/ViT test-only diff, and an FP8 dequant/model-loading change. They do not look like the same underlying bug or change.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "No single canonical PR fits the cluster: the items touch different code paths and purposes (Whisper ASR return_language plumbing, Whisper tokenizer timestamp decoding/tests, unrelated CLIP/ViT test edits, and FP8 dequant/model-loading changes).", + "best_issue_reason": null, + "best_pr_reason": "PR 42227 is the strongest standalone fix to keep as representative because it contains a concrete runtime bugfix with an explicit issue target; the others are either test-only, unrelated, or broader refactors.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42227", + "right": "pull_request:43291", + "accept": false, + "reason": "Both are Whisper-related, but they fix different paths: 42227 plumbs return_language through the ASR pipeline, while 43291 changes tokenizer decode/timestamp handling and tests. Not the same change." + }, + { + "left": "pull_request:42227", + "right": "pull_request:43665", + "accept": false, + "reason": "Unrelated changes: 42227 is Whisper ASR logic, while 43665 is test-only edits for CLIP/ViT. Not the same bug or fix." + }, + { + "left": "pull_request:42227", + "right": "pull_request:44815", + "accept": false, + "reason": "Different subsystems and problems: Whisper ASR language return vs FP8 dequant/model-loading work. 
Not mergeable as one PR." + }, + { + "left": "pull_request:43291", + "right": "pull_request:43665", + "accept": false, + "reason": "Whisper tokenizer behavior vs CLIP/ViT tests; different code paths and no shared underlying bug." + }, + { + "left": "pull_request:43291", + "right": "pull_request:44815", + "accept": false, + "reason": "Whisper tokenizer decoding changes are unrelated to FP8 dequant/model-loading fixes." + }, + { + "left": "pull_request:43665", + "right": "pull_request:44815", + "accept": false, + "reason": "Test-only CLIP/ViT edits are unrelated to the FP8 dequant/model-loading change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 8, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42975-2", + "nodes": [ + "pull_request:42975", + "pull_request:42976" + ], + "soft_pairs": [ + "pull_request:42975|pull_request:42976" + ], + "prepared_review_unit_hash": "60617b591df0c371a7ec5114a7742ef002d411e8dd0d437e1219354434e3ae26", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5935, + "estimated_input_tokens": 1484, + "estimated_eval_tokens": 3224 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5935, + "estimated_input_tokens": 1484, + "estimated_eval_tokens": 3224 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two unrelated maintenance PRs that both update GitHub Actions versions across workflows, but they target different actions and different scopes/motivations. 
They are similar in theme, not the same underlying change.", + "confidence": 0.89, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #42975 is the more representative cluster member because it broadly upgrades multiple GitHub Actions to Node 24-compatible versions across many workflows, matching the cluster\u2019s shared maintenance theme.", + "best_issue_reason": null, + "best_pr_reason": "PR #42975 is the best single representative of the cluster because it is the broader, more general GitHub Actions compatibility upgrade; #42976 is a narrower version bump focused mainly on docker/build-push-action and a few related workflow updates.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42975", + "right": "pull_request:42976", + "accept": false, + "reason": "Both are CI maintenance PRs, but they update different third-party actions (Node 24 compatibility vs latest docker/build-push-action versions) and do not appear to fix the same concrete problem or be mergeable into one PR as duplicates." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 9, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42978-4", + "nodes": [ + "pull_request:42978", + "pull_request:43448", + "pull_request:43451", + "pull_request:45490" + ], + "soft_pairs": [ + "pull_request:42978|pull_request:43448", + "pull_request:42978|pull_request:43451", + "pull_request:42978|pull_request:45490", + "pull_request:43448|pull_request:43451", + "pull_request:43448|pull_request:45490", + "pull_request:43451|pull_request:45490" + ], + "prepared_review_unit_hash": "a9cb13311be3ecb2735116917f14c051e4d842f81e9f9df85f0ae86d04a68992", + "component_budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 14136, + "estimated_input_tokens": 3534, + "estimated_eval_tokens": 7324 + }, + "budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 14136, + "estimated_input_tokens": 3534, + "estimated_eval_tokens": 7324 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All four PRs are separate model-addition changes, not duplicates: ViT NEPA, Molmo, Molmo2, and CTSM. 
The overlap is limited to shared boilerplate areas like docs, auto-registry mappings, and test scaffolding.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "No canonical PR: each PR introduces a different model family and distinct implementation files, so they should not be merged as one underlying change.", + "best_issue_reason": null, + "best_pr_reason": "No single best PR for duplicate triage; none of these is a duplicate of the others, and the shared files are generic model-registration/docs plumbing rather than the same code-path fix.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42978", + "right": "pull_request:43448", + "accept": false, + "reason": "ViT NEPA vs Molmo are different model additions with different configs/modeling/tests; shared registry/docs files are boilerplate only." + }, + { + "left": "pull_request:42978", + "right": "pull_request:43451", + "accept": false, + "reason": "ViT NEPA and Molmo2 are distinct model families with separate source trees; no same underlying bug/change." + }, + { + "left": "pull_request:42978", + "right": "pull_request:45490", + "accept": false, + "reason": "ViT NEPA and CTSM add unrelated models; overlap is limited to generic docs/auto-mapping updates." + }, + { + "left": "pull_request:43448", + "right": "pull_request:43451", + "accept": false, + "reason": "Molmo and Molmo2 are related by name but implement different model families and codepaths; not mergeable as one PR." + }, + { + "left": "pull_request:43448", + "right": "pull_request:45490", + "accept": false, + "reason": "Molmo and CTSM are separate model additions; shared changes are only broad integration boilerplate." + }, + { + "left": "pull_request:43451", + "right": "pull_request:45490", + "accept": false, + "reason": "Molmo2 and CTSM add different models with different implementation files; not the same concrete change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 10, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43488-3", + "nodes": [ + "pull_request:43488", + "pull_request:43532", + "pull_request:45476" + ], + "soft_pairs": [ + "pull_request:43488|pull_request:43532", + "pull_request:43488|pull_request:45476", + "pull_request:43532|pull_request:45476" + ], + "prepared_review_unit_hash": "598280c219cb888b3aa248bab5b45f510020632848474e2671034e1feaf3725f", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7469, + "estimated_input_tokens": 1868, + "estimated_eval_tokens": 3992 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7469, + "estimated_input_tokens": 1868, + "estimated_eval_tokens": 3992 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All three pull requests are unrelated test/CI or placeholder changes; they do not appear to fix the same underlying bug or implement the same change. The shared Vit test file is incidental and not enough to merge any pair.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "pull_request:43488", + "right": "pull_request:43532", + "accept": false, + "reason": "Different changesets: one alters setup/versioning and a Vit test, the other rewires self-comment CI workflows and testing utilities. Shared filename alone is incidental." 
+ }, + { + "left": "pull_request:43488", + "right": "pull_request:45476", + "accept": false, + "reason": "Unrelated purposes: one is a repo-bot/version formatting check, the other adds a PR CI workflow and unrelated code/test edits. No common concrete bug or fix." + }, + { + "left": "pull_request:43532", + "right": "pull_request:45476", + "accept": false, + "reason": "Both touch CI/test areas, but they are not the same code-path problem. The workflow rewrites and added dummy edits are distinct and would not plausibly be merged as one fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 11, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43612-2", + "nodes": [ + "pull_request:43612", + "pull_request:43613" + ], + "soft_pairs": [ + "pull_request:43612|pull_request:43613" + ], + "prepared_review_unit_hash": "4a48926e4e2e05016c6e89b018607273646819f9d25b7f41bd77599a14767b8e", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6082, + "estimated_input_tokens": 1521, + "estimated_eval_tokens": 3298 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6082, + "estimated_input_tokens": 1521, + "estimated_eval_tokens": 3298 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are related only at the tooling/docs level; they add different pipelines for different tasks and should not be deduplicated.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "No single canonical 
PR: the two pull requests implement different pipeline features (concept segmentation vs visual segmentation) with different model/test surfaces.", + "best_issue_reason": null, + "best_pr_reason": "PR 43613 is the broader and more complete pipeline addition, touching more model integrations and tests, so it is the better representative of the cluster even though it is not a duplicate of 43612.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43612", + "right": "pull_request:43613", + "accept": false, + "reason": "Different concrete changes: one adds Promptable Concept Segmentation, the other Promptable Visual Segmentation. Shared docs/pipeline plumbing is too broad to treat as the same fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 12, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43651-2", + "nodes": [ + "pull_request:43651", + "pull_request:43663" + ], + "soft_pairs": [ + "pull_request:43651|pull_request:43663" + ], + "prepared_review_unit_hash": "0085a96816bcdd342f6b2ba4f2084924e960065c7e6d22dc441574e0b5ca7b57", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4292, + "estimated_input_tokens": 1073, + "estimated_eval_tokens": 2402 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4292, + "estimated_input_tokens": 1073, + "estimated_eval_tokens": 2402 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both items are small trainer.py PRs, but they address different code paths: 
gradient-accumulation loss scaling vs dataloader signature-column filtering. They should not be deduplicated.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "No single PR cleanly represents the cluster as a duplicate set; 43651 and 43663 change unrelated Trainer behaviors despite both touching trainer.py.", + "best_issue_reason": null, + "best_pr_reason": "Neither PR is a strong canonical representative for the pair. If one must be chosen, 43651 is the more self-contained behavior fix, but it is not a duplicate of 43663.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43651", + "right": "pull_request:43663", + "accept": false, + "reason": "Different underlying changes: 43651 alters loss scaling during gradient accumulation, while 43663 adds a hook for signature-column filtering. Same file/subsystem is not enough to treat them as the same fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 13, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43656-3", + "nodes": [ + "pull_request:43656", + "pull_request:43836", + "pull_request:43842" + ], + "soft_pairs": [ + "pull_request:43656|pull_request:43842", + "pull_request:43836|pull_request:43842" + ], + "prepared_review_unit_hash": "1ea3ba19090d629f59021f2199adc50692738eaa6c65ef7f4687326c0123a4a2", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 5992, + "estimated_input_tokens": 1498, + "estimated_eval_tokens": 3252 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 5992, + "estimated_input_tokens": 1498, + 
"estimated_eval_tokens": 3252 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All three PRs touch `src/transformers/cli/serve.py` and the CLI\u2019s TypeAdapter/runtime typing problem, but they are not all the same fix. PR 43842 is the most focused patch for the actual import-time NameError; 43836 looks like a broader alternative workaround in the same area; 43656 appears to be a separate, broader typing cleanup despite the similar title.", + "confidence": 0.69, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 43842 is the best canonical PR: it directly targets the TypeAdapter NameError in the CLI with the smallest, clearest change set and the strongest issue linkage.", + "best_issue_reason": null, + "best_pr_reason": "PR 43842 is the best overall fit because it is the most narrowly scoped and directly addresses the concrete runtime NameError without introducing broader unrelated typing changes.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43656", + "right": "pull_request:43842", + "accept": false, + "reason": "Both mention the CLI TypeAdapter error, but 43656 is a broader typing/`__future__` cleanup tied to a different issue target and does not clearly look like the same concrete fix as 43842." + }, + { + "left": "pull_request:43836", + "right": "pull_request:43842", + "accept": true, + "reason": "Both target the same serve.py import-time TypeAdapter NameError in the CLI, and the fixes are alternative ways of deferring annotation evaluation in the same code path." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays appropriately conservative. The rejection of 43656 vs 43842 is justified by the different issue target and the fact that the diff preview suggests a broader typing cleanup rather than the same concrete fix. 
The acceptance of 43836 vs 43842 is also reasonable: both target the same issue in the same file and appear to be alternative ways of fixing the same import-time TypeAdapter problem." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 14, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43757-3", + "nodes": [ + "pull_request:43757", + "pull_request:45500", + "pull_request:45506" + ], + "soft_pairs": [ + "pull_request:45500|pull_request:45506", + "pull_request:43757|pull_request:45500", + "pull_request:43757|pull_request:45506" + ], + "prepared_review_unit_hash": "01c33bc963e7f0ba36ed7a427db39f572861f21fabed6710902203b682053d7b", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7783, + "estimated_input_tokens": 1946, + "estimated_eval_tokens": 4148 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 7783, + "estimated_input_tokens": 1946, + "estimated_eval_tokens": 4148 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "45500 and 45506 are near-identical duplicate PRs for GPT-OSS GGUF loading support; 45506 is the newer copy. 
43757 is related but only a narrower fallback patch, not the same full change.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "45506 is the latest duplicate of the full GPT-OSS GGUF loading support PR, with the same title, files, and issue target as 45500.", + "best_issue_reason": null, + "best_pr_reason": "45506 is the best representative because it is the newest identical full-support PR and includes the complete code/docs/tests update, not just the earlier fallback workaround.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45500", + "right": "pull_request:45506", + "accept": true, + "reason": "Same PR content: identical title, same files, same issue target, and effectively the same GGUF GPT-OSS support change; 45506 is just the newer duplicate." + }, + { + "left": "pull_request:43757", + "right": "pull_request:45500", + "accept": false, + "reason": "43757 is only a small fallback to avoid hard failure for gpt-oss GGUF, while 45500 adds full loading support plus docs/tests; they are related but not the same concrete change." + }, + { + "left": "pull_request:43757", + "right": "pull_request:45506", + "accept": false, + "reason": "Same as 45500: 43757 is a partial workaround, not the same full GPT-OSS GGUF support PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 15, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "cache", + "cache_hit": true, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43779-2", + "nodes": [ + "pull_request:43779", + "pull_request:43816" + ], + "soft_pairs": [ + "pull_request:43779|pull_request:43816" + ], + "prepared_review_unit_hash": "e7c674adcc031aa8883109a5dfc678b4407fb20b10764302092e384d031ba83a", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4362, + "estimated_input_tokens": 1091, + "estimated_eval_tokens": 2438 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4362, + "estimated_input_tokens": 1091, + "estimated_eval_tokens": 2438 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs target the same SwanLabCallback enhancement in the same file and appear to implement the same underlying run-resume/id support for SwanLab init. 
PR 43816 looks like the more complete formulation, with explicit parameter docs plus the code-path change.", + "confidence": 0.79, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 43816 is the better canonical representative because it more clearly and completely describes the same SwanLab resume/id feature, including documentation for the new `SWANLAB_RUN_ID` and `SWANLAB_RESUME` parameters alongside the integration change.", + "best_issue_reason": null, + "best_pr_reason": "PR 43816 is the strongest overall candidate: it targets the same issue, touches the same code path, and appears to be the more complete implementation of the SwanLab `id`/`resume` support.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43779", + "right": "pull_request:43816", + "accept": true, + "reason": "Same underlying SwanLabCallback change: both modify `integration_utils.py` to support `id`/`resume`-style run resumption, share the same issue target, and could plausibly be merged into one PR." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The packet supports the analyst\u2019s conclusion: both PRs target issue 43698, touch the same file, and are clearly about the same SwanLab `id`/`resume` resume-support bugfix. The soft-edge mergeability verdict is conservative enough here. The only caveat is that PR 43816\u2019s preview is truncated, so the \u201cmore complete\u201d claim should be treated as tentative, but it is still grounded by the titles and diff previews." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 16, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43838-3", + "nodes": [ + "pull_request:43838", + "pull_request:43973", + "pull_request:45283" + ], + "soft_pairs": [ + "pull_request:43838|pull_request:45283", + "pull_request:43973|pull_request:45283" + ], + "prepared_review_unit_hash": "be2c93a177e9dfc00b51bc0ba9b57f20502e1545c380122387f4dc914c3770ba", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 8699, + "estimated_input_tokens": 2175, + "estimated_eval_tokens": 4606 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 8699, + "estimated_input_tokens": 2175, + "estimated_eval_tokens": 4606 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are three separate feature PRs: Qwen3-ASR support, Lfm2Audio support, and Qwen3.5 GGUF loading. 
They share only loose naming/subsystem similarity and do not look like the same underlying change.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 43838 is the most representative item only because it is the largest, most developed PR and has an explicit issue link, but it is still a distinct Qwen3-ASR feature PR, not a duplicate of the others.", + "best_issue_reason": null, + "best_pr_reason": "PR 43838 is the best single representative for the cluster due to its scope, review activity, and explicit tracking issue, but it should not be merged with the other two PRs.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43838", + "right": "pull_request:45283", + "accept": false, + "reason": "Qwen3-ASR model support versus Qwen3.5 GGUF loading are different changes touching different code paths; shared Qwen naming is not enough to treat them as the same PR." + }, + { + "left": "pull_request:43973", + "right": "pull_request:45283", + "accept": false, + "reason": "Lfm2Audio model integration and Qwen3.5 GGUF loading are unrelated feature additions in different subsystems and are not plausibly one merged fix/change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 17, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 4, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43996", + "pull_request:44007", + "pull_request:44013", + "pull_request:44018", + "pull_request:44019", + "pull_request:44044", + "pull_request:44066", + "pull_request:44068", + "pull_request:44071", + "pull_request:44072", + "pull_request:44085", + "pull_request:44086", + "pull_request:44116", + "pull_request:44722" + ], + "soft_pairs": [ + "pull_request:43996|pull_request:44085", + "pull_request:43996|pull_request:44044", + "pull_request:44066|pull_request:44085", + "pull_request:44007|pull_request:44072", + "pull_request:44072|pull_request:44722", + "pull_request:44066|pull_request:44072", + "pull_request:44013|pull_request:44044", + "pull_request:44066|pull_request:44086", + "pull_request:44066|pull_request:44071", + "pull_request:44018|pull_request:44068", + "pull_request:44066|pull_request:44068", + "pull_request:44018|pull_request:44066", + "pull_request:44019|pull_request:44722", + "pull_request:44086|pull_request:44722", + "pull_request:44019|pull_request:44085", + "pull_request:44018|pull_request:44019", + "pull_request:44019|pull_request:44071", + "pull_request:44068|pull_request:44116", + "pull_request:44068|pull_request:44722", + "pull_request:44071|pull_request:44722", + "pull_request:44018|pull_request:44116", + "pull_request:44018|pull_request:44086", + "pull_request:44018|pull_request:44085", + "pull_request:44071|pull_request:44085" + ], + "prepared_review_unit_hash": "60eb5f5e06020335410adf6c9e23b5e5831db61c1b09ee926747a29ff899ce93", + 
"component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 77, + "serialized_chars": 84507, + "estimated_input_tokens": 21127, + "estimated_eval_tokens": 42510 + }, + "budget": { + "node_count": 14, + "item_count": 14, + "soft_pair_count": 24, + "serialized_chars": 34769, + "estimated_input_tokens": 8693, + "estimated_eval_tokens": 17642 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a wave of model-specific output-tracing refactors. I only see a couple of near-duplicate pairs within the same model family; most other pairs are just related by the shared umbrella issue or refactor theme, not the same concrete change.", + "confidence": 0.83, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44066 is the clearest representative: it has an explicit refactor scope, an issue link, and a concrete decorator-based output-tracing conversion across GPT-J/CodeGen.", + "best_issue_reason": null, + "best_pr_reason": "PR 44066 is the best overall exemplar of the cluster because it is issue-linked, concrete, and more representative than the narrower per-model refactors.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43996", + "right": "pull_request:44085", + "accept": false, + "reason": "Different model families and files; same umbrella output-tracing theme is not enough to make these the same change." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44044", + "accept": false, + "reason": "CVT/FNet refactor vs DeBERTa refactor; only the general tracing pattern overlaps." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44085", + "accept": false, + "reason": "Both touch GPT-J-style output tracing, but the evidence is too weak to treat them as the same concrete PR-level change." 
+ }, + { + "left": "pull_request:44007", + "right": "pull_request:44072", + "accept": false, + "reason": "ResNet-family refactor vs EfficientNet refactor; shared issue target does not imply duplicate code change." + }, + { + "left": "pull_request:44072", + "right": "pull_request:44722", + "accept": false, + "reason": "EfficientNet and GPT-J are different model implementations, so these are not the same underlying fix." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44072", + "accept": false, + "reason": "Different model families and different code paths; only the standardized output-tracing pattern matches." + }, + { + "left": "pull_request:44013", + "right": "pull_request:44044", + "accept": false, + "reason": "MobileNetV2 vs DeBERTa; same refactor style, but not the same bug or change." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT-J/CodeGen refactor versus MGP-STR refactor; no evidence of the same concrete change." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44071", + "accept": false, + "reason": "Different model stack and different files; this is only broad similarity around output capture." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44068", + "accept": true, + "reason": "Same model, same file, and very similar GPT-Neo output-tracing decorator refactor; these look like duplicate implementations of the same change." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44068", + "accept": false, + "reason": "GPT-J/CodeGen refactor and GPT-Neo refactor are different model-specific changes." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44066", + "accept": false, + "reason": "GPT-Neo versus GPT-J/CodeGen; they share the umbrella refactor theme but not the same code path." 
+ }, + { + "left": "pull_request:44019", + "right": "pull_request:44722", + "accept": false, + "reason": "ResNet output-tracing refactor and GPT-J output-tracing refactor are different concrete changes." + }, + { + "left": "pull_request:44086", + "right": "pull_request:44722", + "accept": false, + "reason": "MGP-STR and GPT-J are unrelated model implementations for duplicate-triage purposes." + }, + { + "left": "pull_request:44019", + "right": "pull_request:44085", + "accept": false, + "reason": "ResNet refactor versus a GPT-J-related refactor; not the same underlying bug/change." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44071", + "accept": false, + "reason": "GPT-Neo and MPT are different model code paths, so these are not duplicates." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44116", + "accept": false, + "reason": "GPT-Neo refactor versus Flaubert refactor; only the output-tracing mechanism is shared." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44722", + "accept": false, + "reason": "GPT-Neo and GPT-J are separate model implementations, so these are not the same concrete change." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44116", + "accept": false, + "reason": "GPT-Neo versus Flaubert; this is broad architectural similarity, not duplicate content." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT-Neo and MGP-STR are different model-specific refactors." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44085", + "accept": false, + "reason": "Different model families and no strong evidence that the exact same implementation change is being made." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44085", + "accept": false, + "reason": "MPT and GPT-J are unrelated concrete refactors despite both being about output tracing." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 17, + "component_count": 24, + "review_unit_index": 2, + "review_unit_count": 4, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43997", + "pull_request:44017", + "pull_request:44018", + "pull_request:44056", + "pull_request:44059", + "pull_request:44068", + "pull_request:44071", + "pull_request:44073", + "pull_request:44074", + "pull_request:44076", + "pull_request:44085", + "pull_request:44086", + "pull_request:44098", + "pull_request:44101", + "pull_request:44161", + "pull_request:44722" + ], + "soft_pairs": [ + "pull_request:44068|pull_request:44086", + "pull_request:44059|pull_request:44086", + "pull_request:44059|pull_request:44722", + "pull_request:44085|pull_request:44086", + "pull_request:44098|pull_request:44101", + "pull_request:44018|pull_request:44071", + "pull_request:44017|pull_request:44722", + "pull_request:44017|pull_request:44161", + "pull_request:44017|pull_request:44073", + "pull_request:44068|pull_request:44071", + "pull_request:44071|pull_request:44086", + "pull_request:44059|pull_request:44071", + "pull_request:43997|pull_request:44073", + "pull_request:44017|pull_request:44071", + "pull_request:44059|pull_request:44068", + "pull_request:44073|pull_request:44101", + "pull_request:44018|pull_request:44059", + "pull_request:44056|pull_request:44101", + "pull_request:44073|pull_request:44161", + "pull_request:44101|pull_request:44161", + "pull_request:43997|pull_request:44017", + "pull_request:43997|pull_request:44056", + "pull_request:44071|pull_request:44076", + "pull_request:44074|pull_request:44101" + ], + "prepared_review_unit_hash": 
"46eec002f913899a4b70e884219c51975a8aaf8827132ae7314272fb1244b899", + "component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 77, + "serialized_chars": 84507, + "estimated_input_tokens": 21127, + "estimated_eval_tokens": 42510 + }, + "budget": { + "node_count": 16, + "item_count": 16, + "soft_pair_count": 24, + "serialized_chars": 38236, + "estimated_input_tokens": 9559, + "estimated_eval_tokens": 19374 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly separate model-specific refactors to standardized output tracing/capture_outputs, not duplicates of one another. The only real commonality is the refactor pattern; the concrete codepaths and model files differ, so all soft-edge candidate merges should be rejected.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": "No single PR is the same underlying change as the others; they are distinct per-model refactors. If one must be chosen as the representative PR, 44161 is the clearest because it explicitly targets the shared output-tracing migration and is the most broadly framed of the set.", + "best_issue_reason": null, + "best_pr_reason": "44161 is the best representative PR for the cluster because it explicitly references the shared decorator-based output-tracing migration, has the broadest refactor scope, and aligns with the common theme across the other PRs.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44068", + "right": "pull_request:44086", + "accept": false, + "reason": "Different models and codepaths (GPT-Neo vs MGP-STR); same refactor style only, not the same change." + }, + { + "left": "pull_request:44059", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT-2 vs MGP-STR are unrelated model implementations; both mention output tracing but fix different forward paths." 
+ }, + { + "left": "pull_request:44059", + "right": "pull_request:44722", + "accept": false, + "reason": "Both touch output tracing, but one is GPT-2 and the other is GPT-J/CodeGen; separate model-specific refactors." + }, + { + "left": "pull_request:44085", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT-J and MGP-STR are different architectures with different forward logic; not the same underlying bug or change." + }, + { + "left": "pull_request:44098", + "right": "pull_request:44101", + "accept": false, + "reason": "ViLT vs Flaubert/XLM are different model families; these are parallel refactors, not duplicates." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44071", + "accept": false, + "reason": "GPT-Neo and MPT are distinct codepaths; similar standardized-output cleanup, but not one mergeable duplicate PR." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44722", + "accept": false, + "reason": "SegFormer output tracing refactor is unrelated to GPT-J/CodeGen implementation details." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44161", + "accept": false, + "reason": "SegFormer and LongT5 are different models; same infrastructure pattern, different concrete changes." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44073", + "accept": false, + "reason": "SegFormer vs VisualBert targets different forward/capture paths; not the same underlying fix." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44071", + "accept": false, + "reason": "GPT-Neo and MPT are separate model implementations; the refactors are analogous but not duplicates." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44086", + "accept": false, + "reason": "MPT and MGP-STR touch different model-specific output handling; no shared concrete bug." 
+ }, + { + "left": "pull_request:44059", + "right": "pull_request:44071", + "accept": false, + "reason": "GPT-2 and MPT are different models; only the refactor theme matches." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44073", + "accept": false, + "reason": "RegNet and VisualBert have different output structures and files; these are separate standardized-output ports." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44071", + "accept": false, + "reason": "SegFormer and MPT refactors affect different model internals and output capture paths." + }, + { + "left": "pull_request:44059", + "right": "pull_request:44068", + "accept": false, + "reason": "GPT-2 vs GPT-Neo are distinct model files and forward implementations; not mergeable as one PR." + }, + { + "left": "pull_request:44073", + "right": "pull_request:44101", + "accept": false, + "reason": "VisualBert and Flaubert/XLM are unrelated model families; same pattern, different changes." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44059", + "accept": false, + "reason": "GPT-Neo and GPT-2 are different model codepaths; these are parallel refactors, not duplicates." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44101", + "accept": false, + "reason": "MPNet and Flaubert/XLM are different model implementations; no evidence of the same bug or patch." + }, + { + "left": "pull_request:44073", + "right": "pull_request:44161", + "accept": false, + "reason": "VisualBert and LongT5 are unrelated model-specific output-tracing refactors." + }, + { + "left": "pull_request:44101", + "right": "pull_request:44161", + "accept": false, + "reason": "Flaubert/XLM and LongT5 are different model families; same standardized-output theme only." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44017", + "accept": false, + "reason": "RegNet and SegFormer are separate vision model implementations with different output-capture behavior." 
+ }, + { + "left": "pull_request:43997", + "right": "pull_request:44056", + "accept": false, + "reason": "RegNet and MPNet are unrelated models; these are not the same underlying change." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44076", + "accept": false, + "reason": "MPT and ImageGPT are different model files and output paths; similar refactor pattern only." + }, + { + "left": "pull_request:44074", + "right": "pull_request:44101", + "accept": false, + "reason": "TextNet and Flaubert/XLM are different architectures; no shared concrete fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 17, + "component_count": 24, + "review_unit_index": 3, + "review_unit_count": 4, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43996", + "pull_request:43997", + "pull_request:44007", + "pull_request:44010", + "pull_request:44017", + "pull_request:44018", + "pull_request:44044", + "pull_request:44056", + "pull_request:44066", + "pull_request:44071", + "pull_request:44073", + "pull_request:44074", + "pull_request:44085", + "pull_request:44129", + "pull_request:44154", + "pull_request:44161", + "pull_request:44722" + ], + "soft_pairs": [ + "pull_request:44056|pull_request:44161", + "pull_request:43997|pull_request:44018", + "pull_request:43997|pull_request:44161", + "pull_request:44074|pull_request:44161", + "pull_request:43997|pull_request:44071", + "pull_request:44010|pull_request:44071", + "pull_request:44010|pull_request:44073", + "pull_request:44018|pull_request:44129", + "pull_request:43997|pull_request:44074", + "pull_request:43997|pull_request:44129", + "pull_request:44010|pull_request:44018", + 
"pull_request:44071|pull_request:44129", + "pull_request:43996|pull_request:44161", + "pull_request:43996|pull_request:44017", + "pull_request:44010|pull_request:44044", + "pull_request:44044|pull_request:44074", + "pull_request:44066|pull_request:44722", + "pull_request:44085|pull_request:44722", + "pull_request:44085|pull_request:44129", + "pull_request:43996|pull_request:44007", + "pull_request:43996|pull_request:44129", + "pull_request:44044|pull_request:44129", + "pull_request:44044|pull_request:44154" + ], + "prepared_review_unit_hash": "0bd87223b3bd5fc5bafd82823b0d52d503a0971cfa1ba04def4f32e58cd949e0", + "component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 77, + "serialized_chars": 84507, + "estimated_input_tokens": 21127, + "estimated_eval_tokens": 42510 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 23, + "serialized_chars": 40366, + "estimated_input_tokens": 10092, + "estimated_eval_tokens": 20440 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of output-tracing refactor PRs across different model families. Most are related by the same broader initiative, but they are not duplicates because they change different model code paths. 
The only clear duplicate-like overlap is among the GPT-J/CodeGen PRs.", + "confidence": 0.93, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44066 is the best representative: it directly targets #43979, covers the central GPT-J/CodeGen output-tracing refactor, and appears to be the earliest complete version in this set.", + "best_issue_reason": null, + "best_pr_reason": "44066 is the strongest global PR choice because it is the most central and complete match to the cluster\u2019s underlying refactor theme.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44056", + "right": "pull_request:44161", + "accept": false, + "reason": "Both are output-tracing refactors, but for different models (MPNet vs LongT5) and different code paths." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44018", + "accept": false, + "reason": "RegNet and GPT-Neo are unrelated model implementations; same broad refactor theme is not enough." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44161", + "accept": false, + "reason": "Different model families and different forward paths; not the same underlying change." + }, + { + "left": "pull_request:44074", + "right": "pull_request:44161", + "accept": false, + "reason": "TextNet and LongT5 are separate model refactors, not one shared concrete bug fix." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44071", + "accept": false, + "reason": "RegNet and MPT are different code paths; only the output-tracing theme overlaps." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44071", + "accept": false, + "reason": "SqueezeBert and MPT are unrelated model-specific changes." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44073", + "accept": false, + "reason": "SqueezeBert and VisualBert refactors are not the same underlying change." 
+ }, + { + "left": "pull_request:44018", + "right": "pull_request:44129", + "accept": false, + "reason": "GPT-Neo and SpeechT5 touch different architectures and output code paths." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44074", + "accept": false, + "reason": "RegNet and TextNet are distinct model-specific output refactors." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44129", + "accept": false, + "reason": "Different models and different implementation details; not mergeable as one fix." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44018", + "accept": false, + "reason": "Different models and separate decorator migrations; not the same concrete bug." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44129", + "accept": false, + "reason": "MPT and SpeechT5 are unrelated changes despite the common refactor pattern." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44161", + "accept": false, + "reason": "CVT/FNet versus LongT5 are different model code paths, so this is not a duplicate." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44017", + "accept": false, + "reason": "CVT/FNet and SegFormer are separate refactors across different model families." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44044", + "accept": false, + "reason": "SqueezeBert and DeBERTa-v2 are unrelated model-specific output changes." + }, + { + "left": "pull_request:44044", + "right": "pull_request:44074", + "accept": false, + "reason": "DeBERTa-v2 and TextNet do not share the same concrete implementation change." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44722", + "accept": true, + "reason": "Both target the GPT-J/CodeGen output-tracing refactor for #43979 and touch the same code paths." 
+ }, + { + "left": "pull_request:44085", + "right": "pull_request:44722", + "accept": true, + "reason": "Both are GPT-J output-tracing refactors on the same underlying code path, so they are plausible duplicates/merge candidates." + }, + { + "left": "pull_request:44085", + "right": "pull_request:44129", + "accept": false, + "reason": "GPT-J and SpeechT5 are different architectures and unrelated fixes." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44007", + "accept": false, + "reason": "CVT/FNet versus RegNet/ResNet/RT-DETR are different model families and separate refactors." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44129", + "accept": false, + "reason": "Different model families and different output paths; not the same change." + }, + { + "left": "pull_request:44044", + "right": "pull_request:44129", + "accept": false, + "reason": "DeBERTa-v2 and SpeechT5 do not share the same concrete bug or patch." + }, + { + "left": "pull_request:44044", + "right": "pull_request:44154", + "accept": false, + "reason": "DeBERTa-v2 and VITS are separate model-specific changes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 17, + "component_count": 24, + "review_unit_index": 4, + "review_unit_count": 4, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43997", + "pull_request:44010", + "pull_request:44024", + "pull_request:44056", + "pull_request:44073", + "pull_request:44074", + "pull_request:44076", + "pull_request:44129" + ], + "soft_pairs": [ + "pull_request:44024|pull_request:44076", + "pull_request:44073|pull_request:44074", + "pull_request:44056|pull_request:44073", + "pull_request:44056|pull_request:44074", + "pull_request:43997|pull_request:44010", + "pull_request:44010|pull_request:44129" + ], + "prepared_review_unit_hash": "5ea2a279e80ab312807e208b5c76a3c0dfba1175efe7b4d5cd80e05cc01ba1df", + "component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 77, + "serialized_chars": 84507, + "estimated_input_tokens": 21127, + "estimated_eval_tokens": 42510 + }, + "budget": { + "node_count": 8, + "item_count": 8, + "soft_pair_count": 6, + "serialized_chars": 17126, + "estimated_input_tokens": 4282, + "estimated_eval_tokens": 8820 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a set of model-specific refactors to standardized output tracing/capture_outputs across unrelated architectures, not duplicate fixes for the same underlying code path.", + "confidence": 0.86, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44073 is the clearest representative of the cluster: its title matches the migration theme, and the diff shows concrete output-tracing refactoring 
with capture_outputs/can_return_tuple plus model-specific hook mapping.", + "best_issue_reason": null, + "best_pr_reason": "PR 44073 is the best overall representative because it is clearly scoped, self-explanatory, and directly implements the standardized output-tracing migration in a concrete model file.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44024", + "right": "pull_request:44076", + "accept": false, + "reason": "Different models (FocalNet vs ImageGPT) and separate model-specific output-capture migrations; same umbrella pattern, but not the same concrete change." + }, + { + "left": "pull_request:44073", + "right": "pull_request:44074", + "accept": false, + "reason": "VisualBert and TextNet are different architectures with distinct implementation details; both refactor output tracing, but they are not the same code-path fix." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44073", + "accept": false, + "reason": "MPNet and VisualBert each add their own output-recording hooks and decorator usage; similar refactor theme, but no shared concrete bug or mergeable single PR." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44074", + "accept": false, + "reason": "MPNet vs TextNet are unrelated model implementations; the diffs are separate output-tracing refactors, not duplicates." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44010", + "accept": false, + "reason": "RegNet and SqueezeBert touch different model families and different output-handling code paths; same migration style, not the same change." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44129", + "accept": false, + "reason": "SqueezeBert and SpeechT5 are unrelated model code paths; despite both involving output tracing, they are distinct refactors and should not be merged as duplicates." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 18, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43998-13", + "nodes": [ + "pull_request:43998", + "pull_request:43999", + "pull_request:44000", + "pull_request:44001", + "pull_request:44002", + "pull_request:44003", + "pull_request:44004", + "pull_request:44025", + "pull_request:44026", + "pull_request:44027", + "pull_request:44028", + "pull_request:44029", + "pull_request:44030" + ], + "soft_pairs": [ + "pull_request:43998|pull_request:44003", + "pull_request:44001|pull_request:44003", + "pull_request:44002|pull_request:44003", + "pull_request:44000|pull_request:44003", + "pull_request:44003|pull_request:44004", + "pull_request:44000|pull_request:44004", + "pull_request:43999|pull_request:44001", + "pull_request:43999|pull_request:44002", + "pull_request:43999|pull_request:44025", + "pull_request:43999|pull_request:44026", + "pull_request:43999|pull_request:44027", + "pull_request:43998|pull_request:44002", + "pull_request:43998|pull_request:44000", + "pull_request:43998|pull_request:44001", + "pull_request:44026|pull_request:44029", + "pull_request:44029|pull_request:44030", + "pull_request:44025|pull_request:44026", + "pull_request:44025|pull_request:44027", + "pull_request:44025|pull_request:44028", + "pull_request:44026|pull_request:44027", + "pull_request:44026|pull_request:44028", + "pull_request:44027|pull_request:44028", + "pull_request:44001|pull_request:44002", + "pull_request:44000|pull_request:44001" + ], + "prepared_review_unit_hash": "6b42c5fd153c729b9730320dec6b8ac7cf0f10ebf71e14d4195ba269a94e2eee", + "component_budget": { + "node_count": 
13, + "item_count": 13, + "soft_pair_count": 42, + "serialized_chars": 40936, + "estimated_input_tokens": 10234, + "estimated_eval_tokens": 20724 + }, + "budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 24, + "serialized_chars": 32101, + "estimated_input_tokens": 8026, + "estimated_eval_tokens": 16308 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a set of similarly named but separate PRs that all refactor output tracing for different model families. They share a theme, but they are not the same underlying change and should not be deduplicated together.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": "No issue is present; as a representative PR, #44003 is the clearest anchor because it shows the common output-tracing refactor pattern across a related model family and has a broader, multi-file change than most others.", + "best_issue_reason": null, + "best_pr_reason": "#44003 is the best single PR representative for the cluster theme, but it is still only one model-family refactor rather than a superset of the others.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43998", + "right": "pull_request:44003", + "accept": false, + "reason": "Both are output-tracing refactors, but they touch different model code paths (`timm_backbone` vs `falcon_mamba`/`mamba`) and are not a single mergeable change." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44003", + "accept": false, + "reason": "Same broad refactor theme, but `univnet` and `mamba` are unrelated implementations with different forward signatures and helper usage." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44003", + "accept": false, + "reason": "`upernet` and `mamba` are different architectures; the similarity is only in the output-tracing refactor pattern, not the concrete bug/change." 
+ }, + { + "left": "pull_request:44000", + "right": "pull_request:44003", + "accept": false, + "reason": "These modify different model families (`vision_text_dual_encoder` vs `mamba`) and do not appear to fix the same code-path problem." + }, + { + "left": "pull_request:44003", + "right": "pull_request:44004", + "accept": false, + "reason": "`mamba`/`falcon_mamba` output tracing and `codegen` cache/output handling are distinct implementations, so this is not one combined change." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44004", + "accept": false, + "reason": "Both are refactors around model outputs, but they affect unrelated code paths (`vision_text_dual_encoder` vs `codegen`) and should stay separate." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44001", + "accept": false, + "reason": "Different model families (`mobilenet_v1` vs `univnet`) and different output-capture mechanics; not the same underlying fix." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44002", + "accept": false, + "reason": "`mobilenet_v1` and `upernet` are separate models with only a shared refactor style, not a shared concrete bug." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44025", + "accept": false, + "reason": "These are separate output-tracing updates in different models (`mobilenet_v1` vs `depth_anything`/`prompt_depth_anything`), not one PR-sized change." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44026", + "accept": false, + "reason": "`mobilenet_v1` and `vision_encoder_decoder` are unrelated code paths; the overlap is only in the refactoring theme." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44027", + "accept": false, + "reason": "`mobilenet_v1` and `speech_encoder_decoder` are distinct model implementations, so they are not duplicates." 
+ }, + { + "left": "pull_request:43998", + "right": "pull_request:44002", + "accept": false, + "reason": "`timm_backbone` and `upernet` both live in vision, but the patch targets different classes and forward/output plumbing, so they are not the same change." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44000", + "accept": false, + "reason": "`timm_backbone` and `vision_text_dual_encoder` have different model contracts and output paths; this is only a superficial similarity." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44001", + "accept": false, + "reason": "These are separate model refactors (`timm_backbone` vs `univnet`) with no shared concrete code-path bug." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44029", + "accept": false, + "reason": "`vision_encoder_decoder` and `rwkv` are unrelated architectures; the PRs do not look mergeable into one concrete fix." + }, + { + "left": "pull_request:44029", + "right": "pull_request:44030", + "accept": false, + "reason": "`rwkv` and `dpr` change different model internals and output signatures, so they are not the same underlying bug/change." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44026", + "accept": false, + "reason": "Both are output-tracing refactors, but they target different encoder/decoder and depth models; the code paths are separate." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44027", + "accept": false, + "reason": "`depth_anything` and `speech_encoder_decoder` are unrelated implementations, so this is not a duplicate change." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44028", + "accept": false, + "reason": "`depth_anything` and `superpoint` both update output handling, but they are distinct model families and not one underlying fix." 
+ }, + { + "left": "pull_request:44026", + "right": "pull_request:44027", + "accept": false, + "reason": "`vision_encoder_decoder` and `speech_encoder_decoder` are separate encoder-decoder stacks; same theme, different code paths." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44028", + "accept": false, + "reason": "`vision_encoder_decoder` and `superpoint` are unrelated models with different forward logic, so they should not be merged as duplicates." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44028", + "accept": false, + "reason": "`speech_encoder_decoder` and `superpoint` are different architectures with no shared concrete patch target." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44002", + "accept": false, + "reason": "`univnet` and `upernet` share only the refactor motif; their model internals and output contracts are different." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44001", + "accept": false, + "reason": "`vision_text_dual_encoder` and `univnet` are unrelated implementations and not the same bug fix." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 18, + "component_count": 24, + "review_unit_index": 2, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43998-13", + "nodes": [ + "pull_request:43998", + "pull_request:44000", + "pull_request:44001", + "pull_request:44002", + "pull_request:44004", + "pull_request:44025", + "pull_request:44026", + "pull_request:44027", + "pull_request:44028", + "pull_request:44029", + "pull_request:44030" + ], + "soft_pairs": [ + "pull_request:44000|pull_request:44002", + "pull_request:43998|pull_request:44004", + "pull_request:43998|pull_request:44029", + "pull_request:44002|pull_request:44029", + "pull_request:44001|pull_request:44004", + "pull_request:44002|pull_request:44004", + "pull_request:44025|pull_request:44029", + "pull_request:44025|pull_request:44030", + "pull_request:44026|pull_request:44030", + "pull_request:44027|pull_request:44029", + "pull_request:44027|pull_request:44030", + "pull_request:44028|pull_request:44029", + "pull_request:44028|pull_request:44030", + "pull_request:44001|pull_request:44025", + "pull_request:44001|pull_request:44026", + "pull_request:44002|pull_request:44025", + "pull_request:44000|pull_request:44025", + "pull_request:44000|pull_request:44026" + ], + "prepared_review_unit_hash": "d6e4727c76c17e9ac429ab7a9b4fed50a1888231983726aff88d4426ab4aba22", + "component_budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 42, + "serialized_chars": 40936, + "estimated_input_tokens": 10234, + "estimated_eval_tokens": 20724 + }, + "budget": { + "node_count": 11, + "item_count": 11, + "soft_pair_count": 18, + "serialized_chars": 26343, + "estimated_input_tokens": 6586, + 
"estimated_eval_tokens": 13428 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are separate model-specific output-tracing refactors with similar wording, but they do not appear to be duplicate fixes or mergeable into one PR. The shared theme is broad; the concrete code paths and files differ across models.", + "confidence": 0.92, + "canonical_issue_reason": null, + "canonical_pr_reason": "No single PR is a true duplicate target for the cluster: each PR updates a different model implementation and forward signature. If one must be chosen as representative, PR 44025 is the broadest example because it spans two related model files, but it is still not a canonical duplicate of the others.", + "best_issue_reason": null, + "best_pr_reason": "PR 44025 is the best representative PR in this set because it is a clean, self-contained output-tracing refactor affecting two closely related files. That said, the cluster is not a duplicate set, so this is only a representative choice, not a merge target.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44000", + "right": "pull_request:44002", + "accept": false, + "reason": "Same refactor theme, but different model families and different forward paths/files; not the same underlying change." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44004", + "accept": false, + "reason": "Both touch output-tracing logic, but they are unrelated model implementations with different APIs and code paths." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44029", + "accept": false, + "reason": "Different models and different implementation details; shared wording is too broad to treat as one change." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44029", + "accept": false, + "reason": "Different subsystems and files; this is a similar refactor pattern, not the same bug or fix." 
+ }, + { + "left": "pull_request:44001", + "right": "pull_request:44004", + "accept": false, + "reason": "Distinct models and forward signatures; they do not fix the same concrete code-path problem." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44004", + "accept": false, + "reason": "Only the high-level refactor theme matches; the actual code paths and model targets differ." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44029", + "accept": false, + "reason": "Unrelated models and files; no evidence they address the same underlying issue." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44030", + "accept": false, + "reason": "Different model implementations with different forward behavior; not plausibly one merged PR." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44030", + "accept": false, + "reason": "Different encoder-decoder and DPR code paths; this is not the same concrete fix." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44029", + "accept": false, + "reason": "Different model families and different output-tracing changes; too broad to merge as duplicates." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44030", + "accept": false, + "reason": "Shared refactor wording only; the affected code and behavior are different." + }, + { + "left": "pull_request:44028", + "right": "pull_request:44029", + "accept": false, + "reason": "Different model targets and implementation changes; not the same underlying change." + }, + { + "left": "pull_request:44028", + "right": "pull_request:44030", + "accept": false, + "reason": "These are separate model-specific refactors, not duplicate fixes." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44025", + "accept": false, + "reason": "Different architectures and files; the overlap is only the output-tracing refactor pattern." 
+ }, + { + "left": "pull_request:44001", + "right": "pull_request:44026", + "accept": false, + "reason": "No shared concrete bug or fix path; just similar API cleanup across different models." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44025", + "accept": false, + "reason": "Different model code paths and outputs; not the same change." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44025", + "accept": false, + "reason": "Different model families and different forward implementations; too dissimilar for duplicate merging." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44026", + "accept": false, + "reason": "Same style of refactor, but unrelated model targets and behavior." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 19, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44662-3", + "nodes": [ + "pull_request:44662", + "pull_request:45075", + "pull_request:45401" + ], + "soft_pairs": [ + "pull_request:44662|pull_request:45401", + "pull_request:44662|pull_request:45075", + "pull_request:45075|pull_request:45401" + ], + "prepared_review_unit_hash": "ab317a3058e004cc23c01a74311c78198d533eebb7988efc3933cd0e8336c847", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 11073, + "estimated_input_tokens": 2769, + "estimated_eval_tokens": 5794 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 11073, + "estimated_input_tokens": 2769, + "estimated_eval_tokens": 5794 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + 
"aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All three PRs are separate model-integrations with shared Hugging Face boilerplate, not duplicate bugfixes or the same concrete change. No pair should be merged as a duplicate.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #45075 is the strongest standalone representative: it is a complete model add, non-draft, and has the most review activity among the three, but it is not a duplicate of the others.", + "best_issue_reason": null, + "best_pr_reason": "PR #45075 is the best representative PR in this cluster because it is the most mature and fully-scoped integration, with broader implementation coverage and more review feedback.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44662", + "right": "pull_request:45401", + "accept": false, + "reason": "Different model integrations (PenguinVL vs Voxtral TTS). They share only generic auto-registration/docs scaffolding, not the same code-path or feature." + }, + { + "left": "pull_request:44662", + "right": "pull_request:45075", + "accept": false, + "reason": "Different model integrations (PenguinVL vs DeepSeek-OCR-2). Overlap is limited to standard model boilerplate and registry updates, so they are not the same change." + }, + { + "left": "pull_request:45075", + "right": "pull_request:45401", + "accept": false, + "reason": "Different models and different implementation targets (DeepSeek-OCR-2 vs Voxtral TTS). Shared framework files are expected for new model additions, but the concrete changes are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 20, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44979-2", + "nodes": [ + "pull_request:44979", + "pull_request:45363" + ], + "soft_pairs": [ + "pull_request:44979|pull_request:45363" + ], + "prepared_review_unit_hash": "247a8b461d08abde754a61f6383060cbf593f427f418fd093c4a7f6b5ac240a0", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4408, + "estimated_input_tokens": 1102, + "estimated_eval_tokens": 2460 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4408, + "estimated_input_tokens": 1102, + "estimated_eval_tokens": 2460 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These PRs are related to module/kernel fusion, but they are not the same concrete change: #44979 adds a new module fusion API and tests, while #45363 wires n-to-1 kernel fusion through `KernelConfig` and related integration points. 
They could land independently and are not a mergeable duplicate pair.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "#45363 is the more complete user-facing fusion feature, spanning `KernelConfig`, integration hooks, and an example; #44979 is a lower-level API foundation rather than the same end-to-end change.", + "best_issue_reason": null, + "best_pr_reason": "#45363 is the best representative of the cluster because it shows the concrete fusion workflow and broader integration surface, whereas #44979 is just the underlying API scaffold.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44979", + "right": "pull_request:45363", + "accept": false, + "reason": "Different layers of the fusion stack: one introduces the module fusion API, the other integrates n-to-1 kernel fusion via `KernelConfig`. Related, but not the same bug/fix or a single plausible merged PR." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 21, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45086-2", + "nodes": [ + "pull_request:45086", + "pull_request:45317" + ], + "soft_pairs": [ + "pull_request:45086|pull_request:45317" + ], + "prepared_review_unit_hash": "7d7c419fbad4f8ceeeae1af97c511ade8eb19d28975c021389c0640e201e0456", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4414, + "estimated_input_tokens": 1104, + "estimated_eval_tokens": 2464 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4414, + "estimated_input_tokens": 1104, + "estimated_eval_tokens": 2464 + }, + 
"overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The two pull requests are near-identical fixes for the same Mistral regex AttributeError in `tokenization_utils_tokenizers.py`, both targeting issue #45081. They patch the same code path and differ mainly in added tests and PR polish, so they should be clustered together.", + "confidence": 0.99, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #45317 is the better representative: it contains the same core fix as #45086 plus test coverage and more review activity, making it the more complete implementation of the same bug fix.", + "best_issue_reason": null, + "best_pr_reason": "PR #45317 is the strongest cluster member for the underlying change because it implements the same fix with additional validation and appears to be the more mature version of the patch.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45086", + "right": "pull_request:45317", + "accept": true, + "reason": "Same bug, same file, same issue target, and effectively the same code change fixing `_patch_mistral_regex`; the added tests in #45317 do not change the underlying fix." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 22, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45332-2", + "nodes": [ + "pull_request:45332", + "pull_request:45333" + ], + "soft_pairs": [ + "pull_request:45332|pull_request:45333" + ], + "prepared_review_unit_hash": "3e4e003158cf9f75f31d454f0830428b3e67187aa75e017ffc7d3f0ae0bc9724", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5398, + "estimated_input_tokens": 1350, + "estimated_eval_tokens": 2956 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5398, + "estimated_input_tokens": 1350, + "estimated_eval_tokens": 2956 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two related open PRs for heterogeneous model support, but they split different layers of the feature: one adds per-layer config infrastructure, the other adds broader modeling/cache/masking support. 
They share files and theme, but they do not look like the same concrete change.", + "confidence": 0.91, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45332 is the better canonical representative because it is the broader feature PR and appears to encompass the downstream modeling work, while 45333 is a narrower config-only slice.", + "best_issue_reason": null, + "best_pr_reason": "PR 45332 is the strongest single representative of the cluster: it includes the most complete implementation surface (config, cache, masking, modeling, tests) and is closer to the overall heterogeneous support effort.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45332", + "right": "pull_request:45333", + "accept": false, + "reason": "Related feature work, but not the same underlying change. 45333 adds per-layer heterogeneous config support; 45332 adds heterogeneous model/runtime support and only partially overlaps via shared config infrastructure. They could be separate PRs and do not look merge-equivalent." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 23, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45415-2", + "nodes": [ + "pull_request:45415", + "pull_request:45425" + ], + "soft_pairs": [ + "pull_request:45415|pull_request:45425" + ], + "prepared_review_unit_hash": "6e4909aaaea00bafd711cf7883af51cd69ffd6dcfad919d7d6544a45101d8fa7", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5304, + "estimated_input_tokens": 1326, + "estimated_eval_tokens": 2908 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5304, + "estimated_input_tokens": 1326, + "estimated_eval_tokens": 2908 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both items are typing-related pull requests, but they address different changes: PR 45415 is a broad repo-wide type-checking sweep plus pyproject ignore tuning, while PR 45425 adds specific typing protocols/aliases to support modeling_utils and PEFT integration. They are related in theme, not the same underlying change.", + "confidence": 0.92, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45425 is the more self-contained and coherent typing change, centered on `_typing.py` and specific downstream annotations/support. 
PR 45415 is a much broader type-checking sweep across many files plus lint config changes, so it is less clean as a canonical representative.", + "best_issue_reason": null, + "best_pr_reason": "PR 45425 best represents the cluster because it has a focused code change with a clear purpose, whereas PR 45415 is a large cross-cutting typing cleanup that is not the same concrete fix/change.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45415", + "right": "pull_request:45425", + "accept": false, + "reason": "They are not the same underlying bug or change. 45415 is a broad typing/configuration sweep across many files, while 45425 adds specific typing abstractions for modeling_utils/PEFT; they would not plausibly be merged into one PR as a single fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 24, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45470-2", + "nodes": [ + "pull_request:45470", + "pull_request:45487" + ], + "soft_pairs": [ + "pull_request:45470|pull_request:45487" + ], + "prepared_review_unit_hash": "143e61e685e46b05b7f94a899a20fadbe95580e2e71f5c82773eae430c6827eb", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4769, + "estimated_input_tokens": 1193, + "estimated_eval_tokens": 2642 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4769, + "estimated_input_tokens": 1193, + "estimated_eval_tokens": 2642 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two PRs are 
unrelated: one is a test skip for Sam3 Lite Text flash-attn dispatch behavior, while the other fixes model-parallel/device handling in AltCLIP/ChineseClip and related text models.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #45487 is the better canonical PR for the cluster because it contains the substantive code fix and a clear bug target (model parallel/device placement) across implementation files, whereas #45470 is only a test skip.", + "best_issue_reason": null, + "best_pr_reason": "PR #45487 is the strongest representative because it addresses a concrete runtime bug with actual model code changes; #45470 is a narrow test adjustment and not a comparable fix.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45470", + "right": "pull_request:45487", + "accept": false, + "reason": "Different underlying problems and code paths: #45470 skips a flash-attn test for Sam3 Lite Text, while #45487 fixes token-type/device handling for AltCLIP/ChineseClip model parallelism." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + } + ] +} diff --git a/snapshots/20260418T150536Z/analysis-runs/hybrid-gpt54mini-20260418t150720z/manifest.json b/snapshots/20260418T150536Z/analysis-runs/hybrid-gpt54mini-20260418t150720z/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..943b6a9f0eb880769d4887af761bb1d6e583c250 --- /dev/null +++ b/snapshots/20260418T150536Z/analysis-runs/hybrid-gpt54mini-20260418t150720z/manifest.json @@ -0,0 +1,14 @@ +{ + "analysis_id": "hybrid-gpt54mini-20260418t150720z", + "artifacts": { + "hybrid": "snapshots/20260418T150536Z/analysis-runs/hybrid-gpt54mini-20260418t150720z/analysis-report-hybrid.json", + "hybrid_reviews": "snapshots/20260418T150536Z/analysis-runs/hybrid-gpt54mini-20260418t150720z/analysis-report-hybrid.llm-reviews.json" + }, + "channel": "canonical", + "model": null, + "published_at": "2026-04-18T15:42:46Z", + "repo": "huggingface/transformers", + "schema_version": 1, + "snapshot_id": "20260418T150536Z", + "variant": "hybrid" +} diff --git a/snapshots/20260418T150536Z/manifest.json b/snapshots/20260418T150536Z/manifest.json index 8fff0074e390631a111e443f6f393e71c2c5431a..1d869db160086a175772e470fae651c1510f92d1 100644 --- a/snapshots/20260418T150536Z/manifest.json +++ b/snapshots/20260418T150536Z/manifest.json @@ -30,6 +30,24 @@ "timeline_events": 43 }, "extracted_at": "2026-04-18T15:05:36Z", + "published_analysis": { + "canonical_analysis_id": "hybrid-gpt54mini-20260418t150720z", + "runs": { + "hybrid-gpt54mini-20260418t150720z": { + "analysis_id": "hybrid-gpt54mini-20260418t150720z", + "artifacts": { + "hybrid": "snapshots/20260418T150536Z/analysis-runs/hybrid-gpt54mini-20260418t150720z/analysis-report-hybrid.json", + "hybrid_reviews": "snapshots/20260418T150536Z/analysis-runs/hybrid-gpt54mini-20260418t150720z/analysis-report-hybrid.llm-reviews.json" + 
}, + "channel": "canonical", + "manifest_path": "snapshots/20260418T150536Z/analysis-runs/hybrid-gpt54mini-20260418t150720z/manifest.json", + "model": null, + "published_at": "2026-04-18T15:42:46Z", + "variant": "hybrid" + } + }, + "schema_version": 1 + }, "repo": "huggingface/transformers", "snapshot_id": "20260418T150536Z", "watermark": {