{
  "version": 2,
  "weight_format": "mxtq",
  "profile": "JANGTQ",
  "source_model": {
    "name": "MiniMax-M2.7",
    "org": "MiniMaxAI",
    "architecture": "minimax_m2"
  },
  "mxtq_seed": 42,
  "mxtq_bits": {
    "attention": 8,
    "shared_expert": 8,
    "routed_expert": 2,
    "embed_tokens": 8,
    "lm_head": 8
  },
  "quantization": {
    "method": "affine+mxtq",
    "group_size": 64,
    "bits_default": 2
  },
  "capabilities": {
    "reasoning_parser": "qwen3",
    "tool_parser": "minimax",
    "think_in_template": true,
    "supports_tools": true,
    "supports_thinking": true,
    "family": "minimax_m2",
    "modality": "text",
    "cache_type": "kv"
  }
}