AEON-7 committed on
Commit
ed26986
·
verified ·
1 Parent(s): dc069cd

NVFP4 quantization via ModelOpt AWQ Full (block_size=16, 4096 calib samples x 4096 tokens)

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: Jiunsong/supergemma4-26b-abliterated-multimodal
3
+ tags:
4
+ - gemma4
5
+ - nvfp4
6
+ - modelopt
7
+ - awq
8
+ - quantized
9
+ - blackwell
10
+ library_name: transformers
11
+ pipeline_tag: image-text-to-text
12
+ license: gemma
13
+ ---
14
+
15
+ # SuperGemma4-26B-Abliterated-Multimodal — NVFP4 (ModelOpt AWQ Full)
16
+
17
+ NVFP4 quantization of [Jiunsong/supergemma4-26b-abliterated-multimodal](https://huggingface.co/Jiunsong/supergemma4-26b-abliterated-multimodal) optimized for NVIDIA Blackwell GPUs (DGX Spark, RTX Pro 6000, B200, GB200).
18
+
19
+ ## Quantization Details
20
+
21
+ | Parameter | Value |
22
+ |---|---|
23
+ | Tool | NVIDIA ModelOpt 0.42.0 |
24
+ | Method | NVFP4 AWQ Full (activation-aware weight quantization) |
25
+ | Weight dtype | NVFP4 (4-bit floating point, block size 16) |
26
+ | Calibration | 4096 samples × 4096 tokens from CNN/DailyMail, batch size 16 |
27
+ | Excluded modules | `vision_tower`, `embed_vision` (kept in BF16) |
28
+
29
+ ## Usage with vLLM (Blackwell)
30
+
31
+ ```bash
32
+ export VLLM_USE_FLASHINFER_MOE_FP4=1
33
+
34
+ vllm serve AEON-7/supergemma4-26b-abliterated-multimodal-nvfp4 \
35
+ --quantization modelopt \
36
+ --kv-cache-dtype fp8_e4m3 \
37
+ --calculate-kv-scales \
38
+ --enable-prefix-caching \
39
+ --enable-chunked-prefill \
40
+ --async-scheduling \
41
+ --gpu-memory-utilization 0.90 \
42
+ --max-model-len 262144 \
43
+ --max-num-seqs 6 \
44
+ --max-num-batched-tokens 16384 \
45
+ --trust-remote-code
46
+ ```
47
+
48
+ ## Hardware Requirements
49
+
50
+ - **Inference**: Blackwell GPU required (sm_100+)
51
+ - **Target deployment**: NVIDIA DGX Spark (128GB unified memory)
52
+ - **Model size**: ~49 GB on disk (`model.safetensors` per its LFS pointer; NVFP4-quantized weights plus vision/excluded modules kept in BF16)
config.json ADDED
@@ -0,0 +1,190 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "Gemma4ForConditionalGeneration"
4
+ ],
5
+ "audio_config": null,
6
+ "audio_token_id": 258881,
7
+ "boa_token_id": 256000,
8
+ "boi_token_id": 255999,
9
+ "dtype": "bfloat16",
10
+ "eoa_token_id": 258883,
11
+ "eoa_token_index": 258883,
12
+ "eoi_token_id": 258882,
13
+ "eos_token_id": [
14
+ 1,
15
+ 106,
16
+ 50
17
+ ],
18
+ "image_token_id": 258880,
19
+ "initializer_range": 0.02,
20
+ "model_type": "gemma4",
21
+ "text_config": {
22
+ "attention_bias": false,
23
+ "attention_dropout": 0.0,
24
+ "attention_k_eq_v": true,
25
+ "bos_token_id": 2,
26
+ "dtype": "bfloat16",
27
+ "enable_moe_block": true,
28
+ "eos_token_id": 1,
29
+ "final_logit_softcapping": 30.0,
30
+ "global_head_dim": 512,
31
+ "head_dim": 256,
32
+ "hidden_activation": "gelu_pytorch_tanh",
33
+ "hidden_size": 2816,
34
+ "hidden_size_per_layer_input": 0,
35
+ "initializer_range": 0.02,
36
+ "intermediate_size": 2112,
37
+ "layer_types": [
38
+ "sliding_attention",
39
+ "sliding_attention",
40
+ "sliding_attention",
41
+ "sliding_attention",
42
+ "sliding_attention",
43
+ "full_attention",
44
+ "sliding_attention",
45
+ "sliding_attention",
46
+ "sliding_attention",
47
+ "sliding_attention",
48
+ "sliding_attention",
49
+ "full_attention",
50
+ "sliding_attention",
51
+ "sliding_attention",
52
+ "sliding_attention",
53
+ "sliding_attention",
54
+ "sliding_attention",
55
+ "full_attention",
56
+ "sliding_attention",
57
+ "sliding_attention",
58
+ "sliding_attention",
59
+ "sliding_attention",
60
+ "sliding_attention",
61
+ "full_attention",
62
+ "sliding_attention",
63
+ "sliding_attention",
64
+ "sliding_attention",
65
+ "sliding_attention",
66
+ "sliding_attention",
67
+ "full_attention"
68
+ ],
69
+ "max_position_embeddings": 262144,
70
+ "model_type": "gemma4_text",
71
+ "moe_intermediate_size": 704,
72
+ "num_attention_heads": 16,
73
+ "num_experts": 128,
74
+ "num_global_key_value_heads": 2,
75
+ "num_hidden_layers": 30,
76
+ "num_key_value_heads": 8,
77
+ "num_kv_shared_layers": 0,
78
+ "pad_token_id": 0,
79
+ "rms_norm_eps": 1e-06,
80
+ "rope_parameters": {
81
+ "full_attention": {
82
+ "partial_rotary_factor": 0.25,
83
+ "rope_theta": 1000000.0,
84
+ "rope_type": "proportional"
85
+ },
86
+ "sliding_attention": {
87
+ "rope_theta": 10000.0,
88
+ "rope_type": "default"
89
+ }
90
+ },
91
+ "sliding_window": 1024,
92
+ "tie_word_embeddings": true,
93
+ "top_k_experts": 8,
94
+ "use_bidirectional_attention": "vision",
95
+ "use_cache": true,
96
+ "use_double_wide_mlp": false,
97
+ "vocab_size": 262144,
98
+ "vocab_size_per_layer_input": 262144
99
+ },
100
+ "tie_word_embeddings": true,
101
+ "transformers_version": "5.5.4",
102
+ "video_token_id": 258884,
103
+ "vision_config": {
104
+ "_name_or_path": "",
105
+ "architectures": null,
106
+ "attention_bias": false,
107
+ "attention_dropout": 0.0,
108
+ "chunk_size_feed_forward": 0,
109
+ "default_output_length": 280,
110
+ "dtype": "bfloat16",
111
+ "global_head_dim": 72,
112
+ "head_dim": 72,
113
+ "hidden_activation": "gelu_pytorch_tanh",
114
+ "hidden_size": 1152,
115
+ "id2label": {
116
+ "0": "LABEL_0",
117
+ "1": "LABEL_1"
118
+ },
119
+ "initializer_range": 0.02,
120
+ "intermediate_size": 4304,
121
+ "is_encoder_decoder": false,
122
+ "label2id": {
123
+ "LABEL_0": 0,
124
+ "LABEL_1": 1
125
+ },
126
+ "max_position_embeddings": 131072,
127
+ "model_type": "gemma4_vision",
128
+ "num_attention_heads": 16,
129
+ "num_hidden_layers": 27,
130
+ "num_key_value_heads": 16,
131
+ "output_attentions": false,
132
+ "output_hidden_states": false,
133
+ "patch_size": 16,
134
+ "pooling_kernel_size": 3,
135
+ "position_embedding_size": 10240,
136
+ "problem_type": null,
137
+ "return_dict": true,
138
+ "rms_norm_eps": 1e-06,
139
+ "rope_parameters": {
140
+ "rope_theta": 100.0,
141
+ "rope_type": "default"
142
+ },
143
+ "standardize": true,
144
+ "use_clipped_linears": false
145
+ },
146
+ "vision_soft_tokens_per_image": 280,
147
+ "quantization_config": {
148
+ "ignore": [
149
+ "lm_head",
150
+ "model.embed_vision*",
151
+ "model.language_model.layers.0.router*",
152
+ "model.language_model.layers.1.router*",
153
+ "model.language_model.layers.10.router*",
154
+ "model.language_model.layers.11.router*",
155
+ "model.language_model.layers.12.router*",
156
+ "model.language_model.layers.13.router*",
157
+ "model.language_model.layers.14.router*",
158
+ "model.language_model.layers.15.router*",
159
+ "model.language_model.layers.16.router*",
160
+ "model.language_model.layers.17.router*",
161
+ "model.language_model.layers.18.router*",
162
+ "model.language_model.layers.19.router*",
163
+ "model.language_model.layers.2.router*",
164
+ "model.language_model.layers.20.router*",
165
+ "model.language_model.layers.21.router*",
166
+ "model.language_model.layers.22.router*",
167
+ "model.language_model.layers.23.router*",
168
+ "model.language_model.layers.24.router*",
169
+ "model.language_model.layers.25.router*",
170
+ "model.language_model.layers.26.router*",
171
+ "model.language_model.layers.27.router*",
172
+ "model.language_model.layers.28.router*",
173
+ "model.language_model.layers.29.router*",
174
+ "model.language_model.layers.3.router*",
175
+ "model.language_model.layers.4.router*",
176
+ "model.language_model.layers.5.router*",
177
+ "model.language_model.layers.6.router*",
178
+ "model.language_model.layers.7.router*",
179
+ "model.language_model.layers.8.router*",
180
+ "model.language_model.layers.9.router*",
181
+ "model.vision_tower*"
182
+ ],
183
+ "quant_algo": "NVFP4_AWQ",
184
+ "producer": {
185
+ "name": "modelopt",
186
+ "version": "0.42.0"
187
+ },
188
+ "quant_method": "modelopt"
189
+ }
190
+ }
generation_config.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 2,
3
+ "do_sample": true,
4
+ "eos_token_id": [
5
+ 1,
6
+ 106,
7
+ 50
8
+ ],
9
+ "pad_token_id": 0,
10
+ "temperature": 1.0,
11
+ "top_k": 64,
12
+ "top_p": 0.95,
13
+ "transformers_version": "5.5.4"
14
+ }
hf_quant_config.json ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "producer": {
3
+ "name": "modelopt",
4
+ "version": "0.42.0"
5
+ },
6
+ "quantization": {
7
+ "quant_algo": "NVFP4_AWQ",
8
+ "kv_cache_quant_algo": null,
9
+ "group_size": 16,
10
+ "has_zero_point": false,
11
+ "pre_quant_scale": true,
12
+ "exclude_modules": [
13
+ "lm_head",
14
+ "model.embed_vision*",
15
+ "model.language_model.layers.0.router*",
16
+ "model.language_model.layers.1.router*",
17
+ "model.language_model.layers.10.router*",
18
+ "model.language_model.layers.11.router*",
19
+ "model.language_model.layers.12.router*",
20
+ "model.language_model.layers.13.router*",
21
+ "model.language_model.layers.14.router*",
22
+ "model.language_model.layers.15.router*",
23
+ "model.language_model.layers.16.router*",
24
+ "model.language_model.layers.17.router*",
25
+ "model.language_model.layers.18.router*",
26
+ "model.language_model.layers.19.router*",
27
+ "model.language_model.layers.2.router*",
28
+ "model.language_model.layers.20.router*",
29
+ "model.language_model.layers.21.router*",
30
+ "model.language_model.layers.22.router*",
31
+ "model.language_model.layers.23.router*",
32
+ "model.language_model.layers.24.router*",
33
+ "model.language_model.layers.25.router*",
34
+ "model.language_model.layers.26.router*",
35
+ "model.language_model.layers.27.router*",
36
+ "model.language_model.layers.28.router*",
37
+ "model.language_model.layers.29.router*",
38
+ "model.language_model.layers.3.router*",
39
+ "model.language_model.layers.4.router*",
40
+ "model.language_model.layers.5.router*",
41
+ "model.language_model.layers.6.router*",
42
+ "model.language_model.layers.7.router*",
43
+ "model.language_model.layers.8.router*",
44
+ "model.language_model.layers.9.router*",
45
+ "model.vision_tower*"
46
+ ]
47
+ }
48
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:efcae77013d20e079cfb34525b4b2ffe3edee502cdb54862933aace4df57b402
3
+ size 49247181380
processor_config.json ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "audio_seq_length": 750,
3
+ "image_processor": {
4
+ "do_convert_rgb": true,
5
+ "do_normalize": false,
6
+ "do_rescale": true,
7
+ "do_resize": true,
8
+ "image_mean": [
9
+ 0.0,
10
+ 0.0,
11
+ 0.0
12
+ ],
13
+ "image_processor_type": "Gemma4ImageProcessor",
14
+ "image_seq_length": 280,
15
+ "image_std": [
16
+ 1.0,
17
+ 1.0,
18
+ 1.0
19
+ ],
20
+ "max_soft_tokens": 280,
21
+ "patch_size": 16,
22
+ "pooling_kernel_size": 3,
23
+ "resample": 3,
24
+ "rescale_factor": 0.00392156862745098,
25
+ "size": {
26
+ "height": 224,
27
+ "width": 224
28
+ }
29
+ },
30
+ "image_seq_length": 280,
31
+ "processor_class": "Gemma4Processor",
32
+ "feature_extractor": {
33
+ "feature_extractor_type": "Gemma4AudioFeatureExtractor",
34
+ "sampling_rate": 16000,
35
+ "num_mel_filters": 128,
36
+ "fft_length": 512,
37
+ "hop_length": 160,
38
+ "chunk_duration": 8.0,
39
+ "overlap_duration": 1.0
40
+ },
41
+ "audio_ms_per_token": 40
42
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cc8d3a0ce36466ccc1278bf987df5f71db1719b9ca6b4118264f45cb627bfe0f
3
+ size 32169626
tokenizer_config.json ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "audio_token": "<|audio|>",
3
+ "backend": "tokenizers",
4
+ "boa_token": "<|audio>",
5
+ "boi_token": "<|image>",
6
+ "bos_token": "<bos>",
7
+ "eoa_token": "<audio|>",
8
+ "eoc_token": "<channel|>",
9
+ "eoi_token": "<image|>",
10
+ "eos_token": "<eos>",
11
+ "eot_token": "<turn|>",
12
+ "escape_token": "<|\"|>",
13
+ "etc_token": "<tool_call|>",
14
+ "etd_token": "<tool|>",
15
+ "etr_token": "<tool_response|>",
16
+ "extra_special_tokens": [
17
+ "<|video|>"
18
+ ],
19
+ "image_token": "<|image|>",
20
+ "is_local": true,
21
+ "mask_token": "<mask>",
22
+ "model_max_length": 1000000000000000019884624838656,
23
+ "model_specific_special_tokens": {
24
+ "audio_token": "<|audio|>",
25
+ "boa_token": "<|audio>",
26
+ "boi_token": "<|image>",
27
+ "eoa_token": "<audio|>",
28
+ "eoc_token": "<channel|>",
29
+ "eoi_token": "<image|>",
30
+ "eot_token": "<turn|>",
31
+ "escape_token": "<|\"|>",
32
+ "etc_token": "<tool_call|>",
33
+ "etd_token": "<tool|>",
34
+ "etr_token": "<tool_response|>",
35
+ "image_token": "<|image|>",
36
+ "soc_token": "<|channel>",
37
+ "sot_token": "<|turn>",
38
+ "stc_token": "<|tool_call>",
39
+ "std_token": "<|tool>",
40
+ "str_token": "<|tool_response>",
41
+ "think_token": "<|think|>"
42
+ },
43
+ "pad_token": "<pad>",
44
+ "padding_side": "left",
45
+ "processor_class": "Gemma4Processor",
46
+ "response_schema": {
47
+ "properties": {
48
+ "content": {
49
+ "type": "string"
50
+ },
51
+ "role": {
52
+ "const": "assistant"
53
+ },
54
+ "thinking": {
55
+ "type": "string"
56
+ },
57
+ "tool_calls": {
58
+ "items": {
59
+ "properties": {
60
+ "function": {
61
+ "properties": {
62
+ "arguments": {
63
+ "additionalProperties": {},
64
+ "type": "object",
65
+ "x-parser": "gemma4-tool-call"
66
+ },
67
+ "name": {
68
+ "type": "string"
69
+ }
70
+ },
71
+ "type": "object",
72
+ "x-regex": "call\\:(?P<name>\\w+)(?P<arguments>\\{.*\\})"
73
+ },
74
+ "type": {
75
+ "const": "function"
76
+ }
77
+ },
78
+ "type": "object"
79
+ },
80
+ "type": "array",
81
+ "x-regex-iterator": "<\\|tool_call>(.*?)<tool_call\\|>"
82
+ }
83
+ },
84
+ "type": "object",
85
+ "x-regex": "(\\<\\|channel\\>thought\\n(?P<thinking>.*?)\\<channel\\|\\>)?(?P<content>(?:(?!\\<\\|tool_call\\>)(?!\\<turn\\|\\>).)+)?(?P<tool_calls>\\<\\|tool_call\\>.*\\<tool_call\\|\\>)?(?:\\<turn\\|\\>)?"
86
+ },
87
+ "soc_token": "<|channel>",
88
+ "sot_token": "<|turn>",
89
+ "stc_token": "<|tool_call>",
90
+ "std_token": "<|tool>",
91
+ "str_token": "<|tool_response>",
92
+ "think_token": "<|think|>",
93
+ "tokenizer_class": "GemmaTokenizer",
94
+ "unk_token": "<unk>",
95
+ "chat_template": "{%- macro format_parameters(properties, required) -%}\n {%- set standard_keys = ['description', 'type', 'properties', 'required', 'nullable'] -%}\n {%- set ns = namespace(found_first=false) -%}\n {%- for key, value in properties | dictsort -%}\n {%- set add_comma = false -%}\n {%- if key not in standard_keys -%}\n {%- if ns.found_first %},{% endif -%}\n {%- set ns.found_first = true -%}\n {{ key }}:{\n {%- if value['description'] -%}\n description:<|\"|>{{ value['description'] }}<|\"|>\n {%- set add_comma = true -%}\n {%- endif -%}\n {%- if value['type'] | upper == 'STRING' -%}\n {%- if value['enum'] -%}\n {%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}\n enum:{{ format_argument(value['enum']) }}\n {%- endif -%}\n {%- elif value['type'] | upper == 'ARRAY' -%}\n {%- if value['items'] is mapping and value['items'] -%}\n {%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}\n items:{\n {%- set ns_items = namespace(found_first=false) -%}\n {%- for item_key, item_value in value['items'] | dictsort -%}\n {%- if item_value is not none -%}\n {%- if ns_items.found_first %},{% endif -%}\n {%- set ns_items.found_first = true -%}\n {%- if item_key == 'properties' -%}\n properties:{\n {%- if item_value is mapping -%}\n {{- format_parameters(item_value, value['items']['required'] | default([])) -}}\n {%- endif -%}\n }\n {%- elif item_key == 'required' -%}\n required:[\n {%- for req_item in item_value -%}\n <|\"|>{{- req_item -}}<|\"|>\n {%- if not loop.last %},{% endif -%}\n {%- endfor -%}\n ]\n {%- elif item_key == 'type' -%}\n {%- if item_value is string -%}\n type:{{ format_argument(item_value | upper) }}\n {%- else -%}\n type:{{ format_argument(item_value | map('upper') | list) }}\n {%- endif -%}\n {%- else -%}\n {{ item_key }}:{{ format_argument(item_value) }}\n {%- endif -%}\n {%- endif -%}\n {%- endfor -%}\n }\n {%- endif -%}\n {%- endif -%}\n {%- if value['nullable'] %}\n {%- if add_comma %},{%- else -%} 
{%- set add_comma = true -%} {% endif -%}\n nullable:true\n {%- endif -%}\n {%- if value['type'] | upper == 'OBJECT' -%}\n {%- if value['properties'] is defined and value['properties'] is mapping -%}\n {%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}\n properties:{\n {{- format_parameters(value['properties'], value['required'] | default([])) -}}\n }\n {%- elif value is mapping -%}\n {%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}\n properties:{\n {{- format_parameters(value, value['required'] | default([])) -}}\n }\n {%- endif -%}\n {%- if value['required'] -%}\n {%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}\n required:[\n {%- for item in value['required'] | default([]) -%}\n <|\"|>{{- item -}}<|\"|>\n {%- if not loop.last %},{% endif -%}\n {%- endfor -%}\n ]\n {%- endif -%}\n {%- endif -%}\n {%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}\n type:<|\"|>{{ value['type'] | upper }}<|\"|>}\n {%- endif -%}\n {%- endfor -%}\n{%- endmacro -%}\n{%- macro format_function_declaration(tool_data) -%}\n declaration:{{- tool_data['function']['name'] -}}{description:<|\"|>{{- tool_data['function']['description'] -}}<|\"|>\n {%- set params = tool_data['function']['parameters'] -%}\n {%- if params -%}\n ,parameters:{\n {%- if params['properties'] -%}\n properties:{ {{- format_parameters(params['properties'], params['required']) -}} },\n {%- endif -%}\n {%- if params['required'] -%}\n required:[\n {%- for item in params['required'] -%}\n <|\"|>{{- item -}}<|\"|>\n {{- ',' if not loop.last -}}\n {%- endfor -%}\n ],\n {%- endif -%}\n {%- if params['type'] -%}\n type:<|\"|>{{- params['type'] | upper -}}<|\"|>}\n {%- endif -%}\n {%- endif -%}\n {%- if 'response' in tool_data['function'] -%}\n {%- set response_declaration = tool_data['function']['response'] -%}\n ,response:{\n {%- if response_declaration['description'] -%}\n description:<|\"|>{{- 
response_declaration['description'] -}}<|\"|>,\n {%- endif -%}\n {%- if response_declaration['type'] | upper == 'OBJECT' -%}\n type:<|\"|>{{- response_declaration['type'] | upper -}}<|\"|>}\n {%- endif -%}\n {%- endif -%}\n }\n{%- endmacro -%}\n{%- macro format_argument(argument, escape_keys=True) -%}\n {%- if argument is string -%}\n {{- '<|\"|>' + argument + '<|\"|>' -}}\n {%- elif argument is boolean -%}\n {{- 'true' if argument else 'false' -}}\n {%- elif argument is mapping -%}\n {{- '{' -}}\n {%- set ns = namespace(found_first=false) -%}\n {%- for key, value in argument | dictsort -%}\n {%- if ns.found_first %},{% endif -%}\n {%- set ns.found_first = true -%}\n {%- if escape_keys -%}\n {{- '<|\"|>' + key + '<|\"|>' -}}\n {%- else -%}\n {{- key -}}\n {%- endif -%}\n :{{- format_argument(value, escape_keys=escape_keys) -}}\n {%- endfor -%}\n {{- '}' -}}\n {%- elif argument is sequence -%}\n {{- '[' -}}\n {%- for item in argument -%}\n {{- format_argument(item, escape_keys=escape_keys) -}}\n {%- if not loop.last %},{% endif -%}\n {%- endfor -%}\n {{- ']' -}}\n {%- else -%}\n {{- argument -}}\n {%- endif -%}\n{%- endmacro -%}\n{%- macro strip_thinking(text) -%}\n {%- set ns = namespace(result='') -%}\n {%- for part in text.split('<channel|>') -%}\n {%- if '<|channel>' in part -%}\n {%- set ns.result = ns.result + part.split('<|channel>')[0] -%}\n {%- else -%}\n {%- set ns.result = ns.result + part -%}\n {%- endif -%}\n {%- endfor -%}\n {{- ns.result | trim -}}\n{%- endmacro -%}\n\n{%- macro format_tool_response_block(tool_name, response) -%}\n {{- '<|tool_response>' -}}\n {%- if response is mapping -%}\n {{- 'response:' + tool_name + '{' -}}\n {%- for key, value in response | dictsort -%}\n {{- key -}}:{{- format_argument(value, escape_keys=False) -}}\n {%- if not loop.last %},{% endif -%}\n {%- endfor -%}\n {{- '}' -}}\n {%- else -%}\n {{- 'response:' + tool_name + '{value:' + format_argument(response, escape_keys=False) + '}' -}}\n {%- endif -%}\n {{- 
'<tool_response|>' -}}\n{%- endmacro -%}\n\n{%- set ns = namespace(prev_message_type=None) -%}\n{%- set loop_messages = messages -%}\n{{- bos_token -}}\n{#- Handle System/Tool Definitions Block -#}\n{%- if (enable_thinking is defined and enable_thinking) or tools or messages[0]['role'] in ['system', 'developer'] -%}\n {{- '<|turn>system\\n' -}}\n\n {#- Inject Thinking token at the very top of the FIRST system turn -#}\n {%- if enable_thinking is defined and enable_thinking -%}\n {{- '<|think|>\\n' -}}\n {%- set ns.prev_message_type = 'think' -%}\n {%- endif -%}\n\n {%- if messages[0]['role'] in ['system', 'developer'] -%}\n {{- messages[0]['content'] | trim -}}\n {%- set loop_messages = messages[1:] -%}\n {%- endif -%}\n\n {%- if tools -%}\n {%- for tool in tools %}\n {{- '<|tool>' -}}\n {{- format_function_declaration(tool) | trim -}}\n {{- '<tool|>' -}}\n {%- endfor %}\n {%- set ns.prev_message_type = 'tool' -%}\n {%- endif -%}\n\n {{- '<turn|>\\n' -}}\n{%- endif %}\n\n{#- Pre-scan: find last user message index for reasoning guard -#}\n{%- set ns_turn = namespace(last_user_idx=-1) -%}\n{%- for i in range(loop_messages | length) -%}\n {%- if loop_messages[i]['role'] == 'user' -%}\n {%- set ns_turn.last_user_idx = i -%}\n {%- endif -%}\n{%- endfor -%}\n\n{#- Loop through messages -#}\n{%- for message in loop_messages -%}\n {%- if message['role'] != 'tool' -%}\n {%- set ns.prev_message_type = None -%}\n {%- set role = 'model' if message['role'] == 'assistant' else message['role'] -%}\n {#- Detect continuation: suppress duplicate <|turn>model when previous non-tool message was also assistant -#}\n {%- set prev_nt = namespace(role=None, found=false) -%}\n {%- if loop.index0 > 0 -%}\n {%- for j in range(loop.index0 - 1, -1, -1) -%}\n {%- if not prev_nt.found -%}\n {%- if loop_messages[j]['role'] != 'tool' -%}\n {%- set prev_nt.role = loop_messages[j]['role'] -%}\n {%- set prev_nt.found = true -%}\n {%- endif -%}\n {%- endif -%}\n {%- endfor -%}\n {%- endif -%}\n {%- 
set continue_same_model_turn = (role == 'model' and prev_nt.role == 'assistant') -%}\n {%- if not continue_same_model_turn -%}\n {{- '<|turn>' + role + '\\n' }}\n {%- endif -%}\n\n {#- Render reasoning/reasoning_content as thinking channel -#}\n {%- set thinking_text = message.get('reasoning') or message.get('reasoning_content') -%}\n {%- if thinking_text and loop.index0 > ns_turn.last_user_idx and message.get('tool_calls') -%}\n {{- '<|channel>thought\\n' + thinking_text + '\\n<channel|>' -}}\n {%- endif -%}\n\n {%- if message['tool_calls'] -%}\n {%- for tool_call in message['tool_calls'] -%}\n {%- set function = tool_call['function'] -%}\n {{- '<|tool_call>call:' + function['name'] + '{' -}}\n {%- if function['arguments'] is mapping -%}\n {%- set ns_args = namespace(found_first=false) -%}\n {%- for key, value in function['arguments'] | dictsort -%}\n {%- if ns_args.found_first %},{% endif -%}\n {%- set ns_args.found_first = true -%}\n {{- key -}}:{{- format_argument(value, escape_keys=False) -}}\n {%- endfor -%}\n {%- elif function['arguments'] is string -%}\n {{- function['arguments'] -}}\n {%- endif -%}\n {{- '}<tool_call|>' -}}\n {%- endfor -%}\n {%- set ns.prev_message_type = 'tool_call' -%}\n {%- endif -%}\n\n {%- set ns_tr_out = namespace(flag=false) -%}\n {%- if message.get('tool_responses') -%}\n {#- Legacy: tool_responses embedded on the assistant message (Google/Gemma native) -#}\n {%- for tool_response in message['tool_responses'] -%}\n {{- format_tool_response_block(tool_response['name'] | default('unknown'), tool_response['response']) -}}\n {%- set ns_tr_out.flag = true -%}\n {%- set ns.prev_message_type = 'tool_response' -%}\n {%- endfor -%}\n {%- elif message.get('tool_calls') -%}\n {#- OpenAI Chat Completions: forward-scan consecutive role:tool messages -#}\n {%- set ns_tool_scan = namespace(stopped=false) -%}\n {%- for k in range(loop.index0 + 1, loop_messages | length) -%}\n {%- if ns_tool_scan.stopped -%}\n {%- elif loop_messages[k]['role'] != 
'tool' -%}\n {%- set ns_tool_scan.stopped = true -%}\n {%- else -%}\n {%- set follow = loop_messages[k] -%}\n {#- Resolve tool_call_id to function name -#}\n {%- set ns_tname = namespace(name=follow.get('name') | default('unknown')) -%}\n {%- for tc in message['tool_calls'] -%}\n {%- if tc.get('id') == follow.get('tool_call_id') -%}\n {%- set ns_tname.name = tc['function']['name'] -%}\n {%- endif -%}\n {%- endfor -%}\n {#- Handle content as string or content-parts array -#}\n {%- set tool_body = follow.get('content') -%}\n {%- if tool_body is string -%}\n {{- format_tool_response_block(ns_tname.name, tool_body) -}}\n {%- elif tool_body is sequence and tool_body is not string -%}\n {%- set ns_txt = namespace(s='') -%}\n {%- for part in tool_body -%}\n {%- if part.get('type') == 'text' -%}\n {%- set ns_txt.s = ns_txt.s + (part.get('text') | default('')) -%}\n {%- endif -%}\n {%- endfor -%}\n {{- format_tool_response_block(ns_tname.name, ns_txt.s) -}}\n {%- else -%}\n {{- format_tool_response_block(ns_tname.name, tool_body) -}}\n {%- endif -%}\n {%- set ns_tr_out.flag = true -%}\n {%- set ns.prev_message_type = 'tool_response' -%}\n {%- endif -%}\n {%- endfor -%}\n {%- endif -%}\n\n {%- if message['content'] is string -%}\n {%- if role == 'model' -%}\n {{- strip_thinking(message['content']) -}}\n {%- else -%}\n {{- message['content'] | trim -}}\n {%- endif -%}\n {%- elif message['content'] is sequence -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'text' -%}\n {%- if role == 'model' -%}\n {{- strip_thinking(item['text']) -}}\n {%- else -%}\n {{- item['text'] | trim -}}\n {%- endif -%}\n {%- elif item['type'] == 'image' -%}\n {{- '<|image|>' -}}\n {%- set ns.prev_message_type = 'image' -%}\n {%- elif item['type'] == 'audio' -%}\n {{- '<|audio|>' -}}\n {%- set ns.prev_message_type = 'audio' -%}\n {%- elif item['type'] == 'video' -%}\n {{- '<|video|>' -}}\n {%- set ns.prev_message_type = 'video' -%}\n {%- endif -%}\n {%- endfor -%}\n {%- endif 
-%}\n\n {%- if ns.prev_message_type == 'tool_call' and not ns_tr_out.flag -%}\n {{- '<|tool_response>' -}}\n {%- elif not (ns_tr_out.flag and not message.get('content')) -%}\n {{- '<turn|>\\n' -}}\n {%- endif -%}\n {%- endif -%}\n{%- endfor -%}\n\n{%- if add_generation_prompt -%}\n {%- if ns.prev_message_type != 'tool_response' and ns.prev_message_type != 'tool_call' -%}\n {{- '<|turn>model\\n' -}}\n {%- if not enable_thinking | default(false) -%}\n {{- '<|channel>thought\\n<channel|>' -}}\n {%- endif -%}\n {%- endif -%}\n{%- endif -%}\n"
96
+ }