default_stage:
  default_modifiers:
    QuantizationModifier:
      targets: [Linear]
      ignore:
        - 're:.*lm_head'
        - 're:.*embed_tokens'
        - 're:.*layers[.]0[.].*'
        - 're:.*input_layernorm$'
        - 're:.*norm.*'
        - 're:.*shared_experts.*'
        - 're:.*block_sparse_moe[.]gate$'
        - 're:.*router.*'
        - 're:.*post_attention_layernorm$'
        - 're:.*self_attn.*'
      scheme: FP8_BLOCK
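
This reads as an llm-compressor style recipe: a QuantizationModifier applies the FP8_BLOCK scheme to every Linear module, except those matched by the 're:'-prefixed ignore patterns (lm_head, embeddings, the first decoder layer, layer norms, shared experts, MoE router/gate modules, and self-attention projections). Below is a minimal, hedged sketch of how such a recipe might be applied with llm-compressor's oneshot entrypoint; the model ID, recipe path, and output directory are placeholders, and exact import paths can vary between llm-compressor versions.

# Minimal sketch: applying a recipe like the one above with llm-compressor.
# MODEL_ID and OUTPUT_DIR are hypothetical placeholders, not from the recipe.
from transformers import AutoModelForCausalLM, AutoTokenizer
from llmcompressor import oneshot

MODEL_ID = "org/model-name"        # placeholder model id
OUTPUT_DIR = "model-FP8-BLOCK"     # placeholder output path

model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype="auto")
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

# The recipe is passed by path; FP8 weight/activation schemes such as
# FP8_BLOCK generally need no calibration dataset, so oneshot runs data-free.
oneshot(model=model, recipe="recipe.yaml")

# Save the quantized model (compressed-tensors format) and tokenizer.
model.save_pretrained(OUTPUT_DIR, save_compressed=True)
tokenizer.save_pretrained(OUTPUT_DIR)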