test_stage:
  obcq_modifiers:
    SmoothQuantModifier:
      smoothing_strength: 0.5
      mappings: [
        [["re:.*q_proj", "re:.*k_proj", "re:.*v_proj"], "re:.*input_layernorm"],
        [["re:.*gate_proj", "re:.*up_proj"], "re:.*post_attention_layernorm"]
      ]
    QuantizationModifier:
      ignore:
        # Module classes left unquantized
        - LlamaRotaryEmbedding
        - LlamaRMSNorm
        - SiLUActivation
        # Wrapped matmul ops left unquantized
        - QuantizableMatMul
        # Individual down_proj layers excluded from quantization
        - model.layers.31.mlp.down_proj
        - model.layers.28.mlp.down_proj
        - model.layers.29.mlp.down_proj
        - model.layers.30.mlp.down_proj
        - model.layers.27.mlp.down_proj
      post_oneshot_calibration: true
      scheme_overrides:
        Embedding:
          input_activations: null
          weights:
            num_bits: 8
            symmetric: false
    SparseGPTModifier:
      sparsity: 0.5
      block_size: 128
      sequential_update: true
      quantize: true
      percdamp: 0.01
      mask_structure: "0:0"  # "0:0" applies unstructured sparsity
      targets: ["re:model.layers.\\d*$"]
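
# Usage sketch (not part of the recipe): a recipe like this can be applied with
# SparseML's one-shot entry point. The model name, dataset, and argument names
# below are illustrative assumptions and depend on the installed SparseML version.
#
#   from sparseml.transformers import SparseAutoModelForCausalLM, oneshot
#
#   model = SparseAutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
#   oneshot(
#       model=model,
#       dataset="open_platypus",
#       recipe="recipe.yaml",          # this file
#       num_calibration_samples=512,
#       output_dir="./obcq_deployment",
#   )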