quant_stage:
  quant_modifiers:
    # Static int8 weight-and-activation (W8A8) quantization, skipping the
    # LM head and one sensitive down_proj layer.
    vLLMQuantizationModifier:
      ignore: [lm_head, model.layers.0.mlp.down_proj]
      config_groups:
        group_0:
          # Per-tensor symmetric int8 scheme for weights ...
          weights: {num_bits: 8, type: int, symmetric: true, strategy: tensor}
          # ... and for the input activations of every targeted Linear layer.
          input_activations: {num_bits: 8, type: int, symmetric: true, strategy: tensor}
          targets: [Linear]
    # sparsity: 0.0 disables pruning; SparseGPT runs purely as the one-shot
    # calibration/quantization algorithm for the scheme above.
    SparseGPTModifier: {sparsity: 0.0, quantize: true, sequential_update: false}
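
# For context, a recipe like this is typically applied one-shot through
# SparseML's transformers integration. Below is a minimal sketch, assuming
# SparseML's oneshot entry point; the model checkpoint, calibration dataset,
# sequence length, and sample count are illustrative placeholders, not part
# of this file.
#
#   from sparseml.transformers import SparseAutoModelForCausalLM, oneshot
#
#   # Load the model to be quantized (placeholder checkpoint).
#   model = SparseAutoModelForCausalLM.from_pretrained(
#       "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
#       device_map="auto",
#   )
#
#   # Run one-shot calibration and quantization using this recipe.
#   oneshot(
#       model=model,
#       dataset="open_platypus",        # assumed calibration dataset
#       recipe="recipe.yaml",           # this file
#       max_seq_length=512,
#       num_calibration_samples=512,
#       output_dir="./w8a8-quantized",
#   )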