llada-8b-instruct-8bit-gptq / quantize_config.json
{
  "bits": 8,
  "dataset": "c4",
  "group_size": 128,
  "damp_percent": 0.1,
  "desc_act": false,
  "sym": true,
  "true_sequential": true,
  "quant_method": "gptq",
  "modules_in_block_to_quantize": null,
  "checkpoint_format": "gptq",
  "meta": {
    "quantizer": [
      "optimum:1.26.1",
      "gptqmodel:2.2.0"
    ]
  }
}
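
The config above records the GPTQ settings produced by optimum 1.26.1 and gptqmodel 2.2.0: 8-bit symmetric weights, 128-column quantization groups, C4 calibration data, 0.1 Hessian dampening, and sequential per-layer quantization without activation-order reordering. As a minimal sketch (not part of this repo), the snippet below reads quantize_config.json and rebuilds the equivalent transformers GPTQConfig; the local file path is an assumption, and only the keys that GPTQConfig accepts are forwarded.

import json
from transformers import GPTQConfig

# Assumed path: the quantize_config.json from this repo, downloaded locally.
with open("quantize_config.json") as f:
    cfg = json.load(f)

# Map the JSON fields onto transformers' GPTQConfig arguments of the same name.
gptq_config = GPTQConfig(
    bits=cfg["bits"],                        # 8-bit weights
    dataset=cfg["dataset"],                  # "c4" calibration dataset
    group_size=cfg["group_size"],            # 128-column quantization groups
    damp_percent=cfg["damp_percent"],        # 0.1 Hessian dampening
    desc_act=cfg["desc_act"],                # activation-order reordering disabled
    sym=cfg["sym"],                          # symmetric quantization
    true_sequential=cfg["true_sequential"],  # quantize layers one at a time
)
print(gptq_config)

Such a GPTQConfig could be passed as quantization_config when re-quantizing a base checkpoint with optimum/gptqmodel; the already-quantized weights in this repo do not need to be re-quantized to be used.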