Update README.md
Browse files
README.md
CHANGED
@@ -126,6 +126,8 @@ model_id = "Qwen/Qwen3-8B"
126
127   from torchao.quantization import Int4WeightOnlyConfig
128   quant_config = Int4WeightOnlyConfig(group_size=128, int4_packing_format="tile_packed_to_4d", int4_choose_qparams_algorithm="hqq")
129   quantization_config = TorchAoConfig(quant_type=quant_config)
130   quantized_model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, quantization_config=quantization_config)
131   tokenizer = AutoTokenizer.from_pretrained(model_id)
|
126
127   from torchao.quantization import Int4WeightOnlyConfig
128   quant_config = Int4WeightOnlyConfig(group_size=128, int4_packing_format="tile_packed_to_4d", int4_choose_qparams_algorithm="hqq")
129 + # or for H100
130 + # quant_config = Int4WeightOnlyConfig(group_size=128)
131   quantization_config = TorchAoConfig(quant_type=quant_config)
132   quantized_model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, quantization_config=quantization_config)
133   tokenizer = AutoTokenizer.from_pretrained(model_id)